""" ================================================== Explicit feature map approximation for RBF kernels ================================================== .. currentmodule:: sklearn.kernel_approximation An example shows how to use :class:`RBFSampler` and :class:`Nystrom` to appoximate the feature map of an RBF kernel for classification with an SVM on the digits dataset. Results using a linear SVM in the original space, a linear SVM using the approximate mappings and using a kernelized SVM are compared. Timings and accuracy for varying amounts of Monte Carlo samplings (in the case of :class:`RBFSampler`, which uses random Fourier features) and different sized subsets of the training set (for :class:`Nystroem)` for the approximate mapping are shown. Please not that the dataset here is not large enough to show the benefits of kernel approximation, as the exact SVM is still reasonably fast. Sampling more dimensions clearly leads to better classification results, but comes at a greater cost. This means there is a tradeoff between runtime and accuracy, given by the parameter n_components. Note that solving the Linear SVM and also the approximate kernel SVM could be greatly accelerated by using stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`. This is not easily possible for the case of the kernelized SVM. The second plot visualized the decision surfaces of the RBF kernel SVM and the linear SVM with approximate kernel maps. The plot shows decision surfaces of the classifiers projected onto the first two principal components of the data. This visualization should be taken with a grain of salt since it is just an interesting slice through the decision surface in 64 dimensions. In particular note that a datapoint (represented as a dot) does not necessarily be classified into the region it is lying in, since it will not lie on the plane that the first two principal components span. The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail in :ref:`kernel_approximation`. """ print(__doc__) # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # Andreas Mueller <amueller@ais.uni-bonn.de> # License: Simplified BSD # Standard scientific Python imports import pylab as pl import numpy as np from time import time # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, pipeline from sklearn.kernel_approximation import (RBFSampler, Nystroem) from sklearn.decomposition import PCA # The digits dataset digits = datasets.load_digits(n_class=9) # To apply an classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.data) data = digits.data / 16. 
data -= data.mean(axis=0) # We learn the digits on the first half of the digits data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2] # Now predict the value of the digit on the second half: data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:] #data_test = scaler.transform(data_test) # Create a classifier: a support vector classifier kernel_svm = svm.SVC(gamma=.2) linear_svm = svm.LinearSVC() # create pipeline from kernel approximation # and linear svm feature_map_fourier = RBFSampler(gamma=.2, random_state=1) feature_map_nystroem = Nystroem(gamma=.2, random_state=1) fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier), ("svm", svm.LinearSVC())]) nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem), ("svm", svm.LinearSVC())]) # fit and predict using linear and kernel svm: kernel_svm_time = time() kernel_svm.fit(data_train, targets_train) kernel_svm_score = kernel_svm.score(data_test, targets_test) kernel_svm_time = time() - kernel_svm_time linear_svm_time = time() linear_svm.fit(data_train, targets_train) linear_svm_score = linear_svm.score(data_test, targets_test) linear_svm_time = time() - linear_svm_time sample_sizes = 30 * np.arange(1, 10) fourier_scores = [] nystroem_scores = [] fourier_times = [] nystroem_times = [] for D in sample_sizes: fourier_approx_svm.set_params(feature_map__n_components=D) nystroem_approx_svm.set_params(feature_map__n_components=D) start = time() nystroem_approx_svm.fit(data_train, targets_train) nystroem_times.append(time() - start) start = time() fourier_approx_svm.fit(data_train, targets_train) fourier_times.append(time() - start) fourier_score = fourier_approx_svm.score(data_test, targets_test) nystroem_score = nystroem_approx_svm.score(data_test, targets_test) nystroem_scores.append(nystroem_score) fourier_scores.append(fourier_score) # plot the results: pl.figure(figsize=(8, 8)) accuracy = pl.subplot(211) # second y axis for timeings timescale = pl.subplot(212) accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel") timescale.plot(sample_sizes, nystroem_times, '--', label='Nystroem approx. kernel') accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel") timescale.plot(sample_sizes, fourier_times, '--', label='Fourier approx. 
kernel') # horizontal lines for exact rbf and linear kernels: accuracy.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_score, linear_svm_score], label="linear svm") timescale.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_time, linear_svm_time], '--', label='linear svm') accuracy.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_score, kernel_svm_score], label="rbf svm") timescale.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_time, kernel_svm_time], '--', label='rbf svm') # vertical line for dataset dimensionality = 64 accuracy.plot([64, 64], [0.7, 1], label="n_features") # legends and labels accuracy.set_title("Classification accuracy") timescale.set_title("Training times") accuracy.set_xlim(sample_sizes[0], sample_sizes[-1]) accuracy.set_xticks(()) accuracy.set_ylim(np.min(fourier_scores), 1) timescale.set_xlabel("Sampling steps = transformed feature dimension") accuracy.set_ylabel("Classification accuracy") timescale.set_ylabel("Training time in seconds") accuracy.legend(loc='best') timescale.legend(loc='best') # visualize the decision surface, projected down to the first # two principal components of the dataset pca = PCA(n_components=8).fit(data_train) X = pca.transform(data_train) # Gemerate grid along first two principal components multiples = np.arange(-2, 2, 0.1) # steps along first component first = multiples[:, np.newaxis] * pca.components_[0, :] # steps along second component second = multiples[:, np.newaxis] * pca.components_[1, :] # combine grid = first[np.newaxis, :, :] + second[:, np.newaxis, :] flat_grid = grid.reshape(-1, data.shape[1]) # title for the plots titles = ['SVC with rbf kernel', 'SVC (linear kernel)\n with Fourier rbf feature map\n' 'n_components=100', 'SVC (linear kernel)\n with Nystroem rbf feature map\n' 'n_components=100'] pl.tight_layout() pl.figure(figsize=(12, 5)) # predict and plot for i, clf in enumerate((kernel_svm, nystroem_approx_svm, fourier_approx_svm)): # Plot the decision boundary. For that, we will asign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. pl.subplot(1, 3, i + 1) Z = clf.predict(flat_grid) # Put the result into a color plot Z = Z.reshape(grid.shape[:-1]) pl.contourf(multiples, multiples, Z, cmap=pl.cm.Paired) pl.axis('off') # Plot also the training points pl.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=pl.cm.Paired) pl.title(titles[i]) pl.tight_layout() pl.show()
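# ----------------------------------------------------------------------------
# Hedged sketch (not part of the original example): the docstring above notes
# that the linear and approximate-kernel SVMs could be accelerated with
# stochastic gradient descent via sklearn.linear_model.SGDClassifier. One
# plausible variant is to swap the LinearSVC step of the pipeline for an
# SGDClassifier with a hinge loss; the gamma and n_components values below
# are illustrative assumptions, not tuned settings.
from sklearn.linear_model import SGDClassifier

sgd_fourier_svm = pipeline.Pipeline([
    ("feature_map", RBFSampler(gamma=.2, n_components=100, random_state=1)),
    ("svm", SGDClassifier(loss="hinge", random_state=1)),
])
sgd_fourier_svm.fit(data_train, targets_train)
print("SGD + Fourier features accuracy: %f"
      % sgd_fourier_svm.score(data_test, targets_test))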
{ "content_hash": "9b5ea78ef4c35188dbc9e484f08efdfc", "timestamp": "", "source": "github", "line_count": 207, "max_line_length": 79, "avg_line_length": 38.17874396135266, "alnum_prop": 0.7070732633177275, "repo_name": "kmike/scikit-learn", "id": "6ce66fece207f9268bffd8b15a9b8aa5acec4be7", "size": "7903", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "examples/plot_kernel_approximation.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "11070763" }, { "name": "C++", "bytes": "257092" }, { "name": "JavaScript", "bytes": "4775" }, { "name": "Python", "bytes": "3808272" }, { "name": "Shell", "bytes": "3770" } ], "symlink_target": "" }
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! import grpc from google.cloud.vision_v1p4beta1.proto import ( product_search_service_pb2 as google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2, ) from google.longrunning import ( operations_pb2 as google_dot_longrunning_dot_operations__pb2, ) from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 class ProductSearchStub(object): """Manages Products and ProductSets of reference images for use in product search. It uses the following resource model: - The API has a collection of [ProductSet][google.cloud.vision.v1p4beta1.ProductSet] resources, named `projects/*/locations/*/productSets/*`, which acts as a way to put different products into groups to limit identification. In parallel, - The API has a collection of [Product][google.cloud.vision.v1p4beta1.Product] resources, named `projects/*/locations/*/products/*` - Each [Product][google.cloud.vision.v1p4beta1.Product] has a collection of [ReferenceImage][google.cloud.vision.v1p4beta1.ReferenceImage] resources, named `projects/*/locations/*/products/*/referenceImages/*` """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.CreateProductSet = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/CreateProductSet", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.CreateProductSetRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ProductSet.FromString, ) self.ListProductSets = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/ListProductSets", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductSetsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductSetsResponse.FromString, ) self.GetProductSet = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/GetProductSet", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.GetProductSetRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ProductSet.FromString, ) self.UpdateProductSet = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/UpdateProductSet", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.UpdateProductSetRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ProductSet.FromString, ) self.DeleteProductSet = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/DeleteProductSet", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.DeleteProductSetRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.CreateProduct = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/CreateProduct", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.CreateProductRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.Product.FromString, ) self.ListProducts = channel.unary_unary( 
"/google.cloud.vision.v1p4beta1.ProductSearch/ListProducts", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductsResponse.FromString, ) self.GetProduct = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/GetProduct", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.GetProductRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.Product.FromString, ) self.UpdateProduct = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/UpdateProduct", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.UpdateProductRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.Product.FromString, ) self.DeleteProduct = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/DeleteProduct", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.DeleteProductRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.CreateReferenceImage = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/CreateReferenceImage", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.CreateReferenceImageRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ReferenceImage.FromString, ) self.DeleteReferenceImage = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/DeleteReferenceImage", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.DeleteReferenceImageRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListReferenceImages = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/ListReferenceImages", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListReferenceImagesRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListReferenceImagesResponse.FromString, ) self.GetReferenceImage = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/GetReferenceImage", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.GetReferenceImageRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ReferenceImage.FromString, ) self.AddProductToProductSet = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/AddProductToProductSet", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.AddProductToProductSetRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.RemoveProductFromProductSet = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/RemoveProductFromProductSet", 
request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.RemoveProductFromProductSetRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.ListProductsInProductSet = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/ListProductsInProductSet", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductsInProductSetRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductsInProductSetResponse.FromString, ) self.ImportProductSets = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/ImportProductSets", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ImportProductSetsRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.PurgeProducts = channel.unary_unary( "/google.cloud.vision.v1p4beta1.ProductSearch/PurgeProducts", request_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.PurgeProductsRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) class ProductSearchServicer(object): """Manages Products and ProductSets of reference images for use in product search. It uses the following resource model: - The API has a collection of [ProductSet][google.cloud.vision.v1p4beta1.ProductSet] resources, named `projects/*/locations/*/productSets/*`, which acts as a way to put different products into groups to limit identification. In parallel, - The API has a collection of [Product][google.cloud.vision.v1p4beta1.Product] resources, named `projects/*/locations/*/products/*` - Each [Product][google.cloud.vision.v1p4beta1.Product] has a collection of [ReferenceImage][google.cloud.vision.v1p4beta1.ReferenceImage] resources, named `projects/*/locations/*/products/*/referenceImages/*` """ def CreateProductSet(self, request, context): """Creates and returns a new ProductSet resource. Possible errors: * Returns INVALID_ARGUMENT if display_name is missing, or is longer than 4096 characters. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListProductSets(self, request, context): """Lists ProductSets in an unspecified order. Possible errors: * Returns INVALID_ARGUMENT if page_size is greater than 100, or less than 1. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetProductSet(self, request, context): """Gets information associated with a ProductSet. Possible errors: * Returns NOT_FOUND if the ProductSet does not exist. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateProductSet(self, request, context): """Makes changes to a ProductSet resource. Only display_name can be updated currently. Possible errors: * Returns NOT_FOUND if the ProductSet does not exist. * Returns INVALID_ARGUMENT if display_name is present in update_mask but missing from the request or longer than 4096 characters. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteProductSet(self, request, context): """Permanently deletes a ProductSet. Products and ReferenceImages in the ProductSet are not deleted. The actual image files are not deleted from Google Cloud Storage. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CreateProduct(self, request, context): """Creates and returns a new product resource. Possible errors: * Returns INVALID_ARGUMENT if display_name is missing or longer than 4096 characters. * Returns INVALID_ARGUMENT if description is longer than 4096 characters. * Returns INVALID_ARGUMENT if product_category is missing or invalid. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListProducts(self, request, context): """Lists products in an unspecified order. Possible errors: * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetProduct(self, request, context): """Gets information associated with a Product. Possible errors: * Returns NOT_FOUND if the Product does not exist. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def UpdateProduct(self, request, context): """Makes changes to a Product resource. Only the `display_name`, `description`, and `labels` fields can be updated right now. If labels are updated, the change will not be reflected in queries until the next index time. Possible errors: * Returns NOT_FOUND if the Product does not exist. * Returns INVALID_ARGUMENT if display_name is present in update_mask but is missing from the request or longer than 4096 characters. * Returns INVALID_ARGUMENT if description is present in update_mask but is longer than 4096 characters. * Returns INVALID_ARGUMENT if product_category is present in update_mask. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteProduct(self, request, context): """Permanently deletes a product and its reference images. Metadata of the product and all its images will be deleted right away, but search queries against ProductSets containing the product may still work until all related caches are refreshed. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def CreateReferenceImage(self, request, context): """Creates and returns a new ReferenceImage resource. The `bounding_poly` field is optional. If `bounding_poly` is not specified, the system will try to detect regions of interest in the image that are compatible with the product_category on the parent product. If it is specified, detection is ALWAYS skipped. The system converts polygons into non-rotated rectangles. Note that the pipeline will resize the image if the image resolution is too large to process (above 50MP). Possible errors: * Returns INVALID_ARGUMENT if the image_uri is missing or longer than 4096 characters. 
* Returns INVALID_ARGUMENT if the product does not exist. * Returns INVALID_ARGUMENT if bounding_poly is not provided, and nothing compatible with the parent product's product_category is detected. * Returns INVALID_ARGUMENT if bounding_poly contains more than 10 polygons. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def DeleteReferenceImage(self, request, context): """Permanently deletes a reference image. The image metadata will be deleted right away, but search queries against ProductSets containing the image may still work until all related caches are refreshed. The actual image files are not deleted from Google Cloud Storage. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListReferenceImages(self, request, context): """Lists reference images. Possible errors: * Returns NOT_FOUND if the parent product does not exist. * Returns INVALID_ARGUMENT if the page_size is greater than 100, or less than 1. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetReferenceImage(self, request, context): """Gets information associated with a ReferenceImage. Possible errors: * Returns NOT_FOUND if the specified image does not exist. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def AddProductToProductSet(self, request, context): """Adds a Product to the specified ProductSet. If the Product is already present, no change is made. One Product can be added to at most 100 ProductSets. Possible errors: * Returns NOT_FOUND if the Product or the ProductSet doesn't exist. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def RemoveProductFromProductSet(self, request, context): """Removes a Product from the specified ProductSet. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ListProductsInProductSet(self, request, context): """Lists the Products in a ProductSet, in an unspecified order. If the ProductSet does not exist, the products field of the response will be empty. Possible errors: * Returns INVALID_ARGUMENT if page_size is greater than 100 or less than 1. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def ImportProductSets(self, request, context): """Asynchronous API that imports a list of reference images to specified product sets based on a list of image information. The [google.longrunning.Operation][google.longrunning.Operation] API can be used to keep track of the progress and results of the request. `Operation.metadata` contains `BatchOperationMetadata`. (progress) `Operation.response` contains `ImportProductSetsResponse`. (results) The input source of this method is a csv file on Google Cloud Storage. For the format of the csv file please see [ImportProductSetsGcsSource.csv_file_uri][google.cloud.vision.v1p4beta1.ImportProductSetsGcsSource.csv_file_uri]. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def PurgeProducts(self, request, context): """Asynchronous API to delete all Products in a ProductSet or all Products that are in no ProductSet. If a Product is a member of the specified ProductSet in addition to other ProductSets, the Product will still be deleted. It is recommended to not delete the specified ProductSet until after this operation has completed. It is also recommended to not add any of the Products involved in the batch delete to a new ProductSet while this operation is running because those Products may still end up deleted. It's not possible to undo the PurgeProducts operation. Therefore, it is recommended to keep the csv files used in ImportProductSets (if that was how you originally built the Product Set) before starting PurgeProducts, in case you need to re-import the data after deletion. If the plan is to purge all of the Products from a ProductSet and then re-use the empty ProductSet to re-import new Products into the empty ProductSet, you must wait until the PurgeProducts operation has finished for that ProductSet. The [google.longrunning.Operation][google.longrunning.Operation] API can be used to keep track of the progress and results of the request. `Operation.metadata` contains `BatchOperationMetadata`. (progress) """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def add_ProductSearchServicer_to_server(servicer, server): rpc_method_handlers = { "CreateProductSet": grpc.unary_unary_rpc_method_handler( servicer.CreateProductSet, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.CreateProductSetRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ProductSet.SerializeToString, ), "ListProductSets": grpc.unary_unary_rpc_method_handler( servicer.ListProductSets, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductSetsRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductSetsResponse.SerializeToString, ), "GetProductSet": grpc.unary_unary_rpc_method_handler( servicer.GetProductSet, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.GetProductSetRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ProductSet.SerializeToString, ), "UpdateProductSet": grpc.unary_unary_rpc_method_handler( servicer.UpdateProductSet, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.UpdateProductSetRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ProductSet.SerializeToString, ), "DeleteProductSet": grpc.unary_unary_rpc_method_handler( servicer.DeleteProductSet, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.DeleteProductSetRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), "CreateProduct": grpc.unary_unary_rpc_method_handler( servicer.CreateProduct, 
request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.CreateProductRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.Product.SerializeToString, ), "ListProducts": grpc.unary_unary_rpc_method_handler( servicer.ListProducts, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductsRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductsResponse.SerializeToString, ), "GetProduct": grpc.unary_unary_rpc_method_handler( servicer.GetProduct, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.GetProductRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.Product.SerializeToString, ), "UpdateProduct": grpc.unary_unary_rpc_method_handler( servicer.UpdateProduct, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.UpdateProductRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.Product.SerializeToString, ), "DeleteProduct": grpc.unary_unary_rpc_method_handler( servicer.DeleteProduct, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.DeleteProductRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), "CreateReferenceImage": grpc.unary_unary_rpc_method_handler( servicer.CreateReferenceImage, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.CreateReferenceImageRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ReferenceImage.SerializeToString, ), "DeleteReferenceImage": grpc.unary_unary_rpc_method_handler( servicer.DeleteReferenceImage, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.DeleteReferenceImageRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), "ListReferenceImages": grpc.unary_unary_rpc_method_handler( servicer.ListReferenceImages, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListReferenceImagesRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListReferenceImagesResponse.SerializeToString, ), "GetReferenceImage": grpc.unary_unary_rpc_method_handler( servicer.GetReferenceImage, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.GetReferenceImageRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ReferenceImage.SerializeToString, ), "AddProductToProductSet": grpc.unary_unary_rpc_method_handler( servicer.AddProductToProductSet, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.AddProductToProductSetRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), "RemoveProductFromProductSet": grpc.unary_unary_rpc_method_handler( servicer.RemoveProductFromProductSet, 
request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.RemoveProductFromProductSetRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), "ListProductsInProductSet": grpc.unary_unary_rpc_method_handler( servicer.ListProductsInProductSet, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductsInProductSetRequest.FromString, response_serializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ListProductsInProductSetResponse.SerializeToString, ), "ImportProductSets": grpc.unary_unary_rpc_method_handler( servicer.ImportProductSets, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.ImportProductSetsRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), "PurgeProducts": grpc.unary_unary_rpc_method_handler( servicer.PurgeProducts, request_deserializer=google_dot_cloud_dot_vision__v1p4beta1_dot_proto_dot_product__search__service__pb2.PurgeProductsRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( "google.cloud.vision.v1p4beta1.ProductSearch", rpc_method_handlers ) server.add_generic_rpc_handlers((generic_handler,))
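# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated code above): a minimal way to
# exercise the ProductSearchStub defined in this module. The endpoint, project
# path, and bare SSL credentials are illustrative assumptions; real calls to
# the Cloud Vision API additionally require authenticated call credentials
# (for example obtained via google.auth), which are omitted here.
if __name__ == "__main__":
    from google.cloud.vision_v1p4beta1.proto import product_search_service_pb2

    # Assumed public endpoint for the Vision API.
    channel = grpc.secure_channel(
        "vision.googleapis.com:443", grpc.ssl_channel_credentials()
    )
    stub = ProductSearchStub(channel)

    # "parent" follows the resource model described in the stub's docstring:
    # projects/*/locations/* (the values below are placeholders).
    request = product_search_service_pb2.ListProductSetsRequest(
        parent="projects/my-project/locations/us-west1"
    )
    response = stub.ListProductSets(request)
    for product_set in response.product_sets:
        print(product_set.name)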
{ "content_hash": "05f3914f4d13553f7bbb519fe197eeff", "timestamp": "", "source": "github", "line_count": 530, "max_line_length": 167, "avg_line_length": 54.85660377358491, "alnum_prop": 0.726078283001995, "repo_name": "tseaver/google-cloud-python", "id": "0d1f9704cb5ae89daef1613f8c0353c65ed82aa3", "size": "29074", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "vision/google/cloud/vision_v1p4beta1/proto/product_search_service_pb2_grpc.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "1094" }, { "name": "Python", "bytes": "30519057" }, { "name": "Shell", "bytes": "9148" } ], "symlink_target": "" }
import six

from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest import test


class QuotasTest(base.BaseAdminNetworkTest):
    """Tests the following operations in the Neutron API:

        list quotas for tenants who have non-default quota values
        show quotas for a specified tenant
        update quotas for a specified tenant
        reset quotas to default values for a specified tenant

    v2.0 of the API is assumed.
    It is also assumed that the per-tenant quota extension API is configured
    in /etc/neutron/neutron.conf as follows:

        quota_driver = neutron.db.quota_db.DbQuotaDriver
    """

    @classmethod
    def skip_checks(cls):
        super(QuotasTest, cls).skip_checks()
        if not test.is_extension_enabled('quotas', 'network'):
            msg = "quotas extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def setup_clients(cls):
        super(QuotasTest, cls).setup_clients()
        cls.identity_admin_client = cls.os_adm.identity_client

    def _check_quotas(self, new_quotas):
        # Add a tenant to conduct the test
        project = data_utils.rand_name('test_project_')
        description = data_utils.rand_name('desc_')
        project = self.identity_utils.create_project(name=project,
                                                     description=description)
        project_id = project['id']
        self.addCleanup(self.identity_utils.delete_project, project_id)

        # Change quotas for tenant
        quota_set = self.admin_quotas_client.update_quotas(
            project_id, **new_quotas)['quota']
        self.addCleanup(self._cleanup_quotas, project_id)
        for key, value in six.iteritems(new_quotas):
            self.assertEqual(value, quota_set[key])

        # Confirm our tenant is listed among tenants with non default quotas
        non_default_quotas = self.admin_quotas_client.list_quotas()
        found = False
        for qs in non_default_quotas['quotas']:
            if qs['tenant_id'] == project_id:
                found = True
        self.assertTrue(found)

        # Confirm from API quotas were changed as requested for tenant
        quota_set = self.admin_quotas_client.show_quotas(project_id)
        quota_set = quota_set['quota']
        for key, value in six.iteritems(new_quotas):
            self.assertEqual(value, quota_set[key])

        # Reset quotas to default and confirm
        self.admin_quotas_client.reset_quotas(project_id)
        non_default_quotas = self.admin_quotas_client.list_quotas()
        for q in non_default_quotas['quotas']:
            self.assertNotEqual(project_id, q['tenant_id'])

    @test.idempotent_id('2390f766-836d-40ef-9aeb-e810d78207fb')
    def test_quotas(self):
        new_quotas = {'network': 0, 'security_group': 0}
        self._check_quotas(new_quotas)

    def _cleanup_quotas(self, project_id):
        # Try to clean up the resources. If it fails, assume that everything
        # was already deleted, so it is OK to continue.
        try:
            self.admin_quotas_client.reset_quotas(project_id)
        except lib_exc.NotFound:
            pass
{ "content_hash": "f04f266822f6ecf90a0023efcc8bbcb4", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 77, "avg_line_length": 38.666666666666664, "alnum_prop": 0.6434729064039408, "repo_name": "zsoltdudas/lis-tempest", "id": "d72e960d26936b7b88798b023d7785ced7dc98de", "size": "3884", "binary": false, "copies": "3", "ref": "refs/heads/LIS", "path": "tempest/api/network/admin/test_quotas.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "3681961" }, { "name": "Shell", "bytes": "106383" } ], "symlink_target": "" }
import re import config import util from datetime import date def nick_tracker(log_dict, track_users_on_channels = False): """ Tracks all nicks and the identifies nicks which point to same user Args: log_dict(dictionary): with key as dateTime.date object and value as {"data":datalist,"channel_name":channels name} Returns: nicks(list): all nicks nick_same_list(list): list of lists with each list corresponding to nicks of same user """ nicks = [] # list of all the nicknames nick_same_list = [[] for i in range(config.MAX_EXPECTED_DIFF_NICKS)] nick_channel_dict = [] channels_for_user = [] nicks_hash = [] channels_hash = [] #Getting all the nicknames in a list def nick_append(nick, nicks, nicks_today_on_this_channel, track_users_on_channels): if track_users_on_channels and (nick not in nicks_today_on_this_channel): nicks_today_on_this_channel.append(nick) #not nicks as there are same nicks spread across multiple channels nicks.append(nick) elif nick not in nicks: nicks.append(nick) return nicks, nicks_today_on_this_channel for day_content_all_channels in log_dict.values(): #traverse over data of different channels for that day channels_for_user_day = {}#empty for next day usage for day_content in day_content_all_channels: day_log = day_content["log_data"] channel_name = day_content["auxiliary_data"]["channel"] nicks_today_on_this_channel = [] for i in day_log: # use regex to get the string between <> and appended it to the nicks list if(util.check_if_msg_line (i)): m = re.search(r"\<(.*?)\>", i) nick = util.correctLastCharCR(m.group(0)[1:-1]) nicks, nicks_today_on_this_channel = nick_append(nick, nicks, nicks_today_on_this_channel, track_users_on_channels) ''' Forming list of lists for avoiding nickname duplicacy ''' for line in day_log: if(line[0] == '=' and "changed the topic of" not in line): old_nick = util.splice_find(line, "=", " is", 3) new_nick = util.splice_find(line, "wn as", "\n", 5) nicks, nicks_today_on_this_channel = nick_append(old_nick, nicks, nicks_today_on_this_channel, track_users_on_channels) nicks, nicks_today_on_this_channel = nick_append(new_nick, nicks, nicks_today_on_this_channel, track_users_on_channels) #nicks.append(new_nick) for i in range(config.MAX_EXPECTED_DIFF_NICKS): if old_nick in nick_same_list[i] or new_nick in nick_same_list[i]: if old_nick not in nick_same_list[i]: nick_same_list[i].append(old_nick) if new_nick not in nick_same_list[i]: nick_same_list[i].append(new_nick) break if not nick_same_list[i]: if old_nick not in nick_same_list[i]: nick_same_list[i].append(old_nick) if new_nick not in nick_same_list[i]: nick_same_list[i].append(new_nick) break if track_users_on_channels: ''' Creating list of dictionaries nick_channel_dict of the format : [{'nickname':'rohan', 'channels':['[#abc', 0],['#bcd', 0]]},{}] ''' considered_nicks = [] if config.DEBUGGER: print "Analysis on", (str(day_content["auxiliary_data"]["day"]) + "-" + str(day_content["auxiliary_data"]["month"])), channel_name for user in nicks_today_on_this_channel: f = 1 for nick_tuple in nick_same_list: if user in nick_tuple: user_nick = nick_tuple[0] f = 0 break if f: user_nick = user '''for channels of user on a day''' if channels_for_user_day.has_key(user_nick) and channel_name not in channels_for_user_day[user_nick]: channels_for_user_day[user_nick].append(channel_name) else: channels_for_user_day[user_nick] = [channel_name] flag = 1 for dictionary in nick_channel_dict: if dictionary['nickname'] == user_nick and user_nick not in considered_nicks: index = searchChannel(channel_name, 
dictionary['channels']) if index == -1: dictionary['channels'].append([channel_name,1]) else: dictionary['channels'][index][1]+=1 flag = 0 considered_nicks.append(user_nick) break if flag: nick_channel_dict.append({'nickname':user_nick, 'channels': [[channel_name, 1]]}) considered_nicks.append(user_nick) channels_for_user.append(channels_for_user_day) for nick in nicks: for index in range(config.MAX_EXPECTED_DIFF_NICKS): if nick in nick_same_list[index]: break if not nick_same_list[index]: nick_same_list[index].append(nick) break if config.DEBUGGER: print "========> 30 on " + str(len(nicks)) + " nicks" print nicks[:30] print "========> 30 on " + str(len(nick_same_list)) + " nick_same_list" print nick_same_list[:30] if not track_users_on_channels: return [nicks, nick_same_list] else: for dicts in nick_channel_dict: nick = dicts['nickname'] if nick not in nicks_hash: nicks_hash.append(nick) for channel in dicts['channels']: if channel[0] not in channels_hash: channels_hash.append(channel[0]) return [nicks, nick_same_list, channels_for_user, nick_channel_dict, nicks_hash, channels_hash] def searchChannel(channel, channel_list): ans = -1 i = 0 for c_tuple in channel_list: if c_tuple[0] == channel: ans = i break i += 1 return ans
{ "content_hash": "eaaf6174c202b0deea5aa47313c3c527", "timestamp": "", "source": "github", "line_count": 158, "max_line_length": 155, "avg_line_length": 43.5, "alnum_prop": 0.5063291139240507, "repo_name": "rohangoel96/IRCLogParser", "id": "b4bd11b34571f7e9936a1b6cd43b4d917cfa2f22", "size": "6873", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "IRCLogParser/lib/nickTracker.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "217196" }, { "name": "Shell", "bytes": "2929" } ], "symlink_target": "" }
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('wireless', '0002_standardize_id_fields'),
    ]

    operations = [
        migrations.AlterField(
            model_name='wirelesslan',
            name='created',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AlterField(
            model_name='wirelesslangroup',
            name='created',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AlterField(
            model_name='wirelesslink',
            name='created',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
    ]
{ "content_hash": "fcaa12916fdcdb7a30e8be9daff7a827", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 69, "avg_line_length": 28, "alnum_prop": 0.5728021978021978, "repo_name": "digitalocean/netbox", "id": "fe251248c02135886f1b63176814ef03aec9cc42", "size": "777", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "netbox/wireless/migrations/0003_created_datetimefield.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "189339" }, { "name": "HTML", "bytes": "570800" }, { "name": "JavaScript", "bytes": "326125" }, { "name": "Python", "bytes": "1815170" }, { "name": "Shell", "bytes": "2786" } ], "symlink_target": "" }
from antlr4.IntervalSet import IntervalSet from antlr4.Token import Token # need forward declarations from antlr4.atn.SemanticContext import Predicate, PrecedencePredicate ATNState = None RuleStartState = None class Transition (object): __slots__ = ('target','isEpsilon','label') # constants for serialization EPSILON = 1 RANGE = 2 RULE = 3 PREDICATE = 4 # e.g., {isType(input.LT(1))}? ATOM = 5 ACTION = 6 SET = 7 # ~(A|B) or ~atom, wildcard, which convert to next 2 NOT_SET = 8 WILDCARD = 9 PRECEDENCE = 10 serializationNames = [ "INVALID", "EPSILON", "RANGE", "RULE", "PREDICATE", "ATOM", "ACTION", "SET", "NOT_SET", "WILDCARD", "PRECEDENCE" ] serializationTypes = dict() def __init__(self, target:ATNState): # The target of this transition. if target is None: raise Exception("target cannot be null.") self.target = target # Are we epsilon, action, sempred? self.isEpsilon = False self.label = None # TODO: make all transitions sets? no, should remove set edges class AtomTransition(Transition): __slots__ = ('label_', 'serializationType') def __init__(self, target:ATNState, label:int): super().__init__(target) self.label_ = label # The token type or character value; or, signifies special label. self.label = self.makeLabel() self.serializationType = self.ATOM def makeLabel(self): s = IntervalSet() s.addOne(self.label_) return s def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): return self.label_ == symbol def __str__(self): return str(self.label_) class RuleTransition(Transition): __slots__ = ('ruleIndex', 'precedence', 'followState', 'serializationType') def __init__(self, ruleStart:RuleStartState, ruleIndex:int, precedence:int, followState:ATNState): super().__init__(ruleStart) self.ruleIndex = ruleIndex # ptr to the rule definition object for this rule ref self.precedence = precedence self.followState = followState # what node to begin computations following ref to rule self.serializationType = self.RULE self.isEpsilon = True def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): return False class EpsilonTransition(Transition): __slots__ = ('serializationType', 'outermostPrecedenceReturn') def __init__(self, target, outermostPrecedenceReturn=-1): super(EpsilonTransition, self).__init__(target) self.serializationType = self.EPSILON self.isEpsilon = True self.outermostPrecedenceReturn = outermostPrecedenceReturn def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): return False def __str__(self): return "epsilon" class RangeTransition(Transition): __slots__ = ('serializationType', 'start', 'stop') def __init__(self, target:ATNState, start:int, stop:int): super().__init__(target) self.serializationType = self.RANGE self.start = start self.stop = stop self.label = self.makeLabel() def makeLabel(self): s = IntervalSet() s.addRange(range(self.start, self.stop + 1)) return s def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): return symbol >= self.start and symbol <= self.stop def __str__(self): return "'" + chr(self.start) + "'..'" + chr(self.stop) + "'" class AbstractPredicateTransition(Transition): def __init__(self, target:ATNState): super().__init__(target) class PredicateTransition(AbstractPredicateTransition): __slots__ = ('serializationType', 'ruleIndex', 'predIndex', 'isCtxDependent') def __init__(self, target:ATNState, ruleIndex:int, predIndex:int, isCtxDependent:bool): super().__init__(target) self.serializationType = self.PREDICATE self.ruleIndex = ruleIndex self.predIndex = predIndex self.isCtxDependent = 
isCtxDependent # e.g., $i ref in pred self.isEpsilon = True def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): return False def getPredicate(self): return Predicate(self.ruleIndex, self.predIndex, self.isCtxDependent) def __str__(self): return "pred_" + str(self.ruleIndex) + ":" + str(self.predIndex) class ActionTransition(Transition): __slots__ = ('serializationType', 'ruleIndex', 'actionIndex', 'isCtxDependent') def __init__(self, target:ATNState, ruleIndex:int, actionIndex:int=-1, isCtxDependent:bool=False): super().__init__(target) self.serializationType = self.ACTION self.ruleIndex = ruleIndex self.actionIndex = actionIndex self.isCtxDependent = isCtxDependent # e.g., $i ref in pred self.isEpsilon = True def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): return False def __str__(self): return "action_"+self.ruleIndex+":"+self.actionIndex # A transition containing a set of values. class SetTransition(Transition): __slots__ = 'serializationType' def __init__(self, target:ATNState, set:IntervalSet): super().__init__(target) self.serializationType = self.SET if set is not None: self.label = set else: self.label = IntervalSet() self.label.addRange(range(Token.INVALID_TYPE, Token.INVALID_TYPE + 1)) def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): return symbol in self.label def __str__(self): return str(self.label) class NotSetTransition(SetTransition): def __init__(self, target:ATNState, set:IntervalSet): super().__init__(target, set) self.serializationType = self.NOT_SET def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): return symbol >= minVocabSymbol \ and symbol <= maxVocabSymbol \ and not super(type(self), self).matches(symbol, minVocabSymbol, maxVocabSymbol) def __str__(self): return '~' + super(type(self), self).__str__() class WildcardTransition(Transition): __slots__ = 'serializationType' def __init__(self, target:ATNState): super().__init__(target) self.serializationType = self.WILDCARD def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): return symbol >= minVocabSymbol and symbol <= maxVocabSymbol def __str__(self): return "." class PrecedencePredicateTransition(AbstractPredicateTransition): __slots__ = ('serializationType', 'precedence') def __init__(self, target:ATNState, precedence:int): super().__init__(target) self.serializationType = self.PRECEDENCE self.precedence = precedence self.isEpsilon = True def matches( self, symbol:int, minVocabSymbol:int, maxVocabSymbol:int): return False def getPredicate(self): return PrecedencePredicate(self.precedence) def __str__(self): return self.precedence + " >= _p" Transition.serializationTypes = { EpsilonTransition: Transition.EPSILON, RangeTransition: Transition.RANGE, RuleTransition: Transition.RULE, PredicateTransition: Transition.PREDICATE, AtomTransition: Transition.ATOM, ActionTransition: Transition.ACTION, SetTransition: Transition.SET, NotSetTransition: Transition.NOT_SET, WildcardTransition: Transition.WILDCARD, PrecedencePredicateTransition: Transition.PRECEDENCE } del ATNState del RuleStartState from antlr4.atn.ATNState import *
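# ----------------------------------------------------------------------------
# Hedged sketch (not part of the original module): a minimal illustration of
# how a Transition's `matches` method is used, assuming BasicState from
# antlr4.atn.ATNState as a throwaway target state. The vocabulary bounds
# (0..0xFFFF) are illustrative values, not taken from a real ATN.
if __name__ == "__main__":
    from antlr4.atn.ATNState import BasicState

    target = BasicState()
    lower_range = RangeTransition(target, ord('a'), ord('z'))
    print(lower_range.matches(ord('m'), 0, 0xFFFF))  # True: 'm' falls in 'a'..'z'
    print(lower_range.matches(ord('A'), 0, 0xFFFF))  # False: 'A' is outside the range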
{ "content_hash": "6087166d77d612a3aef940dacf60163d", "timestamp": "", "source": "github", "line_count": 250, "max_line_length": 102, "avg_line_length": 31.944, "alnum_prop": 0.6342349110944152, "repo_name": "ericvergnaud/antlr4", "id": "2e4c9971763c34dbb2690660434c5c99d44193e1", "size": "8762", "binary": false, "copies": "7", "ref": "refs/heads/dev", "path": "runtime/Python3/src/antlr4/atn/Transition.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "ANTLR", "bytes": "113458" }, { "name": "Assembly", "bytes": "92577" }, { "name": "Batchfile", "bytes": "6675" }, { "name": "C", "bytes": "17187" }, { "name": "C#", "bytes": "1042222" }, { "name": "C++", "bytes": "1026862" }, { "name": "CMake", "bytes": "35026" }, { "name": "Dart", "bytes": "586998" }, { "name": "Dockerfile", "bytes": "859" }, { "name": "GAP", "bytes": "109926" }, { "name": "Go", "bytes": "891140" }, { "name": "Java", "bytes": "2406964" }, { "name": "JavaScript", "bytes": "445032" }, { "name": "Objective-C", "bytes": "408" }, { "name": "Objective-C++", "bytes": "28237" }, { "name": "Python", "bytes": "1433367" }, { "name": "Shell", "bytes": "6270" }, { "name": "Smalltalk", "bytes": "3325" }, { "name": "Swift", "bytes": "873265" } ], "symlink_target": "" }
from django.conf import settings  # needed for DEBUG and ADMIN_MEDIA_PREFIX below
from django.contrib import admin
from django import forms
from django.db import models

from dailyrings.models import Image


class DifferentlySizedTextarea(forms.Textarea):
    """Textarea widget with a smaller default size for admin forms."""

    def __init__(self, *args, **kwargs):
        attrs = kwargs.setdefault('attrs', {})
        attrs.setdefault('cols', 40)
        attrs.setdefault('rows', 3)
        super(DifferentlySizedTextarea, self).__init__(*args, **kwargs)


class ImageAdmin(admin.ModelAdmin):
    list_display = ('name', 'planet', 'thumbnail', 'title', 'tweet',
                    'pub_date', 'pub_order')
    list_editable = ('tweet', 'pub_order')
    search_fields = ('name', 'title')
    list_filter = ('pub_date',)
    ordering = ('pub_order',)
    formfield_overrides = {
        models.CharField: {'widget': DifferentlySizedTextarea}}

    def thumbnail(self, instance):
        # Serve thumbnails from the local static directory in development.
        if settings.DEBUG:
            return '<img src="/static_media/%s" width="100"/>' % instance.jpg
        return ('<img src="' + settings.ADMIN_MEDIA_PREFIX +
                '%s" width="200"/>' % instance.jpg)
    thumbnail.allow_tags = True


admin.site.register(Image, ImageAdmin)
{ "content_hash": "83928460974637c7660c6b4924245775", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 95, "avg_line_length": 35.516129032258064, "alnum_prop": 0.6394187102633969, "repo_name": "basilleaf/dailyrings", "id": "422d5c9fafe92bd0ae49d796ce3693e94416ea92", "size": "1101", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "dailyrings_project/dailyrings/admin.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "11292" }, { "name": "HTML", "bytes": "28821" }, { "name": "JavaScript", "bytes": "2745" }, { "name": "PHP", "bytes": "3056" }, { "name": "Python", "bytes": "51435" }, { "name": "Shell", "bytes": "178" } ], "symlink_target": "" }
from azure.identity import DefaultAzureCredential

from azure.mgmt.synapse import SynapseManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-synapse
# USAGE
    python kusto_pools_list_by_workspace.py

    Before running the sample, please set the values of the client ID, tenant ID and
    client secret of the AAD application as environment variables: AZURE_CLIENT_ID,
    AZURE_TENANT_ID, AZURE_CLIENT_SECRET. For more info about how to get the value,
    please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    client = SynapseManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="12345678-1234-1234-1234-123456789098",
    )

    response = client.kusto_pools.list_by_workspace(
        resource_group_name="kustorptest",
        workspace_name="kustorptest",
    )
    print(response)


# x-ms-original-file: specification/synapse/resource-manager/Microsoft.Synapse/preview/2021-06-01-preview/examples/KustoPoolsListByWorkspace.json
if __name__ == "__main__":
    main()
{ "content_hash": "226da960381300b1ee3b6d843f5d2c5c", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 145, "avg_line_length": 34.06060606060606, "alnum_prop": 0.7375444839857651, "repo_name": "Azure/azure-sdk-for-python", "id": "739de25d2543022cfa15c35fff7e498c54910e79", "size": "1592", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/synapse/azure-mgmt-synapse/generated_samples/kusto_pools_list_by_workspace.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
"""TPUEstimator class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import os import signal import threading import time import traceback import numpy as np import six from six.moves import queue as Queue # pylint: disable=redefined-builtin from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.contrib.tpu.python.ops import tpu_ops from tensorflow.contrib.tpu.python.tpu import session_support from tensorflow.contrib.tpu.python.tpu import tpu from tensorflow.contrib.tpu.python.tpu import tpu_config from tensorflow.contrib.tpu.python.tpu import tpu_context from tensorflow.contrib.tpu.python.tpu import tpu_feed from tensorflow.contrib.tpu.python.tpu import training_loop from tensorflow.contrib.tpu.python.tpu import util as util_lib from tensorflow.contrib.training.python.training import hparam from tensorflow.core.framework import variable_pb2 from tensorflow.core.framework.summary_pb2 import Summary from tensorflow.core.protobuf import config_pb2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.estimator import estimator as estimator_lib from tensorflow.python.estimator import model_fn as model_fn_lib from tensorflow.python.estimator.export import export_output as export_output_lib from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import summary_ops_v2 as contrib_summary from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.saved_model import tag_constants from tensorflow.python.summary import summary from tensorflow.python.training import basic_session_run_hooks from tensorflow.python.training import evaluation from tensorflow.python.training import session_run_hook from tensorflow.python.training import training from tensorflow.python.training import training_util from tensorflow.python.util import function_utils from tensorflow.python.util import nest from tensorflow.python.util import tf_inspect _INITIAL_LOSS = 1e7 _ZERO_LOSS = 0. _TPU_ESTIMATOR = 'tpu_estimator' _ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop' _BATCH_SIZE_KEY = 'batch_size' _CTX_KEY = 'context' _CROSS_REPLICA_SUM_OP = 'CrossReplicaSum' _ONE_GIGABYTE = 1024 * 1024 * 1024 _TPU_ENQUEUE_OPS = '_tpu_enqueue_ops' _TPU_TRAIN_OP = '_tpu_train_op' _REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference' _RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY] # TODO(b/65703635): Flip the value and remove all dead code. Currently, this is # only used for per-core based deployments. For per-host based pipelines, if a # user returns a Dataset instance it will be automatically wrapped in a # tf.while_loop (This can be disabled by returning features and labels # explicitly). 
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False

ops.register_proto_function(
    '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
    proto_type=variable_pb2.VariableDef,
    to_proto=resource_variable_ops._to_proto_fn,  # pylint: disable=protected-access
    from_proto=resource_variable_ops._from_proto_fn)  # pylint: disable=protected-access


def _create_global_step(graph):
  graph = graph or ops.get_default_graph()
  if training.get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        use_resource=True,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES,
                     ops.GraphKeys.GLOBAL_STEP])


def _create_or_get_iterations_per_loop():
  graph = ops.get_default_graph()
  collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
  iter_vars = graph.get_collection(collection_name)
  if len(iter_vars) == 1:
    return iter_vars[0]
  elif len(iter_vars) > 1:
    raise RuntimeError('Multiple iterations_per_loop_var in collection.')

  with ops.colocate_with(training_util.get_global_step()):
    with variable_scope.variable_scope(
        _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
      return variable_scope.get_variable(
          _ITERATIONS_PER_LOOP_VAR,
          initializer=init_ops.zeros_initializer(),
          shape=[],
          dtype=dtypes.int32,
          trainable=False,
          collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
          use_resource=True)


def _sync_variables_ops():
  # Gets the variables back from TPU nodes. This means the variables updated
  # by TPU will now be *synced* to host memory.
  return [
      array_ops.check_numerics(v.read_value(),
                               'Gradient for %s is NaN' % v.name).op
      for v in variables.trainable_variables()
  ]


def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in TPU
        system before returning to CPU host for each `Session.run`.

  Returns:
    An operation that increments the eval step by `iterations_per_loop - 1`.
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator evaluate increases 1 by default. So, we increase the difference.
  return state_ops.assign_add(
      eval_step,
      math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
      use_locking=True)


class _SIGNAL(object):
  """Signal used to control the thread of infeed/outfeed.

  All preserved signals must be negative numbers. Positive numbers are used to
  indicate the number of iterations for the next training/evaluation loop.
  """
  NEXT_BATCH = -1
  STOP = -2


class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
  """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.

  See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
  `export_outputs`.

  For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
  `metric_fn` runs on CPU to generate metrics and `tensors` represents the
  `Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
  To be precise, TPU evaluation expects a slightly different signature from the
  @{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
  dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and
  `tensors`. The `tensors` could be a list of `Tensor`s or dict of names to
  `Tensor`s. The `tensors` usually specify the model logits, which are
  transferred back from TPU system to CPU host.
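
  For example, a minimal `eval_metrics` pair might look like the following
  sketch (illustrative only; `labels` and `logits` are assumed to be tensors
  produced by the model):

  ```
  def metric_fn(labels, logits):
    return {
        'accuracy': tf.metrics.accuracy(
            labels=labels, predictions=tf.argmax(logits, 1)),
    }

  eval_metrics = (metric_fn, [labels, logits])
  ```
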
  All tensors must be batch-major, i.e., the batch size is the first
  dimension. Once all tensors are available at CPU host from all shards, they
  are concatenated (on CPU) and passed as positional arguments to the
  `metric_fn` if `tensors` is a list or as keyword arguments if `tensors` is a
  dict. `metric_fn` takes the `tensors` and returns a dict from metric string
  name to the result of calling a metric function, namely a
  `(metric_tensor, update_op)` tuple. See `TPUEstimator` for an MNIST example
  of how to specify `eval_metrics`.

  `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
  function should not capture any Tensors in `model_fn`.

  `host_call` is a tuple of a `function` and a list or dictionary of `tensors`
  to pass to that function; the function returns a list of Tensors.
  `host_call` currently works for train() and evaluate(). The function runs on
  the CPU on every step, so there is communication overhead when sending
  tensors from TPU to CPU. To reduce the overhead, try reducing the size of
  the tensors. The `tensors` are concatenated along their major (batch)
  dimension, and so must be >= rank 1. The `host_call` is useful for writing
  summaries with @{tf.contrib.summary.create_file_writer}.
  """

  def __new__(cls,
              mode,
              predictions=None,
              loss=None,
              train_op=None,
              eval_metrics=None,
              export_outputs=None,
              scaffold_fn=None,
              host_call=None):
    """Creates a validated `TPUEstimatorSpec` instance."""
    host_calls = {}
    if eval_metrics is not None:
      host_calls['eval_metrics'] = eval_metrics
    if host_call is not None:
      host_calls['host_call'] = host_call
    _OutfeedHostCall.validate(host_calls)
    return super(TPUEstimatorSpec, cls).__new__(
        cls,
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metrics=eval_metrics,
        export_outputs=export_outputs,
        scaffold_fn=scaffold_fn,
        host_call=host_call)

  def as_estimator_spec(self):
    """Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
    host_calls = {}
    if self.eval_metrics is not None:
      host_calls['eval_metrics'] = self.eval_metrics
    if self.host_call is not None:
      host_calls['host_call'] = self.host_call
    host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
    eval_metric_ops = None
    if self.eval_metrics is not None:
      eval_metric_ops = host_call_ret['eval_metrics']
    hooks = None
    if self.host_call is not None:
      hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
    scaffold = self.scaffold_fn() if self.scaffold_fn else None
    return model_fn_lib.EstimatorSpec(
        mode=self.mode,
        predictions=self.predictions,
        loss=self.loss,
        train_op=self.train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs=self.export_outputs,
        scaffold=scaffold,
        training_hooks=hooks,
        evaluation_hooks=hooks,
        prediction_hooks=hooks)


class _OpQueueContext(object):
  """Manages the work queue and thread for an infeed/outfeed thread."""

  def __init__(self, name, target, args):
    self._name = name
    self._queue = Queue.Queue()
    args = (self,) + args
    self._thread = threading.Thread(name=name, target=target, args=args)
    self._thread.daemon = True
    self._thread.start()

  def stop(self):
    self._queue.put(_SIGNAL.STOP)

  def send_next_batch_signal(self, iterations):
    self._queue.put(iterations)

  def read_iteration_counts(self):
    while True:
      iterations = self._queue.get(block=True)
      logging.debug('%s read iterations %s', self._name, iterations)
      if iterations == _SIGNAL.STOP:
        logging.info('%s received shutdown signal, stopping.', self._name)
        return
      yield iterations

  def join(self):
    logging.info('Shutting down %s thread.' % self._name)
    self.stop()
    self._thread.join()


class _OpSignalOnceQueueContext(_OpQueueContext):
  """Manages the work queue and thread for an infeed/outfeed thread.

  This subclass only signals once.
  """

  def __init__(self, name, target, args):
    super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
    self._has_signaled = False

  def send_next_batch_signal(self, iterations):
    if not self._has_signaled:
      self._queue.put(iterations)
      self._has_signaled = True


class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
  """A Session hook setting up the TPU initialization, infeed, and outfeed.

  This hook does two major things:
  1. initialize and shut down the TPU system.
  2. launch and join the threads for infeed enqueue and (optional) outfeed
     dequeue.
  """

  def __init__(self,
               ctx,
               enqueue_ops,
               dequeue_ops,
               run_infeed_loop_on_coordinator=True):
    self._master_job = ctx.master_job
    self._enqueue_ops = enqueue_ops
    self._dequeue_ops = dequeue_ops
    self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
    self._initial_infeed_sleep_secs = (
        ctx.config.tpu_config.initial_infeed_sleep_secs)
    self._session_cancel_timer = None

    self._feed_error = None
    self._finished = False

  def begin(self):
    logging.info('TPU job name %s', self._master_job)
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
    self._init_ops = [tpu.initialize_system(job=self._master_job)]
    self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]

    summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
    self._init_ops.extend(summary_writer_init_ops)
    # Get all the writer resources from the initializer, so we know what to
    # flush.
    for op in summary_writer_init_ops:
      self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))

  def _log_error(self, session, error):
    """Log an infeed or outfeed error.

    This logs a short error message immediately, and schedules a timer to
    emit the full stack trace and error message after a short period of time.
    If the main session has terminated by the time the timer triggers, we
    assume the real source of the error was from the main session and avoid
    emitting a stack trace for the infeed.

    Args:
      session: `tf.Session`, session to be terminated.
      error: the exception that triggered logging.
    """
    logging.warning(
        '\n\n'
        'Error occurred during infeed/outfeed. This may be due to a compile '
        'error in the main session. Waiting for a short time for the main '
        'session to come back.\n\n%s', error)

    self._feed_error = traceback.format_exc()

    # If we've already encountered a feed error, don't schedule another
    # cancellation op.
    if self._session_cancel_timer:
      return

    def _cancel_session():
      # Close the session to avoid the main thread from hanging. If input
      # pipeline triggers any error, the infeed thread dies but the main thread
      # for TPU computation waits for the infeed enqueue forever. Close the
      # Session to cancel the main thread Session.run execution.
      #
      # We sleep for a few seconds before closing to give any TPU compilation
      # error, if there is one, some time to propagate from TPU to CPU host.
      # Compilation errors should be reported by the main thread so that the
      # program can be interrupted and users can take action. Due to a race
      # condition, the infeed thread might see an error first. Closing the
      # session here immediately would result in a session cancellation
      # exception in the main thread, instead of the expected compile error.
      # User code that depends on having the proper exception type will
      # therefore be confused.
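      # The fixed delay below is a heuristic grace period: presumably long
      # enough for most compile errors to surface in the main thread first,
      # while keeping shutdown reasonably prompt.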
      time.sleep(5)

      # If the main session is still running, the infeed/outfeed errors are
      # legitimate, and should be logged.
      if not self._finished and self._feed_error:
        logging.error('Feed error: %s', self._feed_error)
        logging.error('Closing session. A RuntimeError should follow.')
        session.close()

    self._session_cancel_timer = threading.Thread(target=_cancel_session)
    self._session_cancel_timer.daemon = True
    self._session_cancel_timer.start()

  def _run_infeed(self, queue_ctx, session):
    logging.info('Starting infeed thread controller.')
    if self._initial_infeed_sleep_secs:
      logging.info('Infeed thread sleeping for %d seconds.',
                   self._initial_infeed_sleep_secs)
      time.sleep(self._initial_infeed_sleep_secs)
      logging.info('Infeed thread starting after sleep')

    try:
      if self._run_infeed_loop_on_coordinator:
        for count, steps in enumerate(queue_ctx.read_iteration_counts()):
          for i in xrange(steps):
            logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
            session.run(self._enqueue_ops)
      else:
        for _ in queue_ctx.read_iteration_counts():
          session.run(self._enqueue_ops)
      logging.info('Infeed thread finished, shutting down.')
    except Exception as e:  # pylint: disable=broad-except
      self._log_error(session, e)

  def _run_outfeed(self, queue_ctx, session):
    logging.info('Starting outfeed thread controller.')
    try:
      for count, steps in enumerate(queue_ctx.read_iteration_counts()):
        for i in xrange(steps):
          logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
          session.run(self._dequeue_ops)
      logging.info('Outfeed thread finished, shutting down.')
    except Exception as e:  # pylint: disable=broad-except
      self._log_error(session, e)

  def _create_infeed_controller(self, name, target, args):
    return _OpQueueContext(name=name, target=target, args=args)

  def after_create_session(self, session, coord):
    logging.info('Init TPU system')
    session.run(self._init_ops,
                options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))

    self._infeed_controller = self._create_infeed_controller(
        name='InfeedController', target=self._run_infeed, args=(session,))

    self._outfeed_controller = _OpQueueContext(
        name='OutfeedController', target=self._run_outfeed, args=(session,))

  def before_run(self, run_context):
    self._feed_error = None

    # Wait for the cancellation timer to complete before continuing.
    if self._session_cancel_timer:
      self._session_cancel_timer.join()
      self._session_cancel_timer = None

    iterations = run_context.session.run(self._iterations_per_loop_var)

    logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
    self._infeed_controller.send_next_batch_signal(iterations)

    logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
                 iterations)
    self._outfeed_controller.send_next_batch_signal(iterations)

  def end(self, session):
    if self._session_cancel_timer:
      logging.warning('Feed error occurred; waiting for message.')
      self._session_cancel_timer.join()

    self._finished = True
    logging.info('Stop infeed thread controller')
    self._infeed_controller.join()

    logging.info('Stop outfeed thread controller')
    self._outfeed_controller.join()

    logging.info('Shutdown TPU system.')
    session.run(self._finalize_ops)


class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):

  def __init__(self, ctx, enqueue_ops, dequeue_ops):
    super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
        ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False)

  def _create_infeed_controller(self, name, target, args):
    return _OpSignalOnceQueueContext(name=name, target=target, args=args)


class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
  """Hook that requests stop at a specified step.

  This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
  the following differences for TPU training:

  1. This hook sets the variable for iterations_per_loop, which is used by
     `TPUInfeedOutfeedSessionHook` to control the iterations for
     infeed/outfeed. As the hook execution order is not guaranteed, the
     variable update is handled in `after_create_session` and `after_run` as
     `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.

  2. For each training loop (session.run), the global step could be increased
     multiple times on TPU. The global step tensor value will be explicitly
     read again in `after_run` to ensure the latest value is retrieved,
     avoiding a race condition.
  """

  def __init__(self, iterations, num_steps=None, last_step=None):
    """Initializes a `_TPUStopAtStepHook`.

    Args:
      iterations: The number of iterations to run the optimizer per training
          loop.
      num_steps: Number of steps to execute.
      last_step: Step after which to stop.

    Raises:
      ValueError: If one of the arguments is invalid.
    """
    if num_steps is None and last_step is None:
      raise ValueError('One of num_steps or last_step must be specified.')
    if num_steps is not None and last_step is not None:
      raise ValueError('Only one of num_steps or last_step can be specified.')
    self._num_steps = num_steps
    self._last_step = last_step
    self._iterations = iterations

  def _next_iterations(self, global_step, last_step):
    gap = last_step - global_step
    return min(gap, self._iterations)

  def begin(self):
    self._global_step_tensor = training_util.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError('Global step should be created.')

    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    global_step = session.run(self._global_step_tensor)
    if self._last_step is None:
      self._last_step = global_step + self._num_steps

    iterations = self._next_iterations(global_step, self._last_step)

    self._iterations_per_loop_var.load(iterations, session=session)

  def after_run(self, run_context, run_values):
    # Global step cannot be retrieved via SessionRunArgs and before_run due to
    # race condition.
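    # Reading the global step here, after the TPU loop for this session.run
    # has finished, guarantees we observe the fully updated value (see point 2
    # in the class docstring).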
    global_step = run_context.session.run(self._global_step_tensor)
    if global_step >= self._last_step:
      run_context.request_stop()
    else:
      iterations = self._next_iterations(global_step, self._last_step)
      self._iterations_per_loop_var.load(
          iterations, session=run_context.session)


class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
  """Hook that sets the number of eval iterations per loop."""

  def __init__(self, num_steps):
    """Initializes a `_SetEvalIterationsHook`.

    Args:
      num_steps: Number of steps to execute.
    """
    self._num_steps = num_steps

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    self._iterations_per_loop_var.load(self._num_steps, session=session)


class _StoppingPredictHook(session_run_hook.SessionRunHook):
  """Hook that requests stop according to the stopping signal in prediction."""

  def __init__(self, scalar_stopping_signal):
    self._scalar_stopping_signal = scalar_stopping_signal

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    # This is not necessary as we do not run infeed enqueue and outfeed dequeue
    # in side threads for the prediction model. But it makes
    # TPUInfeedOutfeedSessionHook print a nicer message.
    self._iterations_per_loop_var.load(1, session=session)

  def before_run(self, run_context):
    return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)

  def after_run(self, run_context, run_values):
    _ = run_context
    scalar_stopping_signal = run_values.results
    if _StopSignals.should_stop(scalar_stopping_signal):
      # NOTE(xiejw): In prediction, stopping signals are inserted for each
      # batch. And we append one more batch to signal the system it should
      # stop. The data flow might look like
      #
      #  batch   0: images, labels, stop = 0  (user provided)
      #  batch   1: images, labels, stop = 0  (user provided)
      #  ...
      #  batch  99: images, labels, stop = 0  (user provided)
      #  batch 100: images, labels, stop = 1  (TPUEstimator appended)
      #
      # where the final batch (id = 100) is appended by TPUEstimator, so we
      # should drop it before returning the predictions to user.
      # To achieve that, we throw the OutOfRangeError in after_run. Once
      # Monitored Session sees this error in SessionRunHook.after_run, the
      # "current" prediction, i.e., batch with id=100, will be discarded
      # immediately.
      raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')


def generate_per_core_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, host_device, host_id):
  """Generates infeed enqueue ops for per-core input_fn on a single host."""
  captured_infeed_queue = _CapturedObject()

  def enqueue_ops_fn():
    """A fn that returns enqueue_ops."""
    num_cores_per_host = ctx.num_of_cores_per_host
    per_host_sharded_inputs = []
    for core_ordinal in range(num_cores_per_host):
      with ops.name_scope('ordinal_%d' % (core_ordinal)):
        user_context = tpu_context.TPUContext(
            internal_ctx=ctx,
            input_device=host_device,
            invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal
        )
        inputs = _Inputs.from_input_fn(input_fn(user_context))
        if inputs.is_dataset:
          raise TypeError(
              '`input_fn` returning `Dataset` is not supported in '
              'per-Core input pipeline deployment yet. Please set '
              'TPUConfig.per_host_input_for_training to True or return '
              '`features` and `labels` from `input_fn`.')
        features, labels = inputs.features_and_labels()

        inputs_structure_recorder.validate_and_record_structure(
            features, labels)
        flattened_inputs = (
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels))
        per_host_sharded_inputs.append(flattened_inputs)

    infeed_queue = tpu_feed.InfeedQueue(
        number_of_tuple_elements=len(per_host_sharded_inputs[0]))
    captured_infeed_queue.capture(infeed_queue)
    infeed_queue.set_configuration_from_sharded_input_tensors(
        per_host_sharded_inputs)

    per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
        per_host_sharded_inputs,
        tpu_ordinal_function=ctx.tpu_ordinal_function)
    return per_host_enqueue_ops

  return enqueue_ops_fn, captured_infeed_queue


def generate_per_host_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
  """Generates infeed enqueue ops for per-host input_fn on a single host."""
  captured_infeed_queue = _CapturedObject()

  hooks = []

  with ops.device(device):
    user_context = tpu_context.TPUContext(
        internal_ctx=ctx, input_device=device, invocation_index=host_id)
    inputs = _Inputs.from_input_fn(input_fn(user_context))

    is_dataset = inputs.is_dataset
    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      if not is_dataset:
        raise TypeError(
            'For mode PREDICT, `input_fn` must return `Dataset` instead of '
            '`features` and `labels`.')
      if batch_axis is not None:
        raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
      inputs = _InputsWithStoppingSignals(
          dataset=inputs.dataset,
          batch_size=ctx.batch_size_for_input_fn,
          add_padding=True)

    if is_dataset:
      hooks.append(inputs.dataset_initializer_hook())

    # TODO(ylc): Refactor the code to merge the tpu ordinal logic here with
    # _InternalTPUContext.tpu_ordinal_function. We should either introduce
    # another abstraction or use a different helper method.
    def _tpu_ordinal_function_impl(shard_index_in_host):
      # We put both enqueue/dequeue ops at tpu.core(0) in each replica.
      replica = ctx.device_assignment.lookup_replicas(
          host_id, (0, 0, 0))[shard_index_in_host]
      return ctx.device_assignment.tpu_ordinal(replica=replica)

    if ctx.model_parallelism_enabled:
      tpu_ordinal_function = _tpu_ordinal_function_impl
    else:
      tpu_ordinal_function = None

    def enqueue_ops_fn():
      with ops.device(device):
        num_of_replicas_per_host = ctx.num_of_replicas_per_host
        # Convert user input to features and labels.
        # If the user returns a dataset, it is initialized and the features
        # and labels are extracted via `dataset.iterator.get_next()`.
        features, labels = inputs.features_and_labels()
        signals = inputs.signals()

        inputs_structure_recorder.validate_and_record_structure(
            features, labels, signals)
        unsharded_tensor_list = (
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels, signals))

        infeed_queue = tpu_feed.InfeedQueue(
            tuple_types=[t.dtype for t in unsharded_tensor_list],
            tuple_shapes=[t.shape for t in unsharded_tensor_list],
            shard_dimensions=batch_axis)
        captured_infeed_queue.capture(infeed_queue)
        infeed_queue.set_number_of_shards(num_of_replicas_per_host)
        per_host_enqueue_ops = (
            infeed_queue.split_inputs_and_generate_enqueue_ops(
                unsharded_tensor_list,
                placement_function=lambda x: device,
                tpu_ordinal_function=tpu_ordinal_function))
        if signals is None:
          return per_host_enqueue_ops
        else:
          return {
              'ops': per_host_enqueue_ops,
              'signals': signals,
          }

  return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset


def generate_per_host_v2_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, device, host_id):
  """Generates infeed enqueue ops for per-host input_fn on a single host."""
  captured_infeed_queue = _CapturedObject()
  hooks = []

  with ops.device(device):
    user_context = tpu_context.TPUContext(
        internal_ctx=ctx, input_device=device, invocation_index=host_id)
    inputs = _Inputs.from_input_fn(input_fn(user_context))

    is_dataset = inputs.is_dataset
    if not is_dataset:
      raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
                      'input pipeline configuration.')

    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      # TODO(b/XXX): Add predict support for PER_HOST_V2
      raise TypeError('Mode PREDICT not yet supported in PER_HOST_V2 mode.')

    hooks.append(inputs.dataset_initializer_hook())

  def enqueue_ops_fn():
    """Generates the per_host enqueue ops."""
    control_deps = []
    per_host_sharded_inputs = []
    num_replicas_per_host = ctx.num_of_replicas_per_host
    with ops.device(device):
      if not inputs.is_dataset:
        raise TypeError('`input_fn` must return a `Dataset` for this mode.')
      for _ in range(num_replicas_per_host):
        # Use control dependencies to ensure a deterministic ordering.
        with ops.control_dependencies(control_deps):
          features, labels = inputs.features_and_labels()  # Calls get_next()

        inputs_structure_recorder.validate_and_record_structure(
            features, labels)
        flattened_inputs = (
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels))
        control_deps.extend(flattened_inputs)
        per_host_sharded_inputs.append(flattened_inputs)

      infeed_queue = tpu_feed.InfeedQueue(
          number_of_tuple_elements=len(per_host_sharded_inputs[0]))
      captured_infeed_queue.capture(infeed_queue)
      infeed_queue.set_configuration_from_sharded_input_tensors(
          per_host_sharded_inputs)

      per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
          per_host_sharded_inputs,
          tpu_ordinal_function=ctx.tpu_ordinal_function)
      return per_host_enqueue_ops

  return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset


class _InputPipeline(object):
  """`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.

  `_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
  the call site. To be precise, based on the configuration in
  `_InternalTPUContext`, it invokes `input_fn` for all cores (usually
  multi-host TPU training) or for one host (usually for single-host TPU
  evaluation), and sends all `features` and `labels` returned by `input_fn` to
  TPU infeed.
  For per-core invocation, `features` and `labels` are piped to infeed
  directly, one tuple for each core. For per-host invocation, `features` and
  `labels` are split at host (with respect to `batch_axis`) and piped to all
  cores accordingly.

  In addition, `_InputPipeline` also handles the flattening and unflattening
  of inputs. Model inputs returned by the `input_fn` can have one of the
  following forms:

  1. features
  2. (features, labels)

  Internally, form 1 is reformed to `(features, None)` as features and labels
  are passed separately to underlying methods. For TPU training, TPUEstimator
  may expect multiple `features` and `labels` tuples, one for each core.

  TPUEstimator allows various different structures for inputs (namely
  `features` and `labels`). `features` can be `Tensor` or dict of string name
  to `Tensor`, and `labels` could be `None`, `Tensor`, or dict of string name
  to `Tensor`. The TPU infeed/outfeed library expects a flattened tensor
  list. So, `features` and `labels` need to be flattened before infeed
  enqueue, and their structure needs to be recorded in order to restore them
  after infeed dequeue.
  """

  class InputsStructureRecorder(object):
    """The recorder to record inputs structure."""

    def __init__(self):
      # Holds the structure of inputs.
      self._feature_names = []
      self._label_names = []
      self._has_labels = False
      self._signals_helper = None

      # Internal state.
      self._initialized = False

    def has_labels(self):
      return self._has_labels

    def validate_and_record_structure(self, features, labels, signals=None):
      """Validates and records the structure of `features` and `labels`."""

      def _extract_key_names(tensor_or_dict):
        if tensor_or_dict is None:
          return []
        return sorted(tensor_or_dict.keys()) if isinstance(
            tensor_or_dict, dict) else []

      # Extract structure.
      has_labels = labels is not None
      feature_names = _extract_key_names(features)
      label_names = _extract_key_names(labels)

      if signals is not None and self._signals_helper is None:
        # Record signals helper.
        self._signals_helper = _SignalsHelper(signals)

      if self._initialized:
        # Verify the structure is same. The following should never happen.
        assert feature_names == self._feature_names, 'feature keys mismatched'
        assert label_names == self._label_names, 'label keys mismatched'
        assert has_labels == self._has_labels, 'label presence mismatched'
      else:
        # Record structure.
        self._initialized = True
        self._feature_names = feature_names
        self._label_names = label_names
        self._has_labels = has_labels

    def flatten_features_and_labels(self, features, labels, signals=None):
      """Flattens the `features` and `labels` to a single tensor list."""
      flattened_inputs = []
      if self._feature_names:
        # We need a fixed ordering for enqueueing and dequeueing.
        flattened_inputs.extend(
            [features[name] for name in self._feature_names])
      else:
        flattened_inputs.append(features)

      if labels is not None:
        if self._label_names:
          # We need a fixed ordering for enqueueing and dequeueing.
          flattened_inputs.extend(
              [labels[name] for name in self._label_names])
        else:
          flattened_inputs.append(labels)

      if signals is not None:
        flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))
      return flattened_inputs

    def unflatten_features_and_labels(self, flattened_inputs):
      """Restores the flattened inputs to original features and labels form.

      Args:
        flattened_inputs: Flattened inputs for each shard.

      Returns:
        A tuple of (`features`, `labels`), where `labels` could be None.
        Each one, if present, should have identical structure (single tensor
        vs dict) as the one returned by input_fn.
      Raises:
        ValueError: If the number of expected tensors from `flattened_inputs`
          mismatches the recorded structure.
      """
      expected_num_features = (
          len(self._feature_names) if self._feature_names else 1)
      if self._has_labels:
        expected_num_labels = (
            len(self._label_names) if self._label_names else 1)
      else:
        expected_num_labels = 0

      expected_num_signals = (
          self._signals_helper.num_signals if self._signals_helper else 0)

      expected_num_tensors = (
          expected_num_features + expected_num_labels + expected_num_signals)

      if expected_num_tensors != len(flattened_inputs):
        raise ValueError(
            'The number of flattened tensors mismatches expected num. '
            'Expected {}, got {}'.format(expected_num_tensors,
                                         len(flattened_inputs)))
      if self._feature_names:
        unflattened_features = dict(
            zip(self._feature_names,
                flattened_inputs[:expected_num_features]))
      else:
        # Single tensor case.
        unflattened_features = flattened_inputs[0]

      if expected_num_labels == 0:
        unflattened_label = None
      elif self._label_names:
        label_list = flattened_inputs[
            expected_num_features:expected_num_features + expected_num_labels]
        unflattened_label = dict(zip(self._label_names, label_list))
      else:
        # Single tensor case.
        unflattened_label = flattened_inputs[expected_num_features]

      signals = None
      if expected_num_signals != 0:
        tensor_list_for_signals = flattened_inputs[
            expected_num_features + expected_num_labels:]
        signals = self._signals_helper.unflatten(tensor_list_for_signals)

      return _Inputs(unflattened_features, unflattened_label, signals=signals)

  def __init__(self, input_fn, batch_axis, ctx):
    """Constructor.

    Args:
      input_fn: input fn for train or eval.
      batch_axis: A python tuple of int values describing how each tensor
        produced by the Estimator `input_fn` should be split across the TPU
        compute shards.
      ctx: A `_InternalTPUContext` instance with mode.
    """
    self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()

    self._sharded_per_core = ctx.is_input_sharded_per_core()
    self._input_fn = input_fn
    self._infeed_queue = None
    self._ctx = ctx
    self._batch_axis = batch_axis

  def generate_infeed_enqueue_ops_and_dequeue_fn(self):
    """Generates infeed enqueue ops and dequeue_fn."""
    # When the tf.while_loop is constructed, the body function, which invokes
    # the `enqueue_fn` passed in, is called to build the graph. As a side
    # effect, the input_fn structure is recorded.
    enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
        self._invoke_input_fn_and_record_structure())

    self._validate_input_pipeline()

    def dequeue_fn():
      """dequeue_fn is used by TPU to retrieve the tensors."""
      # In the model-parallel case, both the host-side and device-side
      # computations must agree on the core on which infeed takes place. We
      # choose to perform infeed on logical core 0 of each replica.
      values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
      # The unflatten process uses the structure information recorded above.
      return self._inputs_structure_recorder.unflatten_features_and_labels(
          values)

    return (enqueue_ops, dequeue_fn, all_hooks,
            run_infeed_loop_on_coordinator)

  def _invoke_input_fn_and_record_structure(self):
    """Deploys the input pipeline and records the input structure."""
    enqueue_ops = []
    infeed_queues = []
    all_hooks = []
    num_hosts = self._ctx.num_hosts
    tpu_host_placement_fn = self._ctx.tpu_host_placement_function

    run_infeed_loop_on_coordinator = True

    if self._sharded_per_core:
      # Per-Core input pipeline deployment.
      # Invoke the input pipeline for each core and place it on the
      # corresponding host.
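      # Each host builds enqueue ops for every core it serves; all cores on a
      # host share a single InfeedQueue (see
      # generate_per_core_enqueue_ops_fn_for_host above).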
      for host_id in range(num_hosts):
        host_device = tpu_host_placement_fn(host_id=host_id)
        with ops.device(host_device):
          with ops.name_scope('input_pipeline_task%d' % (host_id)):
            enqueue_ops_fn, captured_infeed_queue = (
                generate_per_core_enqueue_ops_fn_for_host(
                    self._ctx, self._input_fn,
                    self._inputs_structure_recorder, host_device, host_id))

            if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
              run_infeed_loop_on_coordinator = False
              enqueue_ops.append(
                  _wrap_computation_in_while_loop(
                      device=host_device, op_fn=enqueue_ops_fn))
            else:
              enqueue_ops.append(enqueue_ops_fn())
            # Infeed_queue_getter must be called after enqueue_ops_fn is
            # called.
            infeed_queues.append(captured_infeed_queue.get())

    else:
      for host_id in range(num_hosts):
        host_device = tpu_host_placement_fn(host_id=host_id)
        with ops.device(host_device):
          with ops.name_scope('input_pipeline_task%d' % (host_id)):
            if self._ctx.is_input_per_host_with_iterators():
              enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
                  generate_per_host_v2_enqueue_ops_fn_for_host(
                      self._ctx, self._input_fn,
                      self._inputs_structure_recorder, host_device, host_id))
            else:
              enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
                  generate_per_host_enqueue_ops_fn_for_host(
                      self._ctx, self._input_fn,
                      self._inputs_structure_recorder, self._batch_axis,
                      host_device, host_id))
            all_hooks.extend(hooks)

            # NOTE(xiejw): We dispatch here based on the return type of the
            # users `input_fn`.
            #
            # 1. If input_fn returns a Dataset instance, we initialize the
            # iterator outside of tf.while_loop, and call the
            # iterator.get_next inside tf.while_loop. This should be always
            # safe.
            #
            # 2. If input_fn returns (features, labels), it is too late to
            # wrap them inside tf.while_loop, as resource initialization
            # cannot be handled in TF control flow properly. In this case, we
            # will use a python loop to enqueue the data into the TPU system.
            # This may be slow compared to the previous case.
            if is_dataset:
              run_infeed_loop_on_coordinator = False
              wrap_fn = (
                  _wrap_computation_in_while_loop
                  if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
                  _wrap_computation_in_while_loop_with_stopping_signals)
              enqueue_ops.append(
                  wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
            else:
              enqueue_ops.append(enqueue_ops_fn())
            infeed_queues.append(captured_infeed_queue.get())
    # infeed_queue is used to generate dequeue ops. The only thing it uses for
    # dequeue is dtypes and shapes. So, any one can be used. Here, grab the
    # first one.
    self._infeed_queue = infeed_queues[0]
    return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator

  def _validate_input_pipeline(self):
    # Perform some sanity checks to log user-friendly information. We should
    # error out to give users a better error message. But, if
    # _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot
    # break user code, so, log a warning.
    if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
      err_msg = ('Input pipeline contains one or more QueueRunners. '
                 'It could be slow and not scalable. Please consider '
                 'converting your input pipeline to use `tf.data` instead '
                 '(see https://www.tensorflow.org/programmers_guide/datasets '
                 'for instructions).')
      if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
        raise RuntimeError(err_msg)
      else:
        logging.warn(err_msg)


class _ModelFnWrapper(object):
  """A `model_fn` wrapper.

  This makes calling model_fn on CPU and TPU easier and more consistent and
  performs the necessary checks and mutations required by TPU training and
  evaluation. In addition, this wrapper manages converting the `model_fn` to a
  single TPU train and eval step.
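
  A rough sketch of the intended internal flow (illustrative only;
  `dequeue_fn` and `iterations` are assumed to come from the surrounding
  estimator code):

  ```
  wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
  train_step, host_call, scaffold_fn = (
      wrapper.convert_to_single_tpu_train_step(dequeue_fn))
  # The estimator then repeats the step on device, e.g. via
  # training_loop.repeat(iterations, train_step, [_INITIAL_LOSS]).
  ```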
""" def __init__(self, model_fn, config, params, ctx): self._model_fn = model_fn self._config = config self._params = params self._ctx = ctx def call_without_tpu(self, features, labels, is_export_mode): return self._call_model_fn(features, labels, is_export_mode=is_export_mode) def convert_to_single_tpu_train_step(self, dequeue_fn): """Converts user provided model_fn` as a single train step on TPU. The user provided `model_fn` takes input tuple (features, labels) and produces the EstimatorSpec with train_op and loss for train `mode`. This usually represents a single train computation on CPU. For TPU training, a train (computation) step is first wrapped in a tf.while_loop control flow to repeat for many times and then replicated to all TPU shards. Besides the input should be taken from TPU infeed rather than input pipeline (input_fn) directly. To fit TPU loop and replicate pattern, the original train computation should be reformed, which is the returned `train_step`. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. Returns: A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn representing the train step for TPU. """ host_call = _OutfeedHostCall(self._ctx) captured_scaffold_fn = _CapturedObject() def train_step(loss): """Training step function for use inside a while loop.""" del loss # unused; required in function signature. inputs = dequeue_fn() features, labels = inputs.features_and_labels() estimator_spec = self._verify_estimator_spec( self._call_model_fn(features, labels)) loss, train_op = estimator_spec.loss, estimator_spec.train_op if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access captured_scaffold_fn.capture(estimator_spec.scaffold_fn) else: captured_scaffold_fn.capture(None) # We must run train_op to update the variables prior to running the # outfeed. with ops.control_dependencies([train_op]): host_call_outfeed_ops = [] if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access and estimator_spec.host_call is not None): host_call.record({'host_call': estimator_spec.host_call}) host_call_outfeed_ops = host_call.create_enqueue_op() with ops.control_dependencies(host_call_outfeed_ops): return array_ops.identity(loss) return train_step, host_call, captured_scaffold_fn def convert_to_single_tpu_eval_step(self, dequeue_fn): """Converts user provided model_fn` as a single eval step on TPU. Similar to training, the user provided `model_fn` takes input tuple (features, labels) and produces the TPUEstimatorSpec with eval_metrics for eval `mode`. This usually represents a single evaluation computation on CPU. For TPU evaluation, a eval (computation) step is first wrapped in a tf.while_loop control flow to repeat for many times and then replicated to all TPU shards. Besides the input and output are slightly different. Input, features and labels, should be taken from TPU infeed rather than input pipeline (input_fn) directly. Output is managed in two stages. First, the model outputs as the result of evaluation computation, usually model logits, should be transferred from TPU system to CPU. Then, all model outputs are concatenated first on CPU and sent to the metric_fn for metrics computation. To fit TPU evaluation pattern, the original eval computation should be reformed, which is the returned `eval_step`. Args: dequeue_fn: The function to retrieve inputs, features and labels, from TPU infeed dequeue channel. 
    Returns:
      A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
      represents the eval step for TPU.
    """
    host_calls = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def eval_step(total_loss):
      """Evaluation step function for use inside a while loop."""
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()

      tpu_estimator_spec = self._call_model_fn(features, labels)
      if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
        raise RuntimeError(
            'estimator_spec used by TPU evaluation must have type '
            '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

      loss = tpu_estimator_spec.loss
      captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
      to_record = {}
      to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
      if tpu_estimator_spec.host_call is not None:
        # We assume that evaluate won't update global step, so we don't wrap
        # this host_call.
        to_record['host_call'] = tpu_estimator_spec.host_call
      host_calls.record(to_record)

      with ops.control_dependencies(host_calls.create_enqueue_op()):
        return math_ops.add(total_loss, loss)

    return eval_step, host_calls, captured_scaffold_fn

  def convert_to_single_tpu_predict_step(self, dequeue_fn):
    """Converts the user-provided `model_fn` into a single predict step on
    TPU.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        the TPU infeed dequeue channel.

    Returns:
      A tuple of predict_fn, host_calls, and captured scaffold_fn. The
      predict_fn represents the predict step for TPU.
    """
    host_calls = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def predict_step(unused_scalar_stopping_signal):
      """Prediction step function for use inside a while loop."""
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()
      stopping_signals = inputs.signals()

      assert stopping_signals is not None, (
          'Internal Error: `signals` is missing.')

      tpu_estimator_spec = self._call_model_fn(
          features, labels, is_export_mode=False)
      if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
        raise RuntimeError(
            'estimator_spec used by TPU prediction must have type '
            '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

      self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)

      captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
      to_record = {}
      identity_fn = lambda **kwargs: kwargs
      to_record['predictions'] = [identity_fn,
                                  tpu_estimator_spec.predictions]
      to_record['signals'] = [identity_fn, stopping_signals]
      if tpu_estimator_spec.host_call is not None:
        to_record['host_call'] = tpu_estimator_spec.host_call
      host_calls.record(to_record)

      with ops.control_dependencies(host_calls.create_enqueue_op()):
        return _StopSignals.as_scalar_stopping_signal(stopping_signals)

    return predict_step, host_calls, captured_scaffold_fn

  def _verify_tpu_spec_predictions(self, predictions):
    """Validates TPUEstimatorSpec.predictions dict."""
    # TODO(xiejw): Add validation for the prediction dictionary.
    # TODO(xiejw): Add support for a single tensor as predictions.
    if not isinstance(predictions, dict):
      raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')

    for (key, tensor) in predictions.items():
      if tensor.shape[0].value is None:
        raise ValueError(
            'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
            'dynamic shape (should be static). Tensor: {}'.format(
                key, tensor))
    return predictions
  def _call_model_fn(self, features, labels, is_export_mode=False):
    """Calls the model_fn with required parameters."""
    model_fn_args = function_utils.fn_args(self._model_fn)
    kwargs = {}

    # Make deep copies of `config` and `params` in case the user mutates them.
    config = copy.deepcopy(self._config)
    params = copy.deepcopy(self._params)

    if 'labels' in model_fn_args:
      kwargs['labels'] = labels
    elif labels is not None:
      raise ValueError(
          'model_fn does not take labels, but input_fn returns labels.')
    if 'mode' in model_fn_args:
      kwargs['mode'] = self._ctx.mode
    if 'config' in model_fn_args:
      kwargs['config'] = config
    if 'params' in model_fn_args:
      kwargs['params'] = params

    if 'params' not in model_fn_args:
      raise ValueError('model_fn ({}) does not include params argument, '
                       'required by TPUEstimator to pass batch size as '
                       'params[\'batch_size\']'.format(self._model_fn))

    if is_export_mode:
      batch_size_for_model_fn = None
    else:
      batch_size_for_model_fn = self._ctx.batch_size_for_model_fn

    if batch_size_for_model_fn is not None:
      _add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)

    estimator_spec = self._model_fn(features=features, **kwargs)
    if (self._ctx.is_running_on_cpu(is_export_mode) and
        isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)):  # pylint: disable=protected-access
      # The estimator_spec will be passed to `Estimator` directly, which
      # expects type `EstimatorSpec`.
      return estimator_spec.as_estimator_spec()
    else:
      return estimator_spec

  def _verify_estimator_spec(self, estimator_spec):
    """Validates the estimator_spec."""
    if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec):  # pylint: disable=protected-access
      return estimator_spec

    err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
    if estimator_spec.training_chief_hooks:
      raise ValueError(err_msg.format('training_chief_hooks'))
    if estimator_spec.training_hooks:
      raise ValueError(err_msg.format('training_hooks'))
    if estimator_spec.evaluation_hooks:
      raise ValueError(err_msg.format('evaluation_hooks'))

    if estimator_spec.scaffold:
      logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
                      'Please use TPUEstimatorSpec.')
    return estimator_spec


class _OutfeedHostCall(object):
  """Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""

  def __init__(self, ctx):
    self._ctx = ctx
    self._names = []
    # All of these are dictionaries of lists keyed on the name.
    self._host_fns = {}
    self._tensor_keys = collections.defaultdict(list)
    self._tensors = collections.defaultdict(list)
    self._tensor_dtypes = collections.defaultdict(list)
    self._tensor_shapes = collections.defaultdict(list)

  @staticmethod
  def validate(host_calls):
    """Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""

    for name, host_call in host_calls.items():
      if not isinstance(host_call, (tuple, list)):
        raise ValueError('{} should be tuple or list'.format(name))
      if len(host_call) != 2:
        raise ValueError('{} should have two elements.'.format(name))
      if not callable(host_call[0]):
        raise TypeError('{}[0] should be callable.'.format(name))
      if not isinstance(host_call[1], (tuple, list, dict)):
        raise ValueError('{}[1] should be tuple, list, or dict.'.format(name))

      if isinstance(host_call[1], (tuple, list)):
        fullargspec = tf_inspect.getfullargspec(host_call[0])
        fn_args = function_utils.fn_args(host_call[0])
        # wrapped_hostcall_with_global_step uses varargs, so we allow that.
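        # For example, host_call = (fn, [global_step_t, loss_t]) requires fn
        # to accept exactly two positional arguments (the names here are
        # illustrative), unless fn is declared with *args.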
        if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
          raise RuntimeError(
              'In TPUEstimatorSpec.{}, length of tensors {} does not match '
              'method args of the function, which takes {}.'.format(
                  name, len(host_call[1]), len(fn_args)))

  @staticmethod
  def create_cpu_hostcall(host_calls):
    """Runs the host_call on CPU instead of TPU when use_tpu=False."""

    _OutfeedHostCall.validate(host_calls)
    ret = {}
    for name, host_call in host_calls.items():
      host_fn, tensors = host_call
      if isinstance(tensors, (tuple, list)):
        ret[name] = host_fn(*tensors)
      else:
        # Must be dict.
        try:
          ret[name] = host_fn(**tensors)
        except TypeError as e:
          logging.warning(
              'Exception while calling %s: %s. It is likely the tensors '
              '(%s[1]) do not match the '
              'function\'s arguments', name, e, name)
          raise e
    return ret

  def record(self, host_calls):
    """Records the host_call structure."""
    for name, host_call in host_calls.items():
      host_fn, tensor_list_or_dict = host_call
      self._names.append(name)
      self._host_fns[name] = host_fn

      if isinstance(tensor_list_or_dict, dict):
        for (key, tensor) in six.iteritems(tensor_list_or_dict):
          self._tensor_keys[name].append(key)
          self._tensors[name].append(tensor)
          self._tensor_dtypes[name].append(tensor.dtype)
          self._tensor_shapes[name].append(tensor.shape)
      else:
        # List or tuple.
        self._tensor_keys[name] = None
        for tensor in tensor_list_or_dict:
          self._tensors[name].append(tensor)
          self._tensor_dtypes[name].append(tensor.dtype)
          self._tensor_shapes[name].append(tensor.shape)

  def create_enqueue_op(self):
    """Create the op to enqueue the recorded host_calls.

    Returns:
      A list of enqueue ops, which is empty if there are no host calls.
    """
    if not self._names:
      return []

    tensors = []
    # TODO(jhseu): Consider deduping tensors.
    for name in self._names:
      tensors.extend(self._tensors[name])

    with ops.device(tpu.core(0)):
      return [tpu_ops.outfeed_enqueue_tuple(tensors)]

  def create_tpu_hostcall(self):
    """Sends the tensors through outfeed and runs the host_fn on CPU.

    The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated tensors are passed to the host_fn and
    executed on the first host.

    Returns:
      A dictionary mapping name to the return value of the host_call by that
      name.

    Raises:
      RuntimeError: If outfeed tensor is scalar.
    """
    if not self._names:
      return []

    ret = {}
    # For each i, dequeue_ops[i] is a list containing the tensors from all
    # shards. This list is concatenated later.
    dequeue_ops = []
    tensor_dtypes = []
    tensor_shapes = []
    for name in self._names:
      for _ in self._tensors[name]:
        dequeue_ops.append([])
      for dtype in self._tensor_dtypes[name]:
        tensor_dtypes.append(dtype)
      for shape in self._tensor_shapes[name]:
        tensor_shapes.append(shape)

    # Outfeed ops execute on each replica's first logical core. Note: we must
    # constrain it such that we have at most one outfeed dequeue and enqueue
    # per replica.
    tpu_device_placement_fn = self._ctx.tpu_device_placement_function
    for i in xrange(self._ctx.num_replicas):
      with ops.device(tpu_device_placement_fn(i)):
        outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
            dtypes=tensor_dtypes, shapes=tensor_shapes)
        for j, item in enumerate(outfeed_tensors):
          dequeue_ops[j].append(item)

    # Deconstruct dequeue ops.
    dequeue_ops_by_name = {}
    pos = 0
    for name in self._names:
      dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
                                              len(self._tensors[name])]
      pos += len(self._tensors[name])

    # It is assumed evaluation always happens on a single-host TPU system.
    # So, place all ops on the TPU host if possible.
# # TODO(jhseu): Evaluate whether this is right for summaries. with ops.device(self._ctx.tpu_host_placement_function(core_id=0)): for name in self._names: dequeue_ops = dequeue_ops_by_name[name] for i, item in enumerate(dequeue_ops): if dequeue_ops[i][0].shape.ndims == 0: raise RuntimeError( 'All tensors outfed from TPU should preserve batch size ' 'dimension, but got scalar {}'.format(dequeue_ops[i][0])) # TODO(xiejw): Allow users to specify the axis for batch size # dimension. dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0) if self._tensor_keys[name] is not None: # The user-provided eval_metrics[1] is a dict. dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops)) try: ret[name] = self._host_fns[name](**dequeue_ops) except TypeError as e: logging.warning( 'Exception while calling %s: %s. It is likely the tensors ' '(%s[1]) do not match the ' 'function\'s arguments', name, e, name) raise e else: ret[name] = self._host_fns[name](*dequeue_ops) return ret class _OutfeedHostCallHook(session_run_hook.SessionRunHook): """Hook to run host calls when use_tpu=False.""" def __init__(self, tensors): self._tensors = tensors def begin(self): # We duplicate this code from the TPUInfeedOutfeedSessionHook rather than # create a separate hook to guarantee execution order, because summaries # need to be initialized before the outfeed thread starts. # TODO(jhseu): Make a wrapper hook instead? self._init_ops = contrib_summary.summary_writer_initializer_op() # Get all the writer resources from the initializer, so we know what to # flush. self._finalize_ops = [] for op in self._init_ops: self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0])) def after_create_session(self, session, coord): session.run(self._init_ops) def before_run(self, run_context): return basic_session_run_hooks.SessionRunArgs(self._tensors) def end(self, session): session.run(self._finalize_ops) class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook): """Calculate and report global_step/sec and examples/sec during runtime.""" def __init__(self, batch_size, every_n_steps=100, every_n_secs=None, output_dir=None, summary_writer=None): self._batch_size = batch_size super(ExamplesPerSecondHook, self).__init__( every_n_steps=every_n_steps, every_n_secs=every_n_secs, output_dir=output_dir, summary_writer=summary_writer) def _log_and_record(self, elapsed_steps, elapsed_time, global_step): global_step_per_sec = elapsed_steps / elapsed_time examples_per_sec = self._batch_size * global_step_per_sec if self._summary_writer is not None: global_step_summary = Summary(value=[ Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec) ]) example_summary = Summary(value=[ Summary.Value(tag='examples/sec', simple_value=examples_per_sec) ]) self._summary_writer.add_summary(global_step_summary, global_step) self._summary_writer.add_summary(example_summary, global_step) logging.info('global_step/sec: %g', global_step_per_sec) logging.info('examples/sec: %g', examples_per_sec) class InstallSignalHandlerHook(session_run_hook.SessionRunHook): """Change SIGINT (CTRL^C) handler to force quit the process. The default behavior often results in hanging processes. The original handler is restored after training/evaluation. 
""" def __init__(self): self._signal_fn = signal.getsignal(signal.SIGINT) def before_run(self, run_context): signal.signal(signal.SIGINT, signal.SIG_DFL) def end(self, session): signal.signal(signal.SIGINT, self._signal_fn) class TPUEstimator(estimator_lib.Estimator): """Estimator with TPU support. TPUEstimator handles many of the details of running on TPU devices, such as replicating inputs and models for each core, and returning to host periodically to run hooks. TPUEstimator transforms a global batch size in params to a per-shard batch size when calling the `input_fn` and `model_fn`. Users should specify global batch size in constructor, and then get the batch size for each shard in `input_fn` and `model_fn` by `params['batch_size']`. - For training, `model_fn` gets per-core batch size; `input_fn` may get per-core or per-host batch size depending on `per_host_input_for_training` in `TPUConfig` (See docstring for TPUConfig for details). - For evaluation and prediction, `model_fn` gets per-core batch size and `input_fn` get per-host batch size. Evaluation ========== `model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics` for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return `EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case the following discussion on TPU evaluation does not apply. `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See `TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns a dict from metric string name to the result of calling a metric function, namely a `(metric_tensor, update_op)` tuple. One can set `use_tpu` to `False` for testing. All training, evaluation, and predict will be executed on CPU. `input_fn` and `model_fn` will receive `train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`. Current limitations: -------------------- 1. TPU evaluation only works on a single host (one TPU worker). 2. `input_fn` for evaluation should **NOT** raise an end-of-input exception (`OutOfRangeError` or `StopIteration`). And all evaluation steps and all batches should have the same size. Example (MNIST): ---------------- ``` # The metric Fn which runs on CPU. def metric_fn(labels, logits): predictions = tf.argmax(logits, 1) return { 'accuracy': tf.metrics.precision( labels=labels, predictions=predictions), } # Your model Fn which runs on TPU (eval_metrics is list in this example) def model_fn(features, labels, mode, config, params): ... logits = ... if mode = tf.estimator.ModeKeys.EVAL: return tpu_estimator.TPUEstimatorSpec( mode=mode, loss=loss, eval_metrics=(metric_fn, [labels, logits])) # or specify the eval_metrics tensors as dict. def model_fn(features, labels, mode, config, params): ... final_layer_output = ... if mode = tf.estimator.ModeKeys.EVAL: return tpu_estimator.TPUEstimatorSpec( mode=mode, loss=loss, eval_metrics=(metric_fn, { 'labels': labels, 'logits': final_layer_output, })) ``` Prediction ========== Prediction on TPU is an experimental feature to support large batch inference. It is not designed for latency-critical system. In addition, due to some usability issues, for prediction with small dataset, CPU `.predict`, i.e., creating a new `TPUEstimator` instance with `use_tpu=False`, might be more convenient. 
  Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
  *should* raise an end-of-input exception (`OutOfRangeError` or
  `StopIteration`), which serves as the stopping signal to `TPUEstimator`. To
  be precise, the ops created by `input_fn` produce one batch of the data.
  The `predict()` API processes one batch at a time. When reaching the end of
  the data source, an end-of-input exception should be raised by one of these
  operations. The user usually does not need to do this manually. As long as
  the dataset is not repeated forever, the `tf.data` API will raise an
  end-of-input exception automatically after the last batch has been
  produced.

  Note: Estimator.predict returns a Python generator. Please consume all the
  data from the generator so that TPUEstimator can shut down the TPU system
  properly.

  Current limitations:
  --------------------

  1. TPU prediction only works on a single host (one TPU worker).

  2. `input_fn` must return a `Dataset` instance rather than `features`. In
     fact, .train() and .evaluate() also support Dataset as return value.

  Example (MNIST):
  ----------------

  ```
  height = 32
  width = 32
  total_examples = 100

  def predict_input_fn(params):
    batch_size = params['batch_size']

    images = tf.random_uniform(
        [total_examples, height, width, 3], minval=-1, maxval=1)

    dataset = tf.data.Dataset.from_tensor_slices(images)
    dataset = dataset.map(lambda images: {'image': images})

    dataset = dataset.batch(batch_size)
    return dataset

  def model_fn(features, labels, params, mode):
    # Generate predictions, called 'output', from features['image']

    if mode == tf.estimator.ModeKeys.PREDICT:
      return tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions={
              'predictions': output,
              'is_padding': features['is_padding']
          })

  tpu_est = TPUEstimator(
      model_fn=model_fn,
      ...,
      predict_batch_size=16)

  # Fully consume the generator so that TPUEstimator can shut down the TPU
  # system.
  for item in tpu_est.predict(input_fn=predict_input_fn):
    # Filter out item if the `is_padding` is 1.
    # Process the 'predictions'
  ```

  Exporting
  =========

  `export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`,
  and another with `tag_constants.SERVING` and `tag_constants.TPU`. At
  serving time, these tags are used to select the metagraph to load.

  Before running the graph on TPU, the TPU system needs to be initialized. If
  TensorFlow Serving model-server is used, this is done automatically. If
  not, please call `session.run(tpu.initialize_system())`.

  `tpu.outside_compilation` can be used to wrap TPU incompatible ops in
  `model_fn`.

  Example:
  ----------------

  ```
  def model_fn(features, labels, mode, config, params):
    ...
    logits = ...
    export_outputs = {
        'logits': export_output_lib.PredictOutput({'logits': logits})
    }

    def host_call(logits):
      class_ids = math_ops.argmax(logits)
      classes = string_ops.as_string(class_ids)
      export_outputs['classes'] = (
          export_output_lib.ClassificationOutput(classes=classes))

    tpu.outside_compilation(host_call, logits)
    ...
  ```

  Current limitations:
  --------------------

  1. Outside compilation does not work yet (b/79991729).
  """

  def __init__(self,
               model_fn=None,
               model_dir=None,
               config=None,
               params=None,
               use_tpu=True,
               train_batch_size=None,
               eval_batch_size=None,
               predict_batch_size=None,
               batch_axis=None,
               eval_on_tpu=True,
               warm_start_from=None):
    """Constructs a `TPUEstimator` instance.

    Args:
      model_fn: Model function as required by `Estimator`. For training, the
        returned `EstimatorSpec` cannot have hooks as it is not supported in
        `TPUEstimator`.
      model_dir: Directory to save model parameters, graph, etc.
        This can also be used to load checkpoints from the directory into an
        estimator to continue training a previously saved model. If `None`,
        the model_dir in `config` will be used if set. If both are set, they
        must be the same. If both are `None`, a temporary directory will be
        used.
      config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
      params: An optional `dict` of hyper parameters that will be passed into
        `input_fn` and `model_fn`. Keys are names of parameters, values are
        basic python types. There are reserved keys for `TPUEstimator`,
        including 'batch_size'.
      use_tpu: A bool indicating whether TPU support is enabled. Currently,
        - TPU training and evaluation respect this bit, but eval_on_tpu can
          override execution of eval. See below.
        - Predict still happens on CPU.
      train_batch_size: An int representing the global training batch size.
        TPUEstimator transforms this global batch size to a per-shard batch
        size, as params['batch_size'], when calling `input_fn` and `model_fn`.
        Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
        number of replicas.
      eval_batch_size: An int representing evaluation batch size. Must be
        divisible by total number of replicas.
      predict_batch_size: An int representing the prediction batch size. Must
        be divisible by total number of replicas.
      batch_axis: A python tuple of int values describing how each tensor
        produced by the Estimator `input_fn` should be split across the TPU
        compute shards. For example, if your input_fn produced (images,
        labels) where the images tensor is in `HWCN` format, your shard
        dimensions would be [3, 0], where 3 corresponds to the `N` dimension
        of your images Tensor, and 0 corresponds to the dimension along which
        to split the labels to match up with the corresponding images. If
        None is supplied, and per_host_input_for_training is True, batches
        will be sharded based on the major dimension. If
        tpu_config.per_host_input_for_training is False or `PER_HOST_V2`,
        batch_axis is ignored.
      eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
        model_fn must return `EstimatorSpec` when called with `mode` as
        `EVAL`.
      warm_start_from: Optional string filepath to a checkpoint or SavedModel
        to warm-start from, or a `tf.estimator.WarmStartSettings` object to
        fully configure warm-starting. If the string filepath is provided
        instead of a `WarmStartSettings`, then all variables are
        warm-started, and it is assumed that vocabularies and Tensor names
        are unchanged.

    Raises:
      ValueError: `params` has reserved keys already.
    """
    if config is None or not isinstance(config, tpu_config.RunConfig):
      raise ValueError(
          '`config` must be provided with type `tpu_config.RunConfig`')

    if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
      raise ValueError('{} are reserved keys but existed in params {}.'.format(
          _RESERVED_PARAMS_KEYS, params))

    if use_tpu:
      # Perform some very basic validations. More validations will be found in
      # _InternalTPUContext.
      if train_batch_size is None:
        raise ValueError('`train_batch_size` cannot be `None`')
      util_lib.check_positive_integer(train_batch_size, 'train_batch_size')

      if (config.tpu_config.per_host_input_for_training is
          tpu_config.InputPipelineConfig.PER_SHARD_V1 and
          config.tpu_config.computation_shape):
        raise ValueError(
            'Model parallelism only supports per host input for training. '
            'Please adjust TPUConfig.per_host_input_for_training.')

      if eval_batch_size is not None:
        util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')

      if predict_batch_size is not None:
        util_lib.check_positive_integer(predict_batch_size,
                                        'predict_batch_size')

    # Verifies the model_fn signature according to Estimator framework.
    estimator_lib._verify_model_fn_args(model_fn, params)  # pylint: disable=protected-access
    # We cannot store config and params in this constructor as parent
    # constructor might change them, such as assigning a temp dir for
    # config.model_dir.
    model_function = self._augment_model_fn(model_fn, batch_axis)

    # Overwrite log_step_count_steps to disable TensorLoggingHook and
    # StepCounterHook from being created in Estimator. TPUEstimator already
    # added equivalent hooks in _augment_model_fn above.
    self._log_every_n_steps = config.log_step_count_steps
    config = config.replace(log_step_count_steps=None)

    # Passing non-None params as wrapped model_fn has it.
    params = params or {}
    super(TPUEstimator, self).__init__(
        model_fn=model_function,
        model_dir=model_dir,
        config=config,
        params=params,
        warm_start_from=warm_start_from)
    self._iterations_per_training_loop = (
        self._config.tpu_config.iterations_per_loop)

    # All properties passed to _InternalTPUContext are immutable.
    # pylint: disable=protected-access
    self._ctx = tpu_context._get_tpu_context(
        self._config, train_batch_size,
        eval_batch_size, predict_batch_size,
        use_tpu,
        eval_on_tpu)

    self._is_input_fn_invoked = None

  def _add_meta_graph_for_mode(self,
                               builder,
                               input_receiver_fn_map,
                               checkpoint_path,
                               strip_default_attrs,
                               save_variables=True,
                               mode=model_fn_lib.ModeKeys.PREDICT,
                               export_tags=None):
    if mode != model_fn_lib.ModeKeys.PREDICT:
      raise NotImplementedError(
          'TPUEstimator only handles mode PREDICT for export_savedmodel(); '
          'got {}.'.format(mode))

    super(TPUEstimator, self)._add_meta_graph_for_mode(builder,
                                                       input_receiver_fn_map,
                                                       checkpoint_path,
                                                       strip_default_attrs,
                                                       save_variables,
                                                       mode=mode)

    input_receiver_fn_map = {_REWRITE_FOR_INFERENCE_MODE:
                             input_receiver_fn_map[mode]}
    export_tags = [tag_constants.SERVING, tag_constants.TPU]
    mode = _REWRITE_FOR_INFERENCE_MODE
    try:
      (super(TPUEstimator, self).
       _add_meta_graph_for_mode(builder,
                                input_receiver_fn_map,
                                checkpoint_path,
                                strip_default_attrs,
                                save_variables=False,
                                mode=mode,
                                export_tags=export_tags))
    except Exception as error:  # pylint: disable=broad-except
      logging.warning('Saving meta graph for TPU failed: {}.'
                      .format(str(error)))

  def _call_model_fn(self, features, labels, mode, config):
    if mode == _REWRITE_FOR_INFERENCE_MODE:
      return self._call_model_fn_for_inference(features, labels, mode, config)
    else:
      return super(TPUEstimator, self)._call_model_fn(
          features, labels, mode, config)

  def _call_model_fn_for_inference(self, features, labels, mode, config):
    """Wraps `_call_model_fn` for `export_savedmodel`."""
    if mode != _REWRITE_FOR_INFERENCE_MODE:
      raise ValueError('mode must be {}; '
                       'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode))

    capture = _CapturedObject()

    def computation():
      """Compute tpu tensors used in export_outputs.

      Passed to rewrite_for_inference so that model_fn will be called under
      the rewriting contexts. Only tpu tensors are returned, but export_outputs
      and scaffold are captured.

      Returns:
        A list of Tensors used in export_outputs and not marked for
        outside_compilation.
      """
      # We should only call model fn once and it should be inside `computation`
      # so that building the graph will happen under `rewrite_for_inference`.
mode = model_fn_lib.ModeKeys.PREDICT estimator_spec = self._call_model_fn(features, labels, mode, config) # We pick the TPU tensors out from `export_output` and later return them # from `computation` for rewriting. tensors_dict = collections.OrderedDict( (k, _export_output_to_tensors(v)) for k, v in six.iteritems(estimator_spec.export_outputs) ) tensors = nest.flatten(tensors_dict) tpu_tensors = [t for t in tensors if _is_tpu_tensor(t)] # We cannot return anything other than `tpu_tensors` here so we capture # the rest for later use. capture.capture((estimator_spec, tensors_dict, tensors)) return tpu_tensors tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation) estimator_spec, tensors_dict, tensors = capture.get() # Reconstruct `tensors`, but with `tpu_tensors` replaced with # `tpu_tensors_on_cpu`. new_tensors = [ tpu_tensors_on_cpu.pop(0) if _is_tpu_tensor(t) else t for t in tensors ] # Reconstruct `tensors_dict`. new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors) # Reconstruct `export_outputs`. export_outputs = estimator_spec.export_outputs new_export_outputs = collections.OrderedDict( (k, _clone_export_output_with_tensors(export_outputs[k], v)) for k, v in six.iteritems(new_tensors_dict) ) return estimator_spec._replace(export_outputs=new_export_outputs) def _create_global_step(self, graph): """Creates a global step suitable for TPUs. Args: graph: The graph in which to create the global step. Returns: A global step `Tensor`. Raises: ValueError: if the global step tensor is already defined. """ return _create_global_step(graph) def _convert_train_steps_to_hooks(self, steps, max_steps): with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx: if ctx.is_running_on_cpu(): return super(TPUEstimator, self)._convert_train_steps_to_hooks( steps, max_steps) # On TPU. if steps is None and max_steps is None: raise ValueError( 'For TPU training, one of `steps` or `max_steps` must be set. ' 'Cannot be both `None`.') # Estimator.train has explicit positiveness check. if steps is not None: util_lib.check_positive_integer(steps, 'Train steps') if max_steps is not None: util_lib.check_positive_integer(max_steps, 'Train max_steps') return [ _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps) ] def _convert_eval_steps_to_hooks(self, steps): with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx: if ctx.is_running_on_cpu(): return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps) if steps is None: raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.') util_lib.check_positive_integer(steps, 'Eval steps') return [ evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access num_evals=steps), _SetEvalIterationsHook(steps) ] def _call_input_fn(self, input_fn, mode): """Calls the input function. Args: input_fn: The input function. mode: ModeKeys Returns: Either features or (features, labels) where features and labels are: features - `Tensor` or dictionary of string feature name to `Tensor`. labels - `Tensor` or dictionary of `Tensor` with labels. Raises: ValueError: if input_fn takes invalid arguments or does not have `params`. """ input_fn_args = function_utils.fn_args(input_fn) config = self.config # a deep copy. kwargs = {} if 'params' in input_fn_args: kwargs['params'] = self.params # a deep copy. 
    else:
      raise ValueError('input_fn ({}) does not include params argument, '
                       'required by TPUEstimator to pass batch size as '
                       'params["batch_size"]'.format(input_fn))

    if 'config' in input_fn_args:
      kwargs['config'] = config

    if 'mode' in input_fn_args:
      kwargs['mode'] = mode

    # Records the fact input_fn has been invoked.
    self._is_input_fn_invoked = True

    with self._ctx.with_mode(mode) as ctx:
      # Setting the batch size in params first. This helps the user to have
      # the same input_fn for use_tpu=True/False.
      batch_size_for_input_fn = ctx.batch_size_for_input_fn
      if batch_size_for_input_fn is not None:
        _add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
                            batch_size_for_input_fn)

      # For export_savedmodel, input_fn is never passed to Estimator. So,
      # `is_export_mode` must be False.
      if ctx.is_running_on_cpu(is_export_mode=False):
        with ops.device('/device:CPU:0'):
          return input_fn(**kwargs)

      # For TPU computation, input_fn should be invoked in a tf.while_loop for
      # performance. While constructing the tf.while_loop, the structure of
      # inputs returned by the `input_fn` needs to be recorded. The structure
      # includes whether features or labels is dict or single Tensor, dict
      # keys, tensor shapes, and dtypes. The recorded structure is used to
      # create the infeed dequeue ops, which must be wrapped and passed as a
      # Fn, called inside the TPU computation, as the TPU computation is
      # wrapped inside a tf.while_loop also. So, we either pass input_fn to
      # model_fn or pass dequeue_fn to model_fn. Here, `input_fn` is passed
      # directly as `features` in `model_fn` signature.
      def _input_fn(ctx):
        _add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
        return input_fn(**kwargs)

      return _input_fn

  def _validate_features_in_predict_input(self, result):
    """Skip the validation.

    For TPUEstimator, we do not need to check the result type. `_InputPipeline`
    has a stronger check. The parent class's check generates confusing warning
    messages.

    Args:
      result: `features` returned by input_fn.
    """
    pass

  def _augment_model_fn(self, model_fn, batch_axis):
    """Returns a new model_fn, which wraps the TPU support."""

    def _model_fn(features, labels, mode, config, params):
      """An Estimator `model_fn` for TPUEstimator."""
      with self._ctx.with_mode(mode) as ctx:
        model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)

        if mode != model_fn_lib.ModeKeys.PREDICT:
          is_export_mode = False
        else:
          # For export_savedmodel, input_fn is never passed to Estimator. So,
          # by checking the self._is_input_fn_invoked bit, we can know that,
          # given mode == PREDICT, it is the .predict API, not the
          # export_savedmodel API.
          if self._is_input_fn_invoked:
            is_export_mode = False
          else:
            is_export_mode = True

        # Clear the bit.
        self._is_input_fn_invoked = None

        if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
          logging.info('Running %s on CPU', mode)
          return model_fn_wrapper.call_without_tpu(
              features, labels, is_export_mode=is_export_mode)

        assert labels is None, '`labels` passed to `model_fn` must be `None`.'
        # TPUEstimator._call_input_fn passes `input_fn` as features to here.
        assert callable(features), '`input_fn` is not callable.'
        input_fn = features

        input_holders = _InputPipeline(input_fn, batch_axis, ctx)
        enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
            input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())

        graph = ops.get_default_graph()
        for enqueue_op in enqueue_ops:
          if isinstance(enqueue_op, list):
            graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
          else:
            graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)

        if mode == model_fn_lib.ModeKeys.TRAIN:
          loss, host_call, scaffold = (
              _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
          host_ops = host_call.create_tpu_hostcall()
          if host_ops is None:
            host_ops = []

          shutdown_hooks = []
          shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
                                         'shutdown_worker')
          if shutdown_mode:
            if shutdown_mode == 'shutdown_worker':
              finalizer_hooks = [
                  session_support.ShutdownLameWorkers(timeout_ms=60*1000),
              ]
            elif shutdown_mode == 'shutdown_computation':
              finalizer_hooks = [
                  session_support.RestartComputation(timeout_ms=60*1000),
              ]
            else:
              raise ValueError('Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' %
                               shutdown_mode)

            shutdown_hooks.append(session_support.GracefulShutdownHook(
                checkpoint_prefix=self.model_dir + '/model.ckpt',
                on_shutdown_hooks=finalizer_hooks
            ))

          with ops.control_dependencies([loss]):
            global_step = array_ops.identity(training.get_global_step())
          hooks = input_hooks + shutdown_hooks
          logging_hook_frequency = (   # Divide and round up
              (self._log_every_n_steps +
               self._config.tpu_config.iterations_per_loop - 1) //
              self._config.tpu_config.iterations_per_loop)
          hooks.extend([
              TPUInfeedOutfeedSessionHook(
                  ctx,
                  enqueue_ops,
                  host_ops,
                  run_infeed_loop_on_coordinator=(
                      run_infeed_loop_on_coordinator)),
              InstallSignalHandlerHook(),
              training.LoggingTensorHook(
                  {
                      'loss': array_ops.identity(loss),
                      'step': global_step,
                  },
                  every_n_iter=logging_hook_frequency)
          ])
          examples_hook = ExamplesPerSecondHook(
              ctx.global_batch_size,
              output_dir=self.model_dir,
              every_n_steps=self._log_every_n_steps)
          examples_hook._set_steps_per_run(  # pylint: disable=protected-access
              self._config.tpu_config.iterations_per_loop)
          hooks.append(examples_hook)

          chief_hooks = []
          if (self._config.save_checkpoints_secs or
              self._config.save_checkpoints_steps):
            checkpoint_hook = training.CheckpointSaverHook(
                self.model_dir,
                save_secs=self._config.save_checkpoints_secs,
                save_steps=self._config.save_checkpoints_steps,
                scaffold=scaffold)
            checkpoint_hook._set_steps_per_run(  # pylint: disable=protected-access
                self._config.tpu_config.iterations_per_loop)
            chief_hooks.append(checkpoint_hook)

          summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
          with ops.control_dependencies([loss]):
            update_ops = _sync_variables_ops()

          # Validate the TPU training graph to catch basic errors.
          _validate_tpu_training_graph()

          train_op = control_flow_ops.group(*update_ops)
          graph.add_to_collection(_TPU_TRAIN_OP, train_op)

          return model_fn_lib.EstimatorSpec(
              mode,
              loss=loss,
              training_chief_hooks=chief_hooks,
              training_hooks=hooks,
              train_op=train_op,
              scaffold=scaffold)

        if mode == model_fn_lib.ModeKeys.EVAL:
          total_loss, host_calls, scaffold = _eval_on_tpu_system(
              ctx, model_fn_wrapper, dequeue_fn)
          iterations_per_loop_var = _create_or_get_iterations_per_loop()
          mean_loss = math_ops.div(total_loss,
                                   math_ops.cast(
                                       iterations_per_loop_var,
                                       dtype=total_loss.dtype))

          # Creates a dummy metric update_op for all metrics. Estimator
          # expects all metrics in eval_metric_ops have update_op and calls
          # them one by one. The real metric update_ops are invoked in a
          # separate thread. So, here we give Estimator a dummy op for all
          # metrics.
          with ops.control_dependencies([mean_loss]):
            # After TPU evaluation computation is done (the mean_loss tensor),
            # reads all variables back from TPU and updates the eval step
            # counter properly.
            internal_ops_to_run = _sync_variables_ops()
            internal_ops_to_run.append(
                _increase_eval_step_op(iterations_per_loop_var))
            with ops.control_dependencies(internal_ops_to_run):
              dummy_update_op = control_flow_ops.no_op()

          host_call_ret = host_calls.create_tpu_hostcall()
          eval_metric_ops = {}
          eval_update_ops = []
          for k, v in host_call_ret['eval_metrics'].items():
            eval_metric_ops[k] = (v[0], dummy_update_op)
            eval_update_ops.append(v[1])

          if 'host_call' not in host_call_ret:
            host_ops = []
          else:
            host_ops = host_call_ret['host_call']
          hooks = [
              TPUInfeedOutfeedSessionHook(
                  ctx,
                  enqueue_ops,
                  eval_update_ops + host_ops,
                  run_infeed_loop_on_coordinator=(
                      run_infeed_loop_on_coordinator)),
          ] + input_hooks

          return model_fn_lib.EstimatorSpec(
              mode,
              loss=mean_loss,
              evaluation_hooks=hooks,
              eval_metric_ops=eval_metric_ops,
              scaffold=scaffold)

        # Predict
        assert mode == model_fn_lib.ModeKeys.PREDICT

        dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
            ctx, model_fn_wrapper, dequeue_fn)
        with ops.control_dependencies([dummy_predict_op]):
          internal_ops_to_run = _sync_variables_ops()
          with ops.control_dependencies(internal_ops_to_run):
            dummy_predict_op = control_flow_ops.no_op()

        # In train and evaluation, the main TPU program is passed to monitored
        # training session to run. Infeed enqueue and outfeed dequeue are
        # executed in side threads. This is not the configuration for
        # prediction mode.
        #
        # For prediction, the Estimator executes the EstimatorSpec.predictions
        # directly and yields the element (via a generator) to the call site.
        # So, the outfeed based prediction must be passed to MonitoredSession
        # directly. Other parts of the TPU execution are organized as follows.
        #
        # 1. All outfeed based Tensors must be grouped with predictions Tensors
        #    to form a single invocation. This avoids the issue that we might
        #    trigger multiple outfeeds incorrectly. To achieve this,
        #    `host_call` is placed in control_dependencies of
        #    `stopping_signals`, and `stopping_signals` is passed into
        #    _StoppingPredictHook, which sets the `stopping_signals` as
        #    SessionRunArgs. MonitoredSession merges all SessionRunArgs with
        #    the fetch in session.run together.
        #
        # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed
        #    Enqueue) are grouped together. They will be launched once and
        #    only once in side threads and they quit naturally according to
        #    the SAME stopping condition.
        enqueue_ops.append(dummy_predict_op)

        host_call_ret = host_calls.create_tpu_hostcall()
        if 'host_call' not in host_call_ret:
          host_ops = []
        else:
          host_ops = host_call_ret['host_call']

        predictions = host_call_ret['predictions']
        _verify_cross_hosts_transfer_size(
            predictions, message=(
                'The estimated size for TPUEstimatorSpec.predictions is too '
                'large.'))
        signals = host_call_ret['signals']

        with ops.control_dependencies(host_ops):
          host_ops = []  # Empty, we do not need it anymore.
          scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
              signals)
          predictions = _PaddingSignals.slice_tensor_or_dict(
              predictions, signals)

        hooks = [
            _StoppingPredictHook(scalar_stopping_signal),
            TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
                                                     host_ops),
        ] + input_hooks

        return model_fn_lib.EstimatorSpec(
            mode,
            prediction_hooks=hooks,
            predictions=predictions,
            scaffold=scaffold)

    return _model_fn


def _is_tpu_tensor(tensor):
  if not isinstance(tensor, ops.Tensor):
    return False
  try:
    tensor.op.get_attr(tpu._OUTSIDE_COMPILATION_ATTR)  # pylint: disable=protected-access
  except ValueError:
    return True
  else:
    return False


def _export_output_to_tensors(export_output):
  """Get a list of `Tensors` used in `export_output`.

  Args:
    export_output: an `ExportOutput` object such as `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.

  Returns:
    a list of tensors used in export_output.

  Raises:
    ValueError: if `export_output` is not one of `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.
  """
  if isinstance(export_output, export_output_lib.ClassificationOutput):
    return [export_output.scores, export_output.classes]
  elif isinstance(export_output, export_output_lib.RegressionOutput):
    return [export_output.value]
  elif isinstance(export_output, export_output_lib.PredictOutput):
    return export_output.outputs.values()
  else:
    raise ValueError(
        '`export_output` must have type `ClassificationOutput`, '
        '`RegressionOutput`, or `PredictOutput`; got {}.'.format(
            export_output))


def _clone_export_output_with_tensors(export_output, tensors):
  """Clones `export_output` but with new `tensors`.

  Args:
    export_output: an `ExportOutput` object such as `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.
    tensors: a list of `Tensors` used to construct a new `export_output`.

  Returns:
    A dict similar to `export_output` but with `tensors`.

  Raises:
    ValueError: if `export_output` is not one of `ClassificationOutput`,
      `RegressionOutput`, or `PredictOutput`.
  """
""" if isinstance(export_output, export_output_lib.ClassificationOutput): if len(tensors) != 2: raise ValueError('tensors must be of length 2; ' 'got {}.'.format(len(tensors))) return export_output_lib.ClassificationOutput(*tensors) elif isinstance(export_output, export_output_lib.RegressionOutput): if len(tensors) != 1: raise ValueError('tensors must be of length 1; ' 'got {}'.format(len(tensors))) return export_output_lib.RegressionOutput(*tensors) elif isinstance(export_output, export_output_lib.PredictOutput): return export_output_lib.PredictOutput( dict(zip(export_output.outputs.keys(), tensors))) else: raise ValueError( '`export_output` must be have type `ClassificationOutput`, ' '`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output)) def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" iterations_per_loop_var = _create_or_get_iterations_per_loop() single_tpu_eval_step, host_calls, captured_scaffold_fn = ( model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)) def multi_tpu_eval_steps_on_single_shard(): return training_loop.repeat( iterations_per_loop_var, single_tpu_eval_step, [_ZERO_LOSS]) (loss,) = tpu.shard( multi_tpu_eval_steps_on_single_shard, inputs=[], num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment) scaffold = _get_scaffold(captured_scaffold_fn) return loss, host_calls, scaffold def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" iterations_per_loop_var = _create_or_get_iterations_per_loop() single_tpu_train_step, host_call, captured_scaffold_fn = ( model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn)) def multi_tpu_train_steps_on_single_shard(): return training_loop.repeat( iterations_per_loop_var, single_tpu_train_step, [_INITIAL_LOSS]) (loss,) = tpu.shard( multi_tpu_train_steps_on_single_shard, inputs=[], num_shards=ctx.num_replicas, outputs_from_all_shards=False, device_assignment=ctx.device_assignment) scaffold = _get_scaffold(captured_scaffold_fn) return loss, host_call, scaffold def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn): """Executes `model_fn_wrapper` multiple times on all TPU shards.""" num_cores = ctx.num_cores single_tpu_predict_step, host_calls, captured_scaffold_fn = ( model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)) def multi_tpu_predict_steps_on_single_shard(): def cond(scalar_stopping_signal): return math_ops.logical_not( _StopSignals.should_stop(scalar_stopping_signal)) inputs = [_StopSignals.NON_STOPPING_SIGNAL] outputs = training_loop.while_loop( cond, single_tpu_predict_step, inputs=inputs, name=b'loop') return outputs (dummy_predict_op,) = tpu.shard( multi_tpu_predict_steps_on_single_shard, inputs=[], num_shards=num_cores, outputs_from_all_shards=False) scaffold = _get_scaffold(captured_scaffold_fn) return dummy_predict_op, host_calls, scaffold def _wrap_computation_in_while_loop(device, op_fn): """Wraps the ops generated by `op_fn` in tf.while_loop.""" def computation(i): with ops.control_dependencies(op_fn()): return i + 1 iterations_per_loop_var = _create_or_get_iterations_per_loop() # By setting parallel_iterations=1, the parallel execution in while_loop is # basically turned off. 
  with ops.device(device):
    iterations = array_ops.identity(iterations_per_loop_var)
    return control_flow_ops.while_loop(
        lambda i: i < iterations,
        computation, [constant_op.constant(0)],
        parallel_iterations=1)


def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
  """Wraps the ops generated by `op_fn` in tf.while_loop."""

  def cond(scalar_stopping_signal):
    return math_ops.logical_not(
        _StopSignals.should_stop(scalar_stopping_signal))

  def computation(unused_scalar_stopping_signal):
    return_value = op_fn()
    execute_ops = return_value['ops']
    signals = return_value['signals']
    with ops.control_dependencies(execute_ops):
      return _StopSignals.as_scalar_stopping_signal(signals)

  # By setting parallel_iterations=1, the parallel execution in while_loop is
  # basically turned off.
  with ops.device(device):
    return control_flow_ops.while_loop(
        cond,
        computation, [_StopSignals.NON_STOPPING_SIGNAL],
        parallel_iterations=1)


def _validate_tpu_training_graph():
  """Validate graph before running distributed training.

  Raises:
    ValueError: If the graph seems invalid for running on device.
  """
  operations = ops.get_default_graph().get_operations()

  # Check if there is at least one CrossReplicaSum operation in the graph.
  # This should be introduced by using the CrossShardOptimizer wrapper.
  cross_replica_sum_ops = [
      o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
  ]
  if not cross_replica_sum_ops:
    raise ValueError(
        'CrossShardOptimizer must be used for model training on TPUs.')


class _CapturedObject(object):
  """A placeholder to capture an object.

  This is useful when we need to capture a Python object in the TensorFlow
  control flow body function and use it outside the control flow.
  """

  def __init__(self):
    self._object = None
    self._captured = False

  def capture(self, o):
    if self._captured:
      raise RuntimeError(
          'InternalError: Object can be captured only once. Please file a '
          'bug.')

    self._captured = True
    self._object = o

  def get(self):
    if not self._captured:
      raise RuntimeError(
          'InternalError: Object is not captured properly before `get`. '
          'Please file a bug.')

    return self._object


def _get_scaffold(captured_scaffold_fn):
  """Retrieves the Scaffold from `captured_scaffold_fn`."""
  with _CapturingContext(message='Inside scaffold_fn'):
    scaffold_fn = captured_scaffold_fn.get()
    if scaffold_fn:
      scaffold = scaffold_fn()
      if scaffold is None:
        raise ValueError(
            'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
    else:
      scaffold = None

  if scaffold:
    wrapped_finalize = scaffold.finalize

    def _finalize():
      with _CapturingContext('Inside Scaffold.finalize'):
        wrapped_finalize()

    scaffold.finalize = _finalize
  return scaffold


class _CapturingContext(control_flow_ops.ControlFlowContext):
  """Tracks references to Tensors defined in TPU replication."""

  def __init__(self, message):
    control_flow_ops.ControlFlowContext.__init__(self)
    self._message = message

  def AddOp(self, op):  # pylint: disable=invalid-name
    for c in op.inputs:
      if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr:  # pylint: disable=protected-access
        raise ValueError('{}: Op {} depends on TPU computation {}, '
                         'which is not allowed.'.format(self._message, op, c))

  def __enter__(self):
    # pylint: disable=protected-access
    self._g = ops.get_default_graph()
    self._old = self._g._get_control_flow_context()
    self._g._set_control_flow_context(self)
    # pylint: enable=protected-access

  def __exit__(self, _, __, ___):  # pylint: disable=invalid-name
    self._g._set_control_flow_context(self._old)  # pylint: disable=protected-access


class _Inputs(object):
  """A data structure representing the input_fn returned values.

  This also supports the returned value from input_fn as `Dataset`.
  """

  def __init__(self, features=None, labels=None, dataset=None, signals=None):
    if dataset is not None and (features is not None or labels is not None or
                                signals is not None):
      raise RuntimeError('Internal Error: Either (features and labels) or '
                         'dataset should be provided, not both. Please file '
                         'a bug.')

    self._features = features
    self._labels = labels
    self._signals = signals

    self._dataset = dataset
    self._iterator = None

  @staticmethod
  def from_input_fn(return_values):
    """Returns an `_Inputs` instance according to `input_fn` return value."""
    if isinstance(return_values, dataset_ops.Dataset):
      dataset = return_values
      return _Inputs(dataset=dataset)

    features, labels = _Inputs._parse_inputs(return_values)
    return _Inputs(features, labels)

  @staticmethod
  def _parse_inputs(return_values):
    if isinstance(return_values, tuple):
      features, labels = return_values
    else:
      features, labels = return_values, None
    return features, labels

  @property
  def is_dataset(self):
    """Returns True if the return value from input_fn is Dataset."""
    return self._dataset is not None

  def dataset_initializer_hook(self):
    """Returns a `SessionRunHook` to initialize this dataset.

    This must be called before `features_and_labels`.
    """
    iterator = self._dataset.make_initializable_iterator()
    # pylint: disable=protected-access
    hook = estimator_lib._DatasetInitializerHook(iterator)
    self._iterator = iterator
    return hook

  def features_and_labels(self):
    """Gets `features` and `labels`."""
    if self.is_dataset:
      if self._iterator is None:
        raise RuntimeError('Internal error: Must call '
                           'dataset_initializer_hook before calling '
                           'features_and_labels(). Please file a bug.')
      return _Inputs._parse_inputs(self._iterator.get_next())

    return (self._features, self._labels)

  def signals(self):
    return self._signals

  @property
  def dataset(self):
    return self._dataset


class _InputsWithStoppingSignals(_Inputs):
  """Inputs with `_StopSignals` inserted into the dataset."""

  def __init__(self, dataset, batch_size, add_padding=False):

    assert dataset is not None

    user_provided_dataset = dataset.map(
        _InputsWithStoppingSignals.insert_stopping_signal(
            stop=False, batch_size=batch_size, add_padding=add_padding))
    final_batch_dataset = dataset.take(1).map(
        _InputsWithStoppingSignals.insert_stopping_signal(
            stop=True, batch_size=batch_size, add_padding=add_padding))
    dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)

    super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
    self._current_inputs = None

  def features_and_labels(self):
    if self._current_inputs is not None:
      raise RuntimeError(
          'Internal Error: The previous inputs have not been properly '
          'consumed. First call features_and_labels, then call signals.')

    inputs_with_signals = self._iterator.get_next()
    features = inputs_with_signals['features']
    labels = inputs_with_signals.get('labels')

    self._current_inputs = inputs_with_signals
    return features, labels

  def signals(self):
    """Returns the `Signals` from `_Inputs`."""
    if self._current_inputs is None:
      raise RuntimeError(
          'Internal Error: The current inputs have not been properly '
          'generated. First call features_and_labels, then call signals.')
    signals = self._current_inputs['signals']
    self._current_inputs = None
    return signals

  @staticmethod
  def insert_stopping_signal(stop, batch_size, add_padding=False):
    """Inserts stopping_signal into dataset via _map_fn.

    Here we change the data structure in the dataset, such that the return
    value is a dictionary now and `features`, `labels`, and `signals` are three
    distinguished keys in that dict. This provides a better structure, which
    eases the process to decompose the inputs (see `features_and_labels`).

    Args:
      stop: bool, state of current stopping signals.
      batch_size: int, batch size.
      add_padding: bool, whether to pad the tensor to full batch size.

    Returns:
      A map_fn passed to dataset.map API.
    """

    def _map_fn(*args):
      """The map fn to insert signals."""
      if len(args) == 1:
        # Unpack the single Tensor/dict argument as features. This is required
        # when the input_fn returns no labels.
args = args[0] features, labels = _Inputs._parse_inputs(args) new_input_dict = {} if add_padding: padding_mask, features, labels = ( _PaddingSignals.pad_features_and_labels( features, labels, batch_size)) new_input_dict['features'] = features if labels is not None: new_input_dict['labels'] = labels else: new_input_dict['features'] = features if labels is not None: new_input_dict['labels'] = labels padding_mask = None new_input_dict['signals'] = _StopSignals( stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict() return new_input_dict return _map_fn class _StopSignals(object): """Signals class holding all logic to handle TPU stopping condition.""" NON_STOPPING_SIGNAL = False STOPPING_SIGNAL = True def __init__(self, stop, batch_size, padding_mask=None): self._stop = stop self._batch_size = batch_size self._padding_mask = padding_mask def as_dict(self): """Returns the signals as Python dict.""" shape = [self._batch_size, 1] dtype = dtypes.bool if self._stop: stopping = array_ops.ones(shape=shape, dtype=dtype) else: stopping = array_ops.zeros(shape=shape, dtype=dtype) signals = {'stopping': stopping} if self._padding_mask is not None: signals['padding_mask'] = self._padding_mask return signals @staticmethod def as_scalar_stopping_signal(signals): return array_ops.identity(signals['stopping'][0][0]) @staticmethod def should_stop(scalar_stopping_signal): if isinstance(scalar_stopping_signal, ops.Tensor): # STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF # way to express the bool check whether scalar_stopping_signal is True. return math_ops.logical_and( scalar_stopping_signal, _StopSignals.STOPPING_SIGNAL) else: # For non Tensor case, it is used in SessionRunHook. So, we cannot modify # the graph anymore. Here, we use pure Python. 
return bool(scalar_stopping_signal) class _PaddingSignals(object): """Signals class holding all logic to handle padding.""" @staticmethod def pad_features_and_labels(features, labels, batch_size): """Pads out the batch dimension of features and labels.""" real_batch_size = array_ops.shape( _PaddingSignals._find_any_tensor(features))[0] batch_size_tensor = constant_op.constant(batch_size, dtypes.int32) check_greater = check_ops.assert_greater_equal( batch_size_tensor, real_batch_size, data=(batch_size_tensor, real_batch_size), message='The real batch size should not be greater than batch_size.') with ops.control_dependencies([check_greater]): missing_count = batch_size_tensor - real_batch_size def pad_single_tensor(tensor): """Pads out the batch dimension of a tensor to the complete batch_size.""" rank = len(tensor.shape) assert rank > 0 padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1)) padded_shape = (batch_size,) + tuple(tensor.shape[1:]) padded_tensor = array_ops.pad(tensor, padding) padded_tensor.set_shape(padded_shape) return padded_tensor def nest_pad(tensor_or_dict): return nest.map_structure(pad_single_tensor, tensor_or_dict) features = nest_pad(features) if labels is not None: labels = nest_pad(labels) padding_mask = _PaddingSignals._padding_mask( real_batch_size, missing_count, batch_size) return padding_mask, features, labels @staticmethod def slice_tensor_or_dict(tensor_or_dict, signals): """Slice the real Tensors according to padding mask in signals.""" padding_mask = signals['padding_mask'] batch_size = array_ops.shape(padding_mask)[0] def verify_batch_size(tensor): check_batch_size = math_ops.equal(batch_size, tensor.shape[0]) with ops.control_dependencies([check_batch_size]): return array_ops.identity(tensor) def slice_single_tensor(tensor): rank = len(tensor.shape) assert rank > 0 real_batch_size = batch_size - math_ops.reduce_sum(padding_mask) return verify_batch_size(tensor)[0:real_batch_size] # As we split the Tensors to all TPU cores and concat them back, it is # important to ensure the real data is placed before padded ones, i.e., # order is preserved. By that, the sliced padding mask should have all 0's. # If this assertion failed, # the slice logic here would not hold. sliced_padding_mask = slice_single_tensor(padding_mask) assert_padding_mask = math_ops.equal( math_ops.reduce_sum(sliced_padding_mask), 0) with ops.control_dependencies([assert_padding_mask]): should_stop = _StopSignals.should_stop( _StopSignals.as_scalar_stopping_signal(signals)) is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0) def slice_fn(tensor): # If the current batch is full batch or part of stopping signals, we do # not need to slice to save performance. 
      return control_flow_ops.cond(
          math_ops.logical_or(should_stop, is_full_batch),
          (lambda: verify_batch_size(tensor)),
          (lambda: slice_single_tensor(tensor)))

    return nest.map_structure(slice_fn, tensor_or_dict)

  @staticmethod
  def _find_any_tensor(batch_features):
    tensors = [x for x in nest.flatten(batch_features)
               if isinstance(x, ops.Tensor)]
    if not tensors:
      raise ValueError('Cannot find any Tensor in features dict.')
    return tensors[0]

  @staticmethod
  def _padding_mask(real_batch_size, missing_count, batch_size):
    padding_mask = array_ops.concat(
        [
            array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
            array_ops.ones((missing_count,), dtype=dtypes.int32)
        ],
        axis=0)
    padding_mask.set_shape((batch_size,))
    return padding_mask


class _SignalsHelper(object):
  """A general helper class to handle common signals manipulation."""

  def __init__(self, signals):
    self._signal_keys = []
    for key in sorted(six.iterkeys(signals)):
      self._signal_keys.append(key)

  @property
  def num_signals(self):
    return len(self._signal_keys)

  def unflatten(self, tensor_list):
    return dict(zip(self._signal_keys, tensor_list))

  @staticmethod
  def as_tensor_list(signals):
    return [signals[key] for key in sorted(six.iterkeys(signals))]


def _verify_cross_hosts_transfer_size(tensor_dict, message):
  total_size = 0
  tensor_structure = {}
  for key, tensor in tensor_dict.items():
    shape = tensor.shape
    size = np.product(shape) * tensor.dtype.size
    tensor_structure[key] = shape
    total_size += size
  if total_size >= _ONE_GIGABYTE:
    raise ValueError(
        '{} The transfer size is larger than the protobuf limit. Please '
        'consider using Tensors with smaller shapes or reducing batch '
        'size. Given:\n'
        '{}'.format(message, '\n'.join([
            ' -- Key: {}, Shape: {}'.format(k, v)
            for k, v in tensor_structure.items()])))


def _add_item_to_params(params, key, value):
  """Adds a new item into `params`."""
  if isinstance(params, hparam.HParams):
    # For HParams, we need to use special API.
    if key in params:
      params.set_hparam(key, value)
    else:
      params.add_hparam(key, value)
  else:
    # Now params is a Python dict.
    params[key] = value
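# A minimal usage sketch for `_add_item_to_params` (illustrative only, not
# part of the library): it relies only on the function above and the standard
# dict / HParams APIs. The hyper-parameter names are made up.
#
#   params = {'learning_rate': 0.1}
#   _add_item_to_params(params, 'batch_size', 128)   # plain dict assignment
#   assert params['batch_size'] == 128
#
#   hparams = hparam.HParams(learning_rate=0.1)
#   _add_item_to_params(hparams, 'batch_size', 128)  # uses add_hparam
#   assert hparams.batch_size == 128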
def find_second(search, target):
    return search.find(target, search.find(target) + 1)

danton = "De l'audace, encore de l'audace, toujours de l'audace"
print find_second(danton, 'audace')
#>>> 25

twister = "she sells seashells by the seashore"
print find_second(twister, 'she')
#>>> 13
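# A hypothetical generalization (not part of the original exercise): find the
# n-th occurrence by repeating the same find-after-previous-match trick. Like
# str.find, it returns -1 when there are fewer than n occurrences.
def find_nth(search, target, n):
    position = search.find(target)
    while n > 1 and position != -1:
        position = search.find(target, position + 1)
        n = n - 1
    return position

print find_nth(danton, 'audace', 3)
#>>> 47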
""" This module implement a storage adapter interface. """ class ImagineAdapterInterface(object): """ Storage adapter interface """ def get_item(self, path): """ Get resource item :param path: string :return: PIL.Image """ raise NotImplementedError() def create_cached_item(self, path, content): """ Create cached resource item :param path: string :param content: Image :return: str """ raise NotImplementedError() def get_cached_item(self, path): """ Get cached resource item :param path: string :return: PIL.Image """ raise NotImplementedError() def check_cached_item(self, path): """ Check for cached resource item exists :param path: string :return: bool """ raise NotImplementedError() def remove_cached_item(self, path): """ Remove cached resource item :param path: string :return: bool """ raise NotImplementedError()
from __future__ import absolute_import
from __future__ import unicode_literals

import itertools
import os

from ..variants import revcomp

try:
    from pyfaidx import Fasta as SequenceFileDB
    # Allow pyflakes to ignore redefinition in except clause.
    SequenceFileDB
except ImportError:
    SequenceFileDB = None


class MockGenomeError(Exception):
    pass


class MockSequence(object):
    def __init__(self, sequence):
        self.sequence = sequence

    def __neg__(self):
        """Return reverse complement sequence."""
        return MockSequence(revcomp(self.sequence))

    def __str__(self):
        return self.sequence

    def __repr__(self):
        return 'MockSequence("%s")' % self.sequence


class MockChromosome(object):
    def __init__(self, name, genome=None):
        self.name = name
        self.genome = genome

    def __getitem__(self, n):
        """Return sequence from region [start, end)

        Coordinates are 0-based, end-exclusive."""
        if isinstance(n, slice):
            return self.genome.get_seq(self.name, n.start, n.stop)
        else:
            return self.genome.get_seq(self.name, n, n+1)

    def __repr__(self):
        return 'MockChromosome("%s")' % (self.name)


class MockGenome(object):
    def __init__(self, lookup=None, filename=None, db_filename=None,
                 default_seq=None):
        """
        A mock genome object that provides a pygr compatible interface.

        lookup: a list of ((chrom, start, end), seq) values that define
            a lookup table for genome sequence requests.
        filename: a stream or filename containing a lookup table.
        db_filename: a fasta file to use for genome sequence requests.
            All requests are recorded and can be written to a lookup table
            file using the `write` method.
        default_seq: if given, this base will always be returned if
            region is unavailable.
        """
        self._chroms = {}
        self._lookup = lookup if lookup is not None else {}
        self._genome = None
        self._default_seq = default_seq

        if db_filename:
            # Use a real genome database.
            if SequenceFileDB is None:
                raise ValueError('pyfaidx is not available.')
            self._genome = SequenceFileDB(db_filename)
        elif filename:
            # Read genome sequence from lookup table.
            self.read(filename)

    def __contains__(self, chrom):
        """Return True if genome contains chromosome."""
        return chrom in (self._genome or self._chroms)

    def __getitem__(self, chrom):
        """Return a chromosome by its name."""
        if chrom not in self._chroms:
            self._chroms[chrom] = MockChromosome(chrom, self)
        return self._chroms[chrom]

    def get_seq(self, chrom, start, end):
        """Return a sequence by chromosome name and region [start, end).

        Coordinates are 0-based, end-exclusive.
        """
        if self._genome:
            # Get sequence from real genome object and save result.
            seq = self._genome[chrom][start:end]
            self._lookup[(chrom, start, end)] = str(seq)
            return seq
        else:
            # Use lookup table to fetch genome sequence.
            try:
                return MockSequence(self._lookup[(chrom, start, end)])
            except KeyError:
                if self._default_seq:
                    # Generate default sequence.
                    return ''.join(itertools.islice(
                        itertools.cycle(self._default_seq),
                        None, end - start))
                else:
                    raise MockGenomeError(
                        'Sequence not in test data: %s:%d-%d' %
                        (chrom, start, end))

    def read(self, filename):
        """Read a sequence lookup table from a file.

        filename: a filename string or file stream.
""" if hasattr(filename, 'read'): infile = filename else: with open(filename) as infile: return self.read(infile) for line in infile: tokens = line.rstrip().split('\t') chrom, start, end, seq = tokens self._lookup[(chrom, int(start), int(end))] = seq if chrom not in self._lookup: self._chroms[chrom] = MockChromosome(chrom, self) def write(self, filename): """Write a sequence lookup table to file.""" if hasattr(filename, 'write'): out = filename else: with open(filename, 'w') as out: return self.write(out) for (chrom, start, end), seq in self._lookup.items(): out.write('\t'.join(map(str, [chrom, start, end, seq])) + '\n') class MockGenomeTestFile(MockGenome): def __init__(self, lookup=None, filename=None, db_filename=None, default_seq=None, create_data=False): if not create_data: db_filename = None super(MockGenomeTestFile, self).__init__( lookup=lookup, db_filename=db_filename, filename=filename, default_seq=default_seq) self._filename = filename self._create_data = (db_filename is not None) if self._create_data and os.path.exists(filename): # Clear output file when creating data. os.remove(filename) def get_seq(self, chrom, start, end): seq = super(MockGenomeTestFile, self).get_seq(chrom, start, end) # Save each query in append mode. if self._create_data: with open(self._filename, 'a') as out: out.write('\t'.join(map(str, [chrom, start, end, seq])) + '\n') return seq
from flask.ext import wtf import auth import flask import model import wtforms import util from main import app ############################################################################### # Create ############################################################################### class PayUpdateForm(wtf.Form): name = wtforms.StringField('Name', [wtforms.validators.required()]) date_for = wtforms.DateField('Date For', [wtforms.validators.required()]) date_paid = wtforms.DateField('Date Paid', [wtforms.validators.required()]) code = wtforms.StringField('Code', [wtforms.validators.required()]) amount = wtforms.FloatField('Amount', [wtforms.validators.required()]) add_more = wtforms.BooleanField('Add more', [wtforms.validators.optional()], default=True) @app.route('/pay/<int:pay_id>/', methods=['GET', 'POST']) @app.route('/pay/create/', methods=['GET', 'POST']) @auth.login_required def pay_update(pay_id=0): if pay_id: pay_db = model.Pay.get_by_id(pay_id, parent=auth.current_user_key()) else: pay_db = model.Pay(parent=auth.current_user_key()) if not pay_db: flask.abort(404) form = PayUpdateForm(obj=pay_db) if form.validate_on_submit(): form.populate_obj(pay_db) pay_db.put() if form.add_more.data: return flask.redirect(flask.url_for('pay_update')) return flask.redirect(flask.url_for('pay_list')) return flask.render_template( 'pay/pay_update.html', html_class='pay-update', title=pay_db.name or 'Create Pay', form=form, pay_db=pay_db, ) ############################################################################### # List ############################################################################### @app.route('/pay/') @auth.login_required def pay_list(): pay_dbs, pay_cursor = auth.current_user_db().get_pay_dbs() return flask.render_template( 'pay/pay_list.html', html_class='pay-list', title='Pay List', pay_dbs=pay_dbs, next_url=util.generate_next_url(pay_cursor), ) ############################################################################### # Admin Pay List ############################################################################### @app.route('/admin/pay/') @auth.admin_required def admin_pay_list(): pay_dbs, pay_cursor = model.Pay.get_dbs() return flask.render_template( 'admin/pay_list.html', html_class='admin-pay-list', title='Pay List', pay_dbs=pay_dbs, next_url=util.generate_next_url(pay_cursor), )
import base64 import datetime import sys import time import unittest import xmlrpclib import SimpleXMLRPCServer import mimetools import httplib import socket import StringIO import os import re from test import test_support try: import threading except ImportError: threading = None try: unicode except NameError: have_unicode = False else: have_unicode = True alist = [{'astring': 'foo@bar.baz.spam', 'afloat': 7283.43, 'anint': 2**20, 'ashortlong': 2L, 'anotherlist': ['.zyx.41'], 'abase64': xmlrpclib.Binary("my dog has fleas"), 'boolean': xmlrpclib.False, 'unicode': u'\u4000\u6000\u8000', u'ukey\u4000': 'regular value', 'datetime1': xmlrpclib.DateTime('20050210T11:41:23'), 'datetime2': xmlrpclib.DateTime( (2005, 02, 10, 11, 41, 23, 0, 1, -1)), 'datetime3': xmlrpclib.DateTime( datetime.datetime(2005, 02, 10, 11, 41, 23)), }] class XMLRPCTestCase(unittest.TestCase): def test_dump_load(self): self.assertEqual(alist, xmlrpclib.loads(xmlrpclib.dumps((alist,)))[0][0]) def test_dump_bare_datetime(self): # This checks that an unwrapped datetime.date object can be handled # by the marshalling code. This can't be done via test_dump_load() # since with use_datetime set to 1 the unmarshaller would create # datetime objects for the 'datetime[123]' keys as well dt = datetime.datetime(2005, 02, 10, 11, 41, 23) s = xmlrpclib.dumps((dt,)) (newdt,), m = xmlrpclib.loads(s, use_datetime=1) self.assertEqual(newdt, dt) self.assertEqual(m, None) (newdt,), m = xmlrpclib.loads(s, use_datetime=0) self.assertEqual(newdt, xmlrpclib.DateTime('20050210T11:41:23')) def test_datetime_before_1900(self): # same as before but with a date before 1900 dt = datetime.datetime(1, 02, 10, 11, 41, 23) s = xmlrpclib.dumps((dt,)) (newdt,), m = xmlrpclib.loads(s, use_datetime=1) self.assertEqual(newdt, dt) self.assertEqual(m, None) (newdt,), m = xmlrpclib.loads(s, use_datetime=0) self.assertEqual(newdt, xmlrpclib.DateTime('00010210T11:41:23')) def test_cmp_datetime_DateTime(self): now = datetime.datetime.now() dt = xmlrpclib.DateTime(now.timetuple()) self.assertTrue(dt == now) self.assertTrue(now == dt) then = now + datetime.timedelta(seconds=4) self.assertTrue(then >= dt) self.assertTrue(dt < then) def test_bug_1164912 (self): d = xmlrpclib.DateTime() ((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,), methodresponse=True)) self.assertIsInstance(new_d.value, str) # Check that the output of dumps() is still an 8-bit string s = xmlrpclib.dumps((new_d,), methodresponse=True) self.assertIsInstance(s, str) def test_newstyle_class(self): class T(object): pass t = T() t.x = 100 t.y = "Hello" ((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,))) self.assertEqual(t2, t.__dict__) def test_dump_big_long(self): self.assertRaises(OverflowError, xmlrpclib.dumps, (2L**99,)) def test_dump_bad_dict(self): self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},)) def test_dump_recursive_seq(self): l = [1,2,3] t = [3,4,5,l] l.append(t) self.assertRaises(TypeError, xmlrpclib.dumps, (l,)) def test_dump_recursive_dict(self): d = {'1':1, '2':1} t = {'3':3, 'd':d} d['t'] = t self.assertRaises(TypeError, xmlrpclib.dumps, (d,)) def test_dump_big_int(self): if sys.maxint > 2L**31-1: self.assertRaises(OverflowError, xmlrpclib.dumps, (int(2L**34),)) xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT)) self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MAXINT+1,)) self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MININT-1,)) def dummy_write(s): pass m = xmlrpclib.Marshaller() m.dump_int(xmlrpclib.MAXINT, dummy_write) 
        m.dump_int(xmlrpclib.MININT, dummy_write)
        self.assertRaises(OverflowError, m.dump_int,
                          xmlrpclib.MAXINT+1, dummy_write)
        self.assertRaises(OverflowError, m.dump_int,
                          xmlrpclib.MININT-1, dummy_write)

    def test_dump_none(self):
        value = alist + [None]
        arg1 = (alist + [None],)
        strg = xmlrpclib.dumps(arg1, allow_none=True)
        self.assertEqual(value,
                         xmlrpclib.loads(strg)[0][0])
        self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))

    def test_default_encoding_issues(self):
        # SF bug #1115989: wrong decoding in '_stringify'
        utf8 = """<?xml version='1.0' encoding='iso-8859-1'?>
<params>
  <param><value>
    <string>abc \x95</string>
    </value></param>
  <param><value>
    <struct>
      <member>
        <name>def \x96</name>
        <value><string>ghi \x97</string></value>
        </member>
      </struct>
    </value></param>
</params>
"""

        # sys.setdefaultencoding() normally doesn't exist after site.py is
        # loaded. Import a temporary fresh copy to get access to it
        # but then restore the original copy to avoid messing with
        # other potentially modified sys module attributes
        old_encoding = sys.getdefaultencoding()
        with test_support.CleanImport('sys'):
            import sys as temp_sys
            temp_sys.setdefaultencoding("iso-8859-1")
            try:
                (s, d), m = xmlrpclib.loads(utf8)
            finally:
                temp_sys.setdefaultencoding(old_encoding)

        items = d.items()
        if have_unicode:
            self.assertEqual(s, u"abc \x95")
            self.assertIsInstance(s, unicode)
            self.assertEqual(items, [(u"def \x96", u"ghi \x97")])
            self.assertIsInstance(items[0][0], unicode)
            self.assertIsInstance(items[0][1], unicode)
        else:
            self.assertEqual(s, "abc \xc2\x95")
            self.assertEqual(items, [("def \xc2\x96", "ghi \xc2\x97")])


class HelperTestCase(unittest.TestCase):
    def test_escape(self):
        self.assertEqual(xmlrpclib.escape("a&b"), "a&amp;b")
        self.assertEqual(xmlrpclib.escape("a<b"), "a&lt;b")
        self.assertEqual(xmlrpclib.escape("a>b"), "a&gt;b")


class FaultTestCase(unittest.TestCase):
    def test_repr(self):
        f = xmlrpclib.Fault(42, 'Test Fault')
        self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
        self.assertEqual(repr(f), str(f))

    def test_dump_fault(self):
        f = xmlrpclib.Fault(42, 'Test Fault')
        s = xmlrpclib.dumps((f,))
        (newf,), m = xmlrpclib.loads(s)
        self.assertEqual(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
        self.assertEqual(m, None)

        s = xmlrpclib.Marshaller().dumps(f)
        self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)


class DateTimeTestCase(unittest.TestCase):
    def test_default(self):
        t = xmlrpclib.DateTime()

    def test_time(self):
        d = 1181399930.036952
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S",
                                               time.localtime(d)))

    def test_time_tuple(self):
        d = (2007,6,9,10,38,50,5,160,0)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), '20070609T10:38:50')

    def test_time_struct(self):
        d = time.localtime(1181399930.036952)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))

    def test_datetime_datetime(self):
        d = datetime.datetime(2007,1,2,3,4,5)
        t = xmlrpclib.DateTime(d)
        self.assertEqual(str(t), '20070102T03:04:05')

    def test_repr(self):
        d = datetime.datetime(2007,1,2,3,4,5)
        t = xmlrpclib.DateTime(d)
        val = "<DateTime '20070102T03:04:05' at %x>" % id(t)
        self.assertEqual(repr(t), val)

    def test_decode(self):
        d = ' 20070908T07:11:13 '
        t1 = xmlrpclib.DateTime()
        t1.decode(d)
        tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
        self.assertEqual(t1, tref)

        t2 = xmlrpclib._datetime(d)
        self.assertEqual(t2, tref)


class BinaryTestCase(unittest.TestCase):
    def test_default(self):
        t = xmlrpclib.Binary()
        self.assertEqual(str(t), '')

    def test_string(self):
'\x01\x02\x03abc123\xff\xfe' t = xmlrpclib.Binary(d) self.assertEqual(str(t), d) def test_decode(self): d = '\x01\x02\x03abc123\xff\xfe' de = base64.encodestring(d) t1 = xmlrpclib.Binary() t1.decode(de) self.assertEqual(str(t1), d) t2 = xmlrpclib._binary(de) self.assertEqual(str(t2), d) ADDR = PORT = URL = None # The evt is set twice. First when the server is ready to serve. # Second when the server has been shutdown. The user must clear # the event after it has been set the first time to catch the second set. def http_server(evt, numrequests, requestHandler=None): class TestInstanceClass: def div(self, x, y): return x // y def _methodHelp(self, name): if name == 'div': return 'This is the div function' def my_function(): '''This is my function''' return True class MyXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer): def get_request(self): # Ensure the socket is always non-blocking. On Linux, socket # attributes are not inherited like they are on *BSD and Windows. s, port = self.socket.accept() s.setblocking(True) return s, port if not requestHandler: requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler serv = MyXMLRPCServer(("localhost", 0), requestHandler, logRequests=False, bind_and_activate=False) try: serv.socket.settimeout(3) serv.server_bind() global ADDR, PORT, URL ADDR, PORT = serv.socket.getsockname() #connect to IP address directly. This avoids socket.create_connection() #trying to connect to to "localhost" using all address families, which #causes slowdown e.g. on vista which supports AF_INET6. The server listens #on AF_INET only. URL = "http://%s:%d"%(ADDR, PORT) serv.server_activate() serv.register_introspection_functions() serv.register_multicall_functions() serv.register_function(pow) serv.register_function(lambda x,y: x+y, 'add') serv.register_function(my_function) serv.register_instance(TestInstanceClass()) evt.set() # handle up to 'numrequests' requests while numrequests > 0: serv.handle_request() numrequests -= 1 except socket.timeout: pass finally: serv.socket.close() PORT = None evt.set() def http_multi_server(evt, numrequests, requestHandler=None): class TestInstanceClass: def div(self, x, y): return x // y def _methodHelp(self, name): if name == 'div': return 'This is the div function' def my_function(): '''This is my function''' return True class MyXMLRPCServer(SimpleXMLRPCServer.MultiPathXMLRPCServer): def get_request(self): # Ensure the socket is always non-blocking. On Linux, socket # attributes are not inherited like they are on *BSD and Windows. s, port = self.socket.accept() s.setblocking(True) return s, port if not requestHandler: requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler class MyRequestHandler(requestHandler): rpc_paths = [] serv = MyXMLRPCServer(("localhost", 0), MyRequestHandler, logRequests=False, bind_and_activate=False) serv.socket.settimeout(3) serv.server_bind() try: global ADDR, PORT, URL ADDR, PORT = serv.socket.getsockname() #connect to IP address directly. This avoids socket.create_connection() #trying to connect to to "localhost" using all address families, which #causes slowdown e.g. on vista which supports AF_INET6. The server listens #on AF_INET only. 
URL = "http://%s:%d"%(ADDR, PORT) serv.server_activate() paths = ["/foo", "/foo/bar"] for path in paths: d = serv.add_dispatcher(path, SimpleXMLRPCServer.SimpleXMLRPCDispatcher()) d.register_introspection_functions() d.register_multicall_functions() serv.get_dispatcher(paths[0]).register_function(pow) serv.get_dispatcher(paths[1]).register_function(lambda x,y: x+y, 'add') evt.set() # handle up to 'numrequests' requests while numrequests > 0: serv.handle_request() numrequests -= 1 except socket.timeout: pass finally: serv.socket.close() PORT = None evt.set() # This function prevents errors like: # <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error> def is_unavailable_exception(e): '''Returns True if the given ProtocolError is the product of a server-side exception caused by the 'temporarily unavailable' response sometimes given by operations on non-blocking sockets.''' # sometimes we get a -1 error code and/or empty headers try: if e.errcode == -1 or e.headers is None: return True exc_mess = e.headers.get('X-exception') except AttributeError: # Ignore socket.errors here. exc_mess = str(e) if exc_mess and 'temporarily unavailable' in exc_mess.lower(): return True return False @unittest.skipUnless(threading, 'Threading required for this test.') class BaseServerTestCase(unittest.TestCase): requestHandler = None request_count = 1 threadFunc = staticmethod(http_server) def setUp(self): # enable traceback reporting SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True self.evt = threading.Event() # start server thread to handle requests serv_args = (self.evt, self.request_count, self.requestHandler) threading.Thread(target=self.threadFunc, args=serv_args).start() # wait for the server to be ready self.evt.wait(10) self.evt.clear() def tearDown(self): # wait on the server thread to terminate self.evt.wait(10) # disable traceback reporting SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False # NOTE: The tests in SimpleServerTestCase will ignore failures caused by # "temporarily unavailable" exceptions raised in SimpleXMLRPCServer. This # condition occurs infrequently on some platforms, frequently on others, and # is apparently caused by using SimpleXMLRPCServer with a non-blocking socket # If the server class is updated at some point in the future to handle this # situation more gracefully, these tests should be modified appropriately. class SimpleServerTestCase(BaseServerTestCase): def test_simple1(self): try: p = xmlrpclib.ServerProxy(URL) self.assertEqual(p.pow(6,8), 6**8) except (xmlrpclib.ProtocolError, socket.error), e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e): # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) def test_nonascii(self): start_string = 'P\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}t' end_string = 'h\N{LATIN SMALL LETTER O WITH HORN}n' try: p = xmlrpclib.ServerProxy(URL) self.assertEqual(p.add(start_string, end_string), start_string + end_string) except (xmlrpclib.ProtocolError, socket.error) as e: # ignore failures due to non-blocking socket unavailable errors. if not is_unavailable_exception(e): # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) # [ch] The test 404 is causing lots of false alarms. def XXXtest_404(self): # send POST with httplib, it should return 404 header and # 'Not Found' message. 
conn = httplib.HTTPConnection(ADDR, PORT) conn.request('POST', '/this-is-not-valid') response = conn.getresponse() conn.close() self.assertEqual(response.status, 404) self.assertEqual(response.reason, 'Not Found') def test_introspection1(self): try: p = xmlrpclib.ServerProxy(URL) meth = p.system.listMethods() expected_methods = set(['pow', 'div', 'my_function', 'add', 'system.listMethods', 'system.methodHelp', 'system.methodSignature', 'system.multicall']) self.assertEqual(set(meth), expected_methods) except (xmlrpclib.ProtocolError, socket.error), e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e): # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) def test_introspection2(self): try: # test _methodHelp() p = xmlrpclib.ServerProxy(URL) divhelp = p.system.methodHelp('div') self.assertEqual(divhelp, 'This is the div function') except (xmlrpclib.ProtocolError, socket.error), e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e): # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) @unittest.skipIf(sys.flags.optimize >= 2, "Docstrings are omitted with -O2 and above") def test_introspection3(self): try: # test native doc p = xmlrpclib.ServerProxy(URL) myfunction = p.system.methodHelp('my_function') self.assertEqual(myfunction, 'This is my function') except (xmlrpclib.ProtocolError, socket.error), e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e): # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) def test_introspection4(self): # the SimpleXMLRPCServer doesn't support signatures, but # at least check that we can try making the call try: p = xmlrpclib.ServerProxy(URL) divsig = p.system.methodSignature('div') self.assertEqual(divsig, 'signatures not supported') except (xmlrpclib.ProtocolError, socket.error), e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e): # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) def test_multicall(self): try: p = xmlrpclib.ServerProxy(URL) multicall = xmlrpclib.MultiCall(p) multicall.add(2,3) multicall.pow(6,8) multicall.div(127,42) add_result, pow_result, div_result = multicall() self.assertEqual(add_result, 2+3) self.assertEqual(pow_result, 6**8) self.assertEqual(div_result, 127//42) except (xmlrpclib.ProtocolError, socket.error), e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e): # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) def test_non_existing_multicall(self): try: p = xmlrpclib.ServerProxy(URL) multicall = xmlrpclib.MultiCall(p) multicall.this_is_not_exists() result = multicall() # result.results contains; # [{'faultCode': 1, 'faultString': '<type \'exceptions.Exception\'>:' # 'method "this_is_not_exists" is not supported'>}] self.assertEqual(result.results[0]['faultCode'], 1) self.assertEqual(result.results[0]['faultString'], '<type \'exceptions.Exception\'>:method "this_is_not_exists" ' 'is not supported') except (xmlrpclib.ProtocolError, socket.error), e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e): # protocol 
error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) def test_dotted_attribute(self): # Raises an AttributeError because private methods are not allowed. self.assertRaises(AttributeError, SimpleXMLRPCServer.resolve_dotted_attribute, str, '__add') self.assertTrue(SimpleXMLRPCServer.resolve_dotted_attribute(str, 'title')) # Get the test to run faster by sending a request with test_simple1. # This avoids waiting for the socket timeout. self.test_simple1() class MultiPathServerTestCase(BaseServerTestCase): threadFunc = staticmethod(http_multi_server) request_count = 2 def test_path1(self): p = xmlrpclib.ServerProxy(URL+"/foo") self.assertEqual(p.pow(6,8), 6**8) self.assertRaises(xmlrpclib.Fault, p.add, 6, 8) def test_path2(self): p = xmlrpclib.ServerProxy(URL+"/foo/bar") self.assertEqual(p.add(6,8), 6+8) self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8) #A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism #does indeed serve subsequent requests on the same connection class BaseKeepaliveServerTestCase(BaseServerTestCase): #a request handler that supports keep-alive and logs requests into a #class variable class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler protocol_version = 'HTTP/1.1' myRequests = [] def handle(self): self.myRequests.append([]) self.reqidx = len(self.myRequests)-1 return self.parentClass.handle(self) def handle_one_request(self): result = self.parentClass.handle_one_request(self) self.myRequests[self.reqidx].append(self.raw_requestline) return result requestHandler = RequestHandler def setUp(self): #clear request log self.RequestHandler.myRequests = [] return BaseServerTestCase.setUp(self) #A test case that verifies that a server using the HTTP/1.1 keep-alive mechanism #does indeed serve subsequent requests on the same connection class KeepaliveServerTestCase1(BaseKeepaliveServerTestCase): def test_two(self): p = xmlrpclib.ServerProxy(URL) #do three requests. self.assertEqual(p.pow(6,8), 6**8) self.assertEqual(p.pow(6,8), 6**8) self.assertEqual(p.pow(6,8), 6**8) #they should have all been handled by a single request handler self.assertEqual(len(self.RequestHandler.myRequests), 1) #check that we did at least two (the third may be pending append #due to thread scheduling) self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2) #test special attribute access on the serverproxy, through the __call__ #function. class KeepaliveServerTestCase2(BaseKeepaliveServerTestCase): #ask for two keepalive requests to be handled. request_count=2 def test_close(self): p = xmlrpclib.ServerProxy(URL) #do some requests with close. self.assertEqual(p.pow(6,8), 6**8) self.assertEqual(p.pow(6,8), 6**8) self.assertEqual(p.pow(6,8), 6**8) p("close")() #this should trigger a new keep-alive request self.assertEqual(p.pow(6,8), 6**8) self.assertEqual(p.pow(6,8), 6**8) self.assertEqual(p.pow(6,8), 6**8) #they should have all been two request handlers, each having logged at least #two complete requests self.assertEqual(len(self.RequestHandler.myRequests), 2) self.assertGreaterEqual(len(self.RequestHandler.myRequests[-1]), 2) self.assertGreaterEqual(len(self.RequestHandler.myRequests[-2]), 2) def test_transport(self): p = xmlrpclib.ServerProxy(URL) #do some requests with close. self.assertEqual(p.pow(6,8), 6**8) p("transport").close() #same as above, really. 
        self.assertEqual(p.pow(6,8), 6**8)
        self.assertEqual(len(self.RequestHandler.myRequests), 2)

#A test case that verifies that gzip encoding works in both directions
#(for a request and the response)
class GzipServerTestCase(BaseServerTestCase):
    #a request handler that supports keep-alive and records the
    #content length of each POST in a class variable
    class RequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
        parentClass = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler
        protocol_version = 'HTTP/1.1'

        def do_POST(self):
            #store content of last request in class
            self.__class__.content_length = int(self.headers["content-length"])
            return self.parentClass.do_POST(self)
    requestHandler = RequestHandler

    class Transport(xmlrpclib.Transport):
        #custom transport, stores the response length for our perusal
        fake_gzip = False

        def parse_response(self, response):
            self.response_length = int(response.getheader("content-length", 0))
            return xmlrpclib.Transport.parse_response(self, response)

        def send_content(self, connection, body):
            if self.fake_gzip:
                #add a lone gzip header to induce decode error remotely
                connection.putheader("Content-Encoding", "gzip")
            return xmlrpclib.Transport.send_content(self, connection, body)

    def setUp(self):
        BaseServerTestCase.setUp(self)

    def test_gzip_request(self):
        t = self.Transport()
        t.encode_threshold = None
        p = xmlrpclib.ServerProxy(URL, transport=t)
        self.assertEqual(p.pow(6,8), 6**8)
        a = self.RequestHandler.content_length
        t.encode_threshold = 0 #turn on request encoding
        self.assertEqual(p.pow(6,8), 6**8)
        b = self.RequestHandler.content_length
        self.assertTrue(a > b)

    def test_bad_gzip_request(self):
        t = self.Transport()
        t.encode_threshold = None
        t.fake_gzip = True
        p = xmlrpclib.ServerProxy(URL, transport=t)
        cm = self.assertRaisesRegexp(xmlrpclib.ProtocolError,
                                     re.compile(r"\b400\b"))
        with cm:
            p.pow(6, 8)

    def test_gzip_response(self):
        t = self.Transport()
        p = xmlrpclib.ServerProxy(URL, transport=t)
        old = self.requestHandler.encode_threshold
        self.requestHandler.encode_threshold = None #no encoding
        self.assertEqual(p.pow(6,8), 6**8)
        a = t.response_length
        self.requestHandler.encode_threshold = 0 #always encode
        self.assertEqual(p.pow(6,8), 6**8)
        b = t.response_length
        self.requestHandler.encode_threshold = old
        self.assertTrue(a > b)

#Test special attributes of the ServerProxy object
class ServerProxyTestCase(unittest.TestCase):
    def setUp(self):
        unittest.TestCase.setUp(self)
        if threading:
            self.url = URL
        else:
            # Without threading, http_server() and http_multi_server() will not
            # be executed and URL is still equal to None.
'http://' is a just # enough to choose the scheme (HTTP) self.url = 'http://' def test_close(self): p = xmlrpclib.ServerProxy(self.url) self.assertEqual(p('close')(), None) def test_transport(self): t = xmlrpclib.Transport() p = xmlrpclib.ServerProxy(self.url, transport=t) self.assertEqual(p('transport'), t) # This is a contrived way to make a failure occur on the server side # in order to test the _send_traceback_header flag on the server class FailingMessageClass(mimetools.Message): def __getitem__(self, key): key = key.lower() if key == 'content-length': return 'I am broken' return mimetools.Message.__getitem__(self, key) @unittest.skipUnless(threading, 'Threading required for this test.') class FailingServerTestCase(unittest.TestCase): def setUp(self): self.evt = threading.Event() # start server thread to handle requests serv_args = (self.evt, 1) threading.Thread(target=http_server, args=serv_args).start() # wait for the server to be ready self.evt.wait() self.evt.clear() def tearDown(self): # wait on the server thread to terminate self.evt.wait() # reset flag SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False # reset message class SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = mimetools.Message def test_basic(self): # check that flag is false by default flagval = SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header self.assertEqual(flagval, False) # enable traceback reporting SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True # test a call that shouldn't fail just as a smoke test try: p = xmlrpclib.ServerProxy(URL) self.assertEqual(p.pow(6,8), 6**8) except (xmlrpclib.ProtocolError, socket.error), e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e): # protocol error; provide additional information in test output self.fail("%s\n%s" % (e, getattr(e, "headers", ""))) def test_fail_no_info(self): # use the broken message class SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass try: p = xmlrpclib.ServerProxy(URL) p.pow(6,8) except (xmlrpclib.ProtocolError, socket.error), e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e) and hasattr(e, "headers"): # The two server-side error headers shouldn't be sent back in this case self.assertTrue(e.headers.get("X-exception") is None) self.assertTrue(e.headers.get("X-traceback") is None) else: self.fail('ProtocolError not raised') def test_fail_with_info(self): # use the broken message class SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass # Check that errors in the server send back exception/traceback # info when flag is set SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True try: p = xmlrpclib.ServerProxy(URL) p.pow(6,8) except (xmlrpclib.ProtocolError, socket.error), e: # ignore failures due to non-blocking socket 'unavailable' errors if not is_unavailable_exception(e) and hasattr(e, "headers"): # We should get error info in the response expected_err = "invalid literal for int() with base 10: 'I am broken'" self.assertEqual(e.headers.get("x-exception"), expected_err) self.assertTrue(e.headers.get("x-traceback") is not None) else: self.fail('ProtocolError not raised') class CGIHandlerTestCase(unittest.TestCase): def setUp(self): self.cgi = SimpleXMLRPCServer.CGIXMLRPCRequestHandler() def tearDown(self): self.cgi = None def test_cgi_get(self): with test_support.EnvironmentVarGuard() as env: env['REQUEST_METHOD'] = 
'GET' # if the method is GET and no request_text is given, it runs handle_get # get sysout output with test_support.captured_stdout() as data_out: self.cgi.handle_request() # parse Status header data_out.seek(0) handle = data_out.read() status = handle.split()[1] message = ' '.join(handle.split()[2:4]) self.assertEqual(status, '400') self.assertEqual(message, 'Bad Request') def test_cgi_xmlrpc_response(self): data = """<?xml version='1.0'?> <methodCall> <methodName>test_method</methodName> <params> <param> <value><string>foo</string></value> </param> <param> <value><string>bar</string></value> </param> </params> </methodCall> """ with test_support.EnvironmentVarGuard() as env, \ test_support.captured_stdout() as data_out, \ test_support.captured_stdin() as data_in: data_in.write(data) data_in.seek(0) env['CONTENT_LENGTH'] = str(len(data)) self.cgi.handle_request() data_out.seek(0) # will respond exception, if so, our goal is achieved ;) handle = data_out.read() # start with 44th char so as not to get http header, we just need only xml self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:]) # Also test the content-length returned by handle_request # Using the same test method inorder to avoid all the datapassing # boilerplate code. # Test for bug: http://bugs.python.org/issue5040 content = handle[handle.find("<?xml"):] self.assertEqual( int(re.search('Content-Length: (\d+)', handle).group(1)), len(content)) class FakeSocket: def __init__(self): self.data = StringIO.StringIO() def send(self, buf): self.data.write(buf) return len(buf) def sendall(self, buf): self.data.write(buf) def getvalue(self): return self.data.getvalue() def makefile(self, x='r', y=-1): raise RuntimeError def close(self): pass class FakeTransport(xmlrpclib.Transport): """A Transport instance that records instead of sending a request. This class replaces the actual socket used by httplib with a FakeSocket object that records the request. It doesn't provide a response. 
""" def make_connection(self, host): conn = xmlrpclib.Transport.make_connection(self, host) conn.sock = self.fake_socket = FakeSocket() return conn class TransportSubclassTestCase(unittest.TestCase): def issue_request(self, transport_class): """Return an HTTP request made via transport_class.""" transport = transport_class() proxy = xmlrpclib.ServerProxy("http://example.com/", transport=transport) try: proxy.pow(6, 8) except RuntimeError: return transport.fake_socket.getvalue() return None def test_custom_user_agent(self): class TestTransport(FakeTransport): def send_user_agent(self, conn): xmlrpclib.Transport.send_user_agent(self, conn) conn.putheader("X-Test", "test_custom_user_agent") req = self.issue_request(TestTransport) self.assertIn("X-Test: test_custom_user_agent\r\n", req) def test_send_host(self): class TestTransport(FakeTransport): def send_host(self, conn, host): xmlrpclib.Transport.send_host(self, conn, host) conn.putheader("X-Test", "test_send_host") req = self.issue_request(TestTransport) self.assertIn("X-Test: test_send_host\r\n", req) def test_send_request(self): class TestTransport(FakeTransport): def send_request(self, conn, url, body): xmlrpclib.Transport.send_request(self, conn, url, body) conn.putheader("X-Test", "test_send_request") req = self.issue_request(TestTransport) self.assertIn("X-Test: test_send_request\r\n", req) def test_send_content(self): class TestTransport(FakeTransport): def send_content(self, conn, body): conn.putheader("X-Test", "test_send_content") xmlrpclib.Transport.send_content(self, conn, body) req = self.issue_request(TestTransport) self.assertIn("X-Test: test_send_content\r\n", req) @test_support.reap_threads def test_main(): xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase, BinaryTestCase, FaultTestCase, TransportSubclassTestCase] xmlrpc_tests.append(SimpleServerTestCase) xmlrpc_tests.append(KeepaliveServerTestCase1) xmlrpc_tests.append(KeepaliveServerTestCase2) try: import gzip xmlrpc_tests.append(GzipServerTestCase) except ImportError: pass #gzip not supported in this build xmlrpc_tests.append(MultiPathServerTestCase) xmlrpc_tests.append(ServerProxyTestCase) xmlrpc_tests.append(FailingServerTestCase) xmlrpc_tests.append(CGIHandlerTestCase) test_support.run_unittest(*xmlrpc_tests) if __name__ == "__main__": test_main()
{ "content_hash": "036f2795c5513d49fe0259a3c8c84f1f", "timestamp": "", "source": "github", "line_count": 1018, "max_line_length": 88, "avg_line_length": 38.95383104125737, "alnum_prop": 0.59750346740638, "repo_name": "MattDevo/edk2", "id": "5e3bc1f94d11531c3b4951fb31d3c08000a8f0c8", "size": "39655", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_xmlrpc.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Assembly", "bytes": "4545237" }, { "name": "Batchfile", "bytes": "93042" }, { "name": "C", "bytes": "94289702" }, { "name": "C++", "bytes": "20170310" }, { "name": "CSS", "bytes": "1905" }, { "name": "DIGITAL Command Language", "bytes": "13695" }, { "name": "GAP", "bytes": "698245" }, { "name": "GDB", "bytes": "96" }, { "name": "HTML", "bytes": "472114" }, { "name": "Lua", "bytes": "249" }, { "name": "Makefile", "bytes": "231845" }, { "name": "NSIS", "bytes": "2229" }, { "name": "Objective-C", "bytes": "4147834" }, { "name": "PHP", "bytes": "674" }, { "name": "PLSQL", "bytes": "24782" }, { "name": "Perl", "bytes": "6218" }, { "name": "Python", "bytes": "27130096" }, { "name": "R", "bytes": "21094" }, { "name": "Roff", "bytes": "28192" }, { "name": "Shell", "bytes": "104362" }, { "name": "SourcePawn", "bytes": "29427" }, { "name": "Visual Basic", "bytes": "494" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models import wagtail.images.models class Migration(migrations.Migration): dependencies = [ ('images', '0003_auto_20160405_1542'), ] operations = [ migrations.AlterField( model_name='attributedrendition', name='file', field=models.ImageField(height_field='height', width_field='width', upload_to=wagtail.images.models.get_rendition_upload_to), ), ]
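
# Note: the upload_to above must be the module-level callable
# wagtail.images.models.get_rendition_upload_to rather than a lambda,
# because Django serializes upload_to into the migration file by import
# path and cannot serialize anonymous functions.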
{ "content_hash": "fedd23758cc867123a5fd9bd82d97c60", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 137, "avg_line_length": 26.526315789473685, "alnum_prop": 0.6488095238095238, "repo_name": "OpenCanada/website", "id": "d4c77b1a26c287554651e6a15191ae129fda0e2d", "size": "528", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "images/migrations/0004_auto_20160606_1603.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "231211" }, { "name": "HTML", "bytes": "400148" }, { "name": "JavaScript", "bytes": "61896" }, { "name": "Python", "bytes": "417101" }, { "name": "Shell", "bytes": "985" } ], "symlink_target": "" }
from abc import ABCMeta, abstractmethod


class BaseClient:
    """An interface defining the methods an agent uses to communicate
    with the teletraan service.
    """
    __metaclass__ = ABCMeta

    @abstractmethod
    def send_reports(self, env_reports=None):
        """Args:
            env_reports: a dict with env name as key and DeployStatus as value.

        Returns:
            PingResponse describing the next action for the deploy agent.
        """
        pass
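
# A minimal sketch of a concrete client, for illustration only. The
# transport, endpoint, and payload encoding implied here are assumptions
# and not part of the real teletraan agent.
class ExampleLoggingClient(BaseClient):
    def send_reports(self, env_reports=None):
        # env_reports maps env name -> DeployStatus, per the interface
        # docstring. A real implementation would serialize the reports,
        # send them to the teletraan service, and decode a PingResponse.
        for env_name, deploy_status in (env_reports or {}).items():
            print("reporting %s: %s" % (env_name, deploy_status))
        return None  # placeholder where a PingResponse would be returned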
{ "content_hash": "08c2f62142bac826e0bf123206239497", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 79, "avg_line_length": 25.94736842105263, "alnum_prop": 0.6348884381338742, "repo_name": "nickdechant/teletraan", "id": "39d6b77c6d6ca90da242dc851265cf40f9d92864", "size": "1079", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "deploy-agent/deployd/client/base_client.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "268204" }, { "name": "HTML", "bytes": "283383" }, { "name": "Java", "bytes": "984681" }, { "name": "JavaScript", "bytes": "2758442" }, { "name": "Makefile", "bytes": "184" }, { "name": "Python", "bytes": "643906" }, { "name": "Ruby", "bytes": "1001" }, { "name": "Shell", "bytes": "16702" } ], "symlink_target": "" }
from __future__ import print_function

from pyspark import SparkContext, SparkConf
from pyspark.streaming import StreamingContext
from pyspark.sql import SQLContext
import time
import socket


def main(sc):
    # Initialize the Spark streaming context with a batch interval of 10 sec;
    # messages accumulate for 10 seconds and are then processed as one batch.
    ssc = StreamingContext(sc, batch_interval)

    # Resolve the local host address on which the logs are received
    host = socket.gethostbyname(socket.gethostname())

    # Create a DStream that represents streaming data from a TCP source
    socket_stream = ssc.socketTextStream(host, 5555)
    lines = socket_stream.window(window_time)
    #lines.pprint()
    lines.foreachRDD(lambda x: print(x.collect()))
    #words = lines.flatMap(lambda line: line.split(" "))
    #words.pprint()

    # Start the streaming process
    ssc.start()
    #time.sleep(window_time*5)
    process_cnt = 0
    while process_cnt < process_times:
        time.sleep(window_time)
        process_cnt += 1
    ssc.stop()


if __name__ == "__main__":
    # Define the Spark configuration
    conf = SparkConf()
    conf.setMaster("local[4]")
    conf.setAppName("Stream Analysis")

    # Initialize a SparkContext
    sc = SparkContext(conf=conf)

    batch_interval = 10
    window_time = 10
    process_times = 1

    # Total time the run is expected to take (currently unused)
    total_time = batch_interval * process_times

    main(sc)
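
# For reference, a sketch of the word-count transform hinted at by the
# commented-out lines in main(); `lines` is the windowed DStream built
# there. It is not wired into the job above.
#
#     words = lines.flatMap(lambda line: line.split(" "))
#     counts = words.map(lambda word: (word, 1)) \
#                   .reduceByKey(lambda a, b: a + b)
#     counts.pprint()
#
# To feed the stream during a run, start a listener on port 5555 before
# launching the job, e.g. with netcat: nc -lk 5555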
{ "content_hash": "d20645d68291552fd2959788cf4795a7", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 71, "avg_line_length": 22.45614035087719, "alnum_prop": 0.73359375, "repo_name": "xuwenyihust/Visor", "id": "12a37bba2da49b5e75362b079c8e3ba591cf7bab", "size": "1280", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/stream_monitor/stream_monitor.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "11533" }, { "name": "Python", "bytes": "39909" }, { "name": "Shell", "bytes": "526" } ], "symlink_target": "" }
""" GoRename is a Go gorename plugin for Sublime Text 3. It depends on the gorename tool being installed: go get -u golang.org/x/tools/cmd/gorename """ # TODO: review & clean import sublime, sublime_plugin, subprocess, time, re, os, subprocess, sys, time, hashlib DEBUG = False VERSION = '' use_golangconfig = False # holds renaming parameters renameMe = {} runningTool = False def log(*msg): print("GoRename:", msg[0:]) def debug(*msg): if DEBUG: print("GoRename [DEBUG]:", msg[0:]) def error(*msg): print("GoRename [ERROR]:", msg[0:]) def plugin_loaded(): global DEBUG global VERSION global use_golangconfig DEBUG = get_setting("gorename_debug", False) use_golangconfig = get_setting("gorename_use_golangconfig", False) # load shellenv def load_shellenv(): global shellenv from .dep import shellenv # try golangconfig if use_golangconfig: try: global golangconfig import golangconfig except: error("couldn't import golangconfig:", sys.exc_info()[0]) log("using shellenv instead of golangconfig") use_golangconfig = False load_shellenv() else: load_shellenv() log("debug:", DEBUG) log("use_golangconfig", use_golangconfig) # keep track of the version if possible (pretty nasty workaround, any other ideas ?) try: PluginPath = os.path.dirname(os.path.realpath(__file__)) p = subprocess.Popen(["git", "describe", "master", "--tags"], stdout=subprocess.PIPE, cwd=PluginPath) GITVERSION = p.communicate()[0].decode("utf-8").rstrip() if p.returncode != 0: debug("git return code", p.returncode) raise Exception("git return code", p.returncode) defsettings = os.path.join(PluginPath, 'Default.sublime-settings') f = open(defsettings,'r') filedata = f.read() f.close() newdata = filedata.replace(get_setting('gorename_version'), GITVERSION+'_') f = open(defsettings,'w') f.write(newdata) f.close() except: debug("couldn't get git tag:", sys.exc_info()[0]) # read version VERSION = sublime.load_settings('Default.sublime-settings').get('gorename_version') log("version:", VERSION) # check if user setting exists and creates it us = sublime.load_settings("GoRename.sublime-settings") if (not us.has('gorename_debug')): us.set('gorename_debug', DEBUG) sublime.save_settings("GoRename.sublime-settings") class GoRenameCommand(sublime_plugin.TextCommand): def __init__(self, view): self.view = view # ... def run(self, edit, simulate=False, force=False, verbose=False): try: current_selection = self.view.sel() region = current_selection[0] text = self.view.substr(sublime.Region(0, region.end())) cb_map = self.get_map(text) byte_end = cb_map[sorted(cb_map.keys())[-1]] byte_begin = None if not region.empty(): byte_begin = cb_map[region.begin()-1] else: byte_begin = byte_end except: sublime.error_message('GoRename:\nCouldn\'t get cursor positon, make sure that the Go source file is saved and the cursor is over the identifier (variable, function ...) 
you want to query.') error("couldn't cursor position: ", sys.exc_info()[0]) word = self.view.substr(self.view.word(region.begin())).rstrip() position = self.view.rowcol(region.begin()) line_number = position[0]+1 del position line_string = self.view.substr(self.view.line(region)) # TODO: improve preliminary identifier validation if len(word) == 0: self.view.show_popup('<b>Gorename</b>:<br/> Invalid identifier:\nno identifier here.') return message = 'Running GoRename %s:\nFrom %s to %s\n[Line Number: %s][Byte Offset: %s]\nFlags: %s\nReference:\n%s' global s, f, v, flags s = simulate f = force v = verbose flags = '' def compile_flags(only_enabled=False): # and construct flags argument compiled_flags_array = [] enabledTitle = 'ENABLED: ' if only_enabled: enabledTitle = '' global flags # reset flags = '' if s: compiled_flags_array.append(enabledTitle+'Simulate (-d)') flags = '-d ' elif not only_enabled: compiled_flags_array.append('DISABLED: Simulate (-d)') if f: compiled_flags_array.append(enabledTitle+'force (-force)') flags = flags + '-force ' elif not only_enabled: compiled_flags_array.append('DISABLED: force (-force)') if v: compiled_flags_array.append(enabledTitle+'verbose (-v)') flags = flags + '-v' elif not only_enabled: compiled_flags_array.append('DISABLED: verbose (-v)') return compiled_flags_array def rename_name_input(name): debug('flags:', flags) global renameMe renameMe['compiled_message'] = message % ('%s', word.replace('%', '%%'), name.replace('%', '%%'), line_number, byte_begin, str(compile_flags(True)).replace('%', '%%'), line_string.replace('%', '%%'), ) self.write_running(renameMe['compiled_message'] % ('[press ENTER to continue]'), True, True) renameMe['offset'] = byte_begin renameMe['name'] = name renameMe['flags'] = flags renameMe['file_path'] = self.view.file_name() renameMe['checksum'] = hashlib.sha256(open(renameMe['file_path'],'rb').read()).hexdigest() def popup_menu_callback(flag_opt): global s,f,v if flag_opt == 0: s = not s elif flag_opt == 1: f = not f elif flag_opt == 2: v = not v if flag_opt != -1: pop_menu() else: varName = '' if get_setting("gorename_autofill", False): varName = word self.view.window().show_input_panel('GoRename: rename "%s" (from line %s) to' % (word, line_number), varName, rename_name_input, on_change=None, on_cancel=None) def pop_menu(): self.view.show_popup_menu(compile_flags(), popup_menu_callback) pop_menu() def gorename_complete(self, out, err, focus=False): self.write_out(out, err) def write_running(self, content, readonly=False, focus=False): """ Write the "Running..." header to a new file and focus it to get results """ #window = self.view.window() window = sublime.active_window() view = get_output_view(window) view.set_read_only(False) # Run a new command to use the edit object for this view. view.run_command('go_rename_write_running', {'content': content}) if get_setting("gorename_output", "buffer") == "output_panel": window.run_command('show_panel', {'panel': "output." + view.name() }) else: window.focus_view(view) view.set_read_only(readonly) # focus no matter what if focus: window.focus_view(view) def write_out(self, result, err): """ Write the gorename output to a new file. """ #window = self.view.window() window = sublime.active_window() view = get_output_view(window) # Run a new command to use the edit object for this view. view.run_command('go_rename_write_results', { 'result': result, 'err': err}) if get_setting("gorename_output", "buffer") == "output_panel": window.run_command('show_panel', {'panel': "output." 
+ view.name() }) else: window.focus_view(view) def get_map(self, chars): """ Generate a map of character offset to byte offset for the given string 'chars'. """ byte_offset = 0 cb_map = {} for char_offset, char in enumerate(chars): cb_map[char_offset] = byte_offset byte_offset += len(char.encode('utf-8')) if char == '\n' and self.view.line_endings() == "Windows": byte_offset += 1 return cb_map def gorename(self, file_path, begin_offset=None, flags=None, name=None, callback=None): """ Builds the gorename shell command and calls it, returning it's output as a string. """ global runningTool runningTool = True pos = "#" + str(begin_offset) # golangconfig or shellenv ? cmd_env = '' if use_golangconfig: try: toolpath, cmd_env = golangconfig.subprocess_info('gorename', ['GOPATH', 'PATH'], view=self.view) toolpath = os.path.realpath(toolpath) except: error("golangconfig:", sys.exc_info()) return else: toolpath = 'gorename' cmd_env = shellenv.get_env(for_subprocess=True)[1] cmd_env.update(get_setting("gorename_env", {})) debug("env", cmd_env) gorename_json = "" if get_setting("gorename_json", False): gorename_json = "-json" # Build gorename cmd. cmd = "%(toolpath)s -offset \"%(file_path)s:%(pos)s\" -to %(name)s %(flags)s" % { "toolpath": toolpath, "file_path": os.path.realpath(file_path), "pos": pos, "name": name, "flags": flags} debug("cmd", cmd) sublime.set_timeout_async(lambda: self.runInThread(cmd, callback, cmd_env), 0) def runInThread(self, cmd, callback, env): proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, shell=True, env=env) out, err = proc.communicate() callback(out.decode('utf-8'), err.decode('utf-8')) global runningTool runningTool = False class GoRenameConfirmCommand(sublime_plugin.TextCommand): """ Writes the gorename output to the current view. """ def run(self, edit): global renameMe #view = self.view debug('Stored rename parameters:', renameMe) # check that the referenced file hasn't changed if (len(renameMe)==0): sublime.error_message("Invalid GoRename parameters") if (runningTool == False): if ((hashlib.sha256(open(renameMe['file_path'],'rb').read()).hexdigest() != renameMe['checksum']) and (get_setting('gorename_rename_modified_files', False) == False)): sublime.error_message("Couldn't execute gorename, the referenced file has changed, please start over.") # reset renameMe renameMe = {} else: GR = GoRenameCommand(self) GR.write_running(renameMe['compiled_message'] % ('[Running...]'), True, True) GR.gorename(file_path=renameMe['file_path'] ,begin_offset=renameMe['offset'], name=renameMe['name'], flags=renameMe['flags'], callback=GR.gorename_complete) # reset renameMe renameMe = {} else: sublime.message_dialog("GoRename tool already executing") class GoRenameWriteResultsCommand(sublime_plugin.TextCommand): """ Writes the gorename output to the current view. """ def run(self, edit, result, err): view = self.view view.set_read_only(False) if result: view.insert(edit, view.size(), result) if err: errLen = view.insert(edit, view.size(), err) view.set_read_only(True) # reset global renameMe renameMe = {} class GoRenameWriteRunningCommand(sublime_plugin.TextCommand): """ Writes the gorename output to the current view. 
""" def run(self, edit, content): view = self.view view.set_viewport_position(view.text_to_layout(view.size() - 1)) view.insert(edit, view.size(), content) class GoRenameShowResultsCommand(sublime_plugin.TextCommand): def run(self, edit): if get_setting("gorename_output", "buffer") == "output_panel": self.view.window().run_command('show_panel', {'panel': "output.GoRename Output" }) else: output_view = get_output_view(self.view.window()) self.view.window().focus_view(output_view) class GoRenameOpenResultCommand(sublime_plugin.EventListener): ''' def on_modification(self, view): if view.name() == "GoRename Output": log("on modif") ''' def on_selection_modified(self, view): if view.name() == "GoRename Output": if len(view.sel()) != 1: return if view.sel()[0].size() == 0: return lines = view.lines(view.sel()[0]) if len(lines) != 1: return line = view.full_line(lines[0]) text = view.substr(line) format = get_setting("gorename_format") # "filename:line:col" pattern for json m = re.search("\"([^\"]+):([0-9]+):([0-9]+)\"", text) # >filename:line:col< pattern for xml if m == None: m = re.search(">([^<]+):([0-9]+):([0-9]+)<", text) # filename:line.col-line.col: pattern for plain if m == None: m = re.search("^([^:]+):([0-9]+).([0-9]+)[-: ]", text) if m: w = view.window() new_view = w.open_file(m.group(1) + ':' + m.group(2) + ':' + m.group(3), sublime.ENCODED_POSITION) group, index = w.get_view_index(new_view) if group != -1: w.focus_group(group) def get_output_view(window): view = None buff_name = 'GoRename Output' if get_setting("gorename_output", "buffer") == "output_panel": view = window.create_output_panel(buff_name) else: # If the output file is already open, use that. for v in window.views(): if v.name() == buff_name: view = v break # Otherwise, create a new one. if view is None: view = window.new_file() view.set_name(buff_name) view.set_scratch(True) view_settings = view.settings() view_settings.set('line_numbers', False) view.set_syntax_file('Packages/GoRename/GoRenameResults.tmLanguage') return view def get_setting(key, default=None): """ Returns the setting in the following hierarchy: project setting, user setting, default setting. If none are set the 'default' value passed in is returned. """ val = None try: val = sublime.active_window().active_view().settings().get('GoRename', {}).get(key) except AttributeError: pass if not val: val = sublime.load_settings("GoRename.sublime-settings").get(key) if not val: val = sublime.load_settings("Default.sublime-settings").get(key) if not val: val = default return val
{ "content_hash": "27cde97c7781ad2a63c6b65f3fa4fb3a", "timestamp": "", "source": "github", "line_count": 463, "max_line_length": 202, "avg_line_length": 33.66090712742981, "alnum_prop": 0.5583573949310234, "repo_name": "alvarolm/GoRename", "id": "5d706c05faff5e214238243cbfe0474d58c6ed22", "size": "15773", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "goRename.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "37094" } ], "symlink_target": "" }
import abc from oslo_log import log as logging import webob.exc from neutron.api import extensions from neutron.api.v2 import resource from neutron.common import rpc as n_rpc from neutron import policy from neutron import wsgi from neutron_lib import exceptions from networking_cisco._i18n import _ from networking_cisco import backwards_compatibility as bc from networking_cisco.backwards_compatibility import agent_exceptions from networking_cisco.backwards_compatibility import cb_faults from networking_cisco.plugins.cisco.common import cisco_constants from networking_cisco.plugins.cisco.extensions import ciscohostingdevicemanager PATH_PREFIX = "/dev_mgr" LOG = logging.getLogger(__name__) class InvalidCfgAgent(agent_exceptions.AgentNotFound): message = _("Agent %(agent_id)s is not a Cisco cfg agent or has been " "disabled") class HostingDeviceAssignedToCfgAgent(exceptions.Conflict): message = _("The hosting device %(hosting_device_id)s is already assigned " "to Cisco cfg agent %(agent_id)s.") class HostingDeviceSchedulingFailed(exceptions.Conflict): message = _("Failed to assign hosting device %(hosting_device_id)s to " "Cisco cfg agent %(agent_id)s.") class HostingDeviceNotAssignedToCfgAgent(exceptions.NotFound): message = _("The hosting device %(hosting_device_id)s is currently not " "assigned to Cisco cfg agent %(agent_id)s.") CFG_AGENT_SCHEDULER_ALIAS = 'cisco-cfg-agent-scheduler' CFG_AGENT_HOSTING_DEVICE = 'cfg-agent-hosting-device' CFG_AGENT_HOSTING_DEVICES = CFG_AGENT_HOSTING_DEVICE + 's' HOSTING_DEVICE_CFG_AGENT = 'hosting-device-cfg-agent' HOSTING_DEVICE_CFG_AGENTS = HOSTING_DEVICE_CFG_AGENT + 's' class HostingDeviceSchedulerController(wsgi.Controller): def get_plugin(self): plugin = bc.get_plugin(cisco_constants.DEVICE_MANAGER) if not plugin: LOG.error('No Device manager service plugin registered to ' 'handle hosting device scheduling') msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) return plugin def index(self, request, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "get_%s" % CFG_AGENT_HOSTING_DEVICES, {}) return plugin.list_hosting_devices_handled_by_cfg_agent( request.context, kwargs['agent_id']) def create(self, request, body, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "create_%s" % CFG_AGENT_HOSTING_DEVICE, {}) cfg_agent_id = kwargs['agent_id'] hosting_device_id = body['hosting_device_id'] result = plugin.assign_hosting_device_to_cfg_agent( request.context, cfg_agent_id, hosting_device_id) notify(request.context, 'agent.hosting_device.add', hosting_device_id, cfg_agent_id) return result def delete(self, request, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "delete_%s" % CFG_AGENT_HOSTING_DEVICE, {}) cfg_agent_id = kwargs['agent_id'] hosting_device_id = kwargs['id'] result = plugin.unassign_hosting_device_from_cfg_agent( request.context, cfg_agent_id, hosting_device_id) notify(request.context, 'agent.hosting_device.remove', hosting_device_id, cfg_agent_id) return result class CfgAgentsHandlingHostingDeviceController(wsgi.Controller): def get_plugin(self): plugin = bc.get_plugin(cisco_constants.DEVICE_MANAGER) if not plugin: LOG.error('No device manager service plugin registered to ' 'handle hosting device scheduling') msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) return plugin def index(self, request, **kwargs): plugin = self.get_plugin() policy.enforce(request.context, "get_%s" % HOSTING_DEVICE_CFG_AGENTS, {}) return 
plugin.list_cfg_agents_handling_hosting_device( request.context, kwargs['hosting_device_id']) class Ciscocfgagentscheduler(bc.extensions.ExtensionDescriptor): """Extension class supporting configuration agent scheduler.""" @classmethod def get_name(cls): return "Cisco Configuration Agent Scheduler" @classmethod def get_alias(cls): return CFG_AGENT_SCHEDULER_ALIAS @classmethod def get_description(cls): return "Schedule hosting devices among Cisco configuration agents" @classmethod def get_namespace(cls): return ("http://docs.openstack.org/ext/" + CFG_AGENT_SCHEDULER_ALIAS + "/api/v1.0") @classmethod def get_updated(cls): return "2014-03-31T10:00:00-00:00" @classmethod def get_resources(cls): """Returns Ext Resources.""" exts = [] parent = dict(member_name="agent", collection_name="agents") controller = resource.Resource(HostingDeviceSchedulerController(), cb_faults.FAULT_MAP) exts.append(extensions.ResourceExtension(CFG_AGENT_HOSTING_DEVICES, controller, parent)) parent = dict(member_name=ciscohostingdevicemanager.DEVICE, collection_name=ciscohostingdevicemanager.DEVICES) controller = resource.Resource( CfgAgentsHandlingHostingDeviceController(), cb_faults.FAULT_MAP) exts.append(extensions.ResourceExtension(HOSTING_DEVICE_CFG_AGENTS, controller, parent, PATH_PREFIX)) return exts def get_extended_resources(self, version): return {} class CfgAgentSchedulerPluginBase(object): """REST API to operate the cfg agent scheduler. All of method must be in an admin context. """ @abc.abstractmethod def assign_hosting_device_to_cfg_agent(self, context, id, hosting_device_id): pass @abc.abstractmethod def unassign_hosting_device_from_cfg_agent(self, context, id, hosting_device_id): pass @abc.abstractmethod def list_hosting_devices_handled_by_cfg_agent(self, context, id): pass @abc.abstractmethod def list_cfg_agents_handling_hosting_device(self, context, hosting_device_id): pass def notify(context, action, hosting_device_id, cfg_agent_id): info = {'id': cfg_agent_id, 'hosting_device_id': hosting_device_id} notifier = n_rpc.get_notifier('hosting_device') notifier.info(context, action, {'cfg_agent': info})
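
# For orientation, the two resource extensions registered above map to
# REST paths of roughly this shape (prefix and IDs are illustrative):
#
#   GET    /agents/<agent_id>/cfg-agent-hosting-devices
#   POST   /agents/<agent_id>/cfg-agent-hosting-devices
#          with body {"hosting_device_id": "<uuid>"}
#   DELETE /agents/<agent_id>/cfg-agent-hosting-devices/<uuid>
#   GET    /dev_mgr/hosting_devices/<id>/hosting-device-cfg-agents
#
# The POST and DELETE handlers also emit 'agent.hosting_device.add' and
# 'agent.hosting_device.remove' notifications via notify() above.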
{ "content_hash": "3fe112bae8c6bab4ee69e459934c6dbf", "timestamp": "", "source": "github", "line_count": 186, "max_line_length": 79, "avg_line_length": 37.32258064516129, "alnum_prop": 0.6381446269086718, "repo_name": "noironetworks/networking-cisco", "id": "4fb186ce77101ea3ad1d3e111a977f3e26e79fd9", "size": "7576", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "networking_cisco/plugins/cisco/extensions/ciscocfgagentscheduler.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "1043" }, { "name": "Python", "bytes": "3635799" }, { "name": "Shell", "bytes": "30511" } ], "symlink_target": "" }
from __future__ import unicode_literals, division, absolute_import

import logging
from datetime import datetime, timedelta

from sqlalchemy import Column, Integer, String, Unicode, DateTime
from sqlalchemy.schema import Index

from flexget import db_schema, options, plugin
from flexget.event import event
from flexget.logger import console
from flexget.manager import Session
from flexget.utils.tools import parse_timedelta
from flexget.utils.sqlalchemy_utils import table_add_column, table_schema

SCHEMA_VER = 3

log = logging.getLogger('failed')
Base = db_schema.versioned_base('failed', SCHEMA_VER)


@db_schema.upgrade('failed')
def upgrade(ver, session):
    if ver is None:
        # add count column
        table_add_column('failed', 'count', Integer, session, default=1)
        ver = 0
    if ver == 0:
        # define an index
        log.info('Adding database index ...')
        failed = table_schema('failed', session)
        Index('failed_title_url', failed.c.title, failed.c.url, failed.c.count).create()
        ver = 1
    if ver == 1:
        table_add_column('failed', 'reason', Unicode, session)
        ver = 2
    if ver == 2:
        table_add_column('failed', 'retry_time', DateTime, session)
        ver = 3
    return ver


class FailedEntry(Base):
    __tablename__ = 'failed'

    id = Column(Integer, primary_key=True)
    title = Column(Unicode)
    url = Column(String)
    tof = Column(DateTime)
    reason = Column(Unicode)
    count = Column(Integer, default=1)
    retry_time = Column(DateTime)

    def __init__(self, title, url, reason=None):
        self.title = title
        self.url = url
        self.reason = reason
        self.tof = datetime.now()

    def __str__(self):
        return '<Failed(title=%s)>' % self.title

# create indexes, used when creating tables
columns = Base.metadata.tables['failed'].c
Index('failed_title_url', columns.title, columns.url, columns.count)


class PluginFailed(object):
    """
    Records entry failures and stores them for trying again after a certain
    interval. Rejects them after they have failed too many times.
    """

    schema = {
        "oneOf": [
            # Allow retry_failed: no form to turn off plugin altogether
            {"type": "boolean"},
            {
                "type": "object",
                "properties": {
                    "retry_time": {"type": "string", "format": "interval", "default": "1 hour"},
                    "max_retries": {"type": "integer", "minimum": 0, "default": 3},
                    "retry_time_multiplier": {
                        # Allow turning off the retry multiplier with 'no' as well as 1
                        "oneOf": [{"type": "number", "minimum": 0}, {"type": "boolean"}],
                        "default": 1.5
                    }
                },
                "additionalProperties": False
            }
        ]
    }

    def __init__(self):
        try:
            self.backlog = plugin.get_plugin_by_name('backlog')
        except plugin.DependencyError:
            log.warning('Unable to utilize the backlog plugin; failed entries may not be retried properly.')

    def prepare_config(self, config):
        if not isinstance(config, dict):
            config = {}
        config.setdefault('retry_time', '1 hour')
        config.setdefault('max_retries', 3)
        if config.get('retry_time_multiplier', True) is True:
            # If multiplier is not specified, or is specified as True, use the default
            config['retry_time_multiplier'] = 1.5
        else:
            # If multiplier is False, turn it off
            config['retry_time_multiplier'] = 1
        return config

    def retry_time(self, fail_count, config):
        """Return the timedelta that an entry which has already failed `fail_count`
        times should wait before being retried."""
        base_retry_time = parse_timedelta(config['retry_time'])
        # Timedeltas do not allow floating point multiplication. Convert to seconds and then back to avoid this.
base_retry_secs = base_retry_time.days * 86400 + base_retry_time.seconds retry_secs = base_retry_secs * (config['retry_time_multiplier'] ** fail_count) return timedelta(seconds=retry_secs) @plugin.priority(-255) def on_task_input(self, task, config): config = self.prepare_config(config) for entry in task.all_entries: entry.on_fail(self.add_failed, config=config) def add_failed(self, entry, reason=None, config=None, **kwargs): """Adds entry to internal failed list, displayed with --failed""" reason = reason or 'Unknown' with Session() as session: # query item's existence item = session.query(FailedEntry).filter(FailedEntry.title == entry['title']).\ filter(FailedEntry.url == entry['original_url']).first() if not item: item = FailedEntry(entry['title'], entry['original_url'], reason) item.count = 0 retry_time = self.retry_time(item.count, config) item.retry_time = datetime.now() + retry_time item.count += 1 item.tof = datetime.now() item.reason = reason session.merge(item) log.debug('Marking %s in failed list. Has failed %s times.' % (item.title, item.count)) if self.backlog and item.count <= config['max_retries']: self.backlog.instance.add_backlog(entry.task, entry, amount=retry_time, session=session) entry.task.rerun() # limit item number to 25 for row in session.query(FailedEntry).order_by(FailedEntry.tof.desc())[25:]: session.delete(row) @plugin.priority(255) def on_task_filter(self, task, config): if config is False: return config = self.prepare_config(config) max_count = config['max_retries'] for entry in task.entries: item = task.session.query(FailedEntry).filter(FailedEntry.title == entry['title']).\ filter(FailedEntry.url == entry['original_url']).first() if item: if item.count > max_count: entry.reject('Has already failed %s times in the past. (failure reason: %s)' % (item.count, item.reason)) elif item.retry_time and item.retry_time > datetime.now(): entry.reject('Waiting before retrying entry which has failed in the past. (failure reason: %s)' % item.reason) def do_cli(manager, options): if options.failed_action == 'list': list_failed() elif options.failed_action == 'clear': clear_failed(manager) def list_failed(): session = Session() try: results = session.query(FailedEntry).all() if not results: console('No failed entries recorded') for entry in results: console('%16s - %s - %s times - %s' % (entry.tof.strftime('%Y-%m-%d %H:%M'), entry.title, entry.count, entry.reason)) finally: session.close() def clear_failed(manager): session = Session() try: results = session.query(FailedEntry).delete() console('Cleared %i items.' % results) session.commit() if results: manager.config_changed() finally: session.close() @event('plugin.register') def register_plugin(): plugin.register(PluginFailed, 'retry_failed', builtin=True, api_ver=2) @event('options.register') def register_parser_arguments(): parser = options.register_command('failed', do_cli, help='list or clear remembered failures') subparsers = parser.add_subparsers(dest='failed_action', metavar='<action>') subparsers.add_parser('list', help='list all the entries that have had failures') subparsers.add_parser('clear', help='clear all failures from database, so they can be retried')
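
# A worked example of the backoff computed by PluginFailed.retry_time()
# under the default config (retry_time '1 hour', retry_time_multiplier 1.5):
#
#   fail_count 0  ->  3600s * 1.5**0  =  1 hour
#   fail_count 1  ->  3600s * 1.5**1  =  1.5 hours
#   fail_count 2  ->  3600s * 1.5**2  =  2.25 hours
#
# Once an entry has failed more than max_retries (default 3) times,
# on_task_filter() rejects it instead of scheduling another retry.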
{ "content_hash": "2a3c12b7792a5f2835c504b379fd99b4", "timestamp": "", "source": "github", "line_count": 210, "max_line_length": 119, "avg_line_length": 38.02857142857143, "alnum_prop": 0.5996744302529426, "repo_name": "camon/Flexget", "id": "8813e3e980e0e850e87f7cf66411fd726c3dfd0c", "size": "7986", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "flexget/plugins/filter/retry_failed.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "56725" }, { "name": "JavaScript", "bytes": "455222" }, { "name": "Python", "bytes": "1957167" } ], "symlink_target": "" }
import tornado.iostream from waterbutler.server import settings CORS_ACCEPT_HEADERS = [ 'Range', 'Content-Type', 'Authorization', 'Cache-Control', 'X-Requested-With', 'X-CSRFToken', ] CORS_EXPOSE_HEADERS = [ 'Range', 'Accept-Ranges', 'Content-Range', 'Content-Length', 'Content-Encoding', ] HTTP_REASONS = { 422: 'Unprocessable Entity', 461: 'Unavailable For Legal Reasons', } def make_disposition(filename): return 'attachment;filename="{}"'.format(filename.replace('"', '\\"')) def parse_request_range(range_header): """WB uses tornado's ``httputil._parse_request_range`` function to parse the Range HTTP header and return a tuple representing the range. Tornado's version returns a tuple suitable for slicing arrays, meaning that a range of 0-1 will be returned as ``(0, 2)``. WB had been assuming that the tuple would represent the first and last byte positions and was consistently returning one more byte than requested. Since WB doesn't ever use ranges to do list slicing of byte streams, this function wraps tornado's version and returns the actual byte indices. Ex. ``Range: bytes=0-1`` will be returned as ``(0, 1)``. If the end byte is omitted, the second element of the tuple will be ``None``. This will be sent to the provider as an open ended range, e.g. (``Range: bytes=5-``). Most providers interpret this to mean "send from the start byte to the end of the file". If this function receives an unsupported or unfamiliar Range header, it will return ``None``, indicating that the full file should be sent. Some formats supported by other providers but unsupported by WB include: * ``Range: bytes=-5`` -- some providers interpret this as "send the last five bytes" * ``Range: bytes=0-5,10-12`` -- indicates a multi-range, "send the first six bytes, then the next three bytes starting from the eleventh". Unfamiliar byte ranges are anything not matching ``^bytes=[0-9]+\-[0-9]*$``, or ranges where the end byte position is less than the start byte. :param str range_header: a string containing the value of the Range header :rtype: `tuple` or `None` :return: a `tuple` representing the inclusive range of byte positions or `None`. """ request_range = tornado.httputil._parse_request_range(range_header) if request_range is None: return request_range start, end = request_range if start is None or start < 0: return None if end is not None: end -= 1 if end < start: return None return (start, end) class CORsMixin: def _cross_origin_is_allowed(self): if self.request.method == 'OPTIONS': return True elif not self.request.cookies and self.request.headers.get('Authorization'): return True return False def set_default_headers(self): if not self.request.headers.get('Origin'): return allowed_origin = None if self._cross_origin_is_allowed(): allowed_origin = self.request.headers['Origin'] elif isinstance(settings.CORS_ALLOW_ORIGIN, str): if settings.CORS_ALLOW_ORIGIN == '*': # Wild cards cannot be used with allowCredentials. 
# Match Origin if it's specified, makes pdfs and pdbs render properly allowed_origin = self.request.headers['Origin'] else: allowed_origin = settings.CORS_ALLOW_ORIGIN else: if self.request.headers['Origin'] in settings.CORS_ALLOW_ORIGIN: allowed_origin = self.request.headers['Origin'] if allowed_origin is not None: self.set_header('Access-Control-Allow-Origin', allowed_origin) self.set_header('Access-Control-Allow-Credentials', 'true') self.set_header('Access-Control-Allow-Headers', ', '.join(CORS_ACCEPT_HEADERS)) self.set_header('Access-Control-Expose-Headers', ', '.join(CORS_EXPOSE_HEADERS)) self.set_header('Cache-control', 'no-store, no-cache, must-revalidate, max-age=0') def options(self, *args, **kwargs): self.set_status(204) if self.request.headers.get('Origin'): self.set_header('Access-Control-Allow-Methods', 'GET, PUT, POST, DELETE') class UtilMixin: bytes_downloaded = 0 bytes_uploaded = 0 def set_status(self, code, reason=None): return super().set_status(code, reason or HTTP_REASONS.get(code)) async def write_stream(self, stream): try: while True: chunk = await stream.read(settings.CHUNK_SIZE) if not chunk: break # Temp fix, write does not accept bytearrays currently if isinstance(chunk, bytearray): chunk = bytes(chunk) self.write(chunk) self.bytes_downloaded += len(chunk) del chunk await self.flush() except tornado.iostream.StreamClosedError: # Client has disconnected early. # No need for any exception to be raised return
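A short usage sketch of parse_request_range; the expected tuples follow directly from its docstring above (the import path matches this module, and tornado must be installed for the module to load):

from waterbutler.server.utils import parse_request_range

assert parse_request_range('bytes=0-1') == (0, 1)       # inclusive byte positions
assert parse_request_range('bytes=5-') == (5, None)     # open-ended: byte 5 to end of file
assert parse_request_range('bytes=-5') is None          # suffix ranges are unsupported
assert parse_request_range('bytes=0-5,10-12') is None   # multi-ranges are unsupported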
{ "content_hash": "16e23e16be07f7022c475674a95695b0", "timestamp": "", "source": "github", "line_count": 144, "max_line_length": 99, "avg_line_length": 36.270833333333336, "alnum_prop": 0.6350756270342715, "repo_name": "RCOSDP/waterbutler", "id": "a421343c402de7ba0cf55a24d080172f27e1ab23", "size": "5223", "binary": false, "copies": "1", "ref": "refs/heads/nii-mergework-201901", "path": "waterbutler/server/utils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "840181" } ], "symlink_target": "" }
import copy import io import os import pickle import re import shutil import tempfile import threading import time import unittest import warnings from unittest import mock from django.conf import settings from django.core import management, signals from django.core.cache import ( DEFAULT_CACHE_ALIAS, CacheKeyWarning, cache, caches, ) from django.core.cache.utils import make_template_fragment_key from django.db import close_old_connections, connection, connections from django.http import ( HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse, ) from django.middleware.cache import ( CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware, ) from django.middleware.csrf import CsrfViewMiddleware from django.template import engines from django.template.context_processors import csrf from django.template.response import TemplateResponse from django.test import ( RequestFactory, SimpleTestCase, TestCase, TransactionTestCase, ignore_warnings, override_settings, ) from django.test.signals import setting_changed from django.utils import timezone, translation from django.utils.cache import ( get_cache_key, learn_cache_key, patch_cache_control, patch_response_headers, patch_vary_headers, ) from django.utils.deprecation import RemovedInDjango21Warning from django.views.decorators.cache import cache_page from .models import Poll, expensive_calculation # functions/classes for complex data type tests def f(): return 42 class C: def m(n): return 24 class Unpicklable: def __getstate__(self): raise pickle.PickleError() @override_settings(CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', } }) class DummyCacheTests(SimpleTestCase): # The Dummy cache backend doesn't really behave like a test backend, # so it has its own test case. 
def test_simple(self): "Dummy cache backend ignores cache set calls" cache.set("key", "value") self.assertIsNone(cache.get("key")) def test_add(self): "Add doesn't do anything in dummy cache backend" cache.add("addkey1", "value") result = cache.add("addkey1", "newvalue") self.assertTrue(result) self.assertIsNone(cache.get("addkey1")) def test_non_existent(self): "Nonexistent keys aren't found in the dummy cache backend" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): "get_many returns nothing for the dummy cache backend" cache.set('a', 'a') cache.set('b', 'b') cache.set('c', 'c') cache.set('d', 'd') self.assertEqual(cache.get_many(['a', 'c', 'd']), {}) self.assertEqual(cache.get_many(['a', 'b', 'e']), {}) def test_delete(self): "Cache deletion is transparently ignored on the dummy cache backend" cache.set("key1", "spam") cache.set("key2", "eggs") self.assertIsNone(cache.get("key1")) cache.delete("key1") self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_has_key(self): "The has_key method doesn't ever return True for the dummy cache backend" cache.set("hello1", "goodbye1") self.assertFalse(cache.has_key("hello1")) self.assertFalse(cache.has_key("goodbye1")) def test_in(self): "The in operator doesn't ever return True for the dummy cache backend" cache.set("hello2", "goodbye2") self.assertNotIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): "Dummy cache values can't be incremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.incr('answer') with self.assertRaises(ValueError): cache.incr('does_not_exist') def test_decr(self): "Dummy cache values can't be decremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.decr('answer') with self.assertRaises(ValueError): cache.decr('does_not_exist') def test_data_types(self): "All data types are ignored equally by the dummy cache" stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertIsNone(cache.get("stuff")) def test_expiration(self): "Expiration has no effect on the dummy cache" cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) cache.add("expire2", "newvalue") self.assertIsNone(cache.get("expire2")) self.assertFalse(cache.has_key("expire3")) def test_unicode(self): "Unicode values are ignored by the dummy cache" stuff = { 'ascii': 'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } for (key, value) in stuff.items(): cache.set(key, value) self.assertIsNone(cache.get(key)) def test_set_many(self): "set_many does nothing for the dummy cache backend" cache.set_many({'a': 1, 'b': 2}) cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1') def test_delete_many(self): "delete_many does nothing for the dummy cache backend" cache.delete_many(['a', 'b']) def test_clear(self): "clear does nothing for the dummy cache backend" cache.clear() def test_incr_version(self): "Dummy cache versions can't be incremented" cache.set('answer', 42) with self.assertRaises(ValueError): cache.incr_version('answer') with self.assertRaises(ValueError): cache.incr_version('does_not_exist') def test_decr_version(self): "Dummy cache versions can't be decremented" 
cache.set('answer', 42) with self.assertRaises(ValueError): cache.decr_version('answer') with self.assertRaises(ValueError): cache.decr_version('does_not_exist') def test_get_or_set(self): self.assertEqual(cache.get_or_set('mykey', 'default'), 'default') self.assertEqual(cache.get_or_set('mykey', None), None) def test_get_or_set_callable(self): def my_callable(): return 'default' self.assertEqual(cache.get_or_set('mykey', my_callable), 'default') self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default') def custom_key_func(key, key_prefix, version): "A customized cache key function" return 'CUSTOM-' + '-'.join([key_prefix, str(version), key]) _caches_setting_base = { 'default': {}, 'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())}, 'v2': {'VERSION': 2}, 'custom_key': {'KEY_FUNCTION': custom_key_func}, 'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'}, 'cull': {'OPTIONS': {'MAX_ENTRIES': 30}}, 'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}}, } def caches_setting_for_tests(base=None, exclude=None, **params): # `base` is used to pull in the memcached config from the original settings, # `exclude` is a set of cache names denoting which `_caches_setting_base` keys # should be omitted. # `params` are test specific overrides and `_caches_settings_base` is the # base config for the tests. # This results in the following search order: # params -> _caches_setting_base -> base base = base or {} exclude = exclude or set() setting = {k: base.copy() for k in _caches_setting_base.keys() if k not in exclude} for key, cache_params in setting.items(): cache_params.update(_caches_setting_base[key]) cache_params.update(params) return setting class BaseCacheTests: # A common set of tests to apply to all cache backends def setUp(self): self.factory = RequestFactory() def tearDown(self): cache.clear() def test_simple(self): # Simple cache set/get works cache.set("key", "value") self.assertEqual(cache.get("key"), "value") def test_add(self): # A key can be added to a cache cache.add("addkey1", "value") result = cache.add("addkey1", "newvalue") self.assertFalse(result) self.assertEqual(cache.get("addkey1"), "value") def test_prefix(self): # Test for same cache key conflicts between shared backend cache.set('somekey', 'value') # should not be set in the prefixed cache self.assertFalse(caches['prefix'].has_key('somekey')) caches['prefix'].set('somekey', 'value2') self.assertEqual(cache.get('somekey'), 'value') self.assertEqual(caches['prefix'].get('somekey'), 'value2') def test_non_existent(self): """Nonexistent cache keys return as None/default.""" self.assertIsNone(cache.get("does_not_exist")) self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!") def test_get_many(self): # Multiple cache keys can be returned using get_many cache.set('a', 'a') cache.set('b', 'b') cache.set('c', 'c') cache.set('d', 'd') self.assertDictEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'}) self.assertDictEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'}) def test_delete(self): # Cache keys can be deleted cache.set("key1", "spam") cache.set("key2", "eggs") self.assertEqual(cache.get("key1"), "spam") cache.delete("key1") self.assertIsNone(cache.get("key1")) self.assertEqual(cache.get("key2"), "eggs") def test_has_key(self): # The cache can be inspected for cache keys cache.set("hello1", "goodbye1") self.assertTrue(cache.has_key("hello1")) self.assertFalse(cache.has_key("goodbye1")) cache.set("no_expiry", "here", None) 
self.assertTrue(cache.has_key("no_expiry")) def test_in(self): # The in operator can be used to inspect cache contents cache.set("hello2", "goodbye2") self.assertIn("hello2", cache) self.assertNotIn("goodbye2", cache) def test_incr(self): # Cache values can be incremented cache.set('answer', 41) self.assertEqual(cache.incr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.incr('answer', 10), 52) self.assertEqual(cache.get('answer'), 52) self.assertEqual(cache.incr('answer', -10), 42) with self.assertRaises(ValueError): cache.incr('does_not_exist') def test_decr(self): # Cache values can be decremented cache.set('answer', 43) self.assertEqual(cache.decr('answer'), 42) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.decr('answer', 10), 32) self.assertEqual(cache.get('answer'), 32) self.assertEqual(cache.decr('answer', -10), 42) with self.assertRaises(ValueError): cache.decr('does_not_exist') def test_close(self): self.assertTrue(hasattr(cache, 'close')) cache.close() def test_data_types(self): # Many different data types can be cached stuff = { 'string': 'this is a string', 'int': 42, 'list': [1, 2, 3, 4], 'tuple': (1, 2, 3, 4), 'dict': {'A': 1, 'B': 2}, 'function': f, 'class': C, } cache.set("stuff", stuff) self.assertEqual(cache.get("stuff"), stuff) def test_cache_read_for_model_instance(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() my_poll = Poll.objects.create(question="Well?") self.assertEqual(Poll.objects.count(), 1) pub_date = my_poll.pub_date cache.set('question', my_poll) cached_poll = cache.get('question') self.assertEqual(cached_poll.pub_date, pub_date) # We only want the default expensive calculation run once self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_write_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache write expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) self.assertEqual(expensive_calculation.num_runs, 1) cache.set('deferred_queryset', defer_qs) # cache set should not re-evaluate default functions self.assertEqual(expensive_calculation.num_runs, 1) def test_cache_read_for_model_instance_with_deferred(self): # Don't want fields with callable as default to be called on cache read expensive_calculation.num_runs = 0 Poll.objects.all().delete() Poll.objects.create(question="What?") self.assertEqual(expensive_calculation.num_runs, 1) defer_qs = Poll.objects.all().defer('question') self.assertEqual(defer_qs.count(), 1) cache.set('deferred_queryset', defer_qs) self.assertEqual(expensive_calculation.num_runs, 1) runs_before_cache_read = expensive_calculation.num_runs cache.get('deferred_queryset') # We only want the default expensive calculation run on creation and set self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read) def test_expiration(self): # Cache values can be set to expire cache.set('expire1', 'very quickly', 1) cache.set('expire2', 'very quickly', 1) cache.set('expire3', 'very quickly', 1) time.sleep(2) self.assertIsNone(cache.get("expire1")) cache.add("expire2", "newvalue") self.assertEqual(cache.get("expire2"), "newvalue") self.assertFalse(cache.has_key("expire3")) def test_unicode(self): # Unicode values can be cached stuff = { 'ascii': 
'ascii_value', 'unicode_ascii': 'Iñtërnâtiônàlizætiøn1', 'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2', 'ascii2': {'x': 1} } # Test `set` for (key, value) in stuff.items(): cache.set(key, value) self.assertEqual(cache.get(key), value) # Test `add` for (key, value) in stuff.items(): cache.delete(key) cache.add(key, value) self.assertEqual(cache.get(key), value) # Test `set_many` for (key, value) in stuff.items(): cache.delete(key) cache.set_many(stuff) for (key, value) in stuff.items(): self.assertEqual(cache.get(key), value) def test_binary_string(self): # Binary strings should be cacheable from zlib import compress, decompress value = 'value_to_be_compressed' compressed_value = compress(value.encode()) # Test set cache.set('binary1', compressed_value) compressed_result = cache.get('binary1') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test add cache.add('binary1-add', compressed_value) compressed_result = cache.get('binary1-add') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) # Test set_many cache.set_many({'binary1-set_many': compressed_value}) compressed_result = cache.get('binary1-set_many') self.assertEqual(compressed_value, compressed_result) self.assertEqual(value, decompress(compressed_result).decode()) def test_set_many(self): # Multiple keys can be set using set_many cache.set_many({"key1": "spam", "key2": "eggs"}) self.assertEqual(cache.get("key1"), "spam") self.assertEqual(cache.get("key2"), "eggs") def test_set_many_expiration(self): # set_many takes a second ``timeout`` parameter cache.set_many({"key1": "spam", "key2": "eggs"}, 1) time.sleep(2) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_delete_many(self): # Multiple keys can be deleted using delete_many cache.set("key1", "spam") cache.set("key2", "eggs") cache.set("key3", "ham") cache.delete_many(["key1", "key2"]) self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) self.assertEqual(cache.get("key3"), "ham") def test_clear(self): # The cache can be emptied using clear cache.set("key1", "spam") cache.set("key2", "eggs") cache.clear() self.assertIsNone(cache.get("key1")) self.assertIsNone(cache.get("key2")) def test_long_timeout(self): """ Follow memcached's convention where a timeout greater than 30 days is treated as an absolute expiration timestamp instead of a relative offset (#12399). 
""" cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second self.assertEqual(cache.get('key1'), 'eggs') cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1) self.assertEqual(cache.get('key2'), 'ham') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') def test_forever_timeout(self): """ Passing in None into timeout results in a value that is cached forever """ cache.set('key1', 'eggs', None) self.assertEqual(cache.get('key1'), 'eggs') cache.add('key2', 'ham', None) self.assertEqual(cache.get('key2'), 'ham') added = cache.add('key1', 'new eggs', None) self.assertIs(added, False) self.assertEqual(cache.get('key1'), 'eggs') cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None) self.assertEqual(cache.get('key3'), 'sausage') self.assertEqual(cache.get('key4'), 'lobster bisque') def test_zero_timeout(self): """ Passing in zero into timeout results in a value that is not cached """ cache.set('key1', 'eggs', 0) self.assertIsNone(cache.get('key1')) cache.add('key2', 'ham', 0) self.assertIsNone(cache.get('key2')) cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0) self.assertIsNone(cache.get('key3')) self.assertIsNone(cache.get('key4')) def test_float_timeout(self): # Make sure a timeout given as a float doesn't crash anything. cache.set("key1", "spam", 100.2) self.assertEqual(cache.get("key1"), "spam") def _perform_cull_test(self, cull_cache, initial_count, final_count): # Create initial cache key entries. This will overflow the cache, # causing a cull. for i in range(1, initial_count): cull_cache.set('cull%d' % i, 'value', 1000) count = 0 # Count how many keys are left in the cache. for i in range(1, initial_count): if cull_cache.has_key('cull%d' % i): count += 1 self.assertEqual(count, final_count) def test_cull(self): self._perform_cull_test(caches['cull'], 50, 29) def test_zero_cull(self): self._perform_cull_test(caches['zero_cull'], 50, 19) def _perform_invalid_key_test(self, key, expected_warning): """ All the builtin backends (except memcached, see below) should warn on keys that would be refused by memcached. This encourages portable caching code without making it too difficult to use production backends with more liberal key rules. Refs #6447. """ # mimic custom ``make_key`` method being defined since the default will # never show the below warnings def func(key, *args): return key old_func = cache.key_func cache.key_func = func try: with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") cache.set(key, 'value') self.assertEqual(len(w), 1) self.assertIsInstance(w[0].message, CacheKeyWarning) self.assertEqual(str(w[0].message.args[0]), expected_warning) finally: cache.key_func = old_func def test_invalid_key_characters(self): # memcached doesn't allow whitespace or control characters in keys. key = 'key with spaces and 清' expected_warning = ( "Cache key contains characters that will cause errors if used " "with memcached: %r" % key ) self._perform_invalid_key_test(key, expected_warning) def test_invalid_key_length(self): # memcached limits key length to 250. 
key = ('a' * 250) + '清' expected_warning = ( 'Cache key will cause errors if used with memcached: ' '%r (longer than %s)' % (key, 250) ) self._perform_invalid_key_test(key, expected_warning) def test_cache_versioning_get_set(self): # set, using default version = 1 cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertEqual(cache.get('answer1', version=1), 42) self.assertIsNone(cache.get('answer1', version=2)) self.assertIsNone(caches['v2'].get('answer1')) self.assertEqual(caches['v2'].get('answer1', version=1), 42) self.assertIsNone(caches['v2'].get('answer1', version=2)) # set, default version = 1, but manually override version = 2 cache.set('answer2', 42, version=2) self.assertIsNone(cache.get('answer2')) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) # v2 set, using default version = 2 caches['v2'].set('answer3', 42) self.assertIsNone(cache.get('answer3')) self.assertIsNone(cache.get('answer3', version=1)) self.assertEqual(cache.get('answer3', version=2), 42) self.assertEqual(caches['v2'].get('answer3'), 42) self.assertIsNone(caches['v2'].get('answer3', version=1)) self.assertEqual(caches['v2'].get('answer3', version=2), 42) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set('answer4', 42, version=1) self.assertEqual(cache.get('answer4'), 42) self.assertEqual(cache.get('answer4', version=1), 42) self.assertIsNone(cache.get('answer4', version=2)) self.assertIsNone(caches['v2'].get('answer4')) self.assertEqual(caches['v2'].get('answer4', version=1), 42) self.assertIsNone(caches['v2'].get('answer4', version=2)) def test_cache_versioning_add(self): # add, default version = 1, but manually override version = 2 cache.add('answer1', 42, version=2) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.add('answer1', 37, version=2) self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.add('answer1', 37, version=1) self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) # v2 add, using default version = 2 caches['v2'].add('answer2', 42) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) caches['v2'].add('answer2', 37) self.assertIsNone(cache.get('answer2', version=1)) self.assertEqual(cache.get('answer2', version=2), 42) caches['v2'].add('answer2', 37, version=1) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) # v2 add, default version = 2, but manually override version = 1 caches['v2'].add('answer3', 42, version=1) self.assertEqual(cache.get('answer3', version=1), 42) self.assertIsNone(cache.get('answer3', version=2)) caches['v2'].add('answer3', 37, version=1) self.assertEqual(cache.get('answer3', version=1), 42) self.assertIsNone(cache.get('answer3', version=2)) caches['v2'].add('answer3', 37) self.assertEqual(cache.get('answer3', version=1), 42) self.assertEqual(cache.get('answer3', version=2), 37) def test_cache_versioning_has_key(self): cache.set('answer1', 42) # has_key self.assertTrue(cache.has_key('answer1')) self.assertTrue(cache.has_key('answer1', version=1)) self.assertFalse(cache.has_key('answer1', version=2)) 
self.assertFalse(caches['v2'].has_key('answer1')) self.assertTrue(caches['v2'].has_key('answer1', version=1)) self.assertFalse(caches['v2'].has_key('answer1', version=2)) def test_cache_versioning_delete(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) cache.delete('answer1') self.assertIsNone(cache.get('answer1', version=1)) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) cache.delete('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertIsNone(cache.get('answer2', version=2)) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) caches['v2'].delete('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertIsNone(cache.get('answer3', version=2)) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) caches['v2'].delete('answer4', version=1) self.assertIsNone(cache.get('answer4', version=1)) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_incr_decr(self): cache.set('answer1', 37, version=1) cache.set('answer1', 42, version=2) cache.incr('answer1') self.assertEqual(cache.get('answer1', version=1), 38) self.assertEqual(cache.get('answer1', version=2), 42) cache.decr('answer1') self.assertEqual(cache.get('answer1', version=1), 37) self.assertEqual(cache.get('answer1', version=2), 42) cache.set('answer2', 37, version=1) cache.set('answer2', 42, version=2) cache.incr('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 43) cache.decr('answer2', version=2) self.assertEqual(cache.get('answer2', version=1), 37) self.assertEqual(cache.get('answer2', version=2), 42) cache.set('answer3', 37, version=1) cache.set('answer3', 42, version=2) caches['v2'].incr('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 43) caches['v2'].decr('answer3') self.assertEqual(cache.get('answer3', version=1), 37) self.assertEqual(cache.get('answer3', version=2), 42) cache.set('answer4', 37, version=1) cache.set('answer4', 42, version=2) caches['v2'].incr('answer4', version=1) self.assertEqual(cache.get('answer4', version=1), 38) self.assertEqual(cache.get('answer4', version=2), 42) caches['v2'].decr('answer4', version=1) self.assertEqual(cache.get('answer4', version=1), 37) self.assertEqual(cache.get('answer4', version=2), 42) def test_cache_versioning_get_set_many(self): # set, using default version = 1 cache.set_many({'ford1': 37, 'arthur1': 42}) self.assertDictEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42}) self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertDictEqual(cache.get_many(['ford1', 'arthur1'], version=2), {}) self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1']), {}) self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42}) self.assertDictEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {}) # set, default version = 1, but manually override version = 2 cache.set_many({'ford2': 37, 'arthur2': 42}, version=2) self.assertDictEqual(cache.get_many(['ford2', 'arthur2']), {}) self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=1), {}) self.assertDictEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) self.assertDictEqual(caches['v2'].get_many(['ford2', 
'arthur2']), {'ford2': 37, 'arthur2': 42}) self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {}) self.assertDictEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42}) # v2 set, using default version = 2 caches['v2'].set_many({'ford3': 37, 'arthur3': 42}) self.assertDictEqual(cache.get_many(['ford3', 'arthur3']), {}) self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=1), {}) self.assertDictEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42}) self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {}) self.assertDictEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42}) # v2 set, default version = 2, but manually override version = 1 caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1) self.assertDictEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42}) self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertDictEqual(cache.get_many(['ford4', 'arthur4'], version=2), {}) self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4']), {}) self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42}) self.assertDictEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {}) def test_incr_version(self): cache.set('answer', 42, version=2) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertEqual(cache.get('answer', version=2), 42) self.assertIsNone(cache.get('answer', version=3)) self.assertEqual(cache.incr_version('answer', version=2), 3) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertIsNone(cache.get('answer', version=2)) self.assertEqual(cache.get('answer', version=3), 42) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertIsNone(caches['v2'].get('answer2', version=3)) self.assertEqual(caches['v2'].incr_version('answer2'), 3) self.assertIsNone(caches['v2'].get('answer2')) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertIsNone(caches['v2'].get('answer2', version=2)) self.assertEqual(caches['v2'].get('answer2', version=3), 42) with self.assertRaises(ValueError): cache.incr_version('does_not_exist') def test_decr_version(self): cache.set('answer', 42, version=2) self.assertIsNone(cache.get('answer')) self.assertIsNone(cache.get('answer', version=1)) self.assertEqual(cache.get('answer', version=2), 42) self.assertEqual(cache.decr_version('answer', version=2), 1) self.assertEqual(cache.get('answer'), 42) self.assertEqual(cache.get('answer', version=1), 42) self.assertIsNone(cache.get('answer', version=2)) caches['v2'].set('answer2', 42) self.assertEqual(caches['v2'].get('answer2'), 42) self.assertIsNone(caches['v2'].get('answer2', version=1)) self.assertEqual(caches['v2'].get('answer2', version=2), 42) self.assertEqual(caches['v2'].decr_version('answer2'), 1) self.assertIsNone(caches['v2'].get('answer2')) self.assertEqual(caches['v2'].get('answer2', version=1), 42) self.assertIsNone(caches['v2'].get('answer2', version=2)) with self.assertRaises(ValueError): cache.decr_version('does_not_exist', version=2) def 
test_custom_key_func(self): # Two caches with different key functions aren't visible to each other cache.set('answer1', 42) self.assertEqual(cache.get('answer1'), 42) self.assertIsNone(caches['custom_key'].get('answer1')) self.assertIsNone(caches['custom_key2'].get('answer1')) caches['custom_key'].set('answer2', 42) self.assertIsNone(cache.get('answer2')) self.assertEqual(caches['custom_key'].get('answer2'), 42) self.assertEqual(caches['custom_key2'].get('answer2'), 42) def test_cache_write_unpicklable_object(self): update_middleware = UpdateCacheMiddleware() update_middleware.cache = cache fetch_middleware = FetchFromCacheMiddleware() fetch_middleware.cache = cache request = self.factory.get('/cache/test') request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) response = HttpResponse() content = 'Testing cookie serialization.' response.content = content response.set_cookie('foo', 'bar') update_middleware.process_response(request, response) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) update_middleware.process_response(request, get_cache_data) get_cache_data = fetch_middleware.process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) self.assertEqual(get_cache_data.cookies, response.cookies) def test_add_fail_on_pickleerror(self): # Shouldn't fail silently if trying to cache an unpicklable type. with self.assertRaises(pickle.PickleError): cache.add('unpicklable', Unpicklable()) def test_set_fail_on_pickleerror(self): with self.assertRaises(pickle.PickleError): cache.set('unpicklable', Unpicklable()) def test_get_or_set(self): self.assertIsNone(cache.get('projector')) self.assertEqual(cache.get_or_set('projector', 42), 42) self.assertEqual(cache.get('projector'), 42) self.assertEqual(cache.get_or_set('null', None), None) def test_get_or_set_callable(self): def my_callable(): return 'value' self.assertEqual(cache.get_or_set('mykey', my_callable), 'value') self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value') def test_get_or_set_version(self): msg = "get_or_set() missing 1 required positional argument: 'default'" cache.get_or_set('brian', 1979, version=2) with self.assertRaisesMessage(TypeError, msg): cache.get_or_set('brian') with self.assertRaisesMessage(TypeError, msg): cache.get_or_set('brian', version=1) self.assertIsNone(cache.get('brian', version=1)) self.assertEqual(cache.get_or_set('brian', 42, version=1), 42) self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979) self.assertIsNone(cache.get('brian', version=3)) def test_get_or_set_racing(self): with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add: # Simulate cache.add() failing to add a value. In that case, the # default value should be returned. cache_add.return_value = False self.assertEqual(cache.get_or_set('key', 'default'), 'default') @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.db.DatabaseCache', # Spaces are used in the table name to ensure quoting/escaping is working LOCATION='test cache table' )) class DBCacheTests(BaseCacheTests, TransactionTestCase): available_apps = ['cache'] def setUp(self): # The super calls needs to happen first for the settings override. 
super().setUp() self.create_table() def tearDown(self): # The super call needs to happen first because it uses the database. super().tearDown() self.drop_table() def create_table(self): management.call_command('createcachetable', verbosity=0, interactive=False) def drop_table(self): with connection.cursor() as cursor: table_name = connection.ops.quote_name('test cache table') cursor.execute('DROP TABLE %s' % table_name) def test_zero_cull(self): self._perform_cull_test(caches['zero_cull'], 50, 18) def test_second_call_doesnt_crash(self): out = io.StringIO() management.call_command('createcachetable', stdout=out) self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES)) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.db.DatabaseCache', # Use another table name to avoid the 'table already exists' message. LOCATION='createcachetable_dry_run_mode' )) def test_createcachetable_dry_run_mode(self): out = io.StringIO() management.call_command('createcachetable', dry_run=True, stdout=out) output = out.getvalue() self.assertTrue(output.startswith("CREATE TABLE")) def test_createcachetable_with_table_argument(self): """ Delete and recreate cache table with legacy behavior (explicitly specifying the table name). """ self.drop_table() out = io.StringIO() management.call_command( 'createcachetable', 'test cache table', verbosity=2, stdout=out, ) self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n") @override_settings(USE_TZ=True) class DBCacheWithTimeZoneTests(DBCacheTests): pass class DBCacheRouter: """A router that puts the cache table on the 'other' database.""" def db_for_read(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' return None def db_for_write(self, model, **hints): if model._meta.app_label == 'django_cache': return 'other' return None def allow_migrate(self, db, app_label, **hints): if app_label == 'django_cache': return db == 'other' return None @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', 'LOCATION': 'my_cache_table', }, }, ) class CreateCacheTableForDBCacheTests(TestCase): multi_db = True @override_settings(DATABASE_ROUTERS=[DBCacheRouter()]) def test_createcachetable_observes_database_router(self): # cache table should not be created on 'default' with self.assertNumQueries(0, using='default'): management.call_command('createcachetable', database='default', verbosity=0, interactive=False) # cache table should be created on 'other' # Queries: # 1: check table doesn't already exist # 2: create savepoint (if transactional DDL is supported) # 3: create the table # 4: create the index # 5: release savepoint (if transactional DDL is supported) num = 5 if connections['other'].features.can_rollback_ddl else 3 with self.assertNumQueries(num, using='other'): management.call_command('createcachetable', database='other', verbosity=0, interactive=False) class PicklingSideEffect: def __init__(self, cache): self.cache = cache self.locked = False def __getstate__(self): if self.cache._lock.active_writers: self.locked = True return {} @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.locmem.LocMemCache', )) class LocMemCacheTests(BaseCacheTests, TestCase): def setUp(self): super().setUp() # LocMem requires a hack to make the other caches # share a data store with the 'normal' cache. 
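# Each LocMemCache instance keeps its entries in its own _cache / _expire_info
# dicts, so re-pointing the aliases at the default cache's dicts below gives
# them a single shared store; KEY_PREFIX, VERSION and KEY_FUNCTION still keep
# the stored keys distinct, which the prefix/versioning/custom-key tests rely on.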
caches['prefix']._cache = cache._cache caches['prefix']._expire_info = cache._expire_info caches['v2']._cache = cache._cache caches['v2']._expire_info = cache._expire_info caches['custom_key']._cache = cache._cache caches['custom_key']._expire_info = cache._expire_info caches['custom_key2']._cache = cache._cache caches['custom_key2']._expire_info = cache._expire_info @override_settings(CACHES={ 'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other' }, }) def test_multiple_caches(self): "Multiple locmem caches are isolated" cache.set('value', 42) self.assertEqual(caches['default'].get('value'), 42) self.assertIsNone(caches['other'].get('value')) def test_locking_on_pickle(self): """#20613/#18541 -- Ensures pickling is done outside of the lock.""" bad_obj = PicklingSideEffect(cache) cache.set('set', bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") cache.add('add', bad_obj) self.assertFalse(bad_obj.locked, "Cache was locked during pickling") def test_incr_decr_timeout(self): """incr/decr does not modify expiry time (matches memcached behavior)""" key = 'value' _key = cache.make_key(key) cache.set(key, 1, timeout=cache.default_timeout * 10) expire = cache._expire_info[_key] cache.incr(key) self.assertEqual(expire, cache._expire_info[_key]) cache.decr(key) self.assertEqual(expire, cache._expire_info[_key]) # memcached backend isn't guaranteed to be available. # To check the memcached backend, the test settings file will # need to contain at least one cache backend setting that points at # your memcache server. configured_caches = {} for _cache_params in settings.CACHES.values(): configured_caches[_cache_params['BACKEND']] = _cache_params MemcachedCache_params = configured_caches.get('django.core.cache.backends.memcached.MemcachedCache') PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache') # The memcached backends don't support cull-related options like `MAX_ENTRIES`. memcached_excluded_caches = {'cull', 'zero_cull'} class BaseMemcachedTests(BaseCacheTests): # By default it's assumed that the client doesn't clean up connections # properly, in which case the backend must do so after each request. should_disconnect_on_close = True def test_location_multiple_servers(self): locations = [ ['server1.tld', 'server2:11211'], 'server1.tld;server2:11211', 'server1.tld,server2:11211', ] for location in locations: params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location} with self.settings(CACHES={'default': params}): self.assertEqual(cache._servers, ['server1.tld', 'server2:11211']) def test_invalid_key_characters(self): """ On memcached, we don't introduce a duplicate key validation step (for speed reasons), we just let the memcached API library raise its own exception on bad keys. Refs #6447. In order to be memcached-API-library agnostic, we only assert that a generic exception of some kind is raised. """ # memcached does not allow whitespace or control characters in keys # when using the ascii protocol. 
with self.assertRaises(Exception): cache.set('key with spaces', 'value') def test_invalid_key_length(self): # memcached limits key length to 250 with self.assertRaises(Exception): cache.set('a' * 251, 'value') def test_default_never_expiring_timeout(self): # Regression test for #22845 with self.settings(CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, TIMEOUT=None)): cache.set('infinite_foo', 'bar') self.assertEqual(cache.get('infinite_foo'), 'bar') def test_default_far_future_timeout(self): # Regression test for #22845 with self.settings(CACHES=caches_setting_for_tests( base=self.base_params, exclude=memcached_excluded_caches, # 60*60*24*365, 1 year TIMEOUT=31536000)): cache.set('future_foo', 'bar') self.assertEqual(cache.get('future_foo'), 'bar') def test_cull(self): # culling isn't implemented, memcached deals with it. pass def test_zero_cull(self): # culling isn't implemented, memcached deals with it. pass def test_memcached_deletes_key_on_failed_set(self): # By default memcached allows objects up to 1MB. For the cache_db session # backend to always use the current session, memcached needs to delete # the old key if it fails to set. # pylibmc doesn't seem to have SERVER_MAX_VALUE_LENGTH as far as I can # tell from a quick check of its source code. This is falling back to # the default value exposed by python-memcached on my system. max_value_length = getattr(cache._lib, 'SERVER_MAX_VALUE_LENGTH', 1048576) cache.set('small_value', 'a') self.assertEqual(cache.get('small_value'), 'a') large_value = 'a' * (max_value_length + 1) try: cache.set('small_value', large_value) except Exception: # Some clients (e.g. pylibmc) raise when the value is too large, # while others (e.g. python-memcached) intentionally return True # indicating success. This test is primarily checking that the key # was deleted, so the return/exception behavior for the set() # itself is not important. pass # small_value should be deleted, or set if configured to accept larger values value = cache.get('small_value') self.assertTrue(value is None or value == large_value) def test_close(self): # For clients that don't manage their connections properly, the # connection is closed when the request is complete. 
signals.request_finished.disconnect(close_old_connections) try: with mock.patch.object(cache._lib.Client, 'disconnect_all', autospec=True) as mock_disconnect: signals.request_finished.send(self.__class__) self.assertIs(mock_disconnect.called, self.should_disconnect_on_close) finally: signals.request_finished.connect(close_old_connections) @unittest.skipUnless(MemcachedCache_params, "MemcachedCache backend not configured") @override_settings(CACHES=caches_setting_for_tests( base=MemcachedCache_params, exclude=memcached_excluded_caches, )) class MemcachedCacheTests(BaseMemcachedTests, TestCase): base_params = MemcachedCache_params def test_memcached_uses_highest_pickle_version(self): # Regression test for #19810 for cache_key in settings.CACHES: self.assertEqual(caches[cache_key]._cache.pickleProtocol, pickle.HIGHEST_PROTOCOL) @override_settings(CACHES=caches_setting_for_tests( base=MemcachedCache_params, exclude=memcached_excluded_caches, OPTIONS={'server_max_value_length': 9999}, )) def test_memcached_options(self): self.assertEqual(cache._cache.server_max_value_length, 9999) @unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured") @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, )) class PyLibMCCacheTests(BaseMemcachedTests, TestCase): base_params = PyLibMCCache_params # libmemcached manages its own connections. should_disconnect_on_close = False # By default, pylibmc/libmemcached don't verify keys client-side and so # this test triggers a server-side bug that causes later tests to fail # (#19914). The `verify_keys` behavior option could be set to True (which # would avoid triggering the server-side bug), however this test would # still fail due to https://github.com/lericson/pylibmc/issues/219. @unittest.skip("triggers a memcached-server bug, causing subsequent tests to fail") def test_invalid_key_characters(self): pass @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, OPTIONS={ 'binary': True, 'behaviors': {'tcp_nodelay': True}, }, )) def test_pylibmc_options(self): self.assertTrue(cache._cache.binary) self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True)) @override_settings(CACHES=caches_setting_for_tests( base=PyLibMCCache_params, exclude=memcached_excluded_caches, OPTIONS={'tcp_nodelay': True}, )) def test_pylibmc_legacy_options(self): deprecation_message = ( "Specifying pylibmc cache behaviors as a top-level property " "within `OPTIONS` is deprecated. Move `tcp_nodelay` into a dict named " "`behaviors` inside `OPTIONS` instead." ) with warnings.catch_warnings(record=True) as warns: warnings.simplefilter("always") self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True)) self.assertEqual(len(warns), 1) self.assertIsInstance(warns[0].message, RemovedInDjango21Warning) self.assertEqual(str(warns[0].message), deprecation_message) @override_settings(CACHES=caches_setting_for_tests( BACKEND='django.core.cache.backends.filebased.FileBasedCache', )) class FileBasedCacheTests(BaseCacheTests, TestCase): """ Specific test cases for the file-based cache. """ def setUp(self): super().setUp() self.dirname = tempfile.mkdtemp() # Caches location cannot be modified through override_settings / modify_settings, # hence settings are manipulated directly here and the setting_changed signal # is triggered manually. 
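# The loop below patches LOCATION directly and then fires setting_changed so
# that Django's test signal receivers drop the existing cache connections and
# subsequent caches[...] lookups pick up the new temporary directory.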
for cache_params in settings.CACHES.values(): cache_params.update({'LOCATION': self.dirname}) setting_changed.send(self.__class__, setting='CACHES', enter=False) def tearDown(self): super().tearDown() # Call parent first, as cache.clear() may recreate cache base directory shutil.rmtree(self.dirname) def test_ignores_non_cache_files(self): fname = os.path.join(self.dirname, 'not-a-cache-file') with open(fname, 'w'): os.utime(fname, None) cache.clear() self.assertTrue(os.path.exists(fname), 'Expected cache.clear to ignore non cache files') os.remove(fname) def test_clear_does_not_remove_cache_dir(self): cache.clear() self.assertTrue(os.path.exists(self.dirname), 'Expected cache.clear to keep the cache dir') def test_creates_cache_dir_if_nonexistent(self): os.rmdir(self.dirname) cache.set('foo', 'bar') os.path.exists(self.dirname) def test_get_ignores_enoent(self): cache.set('foo', 'bar') os.unlink(cache._key_to_file('foo')) # Returns the default instead of erroring. self.assertEqual(cache.get('foo', 'baz'), 'baz') def test_get_does_not_ignore_non_filenotfound_exceptions(self): with mock.patch('builtins.open', side_effect=IOError): with self.assertRaises(IOError): cache.get('foo') @override_settings(CACHES={ 'default': { 'BACKEND': 'cache.liberal_backend.CacheClass', }, }) class CustomCacheKeyValidationTests(SimpleTestCase): """ Tests for the ability to mixin a custom ``validate_key`` method to a custom cache backend that otherwise inherits from a builtin backend, and override the default key validation. Refs #6447. """ def test_custom_key_validation(self): # this key is both longer than 250 characters, and has spaces key = 'some key with spaces' * 15 val = 'a value' cache.set(key, val) self.assertEqual(cache.get(key), val) @override_settings( CACHES={ 'default': { 'BACKEND': 'cache.closeable_cache.CacheClass', } } ) class CacheClosingTests(SimpleTestCase): def test_close(self): self.assertFalse(cache.closed) signals.request_finished.send(self.__class__) self.assertTrue(cache.closed) DEFAULT_MEMORY_CACHES_SETTINGS = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'unique-snowflake', } } NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS) NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None class DefaultNonExpiringCacheKeyTests(SimpleTestCase): """ Settings having Cache arguments with a TIMEOUT=None create Caches that will set non-expiring keys. """ def setUp(self): # The 5 minute (300 seconds) default expiration time for keys is # defined in the implementation of the initializer method of the # BaseCache type. self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout def tearDown(self): del(self.DEFAULT_TIMEOUT) def test_default_expiration_time_for_keys_is_5_minutes(self): """The default expiration time of a cache key is 5 minutes. This value is defined in django.core.cache.backends.base.BaseCache.__init__(). """ self.assertEqual(300, self.DEFAULT_TIMEOUT) def test_caches_with_unset_timeout_has_correct_default_timeout(self): """Caches that have the TIMEOUT parameter undefined in the default settings will use the default 5 minute timeout. """ cache = caches[DEFAULT_CACHE_ALIAS] self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout) @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS) def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self): """Memory caches that have the TIMEOUT parameter set to `None` in the default settings with have `None` as the default timeout. 
This means "no timeout". """ cache = caches[DEFAULT_CACHE_ALIAS] self.assertIsNone(cache.default_timeout) self.assertIsNone(cache.get_backend_timeout()) @override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS) def test_caches_with_unset_timeout_set_expiring_key(self): """Memory caches that have the TIMEOUT parameter unset will set cache keys having the default 5 minute timeout. """ key = "my-key" value = "my-value" cache = caches[DEFAULT_CACHE_ALIAS] cache.set(key, value) cache_key = cache.make_key(key) self.assertIsNotNone(cache._expire_info[cache_key]) @override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS) def test_caches_set_with_timeout_as_none_set_non_expiring_key(self): """Memory caches that have the TIMEOUT parameter set to `None` will set a non expiring key by default. """ key = "another-key" value = "another-value" cache = caches[DEFAULT_CACHE_ALIAS] cache.set(key, value) cache_key = cache.make_key(key) self.assertIsNone(cache._expire_info[cache_key]) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHE_MIDDLEWARE_SECONDS=1, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, USE_I18N=False, ALLOWED_HOSTS=['.example.com'], ) class CacheUtils(SimpleTestCase): """TestCase for django.utils.cache functions.""" def setUp(self): self.host = 'www.example.com' self.path = '/cache/test/' self.factory = RequestFactory(HTTP_HOST=self.host) def tearDown(self): cache.clear() def _get_request_cache(self, method='GET', query_string=None, update_cache=None): request = self._get_request(self.host, self.path, method, query_string=query_string) request._cache_update_cache = True if not update_cache else update_cache return request def _set_cache(self, request, msg): response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. (None, ('Accept-Encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'), ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ) for initial_vary, newheaders, resulting_vary in headers: response = HttpResponse() if initial_vary is not None: response['Vary'] = initial_vary patch_vary_headers(response, newheaders) self.assertEqual(response['Vary'], resulting_vary) def test_get_cache_key(self): request = self.factory.get(self.path) response = HttpResponse() # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) # A specified key_prefix is taken into account. key_prefix = 'localprefix' learn_cache_key(request, response, key_prefix=key_prefix) self.assertEqual( get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.' 
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) def test_get_cache_key_with_query(self): request = self.factory.get(self.path, {'test': 1}) response = HttpResponse() # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) # The querystring is taken into account. self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' 'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e' ) def test_cache_key_varies_by_url(self): """ get_cache_key keys differ by fully-qualified URL instead of path """ request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com') learn_cache_key(request1, HttpResponse()) request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com') learn_cache_key(request2, HttpResponse()) self.assertNotEqual(get_cache_key(request1), get_cache_key(request2)) def test_learn_cache_key(self): request = self.factory.head(self.path) response = HttpResponse() response['Vary'] = 'Pony' # Make sure that the Vary header is added to the key hash learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e' ) def test_patch_cache_control(self): tests = ( # Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts (None, {'private': True}, {'private'}), ('', {'private': True}, {'private'}), # Test whether private/public attributes are mutually exclusive ('private', {'private': True}, {'private'}), ('private', {'public': True}, {'public'}), ('public', {'public': True}, {'public'}), ('public', {'private': True}, {'private'}), ('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}), ('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}), ('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}), ) cc_delim_re = re.compile(r'\s*,\s*') for initial_cc, newheaders, expected_cc in tests: response = HttpResponse() if initial_cc is not None: response['Cache-Control'] = initial_cc patch_cache_control(response, **newheaders) parts = set(cc_delim_re.split(response['Cache-Control'])) self.assertEqual(parts, expected_cc) @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'KEY_PREFIX': 'cacheprefix', }, }, ) class PrefixedCacheUtils(CacheUtils): pass @override_settings( CACHE_MIDDLEWARE_SECONDS=60, CACHE_MIDDLEWARE_KEY_PREFIX='test', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, ) class CacheHEADTest(SimpleTestCase): def setUp(self): self.path = '/cache/test/' self.factory = RequestFactory() def tearDown(self): cache.clear() def _set_cache(self, request, msg): response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) def test_head_caches_correctly(self): test_content = 'test content' request = self.factory.head(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(test_content.encode(), get_cache_data.content) def test_head_with_cached_get(self): test_content = 'test 
content' request = self.factory.get(self.path) request._cache_update_cache = True self._set_cache(request, test_content) request = self.factory.head(self.path) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNotNone(get_cache_data) self.assertEqual(test_content.encode(), get_cache_data.content) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, LANGUAGES=[ ('en', 'English'), ('es', 'Spanish'), ], ) class CacheI18nTest(TestCase): def setUp(self): self.path = '/cache/test/' self.factory = RequestFactory() def tearDown(self): cache.clear() @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False) def test_cache_key_i18n_translation(self): request = self.factory.get(self.path) lang = translation.get_language() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when translation is active") key2 = get_cache_key(request) self.assertEqual(key, key2) def check_accept_language_vary(self, accept_language, vary, reference_key): request = self.factory.get(self.path) request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0' response = HttpResponse() response['Vary'] = vary key = learn_cache_key(request, response) key2 = get_cache_key(request) self.assertEqual(key, reference_key) self.assertEqual(key2, reference_key) @override_settings(USE_I18N=True, USE_L10N=False, USE_TZ=False) def test_cache_key_i18n_translation_accept_language(self): lang = translation.get_language() self.assertEqual(lang, 'en') request = self.factory.get(self.path) request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0' response = HttpResponse() response['Vary'] = 'accept-encoding' key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when translation is active") self.check_accept_language_vary( 'en-us', 'cookie, accept-language, accept-encoding', key ) self.check_accept_language_vary( 'en-US', 'cookie, accept-encoding, accept-language', key ) self.check_accept_language_vary( 'en-US,en;q=0.8', 'accept-encoding, accept-language, cookie', key ) self.check_accept_language_vary( 'en-US,en;q=0.8,ko;q=0.6', 'accept-language, cookie, accept-encoding', key ) self.check_accept_language_vary( 'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ', 'accept-encoding, cookie, accept-language', key ) self.check_accept_language_vary( 'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4', 'accept-language, accept-encoding, cookie', key ) self.check_accept_language_vary( 'ko;q=1.0,en;q=0.5', 'cookie, accept-language, accept-encoding', key ) self.check_accept_language_vary( 'ko, en', 'cookie, accept-encoding, accept-language', key ) self.check_accept_language_vary( 'ko-KR, en-US', 'accept-encoding, accept-language, cookie', key ) @override_settings(USE_I18N=False, USE_L10N=True, USE_TZ=False) def test_cache_key_i18n_formatting(self): request = self.factory.get(self.path) lang = translation.get_language() response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(lang, key, "Cache keys should include the language name when formatting is active") key2 = get_cache_key(request) self.assertEqual(key, key2) @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True) def test_cache_key_i18n_timezone(self): request = self.factory.get(self.path) # This is tightly coupled to the implementation, 
# but it's the most straightforward way to test the key. tz = timezone.get_current_timezone_name() tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_') response = HttpResponse() key = learn_cache_key(request, response) self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active") key2 = get_cache_key(request) self.assertEqual(key, key2) @override_settings(USE_I18N=False, USE_L10N=False) def test_cache_key_no_i18n(self): request = self.factory.get(self.path) lang = translation.get_language() tz = timezone.get_current_timezone_name() tz = tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_') response = HttpResponse() key = learn_cache_key(request, response) self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active") self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active") @override_settings(USE_I18N=False, USE_L10N=False, USE_TZ=True) def test_cache_key_with_non_ascii_tzname(self): # Timezone-dependent cache keys should use ASCII characters only # (#17476). The implementation here is a bit odd (timezone.utc is an # instance, not a class), but it simulates the correct conditions. class CustomTzName(timezone.utc): pass request = self.factory.get(self.path) response = HttpResponse() with timezone.override(CustomTzName): CustomTzName.zone = 'Hora estándar de Argentina'.encode('UTF-8') # UTF-8 string sanitized_name = 'Hora_estndar_de_Argentina' self.assertIn( sanitized_name, learn_cache_key(request, response), "Cache keys should include the time zone name when time zones are active" ) CustomTzName.name = 'Hora estándar de Argentina' # unicode sanitized_name = 'Hora_estndar_de_Argentina' self.assertIn( sanitized_name, learn_cache_key(request, response), "Cache keys should include the time zone name when time zones are active" ) @ignore_warnings(category=RemovedInDjango21Warning) # USE_ETAGS=True @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, USE_ETAGS=True, USE_I18N=True, ) def test_middleware(self): def set_cache(request, lang, msg): translation.activate(lang) response = HttpResponse() response.content = msg return UpdateCacheMiddleware().process_response(request, response) # cache with non empty request.GET request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) # first access, cache must return None self.assertIsNone(get_cache_data) response = HttpResponse() content = 'Check for cache with QUERY_STRING' response.content = content UpdateCacheMiddleware().process_response(request, response) get_cache_data = FetchFromCacheMiddleware().process_request(request) # cache must return content self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, content.encode()) # different QUERY_STRING, cache must be empty request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'}) request._cache_update_cache = True get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) # i18n tests en_message = "Hello world!" es_message = "Hola mundo!" 
request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, 'en', en_message) get_cache_data = FetchFromCacheMiddleware().process_request(request) # The cache can be recovered self.assertIsNotNone(get_cache_data) self.assertEqual(get_cache_data.content, en_message.encode()) # ETags are used. self.assertTrue(get_cache_data.has_header('ETag')) # ETags can be disabled. with self.settings(USE_ETAGS=False): request._cache_update_cache = True set_cache(request, 'en', en_message) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertFalse(get_cache_data.has_header('ETag')) # change the session language and set content request = self.factory.get(self.path) request._cache_update_cache = True set_cache(request, 'es', es_message) # change again the language translation.activate('en') # retrieve the content from cache get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertEqual(get_cache_data.content, en_message.encode()) # change again the language translation.activate('es') get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertEqual(get_cache_data.content, es_message.encode()) # reset the language translation.deactivate() @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX="test", CACHE_MIDDLEWARE_SECONDS=60, USE_ETAGS=True, ) def test_middleware_doesnt_cache_streaming_response(self): request = self.factory.get(self.path) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) content = ['Check for cache with streaming content.'] response = StreamingHttpResponse(content) UpdateCacheMiddleware().process_response(request, response) get_cache_data = FetchFromCacheMiddleware().process_request(request) self.assertIsNone(get_cache_data) @override_settings( CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'KEY_PREFIX': 'cacheprefix' }, }, ) class PrefixedCacheI18nTest(CacheI18nTest): pass def hello_world_view(request, value): return HttpResponse('Hello World %s' % value) def csrf_view(request): return HttpResponse(csrf(request)['csrf_token']) @override_settings( CACHE_MIDDLEWARE_ALIAS='other', CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix', CACHE_MIDDLEWARE_SECONDS=30, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, 'other': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'other', 'TIMEOUT': '1', }, }, ) class CacheMiddlewareTest(SimpleTestCase): def setUp(self): super().setUp() self.factory = RequestFactory() self.default_cache = caches['default'] self.other_cache = caches['other'] def tearDown(self): self.default_cache.clear() self.other_cache.clear() super().tearDown() def test_constructor(self): """ Ensure the constructor is correctly distinguishing between usage of CacheMiddleware as Middleware vs. usage of CacheMiddleware as view decorator and setting attributes appropriately. """ # If no arguments are passed in construction, it's being used as middleware. middleware = CacheMiddleware() # Now test object attributes against values defined in setUp above self.assertEqual(middleware.cache_timeout, 30) self.assertEqual(middleware.key_prefix, 'middlewareprefix') self.assertEqual(middleware.cache_alias, 'other') # If arguments are being passed in construction, it's being used as a decorator. 
# First, test with "defaults": as_view_decorator = CacheMiddleware(cache_alias=None, key_prefix=None) self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30 self.assertEqual(as_view_decorator.key_prefix, '') # Value of DEFAULT_CACHE_ALIAS from django.core.cache self.assertEqual(as_view_decorator.cache_alias, 'default') # Next, test with custom values: as_view_decorator_with_custom = CacheMiddleware(cache_timeout=60, cache_alias='other', key_prefix='foo') self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60) self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo') self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other') def test_middleware(self): middleware = CacheMiddleware() prefix_middleware = CacheMiddleware(key_prefix='prefix1') timeout_middleware = CacheMiddleware(cache_timeout=1) request = self.factory.get('/view/') # Put the request through the request middleware result = middleware.process_request(request) self.assertIsNone(result) response = hello_world_view(request, '1') # Now put the response through the response middleware response = middleware.process_response(request, response) # Repeating the request should result in a cache hit result = middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b'Hello World 1') # The same request through a different middleware won't hit result = prefix_middleware.process_request(request) self.assertIsNone(result) # The same request with a timeout _will_ hit result = timeout_middleware.process_request(request) self.assertIsNotNone(result) self.assertEqual(result.content, b'Hello World 1') def test_view_decorator(self): # decorate the same view with different cache decorators default_view = cache_page(3)(hello_world_view) default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view) explicit_default_view = cache_page(3, cache='default')(hello_world_view) explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view) other_view = cache_page(1, cache='other')(hello_world_view) other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view) request = self.factory.get('/view/') # Request the view once response = default_view(request, '1') self.assertEqual(response.content, b'Hello World 1') # Request again -- hit the cache response = default_view(request, '2') self.assertEqual(response.content, b'Hello World 1') # Requesting the same view with the explicit cache should yield the same result response = explicit_default_view(request, '3') self.assertEqual(response.content, b'Hello World 1') # Requesting with a prefix will hit a different cache key response = explicit_default_with_prefix_view(request, '4') self.assertEqual(response.content, b'Hello World 4') # Hitting the same view again gives a cache hit response = explicit_default_with_prefix_view(request, '5') self.assertEqual(response.content, b'Hello World 4') # And going back to the implicit cache will hit the same cache response = default_with_prefix_view(request, '6') self.assertEqual(response.content, b'Hello World 4') # Requesting from an alternate cache won't hit cache response = other_view(request, '7') self.assertEqual(response.content, b'Hello World 7') # But a repeated hit will hit cache response = other_view(request, '8') self.assertEqual(response.content, b'Hello World 7') # And prefixing the alternate cache yields yet another cache entry response = 
other_with_prefix_view(request, '9') self.assertEqual(response.content, b'Hello World 9') # But if we wait a couple of seconds... time.sleep(2) # ... the default cache will still hit caches['default'] response = default_view(request, '11') self.assertEqual(response.content, b'Hello World 1') # ... the default cache with a prefix will still hit response = default_with_prefix_view(request, '12') self.assertEqual(response.content, b'Hello World 4') # ... the explicit default cache will still hit response = explicit_default_view(request, '13') self.assertEqual(response.content, b'Hello World 1') # ... the explicit default cache with a prefix will still hit response = explicit_default_with_prefix_view(request, '14') self.assertEqual(response.content, b'Hello World 4') # .. but a rapidly expiring cache won't hit response = other_view(request, '15') self.assertEqual(response.content, b'Hello World 15') # .. even if it has a prefix response = other_with_prefix_view(request, '16') self.assertEqual(response.content, b'Hello World 16') def test_sensitive_cookie_not_cached(self): """ Django must prevent caching of responses that set a user-specific (and maybe security sensitive) cookie in response to a cookie-less request. """ csrf_middleware = CsrfViewMiddleware() cache_middleware = CacheMiddleware() request = self.factory.get('/view/') self.assertIsNone(cache_middleware.process_request(request)) csrf_middleware.process_view(request, csrf_view, (), {}) response = csrf_view(request) response = csrf_middleware.process_response(request, response) response = cache_middleware.process_response(request, response) # Inserting a CSRF cookie in a cookie-less request prevented caching. self.assertIsNone(cache_middleware.process_request(request)) def test_304_response_has_http_caching_headers_but_not_cached(self): original_view = mock.Mock(return_value=HttpResponseNotModified()) view = cache_page(2)(original_view) request = self.factory.get('/view/') # The view shouldn't be cached on the second call. view(request).close() response = view(request) response.close() self.assertEqual(original_view.call_count, 2) self.assertIsInstance(response, HttpResponseNotModified) self.assertIn('Cache-Control', response) self.assertIn('Expires', response) @override_settings( CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix', CACHE_MIDDLEWARE_SECONDS=1, CACHES={ 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', }, }, USE_I18N=False, ) class TestWithTemplateResponse(SimpleTestCase): """ Tests various headers w/ TemplateResponse. Most are probably redundant since they manipulate the same object anyway but the ETag header is 'special' because it relies on the content being complete (which is not necessarily always the case with a TemplateResponse) """ def setUp(self): self.path = '/cache/test/' self.factory = RequestFactory() def tearDown(self): cache.clear() def test_patch_vary_headers(self): headers = ( # Initial vary, new headers, resulting vary. 
(None, ('Accept-Encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'), ('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'), ('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), (None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'), ('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'), ) for initial_vary, newheaders, resulting_vary in headers: template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) if initial_vary is not None: response['Vary'] = initial_vary patch_vary_headers(response, newheaders) self.assertEqual(response['Vary'], resulting_vary) def test_get_cache_key(self): request = self.factory.get(self.path) template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) key_prefix = 'localprefix' # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e' ) # A specified key_prefix is taken into account. learn_cache_key(request, response, key_prefix=key_prefix) self.assertEqual( get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.GET.' '58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e' ) def test_get_cache_key_with_query(self): request = self.factory.get(self.path, {'test': 1}) template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) # Expect None if no headers have been set yet. self.assertIsNone(get_cache_key(request)) # Set headers to an empty list. learn_cache_key(request, response) # The querystring is taken into account. self.assertEqual( get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.GET.' 
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e' ) @override_settings(USE_ETAGS=False) def test_without_etag(self): template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) self.assertFalse(response.has_header('ETag')) patch_response_headers(response) self.assertFalse(response.has_header('ETag')) response = response.render() self.assertFalse(response.has_header('ETag')) @ignore_warnings(category=RemovedInDjango21Warning) @override_settings(USE_ETAGS=True) def test_with_etag(self): template = engines['django'].from_string("This is a test") response = TemplateResponse(HttpRequest(), template) self.assertFalse(response.has_header('ETag')) patch_response_headers(response) self.assertFalse(response.has_header('ETag')) response = response.render() self.assertTrue(response.has_header('ETag')) class TestMakeTemplateFragmentKey(SimpleTestCase): def test_without_vary_on(self): key = make_template_fragment_key('a.fragment') self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e') def test_with_one_vary_on(self): key = make_template_fragment_key('foo', ['abc']) self.assertEqual(key, 'template.cache.foo.900150983cd24fb0d6963f7d28e17f72') def test_with_many_vary_on(self): key = make_template_fragment_key('bar', ['abc', 'def']) self.assertEqual(key, 'template.cache.bar.4b35f12ab03cec09beec4c21b2d2fa88') def test_proper_escaping(self): key = make_template_fragment_key('spam', ['abc:def%']) self.assertEqual(key, 'template.cache.spam.f27688177baec990cdf3fbd9d9c3f469') class CacheHandlerTest(SimpleTestCase): def test_same_instance(self): """ Attempting to retrieve the same alias should yield the same instance. """ cache1 = caches['default'] cache2 = caches['default'] self.assertIs(cache1, cache2) def test_per_thread(self): """ Requesting the same alias from separate threads should yield separate instances. """ c = [] def runner(): c.append(caches['default']) for x in range(2): t = threading.Thread(target=runner) t.start() t.join() self.assertIsNot(c[0], c[1])
{ "content_hash": "f1ec90497134da5726475269ce2cdbf4", "timestamp": "", "source": "github", "line_count": 2290, "max_line_length": 115, "avg_line_length": 39.76681222707423, "alnum_prop": 0.628071947818066, "repo_name": "camilonova/django", "id": "13023a29ae360a5b6eb5f5ec9504fb86e7edaf45", "size": "91211", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "tests/cache/tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "55935" }, { "name": "HTML", "bytes": "182943" }, { "name": "JavaScript", "bytes": "252645" }, { "name": "Makefile", "bytes": "125" }, { "name": "Python", "bytes": "11830666" }, { "name": "Shell", "bytes": "809" }, { "name": "Smarty", "bytes": "130" } ], "symlink_target": "" }
import Confidential

message = Confidential('top secret text')
secret_field = Confidential.getDeclaredField('secret')
secret_field.setAccessible(True)  # break the lock!
print 'message.secret =', secret_field.get(message)
{ "content_hash": "74c19593539d32459104b854377f6b2a", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 54, "avg_line_length": 37, "alnum_prop": 0.7837837837837838, "repo_name": "Wallace-dyfq/example-code", "id": "1df710c39026038fe44a21267ddfc0fe01e5a6c9", "size": "222", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "09-pythonic-obj/private/expose.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "5651" }, { "name": "Java", "bytes": "3443" }, { "name": "JavaScript", "bytes": "323" }, { "name": "Python", "bytes": "539185" }, { "name": "Shell", "bytes": "946" } ], "symlink_target": "" }
from unittest import TestCase

from mock import Mock

from qrl.core import config
from qrl.core.Indexer import Indexer
from qrl.core.misc import logger
from qrl.core.State import State
from qrl.core.StateContainer import StateContainer
from qrl.core.OptimizedAddressState import OptimizedAddressState
from qrl.core.ChainManager import ChainManager
from qrl.core.txs.SlaveTransaction import SlaveTransaction
from qrl.generated.qrl_pb2 import SlaveMetadata
from tests.misc.helper import get_alice_xmss, get_slave_xmss, set_qrl_dir

logger.initialize_default()


class TestSlaveTransactionStateChanges(TestCase):
    def setUp(self):
        with set_qrl_dir('no_data'):
            self.state = State()
        self.alice = get_alice_xmss()
        self.slave = get_slave_xmss()

        alice_address_state = OptimizedAddressState.get_default(self.alice.address)
        alice_address_state.pbdata.balance = 100
        self.addresses_state = {
            self.alice.address: alice_address_state,
            self.slave.address: OptimizedAddressState.get_default(self.slave.address)
        }
        self.params = {
            "slave_pks": [self.slave.pk],
            "access_types": [0],
            "fee": 1,
            "xmss_pk": self.alice.pk
        }
        self.unused_chain_manager_mock = Mock(autospec=ChainManager, name='unused ChainManager')

    def test_apply_slave_txn(self):
        tx = SlaveTransaction.create(**self.params)
        tx.sign(self.alice)
        addresses_state = dict(self.addresses_state)
        state_container = StateContainer(addresses_state=addresses_state,
                                         tokens=Indexer(b'token', None),
                                         slaves=Indexer(b'slave', None),
                                         lattice_pk=Indexer(b'lattice_pk', None),
                                         multi_sig_spend_txs=dict(),
                                         votes_stats=dict(),
                                         block_number=1,
                                         total_coin_supply=100,
                                         current_dev_config=config.dev,
                                         write_access=True,
                                         my_db=self.state._db,
                                         batch=None)
        tx.apply(self.state, state_container)

        self.assertEqual(addresses_state[self.alice.address].balance, 99)

        storage_key = state_container.paginated_tx_hash.generate_key(self.alice.address, 1)
        self.assertIn(storage_key, state_container.paginated_tx_hash.key_value)
        self.assertEqual([tx.txhash], state_container.paginated_tx_hash.key_value[storage_key])

        self.assertIn((tx.addr_from, tx.slave_pks[0]), state_container.slaves.data)
        data = state_container.slaves.data[(tx.addr_from, tx.slave_pks[0])]
        self.assertIsInstance(data, SlaveMetadata)
        self.assertEqual(tx.access_types[0], data.access_type)
        self.assertEqual(tx.txhash, data.tx_hash)

    def test_revert_slave_txn(self):
        tx = SlaveTransaction.create(**self.params)
        tx.sign(self.alice)
        addresses_state = dict(self.addresses_state)
        addresses_state[self.alice.address].pbdata.balance = 100
        state_container = StateContainer(addresses_state=addresses_state,
                                         tokens=Indexer(b'token', None),
                                         slaves=Indexer(b'slave', None),
                                         lattice_pk=Indexer(b'lattice_pk', None),
                                         multi_sig_spend_txs=dict(),
                                         votes_stats=dict(),
                                         block_number=1,
                                         total_coin_supply=100,
                                         current_dev_config=config.dev,
                                         write_access=True,
                                         my_db=self.state._db,
                                         batch=None)
        tx.apply(self.state, state_container)
        tx.revert(self.state, state_container)

        self.assertEqual(addresses_state[self.alice.address].balance, 100)

        storage_key = state_container.paginated_tx_hash.generate_key(self.alice.address, 1)
        self.assertIn(storage_key, state_container.paginated_tx_hash.key_value)
        self.assertEqual([], state_container.paginated_tx_hash.key_value[storage_key])

        self.assertIn((tx.addr_from, tx.slave_pks[0]), state_container.slaves.data)
        data = state_container.slaves.data[(tx.addr_from, tx.slave_pks[0])]
        self.assertIsInstance(data, SlaveMetadata)
        self.assertEqual(tx.access_types[0], data.access_type)
        self.assertEqual(tx.txhash, data.tx_hash)
{ "content_hash": "ac3a0ce7c0729bae63a1e94a0d8f9d9e", "timestamp": "", "source": "github", "line_count": 100, "max_line_length": 96, "avg_line_length": 48.35, "alnum_prop": 0.5698035160289555, "repo_name": "theQRL/QRL", "id": "45eaf91b152ca7cb180fef960e691218aa67557a", "size": "4835", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/core/txs/test_SlaveTransactionStateChanges.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "185833" }, { "name": "Python", "bytes": "1938166" }, { "name": "Shell", "bytes": "2126" } ], "symlink_target": "" }
import mock
import unittest
import os

from kfp_component.google.dataflow import launch_python

MODULE = 'kfp_component.google.dataflow._launch_python'


@mock.patch(MODULE + '.storage')
@mock.patch('kfp_component.google.dataflow._common_ops.display')
@mock.patch(MODULE + '.stage_file')
@mock.patch(MODULE + '.KfpExecutionContext')
@mock.patch(MODULE + '.DataflowClient')
@mock.patch(MODULE + '.Process')
@mock.patch(MODULE + '.subprocess')
class LaunchPythonTest(unittest.TestCase):

    def test_launch_python_succeed(self, mock_subprocess, mock_process, mock_client,
                                   mock_context, mock_stage_file, mock_display, mock_storage):
        mock_context().__enter__().context_id.return_value = 'ctx-1'
        mock_storage.Client().bucket().blob().exists.return_value = False
        mock_process().read_lines.return_value = [
            b'https://console.cloud.google.com/dataflow/jobs/us-central1/job-1?project=project-1'
        ]
        expected_job = {
            'id': 'job-1',
            'currentState': 'JOB_STATE_DONE'
        }
        mock_client().get_job.return_value = expected_job

        result = launch_python('/tmp/test.py', 'project-1', 'us-central1',
                               staging_dir='gs://staging/dir')

        self.assertEqual(expected_job, result)
        mock_storage.Client().bucket().blob().upload_from_string.assert_called_with(
            'job-1,us-central1'
        )

    def test_launch_python_retry_succeed(self, mock_subprocess, mock_process, mock_client,
                                         mock_context, mock_stage_file, mock_display, mock_storage):
        mock_context().__enter__().context_id.return_value = 'ctx-1'
        mock_storage.Client().bucket().blob().exists.return_value = True
        mock_storage.Client().bucket().blob().download_as_bytes.return_value = b'job-1,us-central1'
        expected_job = {
            'id': 'job-1',
            'currentState': 'JOB_STATE_DONE'
        }
        mock_client().get_job.return_value = expected_job

        result = launch_python('/tmp/test.py', 'project-1', 'us-central1',
                               staging_dir='gs://staging/dir')

        self.assertEqual(expected_job, result)
        mock_process.assert_not_called()

    def test_launch_python_no_job_created(self, mock_subprocess, mock_process, mock_client,
                                          mock_context, mock_stage_file, mock_display, mock_storage):
        mock_context().__enter__().context_id.return_value = 'ctx-1'
        mock_process().read_lines.return_value = [
            b'no job id',
            b'no job id'
        ]

        result = launch_python('/tmp/test.py', 'project-1', 'us-central1')

        self.assertEqual(None, result)
{ "content_hash": "3f0023ec1b2f3dfe03eb0580515831f6", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 106, "avg_line_length": 40.78125, "alnum_prop": 0.6386973180076628, "repo_name": "kubeflow/pipelines", "id": "e755b5c89a977e99bef23b2c5d4d5956e88b09ff", "size": "3196", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "components/gcp/container/component_sdk/python/tests/google/dataflow/test__launch_python.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "799" }, { "name": "CSS", "bytes": "2171" }, { "name": "Dockerfile", "bytes": "49331" }, { "name": "Go", "bytes": "1903937" }, { "name": "HTML", "bytes": "3656" }, { "name": "JavaScript", "bytes": "544297" }, { "name": "Jinja", "bytes": "938" }, { "name": "Jupyter Notebook", "bytes": "359548" }, { "name": "Makefile", "bytes": "22164" }, { "name": "Mustache", "bytes": "23652" }, { "name": "PowerShell", "bytes": "3194" }, { "name": "Python", "bytes": "5684887" }, { "name": "Shell", "bytes": "264595" }, { "name": "Smarty", "bytes": "8295" }, { "name": "Starlark", "bytes": "553" }, { "name": "TypeScript", "bytes": "4294958" } ], "symlink_target": "" }
import ast  # Tighten your belts...

from collections import namedtuple

from ..util import pypy, iterate, chunk as chunk_, Line, ensure_buffer


Pair = namedtuple('Pair', ('prefix', 'suffix'))
WrapFormat = namedtuple('WrapFormat', ('single', 'multiple', 'intra', 'indent'))

BARE_FORMAT = WrapFormat(Pair('', ''), Pair('', ''), Pair('', ''), 0)
UNBUFFERED_FORMAT = WrapFormat(Pair('yield ', ''), Pair('yield ', ''), Pair('yield ', ''), 0)

if pypy:
    BUFFERED_FORMAT = WrapFormat(Pair('_buffer.append(', ')'), Pair('_buffer.extend((', '))'), Pair('', ','), 1)
else:
    BUFFERED_FORMAT = WrapFormat(Pair('__ws(', ')'), Pair('__w((', '))'), Pair('', ','), 1)


class Text(object):
    """Identify and process contiguous blocks of template text."""

    UNBUFFERED = UNBUFFERED_FORMAT
    BUFFERED = BUFFERED_FORMAT

    priority = -25

    def match(self, context, line):
        """Identify if a line to be processed can be processed by this transformer."""
        return line.kind == 'text'  # This is common enough to short-circuit.

    @staticmethod
    def wrap(scope, lines, format=BARE_FORMAT):
        """Wrap a stream of lines in armour.

        Takes a stream of lines, for example, the following single line:

            Line(1, "Lorem ipsum dolor.")

        Or the following multiple lines:

            Line(1, "Lorem ipsum")
            Line(2, "dolor")
            Line(3, "sit amet.")

        Provides a generator of wrapped lines. For a single line, the following format is utilized:

            {format.single.prefix}{line.stripped}{format.single.suffix}

        In the above multi-line example, the following format would be utilized:

            {format.multiple.prefix}{line[1].stripped}{format.intra.suffix}
            {format.intra.prefix}{line[2].stripped}{format.intra.suffix}
            {format.intra.prefix}{line[3].stripped}{format.multiple.suffix}
        """
        for line in iterate(lines):
            prefix = suffix = ''

            if line.first and line.last:
                prefix = format.single.prefix
                suffix = format.single.suffix
            else:
                prefix = format.multiple.prefix if line.first else format.intra.prefix
                suffix = format.multiple.suffix if line.last else format.intra.suffix

            yield line.value.clone(line=prefix + line.value.stripped + suffix,
                                   scope=scope + (0 if line.first else format.indent))

    @staticmethod
    def gather(input):
        """Collect contiguous lines of text, preserving line numbers."""
        try:
            line = input.next()
        except StopIteration:
            return

        lead = True
        buffer = []

        # Gather contiguous (uninterrupted) lines of template text.
        while line.kind == 'text':
            value = line.line.rstrip().rstrip('\\') + ('' if line.continued else '\n')

            if lead and line.stripped:
                yield Line(line.number, value)
                lead = False
            elif not lead:
                if line.stripped:
                    for buf in buffer:
                        yield buf
                    buffer = []
                    yield Line(line.number, value)
                else:
                    buffer.append(Line(line.number, value))

            try:
                line = input.next()
            except StopIteration:
                line = None
                break

        if line:
            input.push(line)  # Put the last line back, as it won't be a text line.

    def process(self, context, lines):
        """Chop up individual lines into static and dynamic parts.

        Applies light optimizations, such as empty chunk removal, and calls out to other methods to process
        different chunk types.

        The processor protocol here requires the method to accept values by yielding resulting lines while
        accepting sent chunks. Deferral of multiple chunks is possible by yielding None. The processor will be
        sent None to be given a chance to yield a final line and perform any clean-up.
        """
        handler = None

        for line in lines:
            for chunk in chunk_(line):
                if 'strip' in context.flag:
                    chunk.line = chunk.stripped

                if not chunk.line:
                    continue  # Eliminate empty chunks, i.e. trailing text segments, ${}, etc.

                if not handler or handler[0] != chunk.kind:
                    if handler:
                        try:
                            result = next(handler[1])
                        except StopIteration:
                            result = None
                        if result:
                            yield result

                    handler = getattr(self, 'process_' + chunk.kind, self.process_generic)(chunk.kind, context)
                    handler = (chunk.kind, handler)
                    try:
                        next(handler[1])  # We fast-forward to the first yield.
                    except StopIteration:
                        return

                result = handler[1].send(chunk)  # Send the handler the next contiguous chunk.
                if result:
                    yield result

                if __debug__:  # In development mode we skip the contiguous chunk compaction optimization.
                    handler = (None, handler[1])

        # Clean up the final iteration.
        if handler:
            try:
                result = next(handler[1])
            except StopIteration:
                return
            if result:
                yield result

    def process_text(self, kind, context):
        """Combine multiple lines of bare text and emit as a Python string literal."""
        result = None

        while True:
            chunk = yield None
            if chunk is None:
                if result:
                    yield result.clone(line=repr(result.line))
                return

            if not result:
                result = chunk
                continue

            result.line += chunk.line  # Append contiguous lines together.
            # TODO: Preserve line number range().

    def process_generic(self, kind, context):
        """Transform otherwise unhandled kinds of chunks by calling an underscore-prefixed function by that name."""
        result = None

        while True:
            chunk = yield result
            if chunk is None:
                return
            result = chunk.clone(line='_' + kind + '(' + chunk.line + ')')

    def process_format(self, kind, context):
        """Handle transforming format string + arguments into Python code."""
        result = None

        while True:
            chunk = yield result
            if chunk is None:
                return

            # We need to split the expression defining the format string from the values to pass when formatting.
            # We want to allow any Python expression, so we'll need to piggyback on Python's own parser in order
            # to exploit the currently available syntax. Apologies, this is probably the scariest thing in here.
            split = -1
            line = chunk.line

            try:
                ast.parse(line)
            except SyntaxError as e:  # We expect this, and catch it. It'll have exploded after the first expr.
                split = line.rfind(' ', 0, e.offset)

            result = chunk.clone(line='_bless(' + line[:split].rstrip() + ').format(' + line[split:].lstrip() + ')')

    def __call__(self, context):
        # Make sure we have a buffer to write to, if we're operating in buffered mode.
        for i in ensure_buffer(context):
            yield i

        dirty = False

        lines = self.gather(context.input)
        lines = self.process(context, lines)
        lines = self.wrap(context.scope, lines, self.BUFFERED if 'buffer' in context.flag else self.UNBUFFERED)

        # Armour the lines as appropriate and emit them as generated.
        for line in lines:
            dirty = True
            yield line

        if dirty and 'text' in context.flag and 'dirty' not in context.flag:
            context.flag.add('dirty')
{ "content_hash": "5a4ca0ac25e99780d2f3301d433798f5", "timestamp": "", "source": "github", "line_count": 237, "max_line_length": 121, "avg_line_length": 29.126582278481013, "alnum_prop": 0.6604374909459655, "repo_name": "marrow/cinje", "id": "074e18d57395f59022c6e6674675a78ba2d71e17", "size": "6922", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "cinje/inline/text.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "878" }, { "name": "Python", "bytes": "84510" } ], "symlink_target": "" }
""" Created on Thu Jan 4 20:54:24 2018 @author: Stefan """ import numpy as np import matplotlib.pyplot as plt from Hashable import Hashable # from TrainingDataFromSgf import TrainingDataSgfPass import os import time import random import sqlite3 from Filters import apply_filters_by_id import collections import io # neccessarry to store arrays in database (from stackOverFlow) from TrainingDataFromSgf import TrainingDataSgfPass def adapt_array(arr): out = io.BytesIO() np.save(out, arr) out.seek(0) return sqlite3.Binary(out.read()) def convert_array(text): out = io.BytesIO(text) out.seek(0) return np.load(out) # Converts np.array to TEXT when inserting sqlite3.register_adapter(np.ndarray, adapt_array) # Converts TEXT to np.array when selecting sqlite3.register_converter("array", convert_array) def softmax(x): """Compute softmax values for each sets of scores in x.""" e_x = np.exp(x - np.max(x)) return e_x / e_x.sum() def relu(x): x[x < 0] = 0 return x class PolicyNet: def __init__(self, layers=[9*9, 1000, 100, 9*9+1], activation_function=0, filter_ids=[6,7,8]): # Specifications of the game self.n = 9 # 9x9 board self.filter_ids = filter_ids self.filtercount = len(filter_ids) layers[0] += self.filtercount*self.n*self.n # Parameters of the NN self.layers = layers # please leave the first and last equal zu n^2 for now self.activation_function = activation_function # Initialize the weights self.layercount = len(self.layers)-1 self.init_weights() # Momentum will be attached directly to the Neural Network self.momentum = [0]*self.layercount self.momentum_active = False if self.activation_function is 0: mu = 0 self.weights = [0]*self.layercount # alloc memory for i in range(0, self.layercount): sigma = 1/np.sqrt(self.layers[i+1]) self.weights[i] = np.random.normal(mu, sigma, (self.layers[i+1], self.layers[i]+1)) # the +1 in the input dimension is for the bias elif self.activation_function is 1: mu = 0 self.layercount = len(self.layers)-1 self.weights = [0]*self.layercount # alloc memory for i in range(0, self.layercount): sigma = np.sqrt(2)/np.sqrt(self.layers[i+1]) self.weights[i] = np.random.normal(mu, sigma, (self.layers[i+1], self.layers[i]+1)) # the +1 in the input dimension is for the bias def apply_filters(self, board, color=-1): filtered = apply_filters_by_id(board, color, self.filter_ids) return filtered # Function Definition yard # error functions # error fct Number 0 def compute_kl_divergence(self, suggested, target): # Compute Kullback-Leibler divergence, stabilized version t = target[target != 0] # ->we'd divide by 0 else, does not have inpact on error anyway s = suggested[target != 0] difference = s / t # this is stable error = - np.inner(t*np.log(difference), np.ones(len(t))) return error # error fct Number 1 def compute_ms_error(self, suggested, target): # Returns the total mean square error difference = np.absolute(suggested - target) error = 0.5 * np.inner(difference, difference) return error # error fct Number 2 def compute_hellinger_dist(self, suggested, target): return np.linalg.norm(np.sqrt(suggested) - np.sqrt(target), ord=2) / np.sqrt(2) # error fct Number 3 def compute_cross_entropy(self, suggested, target): return self.compute_entropy(target) + self.compute_kl_divergence(target, suggested) # error fct Number x, actually not a good one. Only for statistics. 
def compute_abs_error(self, suggested, target): # compare the prediction with the answer/target, absolute error difference = np.absolute(suggested - target) error = np.inner(difference, np.ones(len(target))) return error # Auxilary function for cross-entropy def compute_entropy(self, distribution): return -np.inner(distribution, np.log(distribution)) # Auxilary and Utilary Functions def compute_error(self, suggested, target, error_function): if error_function == 0: return self.compute_kl_divergence(suggested, target) elif error_function == 1: return self.compute_ms_error(suggested, target) elif error_function == 2: return self.compute_hellinger_dist(suggested, target) elif error_function == 3: return self.compute_cross_entropy(suggested, target) def init_weights(self): # Xavier Initialization if self.activation_function is 0: # TODO wie im last layer haben wir softmax! wie initialisieren??? mu = 0 self.weights = [0] * self.layercount # alloc memory for i in range(0, self.layercount): sigma = 1 / np.sqrt(self.layers[i + 1]) self.weights[i] = np.random.normal(mu, sigma, (self.layers[i + 1], self.layers[i] + 1)) # He Initialization elif self.activation_function is 1: mu = 0 self.weights = [0] * self.layercount # alloc memory for i in range(0, self.layercount): sigma = np.sqrt(2) / np.sqrt( self.layers[i + 1]) self.weights[i] = np.random.normal(mu, sigma, ( self.layers[i + 1], self.layers[i] + 1)) # edit: the +1 in the input dimension is for the bias def weight_ensemble(self, testset, instances=1, details=False): optimal_weights = self.weights optimal_value = self.PropagateSet(testset) first_value = optimal_value for i in range(instances): self.init_weights() weights = self.weights value = self.PropagateSet(testset) if value < optimal_value: optimal_value = value optimal_weights = weights improvement = first_value - optimal_value self.weights = optimal_weights if details: return improvement def convert_input(self, boardvector): # rescaling help function [-1,0,1]->[-1.35,0.45,1.05] boardvector = boardvector.astype(float) for i in range(0, len(boardvector)): if boardvector[i] == 0: boardvector[i] = 0.45 if boardvector[i] == -1: boardvector[i] = -1.35 if boardvector[i] == 1: boardvector[i] = 1.05 return boardvector def splitintobatches(self, trainingdata, batchsize): # splits trainingdata into batches of size batchsize N = len(trainingdata.dic) if batchsize > N: batchsize = N k = int(np.ceil(N/batchsize)) Batch_sets = [0]*k Batch_sets[0] = TrainingDataSgfPass() Batch_sets[0].dic = dict(list(trainingdata.dic.items())[:batchsize]) for i in range(k-1): Batch_sets[i] = TrainingDataSgfPass() Batch_sets[i].dic = dict(list(trainingdata.dic.items())[i*batchsize:(i+1)*batchsize]) Batch_sets[k-1] = TrainingDataSgfPass() Batch_sets[k-1].dic = dict(list(trainingdata.dic.items())[(k-1)*batchsize:N]) number_of_batchs = k return[number_of_batchs, Batch_sets] def gen_id_list_from_db(self, db_name, batchsize, sample_proportion, db_move=True): if not db_move: con = sqlite3.connect(r"DB/Dist/" + db_name, detect_types=sqlite3.PARSE_DECLTYPES) else: con = sqlite3.connect(r"DB/Move/" + db_name, detect_types=sqlite3.PARSE_DECLTYPES) cur = con.cursor() cur.execute("select count(*) from movedata") datasize = cur.fetchone()[0] con.close() dataprop = np.floor(float(datasize) * sample_proportion) number_of_batches = int(np.ceil(dataprop / batchsize)) id_set = set(range(int(datasize)) + np.ones(int(datasize), dtype='Int32')) batch_id_list = [0]*number_of_batches for i in range(number_of_batches): try: batch = 
set(random.sample(id_set, batchsize)) batch_id_list[i] = batch id_set -= batch except ValueError: batch_id_list[i] = id_set return [number_of_batches, batch_id_list] def extract_batches_from_id_list(self, number_of_batches, batch_id_list, db_name, db_move=True): # TODO 2 Beno: momentan: Konstruktion des batches-dict mithilfe der Id_list mit einzelnen db-Abfragen. # Optimierung: dict einmal generieren, danach nur shufflen. # id_list benutzen und neues dict aus altem dict auslesen? Was ist schneller? if not db_move: con = sqlite3.connect(r"DB/Dist/" + db_name, detect_types=sqlite3.PARSE_DECLTYPES) else: con = sqlite3.connect(r"DB/Move/" + db_name, detect_types=sqlite3.PARSE_DECLTYPES) cur = con.cursor() batches = collections.defaultdict() for i in range(len(batch_id_list)): batches[i] = collections.defaultdict() for key in batches.keys(): for j in batch_id_list[key]: cur.execute("select * from movedata where id = ?", (int(j),)) data = cur.fetchone() batches[key][int(j)] = data[1:] con.close() return [number_of_batches, batches] def gen_whole_set_from_id_list(self, batch_id_list, db_name, move_db=True): whole_set_id_list = [] for item in batch_id_list[0]: whole_set_id_list.append(item) if not move_db: con = sqlite3.connect(r"DB/Dist/" + db_name, detect_types=sqlite3.PARSE_DECLTYPES) else: con = sqlite3.connect(r"DB/Move/" + db_name, detect_types=sqlite3.PARSE_DECLTYPES) cur = con.cursor() dict = collections.defaultdict() j = 0 for i in whole_set_id_list: cur.execute("select * from movedata where id = ?", (int(i),)) data = cur.fetchone() dict[j] = data[1:] j += 1 return dict def saveweights(self, filename, folder='Saved_Weights'): dir_path = os.path.dirname(os.path.realpath(__file__)) file = dir_path + "/" + folder + "/" + filename np.savez(file, self.weights) def loadweightsfromfile(self, filename, folder='Saved_Weights', filter_ids=[0, 1, 2, 3, 4, 5, 6, 7]): # if file doesnt exist, do nothing dir_path = os.path.dirname(os.path.realpath(__file__)) file = dir_path + "/" + folder + "/" + filename if os.path.exists(file): with np.load(file) as data: self.filter_ids = filter_ids self.filtercount = len(filter_ids) self.weights = [] self.layer = [data['arr_0'][0].shape[1]] # there are n+1 layers if there are n weight matrices for i in range(len(data['arr_0'])): self.weights.append(data['arr_0'][i]) tempshape = data['arr_0'][i].shape self.layer.append(tempshape[0]) self.layercount = len(self.layer) - 1 elif os.path.exists(file + ".npz"): with np.load(file + ".npz") as data: self.weights = [] for i in range(len(data['arr_0'])): self.weights.append(data['arr_0'][i]) # The actual functions def learn(self, trainingdata, epochs=1, eta=0.001, batch_size=10, sample_proportion=1, error_function=0, db=False, db_name='none', adaptive_rule='logarithmic'): if adaptive_rule is "none": duplicate = True else: duplicate = False # TODO Stefan: if not db: # Dictionary Case [number_of_batches, batches] = self.splitintobatches(trainingdata, batch_size) else: id_list = self.gen_id_list_from_db(db_name, batch_size, sample_proportion, duplicate) errors_by_epoch = [] for epoch in range(0, epochs): print("current epoch: " + str(epoch)) errors_by_epoch.append(0) if db: [number_of_batches, batches] = self.extract_batches_from_id_list(number_of_batches, id_list, db_name) for i_batch in range(number_of_batches): batch = batches[i_batch] error_in_batch = self.learn_batch(batch, eta, error_function, db, adaptive_rule=adaptive_rule) errors_by_epoch[epoch] += error_in_batch errors_by_epoch[epoch] = errors_by_epoch[epoch] / 
number_of_batches return errors_by_epoch """ def Learnsplit(self, trainingdata, eta, batch_size, stoch_coeff, error_function, trainingrate, error_tolerance, maxepochs): N = len(trainingdata.dic) splitindex = int(round(N*trainingrate)) trainingset, testset = TrainingDataSgfPass(), TrainingDataSgfPass() trainingset.dic = dict(list(trainingdata.dic.items())[:splitindex]) testset.dic = dict(list(trainingdata.dic.items())[splitindex:]) error = [error_tolerance+1] epochs = 0 while error[-1:][0] > error_tolerance and epochs < maxepochs: epochs += 1 self.Learn(trainingdata, 1, batch_size, stoch_coeff, error_function) error.append(self.PropagateSet(testset,error_function)) return [error,epochs] """ # Takes a batch, propagates all boards in that batch while accumulating delta weights. Then sums the delta weights # up and then adjusts the weights of the Network. def learn_batch(self, batch, eta_start=0.01, error_function=0, db=False, adaptive_rule="linear", error_feedback=True, regularization=0, momentum=0): deltaweights_batch = [0] * self.layercount if not db: # Dictionary case selection = random.sample(list(batch.dic.keys()), len(batch.dic)) # This is indeed random order. else: selection = list(batch.keys()) batch_counter = 0 for entry in selection: if not db: # Usual Dictionary case. Extract input and target. t0 = Hashable.unwrap(entry) tf = self.apply_filters(t0.reshape((9, 9))) testdata = np.append(self.convert_input(t0),(tf)) targ = batch.dic[entry].reshape(9*9+1) # target output, this is to be approximated else: # DB case t0 = batch[entry][0] tf = [] helpme = batch[entry] for i in range(len(self.filter_ids)): tf.extend(batch[entry][i+2]) testdata = np.append(self.convert_input(t0),(tf)) targ = batch[entry][1].reshape(9 * 9 + 1) if np.sum(targ) > 0: # We can only learn if there are actual target vectors batch_counter += 1 targ_sum = np.sum(targ) # save this for the adaptive eta targ = targ/np.linalg.norm(targ, ord=1) # normalize (L1-norm) y = np.append(testdata, [1]) # We append 1 for the bias ys = [0]*self.layercount # alocate storage space, y_saved for backpropagation # Forward-propagate for i in range(0, self.layercount): w = self.weights[i] s = w.dot(y) if i == self.layercount-1: # softmax as activationfct only in last layer y = np.append(softmax(s), [1]) elif self.activation_function is 0: # in all other hidden layers we use tanh/relu as activation fct y = np.append(np.tanh(s), [1]) elif self.activation_function is 1: y = np.append(relu(s), [1]) ys[i] = y # save the y values for backpropagation out = y # Back-propagate # Calculate Jacobian of the softmax activationfct in last layer only jacobian_softmax = [0] * self.layercount for i in range(self.layercount-1, self.layercount): # Please note that I think this is pure witchcraft happening here yt = ys[i] # load y from ys and lets call it yt y_temporary yt = yt[:-1] # the last entry is from the offset, we don't need this le = len(yt) jacobian_softmax_temporary = np.ones((le, le)) # alloc storage temporarily for j in range(0, le): jacobian_softmax_temporary[j, :] *= yt[j] jacobian_softmax_temporary = np.identity(le) - jacobian_softmax_temporary for j in range(0, le): jacobian_softmax_temporary[:, j] *= yt[j] jacobian_softmax[i] = jacobian_softmax_temporary # Calculate Jacobian fot the not-last layers, sparse version (slightly faster, without np.diag()) if self.activation_function is 0: # Tanh jacobian_tanh = [0] * self.layercount for i in range(0, self.layercount): yt = ys[i] # load y from ys and lets call it yt yt = yt[:-1] # the last entry 
is from the offset, we don't need this u = 1 - yt * yt jacobian_tanh[i] = u jacobian_hidden = jacobian_tanh if self.activation_function is 1: # ReLU jacobian_relu = [0]*self.layercount for i in range(0, self.layercount): # please note I think this is pure witchcraft happening here yt = ys[i] # load y from ys and lets call it yt yt = yt[:-1] # the last entry is from the offset, we don't need this yt[yt > 0] = 1 jacobian_relu[i] = yt jacobian_hidden = jacobian_relu # Use (L2) and (L3) to get the error signals of the layers errorsignals = [0] * self.layercount errorsignals[self.layercount-1] = jacobian_softmax[self.layercount-1] for i in range(2, self.layercount+1): w = self.weights[self.layercount-i+1] dft = jacobian_hidden[self.layercount-i] w_load = w[:, :-1] errdet = np.zeros(w_load.shape) for k in range(len(dft)): # Sparse version errdet[:, k] = dft[k] * w_load[:, k] errorsignals[self.layercount-i] = np.dot(errorsignals[self.layercount-i+1], errdet) # Use (D3) to compute err_errorsignals as sum over the rows/columns? of the errorsignals weighted by # the deriv of the error fct by the output layer. We don't use Lemma 3 dircetly here, we just apply the # definition of delta_error. err_errorsignals = [0]*self.layercount if error_function is 0: errorbyyzero = -targ/out[:-1] # Kullback-Leibler divergence derivative elif error_function is 1: errorbyyzero = out[:-1]-targ # Mean-squared-error derivative elif error_function is 2: errorbyyzero = 1/4*(1-np.sqrt(targ)/np.sqrt(out[:-1])) # Hellinger-distance derivative for i in range(0, self.layercount): err_errorsignals[i] = np.dot(errorbyyzero, errorsignals[i]) # this is the matrix variant of (D3) # Use (2.2) to get the sought derivatives. Observe that this is an outer product, though not mentioned # in the source (Fuck you Heining, you b*stard) errorbyweights = [0]*self.layercount # dE/dW errorbyweights[0] = np.outer(err_errorsignals[0], testdata).T for i in range(1, self.layercount): errorbyweights[i] = np.outer(err_errorsignals[i-1], ys[i][:-1]) # (L1) # Compute the change of weights, then apply actualization step of Gradient Descent to weight matrices if adaptive_rule == "linear": eta = eta_start * targ_sum elif adaptive_rule == "logarithmic": eta = eta_start * np.log(2 + targ_sum) elif adaptive_rule == "none": eta = eta_start for i in range(0, self.layercount): if type(deltaweights_batch[i]) is int: # initialize deltaweights_batch[i] = -eta * errorbyweights[i] else: deltaweights_batch[i] -= eta * errorbyweights[i] # Regularization Factor: regul = (1-(eta_start*regularization)/batch_counter) # TODO which eta to choose for the regularization??? Eta start or some weighted eta? # TODO Frage: Muss ich dann auch die Error measures anpassen? # Now adjust weights for i in range(0, self.layercount): if type(deltaweights_batch[i]) is not int: # in this case we had no target for any board in this batch if self.momentum_active: self.weights[i][:, :-1] = regul * self.weights[i][:, :-1] + deltaweights_batch[i].T + momentum * self.momentum[i] else: self.weights[i][:, :-1] = regul * self.weights[i][:, :-1] + deltaweights_batch[i].T # Problem: atm we only adjust non-bias weights. Change that! 
TODO self.momentum[i] = deltaweights_batch[i].T + momentum * self.momentum[i] self.momentum_active = True if error_feedback: error = self.propagate_set(batch, db, adaptive_rule, error_function=error_function) return error def propagate_board(self, board): # Convert board to NeuroNet format (82-dim vector) if type(board) != list and type(board) != np.ndarray: board = board.vertices if len(board) != 82: board = board.flatten() board = np.asarray(board, float) # Like Heining we are setting: (-1.35:w, 0.45:empty, 1.05:b) tf = self.apply_filters(board.reshape((9, 9))) for i in range(0, len(board)): if board[i] == np.int(0): board[i] = 0.45 if board[i] == -1: board[i] = -1.35 if board[i] == 1: board[i] = 1.05 board = np.append(board, tf) y = np.append(board, [1]) # Forward-propagate for i in range(0, self.layercount): w = self.weights[i] s = w.dot(y) if i == self.layercount-1: # softmax as activationfct only in last layer y = np.append(softmax(s), [1]) elif self.activation_function is 0: y = np.append(np.tanh(s), [1]) elif self.activation_function is 1: y = np.append(relu(s), [1]) out = y[:-1] return out def propagate_set(self, testset, db=False, adaptive_rule='linear', error_function=0): error = 0 checked = 0 if not db: # Dictionary case selection = random.sample(list(testset.dic.keys()), len(testset.dic)) # This is indeed random order. else: selection = list(testset.keys()) for entry in selection: if not db: # Usual Dictionary case. Extract input and target. t0 = Hashable.unwrap(entry) tf = self.apply_filters(t0.reshape((9, 9))) testdata = np.append(self.convert_input(t0),(tf)) targ = testset.dic[entry].reshape(9*9+1) # target output, this is to be approximated else: # DB case t0 = testset[entry][0] tf = [] helpme = testset[entry] for i in range(len(self.filter_ids)): tf.extend(testset[entry][i+2]) testdata = np.append(self.convert_input(t0),(tf)) targ = testset[entry][1].reshape(9 * 9 + 1) if np.sum(targ) > 0: # We can only learn if there are actual target vectors targ_sum = np.sum(targ) targ = targ/np.linalg.norm(targ, ord=1) # normalize (L1-norm) y = np.append(testdata, [1]) # Forward-propagate for i in range(0, self.layercount): w = self.weights[i] s = w.dot(y) if i == self.layercount - 1: # softmax as activationfct only in last layer y = np.append(softmax(s), [1]) elif self.activation_function is 0: y = np.append(np.tanh(s), [1]) elif self.activation_function is 1: y = np.append(relu(s), [1]) # sum up the error error += self.compute_error(y[:-1], targ, error_function) if adaptive_rule == "linear": checked += 1 * targ_sum elif adaptive_rule == "logarithmic": checked += 1 * np.log(2 + targ_sum) elif adaptive_rule == "none": checked += 1 if checked is 0: print("The Set contained no feasible boards to propagate, sorry") return else: error = error / checked # average over the test set return error
{ "content_hash": "07f7f15bd671af3f461b82128ec5ea83", "timestamp": "", "source": "github", "line_count": 556, "max_line_length": 148, "avg_line_length": 46.94604316546763, "alnum_prop": 0.5568921921691824, "repo_name": "stefanpeidli/GoNet", "id": "0dd6db41fa457e993a251aef8bd214b5ea523887", "size": "26126", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "PolicyNet.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "259305" }, { "name": "TeX", "bytes": "135" } ], "symlink_target": "" }
"""Functions to bootstrap a new cluster. """ import os import os.path import re import logging import time from ganeti.cmdlib import cluster import ganeti.rpc.node as rpc from ganeti import ssh from ganeti import utils from ganeti import errors from ganeti import config from ganeti import constants from ganeti import objects from ganeti import ssconf from ganeti import serializer from ganeti import hypervisor from ganeti.storage import drbd from ganeti.storage import filestorage from ganeti import netutils from ganeti import luxi from ganeti import jstore from ganeti import pathutils from ganeti import runtime from ganeti import vcluster # ec_id for InitConfig's temporary reservation manager _INITCONF_ECID = "initconfig-ecid" #: After how many seconds daemon must be responsive _DAEMON_READY_TIMEOUT = 10.0 def GenerateHmacKey(file_name): """Writes a new HMAC key. @type file_name: str @param file_name: Path to output file """ utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400, backup=True) # pylint: disable=R0913 def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert, new_confd_hmac_key, new_cds, new_client_cert, master_name, rapi_cert_pem=None, spice_cert_pem=None, spice_cacert_pem=None, cds=None, nodecert_file=pathutils.NODED_CERT_FILE, clientcert_file=pathutils.NODED_CLIENT_CERT_FILE, rapicert_file=pathutils.RAPI_CERT_FILE, spicecert_file=pathutils.SPICE_CERT_FILE, spicecacert_file=pathutils.SPICE_CACERT_FILE, hmackey_file=pathutils.CONFD_HMAC_KEY, cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE): """Updates the cluster certificates, keys and secrets. @type new_cluster_cert: bool @param new_cluster_cert: Whether to generate a new cluster certificate @type new_rapi_cert: bool @param new_rapi_cert: Whether to generate a new RAPI certificate @type new_spice_cert: bool @param new_spice_cert: Whether to generate a new SPICE certificate @type new_confd_hmac_key: bool @param new_confd_hmac_key: Whether to generate a new HMAC key @type new_cds: bool @param new_cds: Whether to generate a new cluster domain secret @type new_client_cert: bool @param new_client_cert: Whether to generate a new client certificate @type master_name: string @param master_name: FQDN of the master node @type rapi_cert_pem: string @param rapi_cert_pem: New RAPI certificate in PEM format @type spice_cert_pem: string @param spice_cert_pem: New SPICE certificate in PEM format @type spice_cacert_pem: string @param spice_cacert_pem: Certificate of the CA that signed the SPICE certificate, in PEM format @type cds: string @param cds: New cluster domain secret @type nodecert_file: string @param nodecert_file: optional override of the node cert file path @type rapicert_file: string @param rapicert_file: optional override of the rapi cert file path @type spicecert_file: string @param spicecert_file: optional override of the spice cert file path @type spicecacert_file: string @param spicecacert_file: optional override of the spice CA cert file path @type hmackey_file: string @param hmackey_file: optional override of the hmac key file path """ # pylint: disable=R0913 # noded SSL certificate utils.GenerateNewSslCert( new_cluster_cert, nodecert_file, 1, "Generating new cluster certificate at %s" % nodecert_file) # If the cluster certificate was renewed, the client cert has to be # renewed and resigned. 
if new_cluster_cert or new_client_cert: utils.GenerateNewClientSslCert(clientcert_file, nodecert_file, master_name) # confd HMAC key if new_confd_hmac_key or not os.path.exists(hmackey_file): logging.debug("Writing new confd HMAC key to %s", hmackey_file) GenerateHmacKey(hmackey_file) if rapi_cert_pem: # Assume rapi_pem contains a valid PEM-formatted certificate and key logging.debug("Writing RAPI certificate at %s", rapicert_file) utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True) else: utils.GenerateNewSslCert( new_rapi_cert, rapicert_file, 1, "Generating new RAPI certificate at %s" % rapicert_file) # SPICE spice_cert_exists = os.path.exists(spicecert_file) spice_cacert_exists = os.path.exists(spicecacert_file) if spice_cert_pem: # spice_cert_pem implies also spice_cacert_pem logging.debug("Writing SPICE certificate at %s", spicecert_file) utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True) logging.debug("Writing SPICE CA certificate at %s", spicecacert_file) utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True) elif new_spice_cert or not spice_cert_exists: if spice_cert_exists: utils.CreateBackup(spicecert_file) if spice_cacert_exists: utils.CreateBackup(spicecacert_file) logging.debug("Generating new self-signed SPICE certificate at %s", spicecert_file) (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file, 1) # Self-signed certificate -> the public certificate is also the CA public # certificate logging.debug("Writing the public certificate to %s", spicecert_file) utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem) # Cluster domain secret if cds: logging.debug("Writing cluster domain secret to %s", cds_file) utils.WriteFile(cds_file, data=cds, backup=True) elif new_cds or not os.path.exists(cds_file): logging.debug("Generating new cluster domain secret at %s", cds_file) GenerateHmacKey(cds_file) def _InitGanetiServerSetup(master_name, cfg): """Setup the necessary configuration for the initial node daemon. This creates the nodepass file containing the shared password for the cluster, generates the SSL certificate and starts the node daemon. @type master_name: str @param master_name: Name of the master node @type cfg: ConfigWriter @param cfg: the configuration writer """ # Generate cluster secrets GenerateClusterCrypto(True, False, False, False, False, False, master_name) # Add the master's SSL certificate digest to the configuration. 
  master_uuid = cfg.GetMasterNode()
  master_digest = utils.GetCertificateDigest()
  cfg.AddNodeToCandidateCerts(master_uuid, master_digest)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  if not os.path.exists(
      os.path.join(pathutils.DATA_DIR,
                   "%s%s" % (constants.SSCONF_FILEPREFIX,
                             constants.SS_MASTER_CANDIDATES_CERTS))):
    raise errors.OpExecError("Ssconf file for master candidate certificates"
                             " was not written.")

  if not os.path.exists(pathutils.NODED_CERT_FILE):
    raise errors.OpExecError("The server certificate was not created properly.")

  if not os.path.exists(pathutils.NODED_CLIENT_CERT_FILE):
    raise errors.OpExecError("The client certificate was not created"
                             " properly.")

  # set up the inter-node password and certificate
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    # Pylint bug <http://www.logilab.org/ticket/35642>
    # pylint: disable=E1101
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" %
                             (node_name, _DAEMON_READY_TIMEOUT))


def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _WaitForSshDaemon(hostname, port):
  """Wait for SSH daemon to become responsive.

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
      logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                    " responsive", hostname, port, hostip)
    else:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))


def _InitFileStorageDir(file_storage_dir):
  """Initialize the file storage directory, if needed.
  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build time)
      or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  return file_storage_dir


def _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    default_dir, file_disk_template,
    _storage_path_acceptance_fn,
    init_fn=_InitFileStorageDir, acceptance_fn=None):
  """Checks if a file-based storage type is enabled and inits the dir.

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates
  @type file_storage_dir: string
  @param file_storage_dir: the file storage directory
  @type default_dir: string
  @param default_dir: default file storage directory when C{file_storage_dir}
      is 'None'
  @type file_disk_template: string
  @param file_disk_template: a disk template whose storage type is 'ST_FILE',
      'ST_SHARED_FILE' or 'ST_GLUSTER'
  @type _storage_path_acceptance_fn: function
  @param _storage_path_acceptance_fn: checks whether the given file-based
      storage directory is acceptable
  @see: C{cluster.CheckFileBasedStoragePathVsEnabledDiskTemplates} for details

  @rtype: string
  @return: the name of the actual file storage directory

  """
  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
      constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
  ))

  file_storage_enabled = file_disk_template in enabled_disk_templates
  if file_storage_dir is None:
    if file_storage_enabled:
      file_storage_dir = default_dir
    else:
      file_storage_dir = ""

  if not acceptance_fn:
    acceptance_fn = \
        lambda path: filestorage.CheckFileStoragePathAcceptance(
            path, exact_match_ok=True)

  _storage_path_acceptance_fn(logging.warning, file_storage_dir,
                              enabled_disk_templates)

  if file_storage_enabled:
    try:
      acceptance_fn(file_storage_dir)
    except errors.FileStoragePathError as e:
      raise errors.OpPrereqError(str(e))
    result_file_storage_dir = init_fn(file_storage_dir)
  else:
    result_file_storage_dir = file_storage_dir
  return result_file_storage_dir


def _PrepareFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_FILE_STORAGE_DIR, constants.DT_FILE,
      cluster.CheckFileStoragePathVsEnabledDiskTemplates,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareSharedFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if shared file storage is enabled and inits the dir.
@see: C{_PrepareFileBasedStorage} """ return _PrepareFileBasedStorage( enabled_disk_templates, file_storage_dir, pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, constants.DT_SHARED_FILE, cluster.CheckSharedFileStoragePathVsEnabledDiskTemplates, init_fn=init_fn, acceptance_fn=acceptance_fn) def _PrepareGlusterStorage( enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir, acceptance_fn=None): """Checks if gluster storage is enabled and inits the dir. @see: C{_PrepareFileBasedStorage} """ return _PrepareFileBasedStorage( enabled_disk_templates, file_storage_dir, pathutils.DEFAULT_GLUSTER_STORAGE_DIR, constants.DT_GLUSTER, cluster.CheckGlusterStoragePathVsEnabledDiskTemplates, init_fn=init_fn, acceptance_fn=acceptance_fn) def _InitCheckEnabledDiskTemplates(enabled_disk_templates): """Checks the sanity of the enabled disk templates. """ if not enabled_disk_templates: raise errors.OpPrereqError("Enabled disk templates list must contain at" " least one member", errors.ECODE_INVAL) invalid_disk_templates = \ set(enabled_disk_templates) - constants.DISK_TEMPLATES if invalid_disk_templates: raise errors.OpPrereqError("Enabled disk templates list contains invalid" " entries: %s" % invalid_disk_templates, errors.ECODE_INVAL) def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates): """Restricts the ipolicy's disk templates to the enabled ones. This function clears the ipolicy's list of allowed disk templates from the ones that are not enabled by the cluster. @type ipolicy: dict @param ipolicy: the instance policy @type enabled_disk_templates: list of string @param enabled_disk_templates: the list of cluster-wide enabled disk templates """ assert constants.IPOLICY_DTS in ipolicy allowed_disk_templates = ipolicy[constants.IPOLICY_DTS] restricted_disk_templates = list(set(allowed_disk_templates) .intersection(set(enabled_disk_templates))) ipolicy[constants.IPOLICY_DTS] = restricted_disk_templates def _InitCheckDrbdHelper(drbd_helper, drbd_enabled): """Checks the DRBD usermode helper. @type drbd_helper: string @param drbd_helper: name of the DRBD usermode helper that the system should use """ if not drbd_enabled: return if drbd_helper is not None: try: curr_helper = drbd.DRBD8.GetUsermodeHelper() except errors.BlockDeviceError, err: raise errors.OpPrereqError("Error while checking drbd helper" " (disable drbd with --enabled-disk-templates" " if you are not using drbd): %s" % str(err), errors.ECODE_ENVIRON) if drbd_helper != curr_helper: raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s" " is the current helper" % (drbd_helper, curr_helper), errors.ECODE_INVAL) def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914 master_netmask, master_netdev, file_storage_dir, shared_file_storage_dir, gluster_storage_dir, candidate_pool_size, ssh_key_type, ssh_key_bits, secondary_ip=None, vg_name=None, beparams=None, nicparams=None, ndparams=None, hvparams=None, diskparams=None, enabled_hypervisors=None, modify_etc_hosts=True, modify_ssh_setup=True, maintain_node_health=False, drbd_helper=None, uid_pool=None, default_iallocator=None, default_iallocator_params=None, primary_ip_version=None, ipolicy=None, prealloc_wipe_disks=False, use_external_mip_script=False, hv_state=None, disk_state=None, enabled_disk_templates=None, install_image=None, zeroing_image=None, compression_tools=None, enabled_user_shutdown=False): """Initialise the cluster. 
@type candidate_pool_size: int @param candidate_pool_size: master candidate pool size @type enabled_disk_templates: list of string @param enabled_disk_templates: list of disk_templates to be used in this cluster @type enabled_user_shutdown: bool @param enabled_user_shutdown: whether user shutdown is enabled cluster wide """ # TODO: complete the docstring if config.ConfigWriter.IsCluster(): raise errors.OpPrereqError("Cluster is already initialised", errors.ECODE_STATE) data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR) queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR) archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR) for ddir in [queue_dir, data_dir, archive_dir]: if os.path.isdir(ddir): for entry in os.listdir(ddir): if not os.path.isdir(os.path.join(ddir, entry)): raise errors.OpPrereqError( "%s contains non-directory entries like %s. Remove left-overs of an" " old cluster before initialising a new one" % (ddir, entry), errors.ECODE_STATE) if not enabled_hypervisors: raise errors.OpPrereqError("Enabled hypervisors list must contain at" " least one member", errors.ECODE_INVAL) invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES if invalid_hvs: raise errors.OpPrereqError("Enabled hypervisors contains invalid" " entries: %s" % invalid_hvs, errors.ECODE_INVAL) _InitCheckEnabledDiskTemplates(enabled_disk_templates) try: ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version) except errors.ProgrammerError: raise errors.OpPrereqError("Invalid primary ip version: %d." % primary_ip_version, errors.ECODE_INVAL) hostname = netutils.GetHostname(family=ipcls.family) if not ipcls.IsValid(hostname.ip): raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d" " address." % (hostname.ip, primary_ip_version), errors.ECODE_INVAL) if ipcls.IsLoopback(hostname.ip): raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback" " address. Please fix DNS or %s." % (hostname.ip, pathutils.ETC_HOSTS), errors.ECODE_ENVIRON) if not ipcls.Own(hostname.ip): raise errors.OpPrereqError("Inconsistency: this host's name resolves" " to %s,\nbut this ip address does not" " belong to this host" % hostname.ip, errors.ECODE_ENVIRON) clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family) if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5): raise errors.OpPrereqError("Cluster IP already active", errors.ECODE_NOTUNIQUE) if not secondary_ip: if primary_ip_version == constants.IP6_VERSION: raise errors.OpPrereqError("When using a IPv6 primary address, a valid" " IPv4 address must be given as secondary", errors.ECODE_INVAL) secondary_ip = hostname.ip if not netutils.IP4Address.IsValid(secondary_ip): raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid" " IPv4 address." % secondary_ip, errors.ECODE_INVAL) if not netutils.IP4Address.Own(secondary_ip): raise errors.OpPrereqError("You gave %s as secondary IP," " but it does not belong to this host." 
% secondary_ip, errors.ECODE_ENVIRON) if master_netmask is not None: if not ipcls.ValidateNetmask(master_netmask): raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " % (master_netmask, primary_ip_version), errors.ECODE_INVAL) else: master_netmask = ipcls.iplen if vg_name: # Check if volume group is valid vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name, constants.MIN_VG_SIZE) if vgstatus: raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL) drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates _InitCheckDrbdHelper(drbd_helper, drbd_enabled) logging.debug("Stopping daemons (if any are running)") result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"]) if result.failed: raise errors.OpExecError("Could not stop daemons, command %s" " had exitcode %s and error '%s'" % (result.cmd, result.exit_code, result.output)) file_storage_dir = _PrepareFileStorage(enabled_disk_templates, file_storage_dir) shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates, shared_file_storage_dir) gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates, gluster_storage_dir) if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix): raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix, errors.ECODE_INVAL) if not nicparams.get('mode', None) == constants.NIC_MODE_OVS: # Do not do this check if mode=openvswitch, since the openvswitch is not # created yet result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev]) if result.failed: raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" % (master_netdev, result.output.strip()), errors.ECODE_INVAL) dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)] utils.EnsureDirs(dirs) objects.UpgradeBeParams(beparams) utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES) utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES) objects.NIC.CheckParameterSyntax(nicparams) full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy) _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates) if ndparams is not None: utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES) else: ndparams = dict(constants.NDC_DEFAULTS) # This is ugly, as we modify the dict itself # FIXME: Make utils.ForceDictType pure functional or write a wrapper # around it if hv_state: for hvname, hvs_data in hv_state.items(): utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES) hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data) else: hv_state = dict((hvname, constants.HVST_DEFAULTS) for hvname in enabled_hypervisors) # FIXME: disk_state has no default values yet if disk_state: for storage, ds_data in disk_state.items(): if storage not in constants.DS_VALID_TYPES: raise errors.OpPrereqError("Invalid storage type in disk state: %s" % storage, errors.ECODE_INVAL) for ds_name, state in ds_data.items(): utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES) ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state) # hvparams is a mapping of hypervisor->hvparams dict for hv_name, hv_params in hvparams.iteritems(): utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES) hv_class = hypervisor.GetHypervisor(hv_name) hv_class.CheckParameterSyntax(hv_params) # diskparams is a mapping of disk-template->diskparams dict for template, dt_params in diskparams.items(): param_keys = set(dt_params.keys()) default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys()) if not (param_keys <= 
default_param_keys): unknown_params = param_keys - default_param_keys raise errors.OpPrereqError("Invalid parameters for disk template %s:" " %s" % (template, utils.CommaJoin(unknown_params)), errors.ECODE_INVAL) utils.ForceDictType(dt_params, constants.DISK_DT_TYPES) if template == constants.DT_DRBD8 and vg_name is not None: # The default METAVG value is equal to the VG name set at init time, # if provided dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name try: utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS) except errors.OpPrereqError, err: raise errors.OpPrereqError("While verify diskparam options: %s" % err, errors.ECODE_INVAL) # set up ssh config and /etc/hosts rsa_sshkey = "" dsa_sshkey = "" if os.path.isfile(pathutils.SSH_HOST_RSA_PUB): sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB) rsa_sshkey = sshline.split(" ")[1] if os.path.isfile(pathutils.SSH_HOST_DSA_PUB): sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB) dsa_sshkey = sshline.split(" ")[1] if not rsa_sshkey and not dsa_sshkey: raise errors.OpPrereqError("Failed to find SSH public keys", errors.ECODE_ENVIRON) if modify_etc_hosts: utils.AddHostToEtcHosts(hostname.name, hostname.ip) if modify_ssh_setup: ssh.InitSSHSetup(ssh_key_type, ssh_key_bits) if default_iallocator is not None: alloc_script = utils.FindFile(default_iallocator, constants.IALLOCATOR_SEARCH_PATH, os.path.isfile) if alloc_script is None: raise errors.OpPrereqError("Invalid default iallocator script '%s'" " specified" % default_iallocator, errors.ECODE_INVAL) else: # default to htools if utils.FindFile(constants.IALLOC_HAIL, constants.IALLOCATOR_SEARCH_PATH, os.path.isfile): default_iallocator = constants.IALLOC_HAIL # check if we have all the users we need try: runtime.GetEnts() except errors.ConfigurationError, err: raise errors.OpPrereqError("Required system user/group missing: %s" % err, errors.ECODE_ENVIRON) candidate_certs = {} now = time.time() if compression_tools is not None: cluster.CheckCompressionTools(compression_tools) initial_dc_config = dict(active=True, interval=int(constants.MOND_TIME_INTERVAL * 1e6)) data_collectors = dict( (name, initial_dc_config.copy()) for name in constants.DATA_COLLECTOR_NAMES) # init of cluster config file cluster_config = objects.Cluster( serial_no=1, rsahostkeypub=rsa_sshkey, dsahostkeypub=dsa_sshkey, highest_used_port=(constants.FIRST_DRBD_PORT - 1), mac_prefix=mac_prefix, volume_group_name=vg_name, tcpudp_port_pool=set(), master_ip=clustername.ip, master_netmask=master_netmask, master_netdev=master_netdev, cluster_name=clustername.name, file_storage_dir=file_storage_dir, shared_file_storage_dir=shared_file_storage_dir, gluster_storage_dir=gluster_storage_dir, enabled_hypervisors=enabled_hypervisors, beparams={constants.PP_DEFAULT: beparams}, nicparams={constants.PP_DEFAULT: nicparams}, ndparams=ndparams, hvparams=hvparams, diskparams=diskparams, candidate_pool_size=candidate_pool_size, modify_etc_hosts=modify_etc_hosts, modify_ssh_setup=modify_ssh_setup, uid_pool=uid_pool, ctime=now, mtime=now, maintain_node_health=maintain_node_health, data_collectors=data_collectors, drbd_usermode_helper=drbd_helper, default_iallocator=default_iallocator, default_iallocator_params=default_iallocator_params, primary_ip_family=ipcls.family, prealloc_wipe_disks=prealloc_wipe_disks, use_external_mip_script=use_external_mip_script, ipolicy=full_ipolicy, hv_state_static=hv_state, disk_state_static=disk_state, enabled_disk_templates=enabled_disk_templates, candidate_certs=candidate_certs, osparams={}, 
osparams_private_cluster={}, install_image=install_image, zeroing_image=zeroing_image, compression_tools=compression_tools, enabled_user_shutdown=enabled_user_shutdown, ssh_key_type=ssh_key_type, ssh_key_bits=ssh_key_bits, ) master_node_config = objects.Node(name=hostname.name, primary_ip=hostname.ip, secondary_ip=secondary_ip, serial_no=1, master_candidate=True, offline=False, drained=False, ctime=now, mtime=now, ) InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config) cfg = config.ConfigWriter(offline=True) ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE) cfg.Update(cfg.GetClusterInfo(), logging.error) ssconf.WriteSsconfFiles(cfg.GetSsconfValues()) master_uuid = cfg.GetMasterNode() if modify_ssh_setup: ssh.InitPubKeyFile(master_uuid, ssh_key_type) # set up the inter-node password and certificate _InitGanetiServerSetup(hostname.name, cfg) logging.debug("Starting daemons") result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"]) if result.failed: raise errors.OpExecError("Could not start daemons, command %s" " had exitcode %s and error %s" % (result.cmd, result.exit_code, result.output)) _WaitForMasterDaemon() def InitConfig(version, cluster_config, master_node_config, cfg_file=pathutils.CLUSTER_CONF_FILE): """Create the initial cluster configuration. It will contain the current node, which will also be the master node, and no instances. @type version: int @param version: configuration version @type cluster_config: L{objects.Cluster} @param cluster_config: cluster configuration @type master_node_config: L{objects.Node} @param master_node_config: master node configuration @type cfg_file: string @param cfg_file: configuration file path """ uuid_generator = config.TemporaryReservationManager() cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID) master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID) cluster_config.master_node = master_node_config.uuid nodes = { master_node_config.uuid: master_node_config, } default_nodegroup = objects.NodeGroup( uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID), name=constants.INITIAL_NODE_GROUP_NAME, members=[master_node_config.uuid], diskparams={}, ) nodegroups = { default_nodegroup.uuid: default_nodegroup, } now = time.time() maintenance = objects.Maintenance(serial_no=1, ctime=now, mtime=now) config_data = objects.ConfigData(version=version, cluster=cluster_config, nodegroups=nodegroups, nodes=nodes, instances={}, networks={}, disks={}, filters={}, maintenance=maintenance, serial_no=1, ctime=now, mtime=now) utils.WriteFile(cfg_file, data=serializer.Dump(config_data.ToDict()), mode=0600) def FinalizeClusterDestroy(master_uuid): """Execute the last steps of cluster destroy This function shuts down all the daemons, completing the destroy begun in cmdlib.LUDestroyOpcode. 
""" livelock = utils.livelock.LiveLock("bootstrap_destroy") cfg = config.GetConfig(None, livelock) modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup runner = rpc.BootstrapRunner() master_name = cfg.GetNodeName(master_uuid) master_params = cfg.GetMasterNetworkParameters() master_params.uuid = master_uuid ems = cfg.GetUseExternalMipScript() result = runner.call_node_deactivate_master_ip(master_name, master_params, ems) msg = result.fail_msg if msg: logging.warning("Could not disable the master IP: %s", msg) result = runner.call_node_stop_master(master_name) msg = result.fail_msg if msg: logging.warning("Could not disable the master role: %s", msg) result = runner.call_node_leave_cluster(master_name, modify_ssh_setup) msg = result.fail_msg if msg: logging.warning("Could not shutdown the node daemon and cleanup" " the node: %s", msg) def SetupNodeDaemon(opts, cluster_name, node, ssh_port): """Add a node to the cluster. This function must be called before the actual opcode, and will ssh to the remote node, copy the needed files, and start ganeti-noded, allowing the master to do the rest via normal rpc calls. @param cluster_name: the cluster name @param node: the name of the new node @param ssh_port: the SSH port of the new node """ data = { constants.NDS_CLUSTER_NAME: cluster_name, constants.NDS_NODE_DAEMON_CERTIFICATE: utils.ReadFile(pathutils.NODED_CERT_FILE), constants.NDS_HMAC: utils.ReadFile(pathutils.CONFD_HMAC_KEY), constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(), constants.NDS_START_NODE_DAEMON: True, constants.NDS_NODE_NAME: node, } ssh.RunSshCmdWithStdin(cluster_name, node, pathutils.NODE_DAEMON_SETUP, ssh_port, data, debug=opts.debug, verbose=opts.verbose, use_cluster_key=True, ask_key=opts.ssh_key_check, strict_host_check=opts.ssh_key_check, ensure_version=True) _WaitForSshDaemon(node, ssh_port) _WaitForNodeDaemon(node) def MasterFailover(no_voting=False): """Failover the master node. This checks that we are not already the master, and will cause the current master to cease being master, and the non-master to become new master. @type no_voting: boolean @param no_voting: force the operation without remote nodes agreement (dangerous) @returns: the pair of an exit code and warnings to display """ sstore = ssconf.SimpleStore() old_master, new_master = ssconf.GetMasterAndMyself(sstore) node_names = sstore.GetNodeList() mc_list = sstore.GetMasterCandidates() if old_master == new_master: raise errors.OpPrereqError("This commands must be run on the node" " where you want the new master to be." " %s is already the master" % old_master, errors.ECODE_INVAL) if new_master not in mc_list: mc_no_master = [name for name in mc_list if name != old_master] raise errors.OpPrereqError("This node is not among the nodes marked" " as master candidates. Only these nodes" " can become masters. Current list of" " master candidates is:\n" "%s" % ("\n".join(mc_no_master)), errors.ECODE_STATE) if not no_voting: vote_list = GatherMasterVotes(node_names) if vote_list: voted_master = vote_list[0][0] if voted_master is None: raise errors.OpPrereqError("Cluster is inconsistent, most nodes did" " not respond.", errors.ECODE_ENVIRON) elif voted_master != old_master: raise errors.OpPrereqError("I have a wrong configuration, I believe" " the master is %s but the other nodes" " voted %s. Please resync the configuration" " of this node." 
% (old_master, voted_master), errors.ECODE_STATE) # end checks rcode = 0 warnings = [] logging.info("Setting master to %s, old master: %s", new_master, old_master) try: # Forcefully start WConfd so that we can access the configuration result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.WCONFD, "--force-node", "--no-voting", "--yes-do-it"]) if result.failed: raise errors.OpPrereqError("Could not start the configuration daemon," " command %s had exitcode %s and error %s" % (result.cmd, result.exit_code, result.output), errors.ECODE_NOENT) # instantiate a real config writer, as we now know we have the # configuration data livelock = utils.livelock.LiveLock("bootstrap_failover") cfg = config.GetConfig(None, livelock, accept_foreign=True) old_master_node = cfg.GetNodeInfoByName(old_master) if old_master_node is None: raise errors.OpPrereqError("Could not find old master node '%s' in" " cluster configuration." % old_master, errors.ECODE_NOENT) cluster_info = cfg.GetClusterInfo() new_master_node = cfg.GetNodeInfoByName(new_master) if new_master_node is None: raise errors.OpPrereqError("Could not find new master node '%s' in" " cluster configuration." % new_master, errors.ECODE_NOENT) cluster_info.master_node = new_master_node.uuid # this will also regenerate the ssconf files, since we updated the # cluster info cfg.Update(cluster_info, logging.error) # if cfg.Update worked, then it means the old master daemon won't be # able now to write its own config file (we rely on locking in both # backend.UploadFile() and ConfigWriter._Write(); hence the next # step is to kill the old master logging.info("Stopping the master daemon on node %s", old_master) runner = rpc.BootstrapRunner() master_params = cfg.GetMasterNetworkParameters() master_params.uuid = old_master_node.uuid ems = cfg.GetUseExternalMipScript() result = runner.call_node_deactivate_master_ip(old_master, master_params, ems) msg = result.fail_msg if msg: warning = "Could not disable the master IP: %s" % (msg,) logging.warning("%s", warning) warnings.append(warning) result = runner.call_node_stop_master(old_master) msg = result.fail_msg if msg: warning = ("Could not disable the master role on the old master" " %s, please disable manually: %s" % (old_master, msg)) logging.error("%s", warning) warnings.append(warning) except errors.ConfigurationError, err: logging.error("Error while trying to set the new master: %s", str(err)) return 1, warnings finally: # stop WConfd again: result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.WCONFD]) if result.failed: warning = ("Could not stop the configuration daemon," " command %s had exitcode %s and error %s" % (result.cmd, result.exit_code, result.output)) logging.error("%s", warning) rcode = 1 logging.info("Checking master IP non-reachability...") master_ip = sstore.GetMasterIP() total_timeout = 30 # Here we have a phase where no master should be running def _check_ip(expected): if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT) != expected: raise utils.RetryAgain() try: utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[False]) except utils.RetryTimeout: warning = ("The master IP is still reachable after %s seconds," " continuing but activating the master IP on the current" " node will probably fail" % total_timeout) logging.warning("%s", warning) warnings.append(warning) rcode = 1 if jstore.CheckDrainFlag(): logging.info("Undraining job queue") jstore.SetDrainFlag(False) logging.info("Starting the master daemons on the new master") result = 
rpc.BootstrapRunner().call_node_start_master_daemons(new_master, no_voting) msg = result.fail_msg if msg: logging.error("Could not start the master role on the new master" " %s, please check: %s", new_master, msg) rcode = 1 # Finally verify that the new master managed to set up the master IP # and warn if it didn't. try: utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[True]) except utils.RetryTimeout: warning = ("The master IP did not come up within %s seconds; the" " cluster should still be working and reachable via %s," " but not via the master IP address" % (total_timeout, new_master)) logging.warning("%s", warning) warnings.append(warning) rcode = 1 logging.info("Master failed over from %s to %s", old_master, new_master) return rcode, warnings def GetMaster(): """Returns the current master node. This is a separate function in bootstrap since it's needed by gnt-cluster, and instead of importing directly ssconf, it's better to abstract it in bootstrap, where we do use ssconf in other functions too. """ sstore = ssconf.SimpleStore() old_master, _ = ssconf.GetMasterAndMyself(sstore) return old_master def GatherMasterVotes(node_names): """Check the agreement on who is the master. This function will return a list of (node, number of votes), ordered by the number of votes. Errors will be denoted by the key 'None'. Note that the sum of votes is the number of nodes this machine knows, whereas the number of entries in the list could be different (if some nodes vote for another master). @type node_names: list @param node_names: the list of nodes to query for master info @rtype: list @return: list of (node, votes) """ if not node_names: # no nodes return [] results = rpc.BootstrapRunner().call_master_node_name(node_names) if not isinstance(results, dict): # this should not happen (unless internal error in rpc) logging.critical("Can't complete rpc call, aborting master startup") return [(None, len(node_names))] votes = {} for node_name in results: nres = results[node_name] msg = nres.fail_msg if msg: logging.warning("Error contacting node %s: %s", node_name, msg) node = None else: node = nres.payload if node not in votes: votes[node] = 1 else: votes[node] += 1 vote_list = [v for v in votes.items()] # sort first on number of votes then on name, since we want None # sorted later if we have the half of the nodes not responding, and # half voting all for the same master vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True) return vote_list def MajorityHealthy(): """Check if the majority of nodes is healthy Gather master votes from all nodes known to this node; return True if a strict majority of nodes is reachable and has some opinion on which node is master. Note that this will not guarantee any node to win an election but it ensures that a standard master-failover is still possible. """ node_names = ssconf.SimpleStore().GetNodeList() node_count = len(node_names) vote_list = GatherMasterVotes(node_names) if vote_list is None: return False total_votes = sum([count for (node, count) in vote_list if node is not None]) logging.info("Total %d nodes, %d votes: %s", node_count, total_votes, vote_list) return 2 * total_votes > node_count
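# --- Illustrative aside (not part of bootstrap.py) ---
# The arithmetic behind GatherMasterVotes and MajorityHealthy, reduced to a
# standalone sketch: tally each node's opinion of who the master is (None for
# unreachable nodes) and require a strict majority of real answers. The
# sample data below is made up for demonstration.
def tally_votes(opinions):
    votes = {}
    for node in opinions:
        votes[node] = votes.get(node, 0) + 1
    # highest vote count first; tie-breaking is left out of this sketch
    return sorted(votes.items(), key=lambda kv: kv[1], reverse=True)

def majority_healthy(opinions):
    answered = sum(1 for node in opinions if node is not None)
    return 2 * answered > len(opinions)

opinions = ['node1', 'node1', 'node2', None, 'node1']
assert tally_votes(opinions)[0] == ('node1', 3)
assert majority_healthy(opinions)  # 4 of 5 nodes gave an answer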
{ "content_hash": "79c8800e83ca95f942b622bfdaa1b90e", "timestamp": "", "source": "github", "line_count": 1197, "max_line_length": 80, "avg_line_length": 38.48203842940685, "alnum_prop": 0.6476564704860734, "repo_name": "leshchevds/ganeti", "id": "2b668b9673642918de7112ccaecaac489e4691b5", "size": "47435", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/bootstrap.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Haskell", "bytes": "2664853" }, { "name": "JavaScript", "bytes": "8855" }, { "name": "M4", "bytes": "32087" }, { "name": "Makefile", "bytes": "97737" }, { "name": "Python", "bytes": "6099533" }, { "name": "Shell", "bytes": "122593" } ], "symlink_target": "" }
""" Server API Reference for Server API (REST/Json) OpenAPI spec version: 2.0.21 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class OrderStateListResponse(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, data=None, pagination=None): """ OrderStateListResponse - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'data': 'list[OrderState]', 'pagination': 'Pagination' } self.attribute_map = { 'data': 'data', 'pagination': 'pagination' } self._data = data self._pagination = pagination @property def data(self): """ Gets the data of this OrderStateListResponse. :return: The data of this OrderStateListResponse. :rtype: list[OrderState] """ return self._data @data.setter def data(self, data): """ Sets the data of this OrderStateListResponse. :param data: The data of this OrderStateListResponse. :type: list[OrderState] """ self._data = data @property def pagination(self): """ Gets the pagination of this OrderStateListResponse. :return: The pagination of this OrderStateListResponse. :rtype: Pagination """ return self._pagination @pagination.setter def pagination(self, pagination): """ Sets the pagination of this OrderStateListResponse. :param pagination: The pagination of this OrderStateListResponse. :type: Pagination """ self._pagination = pagination def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
{ "content_hash": "582892f70d5640cfa6d0e2a67fe0ebc7", "timestamp": "", "source": "github", "line_count": 134, "max_line_length": 77, "avg_line_length": 26.03731343283582, "alnum_prop": 0.5316709658928059, "repo_name": "kinow-io/kinow-python-sdk", "id": "e8482e034433fcc98c3a259e5181f4e464a23b07", "size": "3506", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "kinow_client/models/order_state_list_response.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "4659182" }, { "name": "Shell", "bytes": "1666" } ], "symlink_target": "" }
import pandas as pd unrate = pd.read_csv('unrate.csv') unrate['DATE'] = pd.to_datetime(unrate['DATE']) print(unrate.head(12)) ## 6. Introduction to Matplotlib ## import matplotlib.pyplot as plt plt.plot() plt.show() ## 7. Adding Data ## plt.plot(unrate['DATE'].head(12),unrate['VALUE'].head(12)) ## 8. Fixing Axis Ticks ## plt.plot(unrate['DATE'].head(12),unrate['VALUE'].head(12)) plt.xticks(rotation=90) ## 9. Adding Axis Labels And A Title ## plt.plot(unrate['DATE'].head(12),unrate['VALUE'].head(12)) plt.xticks(rotation=90) plt.xlabel('Month') plt.ylabel('Unemployment Rate') plt.title('Monthly Unemployment Trends, 1948')
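## Putting It Together ##

# A single runnable version of the steps above, assuming unrate.csv with DATE
# and VALUE columns is available as in the exercises; plt.show() renders the
# finished chart once instead of after every step.
import pandas as pd
import matplotlib.pyplot as plt

unrate = pd.read_csv('unrate.csv')
unrate['DATE'] = pd.to_datetime(unrate['DATE'])
first_year = unrate.head(12)

plt.plot(first_year['DATE'], first_year['VALUE'])
plt.xticks(rotation=90)
plt.xlabel('Month')
plt.ylabel('Unemployment Rate')
plt.title('Monthly Unemployment Trends, 1948')
plt.show()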
{ "content_hash": "0a0c1c048467b21a99266708b1f3b888", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 58, "avg_line_length": 23.51851851851852, "alnum_prop": 0.6960629921259842, "repo_name": "vipmunot/Data-Analysis-using-Python", "id": "6c77e3984d9a815c4bc027e44ccb8374f0ab209f", "size": "670", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Exploratory Data Visualization/Line Charts-215.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "19716" }, { "name": "Python", "bytes": "283672" }, { "name": "R", "bytes": "7194" }, { "name": "Shell", "bytes": "9169" } ], "symlink_target": "" }
__author__ = 'greghines' import pymongo client = pymongo.MongoClient() db = client['condor_2015-01-22'] classification_collection = db["condor_classifications"] subject_collection = db["condor_subjects"] print subject_collection.find({"state":"complete"}).count() for subject in subject_collection.find({"state":"complete"}).limit(40): print subject["metadata"]["file"]
{ "content_hash": "5cc809218f21f1826d949fff7c8778e8", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 71, "avg_line_length": 27.071428571428573, "alnum_prop": 0.7255936675461742, "repo_name": "camallen/aggregation", "id": "522c3293c5cfdea27b985a3595cc07c36af211a8", "size": "401", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "experimental/condor/completionStats.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "723" }, { "name": "Python", "bytes": "1676640" }, { "name": "Scala", "bytes": "629" }, { "name": "Shell", "bytes": "95" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [("data_finder", "0006_loggedpostcode_api_user")] operations = [ migrations.AlterField( model_name="loggedpostcode", name="api_user", field=models.CharField(null=True, max_length=30, blank=True), ) ]
{ "content_hash": "d6983cfc691fb8168e324a59463c0835", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 73, "avg_line_length": 25.25, "alnum_prop": 0.6361386138613861, "repo_name": "DemocracyClub/UK-Polling-Stations", "id": "34d279e55ddefd1c50de3953d6b67a8000ba49c1", "size": "428", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "polling_stations/apps/data_finder/migrations/0007_auto_20170426_0951.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "32" }, { "name": "HTML", "bytes": "85540" }, { "name": "JavaScript", "bytes": "3399" }, { "name": "Procfile", "bytes": "49" }, { "name": "Python", "bytes": "1111337" }, { "name": "SCSS", "bytes": "5742" } ], "symlink_target": "" }
from unittest.mock import MagicMock from unittest.mock import patch from cauldron.session.writing.components import plotly_component from cauldron.test.support import scaffolds def _fake_import(*args, **kwargs): raise ImportError('Fake Error') class TestPlotlyComponent(scaffolds.ResultsTest): """...""" def test_import_error(self): """...""" with patch('builtins.__import__') as import_func: import_func.side_effect = _fake_import component = plotly_component.create(MagicMock()) self.assertEqual(len(component.files), 0) self.assertEqual(len(component.includes), 0) def test_version_one_import_error(self): """...""" with patch('builtins.__import__') as import_func: import_func.side_effect = _fake_import result = plotly_component.get_version_one_path() self.assertIsNone(result) def test_version_one(self): """...""" result = plotly_component.get_version_one_path() self.assertIsNotNone(result) def test_version_two_import_error(self): """...""" with patch('builtins.__import__') as import_func: import_func.side_effect = _fake_import result = plotly_component.get_version_two_path() self.assertIsNone(result) def test_version_two(self): """...""" result = plotly_component.get_version_two_path() self.assertIsNotNone(result)
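# --- Illustrative aside (not part of the test module) ---
# The tests above patch builtins.__import__ so that any import raises,
# simulating a missing optional dependency. The same guarded-import pattern,
# reduced to a standalone sketch (the module name is just an example):
from unittest.mock import patch


def load_plotly_or_none():
    try:
        import plotly  # only checking availability
    except ImportError:
        return None
    return plotly


with patch('builtins.__import__', side_effect=ImportError('Fake Error')):
    assert load_plotly_or_none() is None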
{ "content_hash": "589869a319eb8bf63b9906afbd3c5355", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 64, "avg_line_length": 30.645833333333332, "alnum_prop": 0.6267845003399048, "repo_name": "sernst/cauldron", "id": "ff6e00ea3c80172c87b52a2e01075a94c94c2ff3", "size": "1471", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cauldron/test/session/writing/test_plotly_component.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "36" }, { "name": "CSS", "bytes": "1369" }, { "name": "Dockerfile", "bytes": "842" }, { "name": "HTML", "bytes": "21740" }, { "name": "JavaScript", "bytes": "48753" }, { "name": "Python", "bytes": "913057" }, { "name": "SCSS", "bytes": "17130" }, { "name": "Shell", "bytes": "300" }, { "name": "Vue", "bytes": "95790" } ], "symlink_target": "" }
from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('flisol_event', '0004_auto_20141229_2309'), ] operations = [ migrations.AddField( model_name='flisolattendance', name='comment', field=models.TextField(help_text='let us know how you can help best', verbose_name='comment', blank=True), preserve_default=True, ), migrations.AddField( model_name='flisolmachine', name='comment', field=models.TextField(verbose_name='comment post installation', blank=True), preserve_default=True, ), migrations.AddField( model_name='flisolmachine', name='flisol_instance', field=models.ForeignKey(related_name='machines', default=1, verbose_name='instance', to='flisol_event.FlisolInstance'), preserve_default=False, ), migrations.AlterField( model_name='flisolattendance', name='flisol_instance', field=models.ForeignKey(related_name='attendants', verbose_name='instance', to='flisol_event.FlisolInstance'), preserve_default=True, ), ]
{ "content_hash": "4cdaa7e3376b8dbb1126667d786fd48e", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 131, "avg_line_length": 34.891891891891895, "alnum_prop": 0.6041828040278854, "repo_name": "ikks/flisol-connect", "id": "7f181a7f5dc742c9be1dc97deef6d40fe44a6a48", "size": "1315", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "flisol_event/migrations/0005_auto_20150107_2124.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "663" }, { "name": "JavaScript", "bytes": "7742" }, { "name": "Python", "bytes": "81440" } ], "symlink_target": "" }
import unittest

import code_helper


class Test0012(unittest.TestCase):
    def test_problem(self):
        primes = list(code_helper.range_prime(10000))
        triangle_number = -1
        for n in range(7000, 20000):
            triangle_number = n * (n + 1) // 2  # integer division keeps this an int
            divisors = 1
            s = triangle_number
            for prime in primes:
                if s < prime:
                    break
                if s % prime == 0:
                    count = 1
                    while s % prime == 0:
                        s //= prime
                        count += 1
                    # a prime with exponent e contributes a factor of e + 1
                    divisors *= count
            if divisors > 500:
                break
        self.assertEqual(triangle_number, 76576500)
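# --- Illustrative aside (not part of the test) ---
# The test relies on the divisor-count identity: if n = p1^e1 * ... * pk^ek,
# then n has (e1 + 1) * ... * (ek + 1) divisors. A self-contained helper that
# needs no precomputed prime list (trial division up to sqrt(n)):

def count_divisors(n):
    divisors = 1
    p = 2
    while p * p <= n:
        exponent = 0
        while n % p == 0:
            n //= p
            exponent += 1
        divisors *= exponent + 1
        p += 1
    if n > 1:  # one prime factor larger than sqrt(n) is left over
        divisors *= 2
    return divisors

assert count_divisors(28) == 6          # divisors: 1, 2, 4, 7, 14, 28
assert count_divisors(76576500) == 576  # the triangle number checked above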
{ "content_hash": "e45aec6533aad8a9da129cecfba858d9", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 53, "avg_line_length": 29.16, "alnum_prop": 0.44170096021947874, "repo_name": "mccxj/online-judge-code-example", "id": "bb6778ecdb438556e533c3c273782a6776df6d8a", "size": "729", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "projecteuler/p0012_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Java", "bytes": "393380" }, { "name": "Python", "bytes": "33727" } ], "symlink_target": "" }
NAME = "CocoNodz" VERSION_MAJOR = 0 VERSION_MINOR = 0 VERSION_PATCH = 1 version_info = [str(VERSION_MAJOR), str(VERSION_MINOR), str(VERSION_PATCH)] version = ".".join(version_info) __version__ = version __all__ = ["version", "version_info", "__version__"]
{ "content_hash": "90e0752540ef73b367218198ec6608fe", "timestamp": "", "source": "github", "line_count": 11, "max_line_length": 75, "avg_line_length": 23.454545454545453, "alnum_prop": 0.6627906976744186, "repo_name": "rkoschmitzky/coconodz", "id": "933080497e0d3b5f4a446fb0f7e7409d692d227d", "size": "258", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "version.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "142444" } ], "symlink_target": "" }
def ping(event, server): server.send_cmd('PONG :%s', event['server']) print 'PONG PING'
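# --- Context sketch (assumed API, for illustration only) ---
# IRC servers periodically send "PING :<token>" and drop clients that do not
# reply with a matching "PONG :<token>"; the handler above formats that reply.
# A minimal standalone version with made-up names:
def handle_ping(line, send_line):
    if line.startswith('PING :'):
        send_line('PONG :%s' % line[len('PING :'):])

handle_ping('PING :irc.example.net', lambda reply: None)  # would reply with PONG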
{ "content_hash": "e3a2976ecbaa9206821441ed4a42564a", "timestamp": "", "source": "github", "line_count": 4, "max_line_length": 48, "avg_line_length": 24.25, "alnum_prop": 0.6185567010309279, "repo_name": "iogf/candocabot", "id": "d6425e5b285e9e4f9125592f8628541de0e0171d", "size": "97", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ping.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "92092" } ], "symlink_target": "" }
import zounds import argparse samplerate = zounds.SR11025() BaseModel = zounds.stft(resample_to=samplerate, store_fft=True) @zounds.simple_in_memory_settings class Sound(BaseModel): pass if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--local-path', required=True, type=str, help='local path where the nsynth tar files should be stored') parser.add_argument( '--port', default=8888, type=int, help='port to run the in-browser REPL in') args = parser.parse_args() ns = zounds.NSynth(path=args.local_path) zounds.ingest(ns, Sound, multi_threaded=True) app = zounds.ZoundsApp( model=Sound, audio_feature=Sound.ogg, visualization_feature=Sound.fft, globals=globals(), locals=locals()) app.start(args.port)
{ "content_hash": "c54e0fe7155fec48b5d174b6d18df82a", "timestamp": "", "source": "github", "line_count": 36, "max_line_length": 70, "avg_line_length": 24.583333333333332, "alnum_prop": 0.6293785310734463, "repo_name": "JohnVinyard/zounds", "id": "b61192e3148c551ac9f3090e6c717b180694a60b", "size": "885", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/download_nsynth.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "1511" }, { "name": "HTML", "bytes": "5376" }, { "name": "JavaScript", "bytes": "11191" }, { "name": "Python", "bytes": "743652" }, { "name": "Shell", "bytes": "251" } ], "symlink_target": "" }
import itertools

from heat.openstack.common import log
from heat.common import exception

LOG = log.getLogger(__name__)


class ResourceInfo(object):
    """Base mapping of resource type to implementation."""

    def __new__(cls, registry, path, value, **kwargs):
        '''Create a new ResourceInfo of the appropriate class.'''

        if cls != ResourceInfo:
            # Call is already for a subclass, so pass it through
            return super(ResourceInfo, cls).__new__(cls)

        name = path[-1]
        if name.endswith(('.yaml', '.template')):
            # a template url for the resource "Type"
            return TemplateResourceInfo(registry, path, value)
        elif not isinstance(value, basestring):
            return ClassResourceInfo(registry, path, value)
        elif value.endswith(('.yaml', '.template')):
            # a registered template
            return TemplateResourceInfo(registry, path, value)
        elif name.endswith('*'):
            return GlobResourceInfo(registry, path, value)
        else:
            return MapResourceInfo(registry, path, value)

    def __init__(self, registry, path, value):
        self.registry = registry
        self.path = path
        self.name = path[-1]
        self.value = value
        self.user_resource = True

    def __eq__(self, other):
        return (self.path == other.path and
                self.value == other.value and
                self.user_resource == other.user_resource)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        if self.user_resource != other.user_resource:
            # user resources must be sorted above system ones.
            return self.user_resource > other.user_resource
        if len(self.path) != len(other.path):
            # a more specific (longer) path must be sorted above shorter ones.
            return len(self.path) > len(other.path)
        return self.path < other.path

    def __gt__(self, other):
        return other.__lt__(self)

    def get_resource_info(self, resource_type=None, resource_name=None):
        return self

    def matches(self, resource_type):
        return False

    def __str__(self):
        return '[%s](User:%s) %s -> %s' % (self.description,
                                           self.user_resource,
                                           self.name, str(self.value))


class ClassResourceInfo(ResourceInfo):
    """Store the mapping of resource name to python class implementation."""
    description = 'Plugin'

    def get_class(self):
        return self.value


class TemplateResourceInfo(ResourceInfo):
    """Store the info needed to start a TemplateResource."""
    description = 'Template'

    def __init__(self, registry, path, value):
        super(TemplateResourceInfo, self).__init__(registry, path, value)
        if self.name.endswith(('.yaml', '.template')):
            self.template_name = self.name
        else:
            self.template_name = value

    def get_class(self):
        from heat.engine.resources import template_resource
        return template_resource.TemplateResource


class MapResourceInfo(ResourceInfo):
    """Store the mapping of one resource type to another.

    like: OS::Networking::FloatingIp -> OS::Neutron::FloatingIp
    """
    description = 'Mapping'

    def get_class(self):
        return None

    def get_resource_info(self, resource_type=None, resource_name=None):
        return self.registry.get_resource_info(self.value, resource_name)


class GlobResourceInfo(MapResourceInfo):
    """Store the mapping (with wild cards) of one resource type to another.

    like: OS::Networking::* -> OS::Neutron::*
    """
    description = 'Wildcard Mapping'

    def get_resource_info(self, resource_type=None, resource_name=None):
        orig_prefix = self.name[:-1]
        new_type = self.value[:-1] + resource_type[len(orig_prefix):]
        return self.registry.get_resource_info(new_type, resource_name)

    def matches(self, resource_type):
        return resource_type.startswith(self.name[:-1])


class ResourceRegistry(object):
    """By looking at the environment, find the resource implementation."""

    def __init__(self, global_registry):
        self._registry = {'resources': {}}
        self.global_registry = global_registry

    def load(self, json_snippet):
        self._load_registry([], json_snippet)

    def register_class(self, resource_type, resource_class):
        ri = ResourceInfo(self, [resource_type], resource_class)
        self._register_info([resource_type], ri)

    def _load_registry(self, path, registry):
        for k, v in iter(registry.items()):
            if isinstance(v, dict):
                self._load_registry(path + [k], v)
            else:
                self._register_info(path + [k],
                                    ResourceInfo(self, path + [k], v))

    def _register_info(self, path, info):
        """Place the new info in the correct location in the registry.

        path: a list of keys ['resources', 'my_server', 'OS::Compute::Server']
        """
        descriptive_path = '/'.join(path)
        name = path[-1]
        # create the structure if needed
        registry = self._registry
        for key in path[:-1]:
            if key not in registry:
                registry[key] = {}
            registry = registry[key]

        if name in registry and isinstance(registry[name], ResourceInfo):
            details = {
                'path': descriptive_path,
                'was': str(registry[name].value),
                'now': str(info.value)}
            LOG.warn(_('Changing %(path)s from %(was)s to %(now)s') % details)
        else:
            LOG.info(_('Registering %(path)s -> %(value)s') % {
                'path': descriptive_path, 'value': str(info.value)})
        info.user_resource = (self.global_registry is not None)
        registry[name] = info

    def iterable_by(self, resource_type, resource_name=None):
        is_templ_type = resource_type.endswith(('.yaml', '.template'))
        if self.global_registry is not None and is_templ_type:
            # we only support dynamic resource types in user environments
            # not the global environment.
            # resource with a Type == a template
            # we dynamically create an entry as it has not been registered.
            if resource_type not in self._registry:
                res = ResourceInfo(self, [resource_type], None)
                self._register_info([resource_type], res)
            yield self._registry[resource_type]

        # handle a specific resource mapping.
        if resource_name:
            impl = self._registry['resources'].get(resource_name)
            if impl and resource_type in impl:
                yield impl[resource_type]

        # handle: "OS::Compute::Server" -> "Rackspace::Compute::Server"
        impl = self._registry.get(resource_type)
        if impl:
            yield impl

        # handle: "OS::*" -> "Dreamhost::*"
        def is_a_glob(resource_type):
            return resource_type.endswith('*')
        globs = itertools.ifilter(is_a_glob, self._registry.keys())
        for glob in globs:
            if self._registry[glob].matches(resource_type):
                yield self._registry[glob]

    def get_resource_info(self, resource_type, resource_name=None,
                          registry_type=None):
        """Find possible matches to the resource type and name.

        Chain the results from the global and user registry to find
        a match.
        """
        # use cases
        # 1) get the impl.
        #    - filter_by(res_type=X), sort_by(res_name=W, is_user=True)
        # 2) in TemplateResource we need to get both the
        #    TemplateClass and the ResourceClass
        #    - filter_by(res_type=X, impl_type=TemplateResourceInfo),
        #      sort_by(res_name=W, is_user=True)
        #    - filter_by(res_type=X, impl_type=ClassResourceInfo),
        #      sort_by(res_name=W, is_user=True)
        # 3) get_types() from the api
        #    - filter_by(is_user=False)
        # 4) as_dict() to write to the db
        #    - filter_by(is_user=True)
        if self.global_registry is not None:
            giter = self.global_registry.iterable_by(resource_type,
                                                     resource_name)
        else:
            giter = []

        matches = itertools.chain(self.iterable_by(resource_type,
                                                   resource_name),
                                  giter)

        for info in sorted(matches):
            match = info.get_resource_info(resource_type,
                                           resource_name)
            if registry_type is None or isinstance(match, registry_type):
                return match

    def get_class(self, resource_type, resource_name=None):
        info = self.get_resource_info(resource_type,
                                      resource_name=resource_name)
        if info is None:
            msg = "Unknown resource Type : %s" % resource_type
            raise exception.StackValidationFailed(message=msg)
        return info.get_class()

    def as_dict(self):
        """Return user resources in a dict format."""
        def _as_dict(level):
            tmp = {}
            for k, v in iter(level.items()):
                if isinstance(v, dict):
                    tmp[k] = _as_dict(v)
                elif v.user_resource:
                    tmp[k] = v.value
            return tmp

        return _as_dict(self._registry)

    def get_types(self):
        '''Return a list of valid resource types.'''
        def is_plugin(key):
            if isinstance(self._registry[key], ClassResourceInfo):
                return True
            return False
        return [k for k in self._registry if is_plugin(k)]


SECTIONS = (PARAMETERS, RESOURCE_REGISTRY) = \
    ('parameters', 'resource_registry')


class Environment(object):
    def __init__(self, env=None, user_env=True):
        """Create an Environment from a dict of varying format.

        1) old-school flat parameters
        2) or newer {resource_registry: bla, parameters: foo}

        :param env: the json environment
        :param user_env: boolean, if false then we manage python resources
                         too.
        """
        if env is None:
            env = {}
        if user_env:
            from heat.engine import resources
            global_registry = resources.global_env().registry
        else:
            global_registry = None

        self.registry = ResourceRegistry(global_registry)
        self.registry.load(env.get(RESOURCE_REGISTRY, {}))

        if 'parameters' in env:
            self.params = env['parameters']
        else:
            self.params = dict((k, v) for (k, v) in env.iteritems()
                               if k != RESOURCE_REGISTRY)

    def load(self, env_snippet):
        self.registry.load(env_snippet.get(RESOURCE_REGISTRY, {}))
        self.params.update(env_snippet.get('parameters', {}))

    def user_env_as_dict(self):
        """Get the environment as a dict, ready for storing in the db."""
        return {RESOURCE_REGISTRY: self.registry.as_dict(),
                PARAMETERS: self.params}

    def register_class(self, resource_type, resource_class):
        self.registry.register_class(resource_type, resource_class)

    def get_class(self, resource_type, resource_name=None):
        return self.registry.get_class(resource_type, resource_name)

    def get_types(self):
        return self.registry.get_types()

    def get_resource_info(self, resource_type, resource_name=None,
                          registry_type=None):
        return self.registry.get_resource_info(resource_type, resource_name,
                                               registry_type)
{ "content_hash": "1e48f693b262fb89bcb42c25a75d4e83", "timestamp": "", "source": "github", "line_count": 322, "max_line_length": 79, "avg_line_length": 36.97826086956522, "alnum_prop": 0.5762156714537667, "repo_name": "rickerc/heat_audit", "id": "db9f2e2a67eb3f2d0b2f4689ac10d566eb2dcd99", "size": "12527", "binary": false, "copies": "3", "ref": "refs/heads/cis-havana-staging", "path": "heat/engine/environment.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "2811491" }, { "name": "Shell", "bytes": "21618" } ], "symlink_target": "" }
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig

import sys
sys.path.append("utuputki/")
sys.path.append("../utuputki/")

import settings
from common import db

settings.config_init()

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = db.Base.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.

# this will overwrite the ini-file sqlalchemy.url path
# with the path given in the config of the main code
config.set_main_option('sqlalchemy.url', settings.DATABASE_CONFIG)


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL and not an Engine,
    though an Engine is acceptable here as well. By skipping the Engine
    creation we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
        url=url, target_metadata=target_metadata, literal_binds=True)

    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    connectable = engine_from_config(
        config.get_section(config.config_ini_section),
        prefix='sqlalchemy.',
        poolclass=pool.NullPool)

    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
            compare_type=True
        )

        with context.begin_transaction():
            context.run_migrations()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
{ "content_hash": "be20e1dea023185b85d7b246a361b994", "timestamp": "", "source": "github", "line_count": 82, "max_line_length": 69, "avg_line_length": 28.573170731707318, "alnum_prop": 0.7102005975245412, "repo_name": "katajakasa/utuputki2", "id": "48a25ad61c56c1cde0d71d3ae1859759966420a7", "size": "2343", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "alembic/env.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "273" }, { "name": "HTML", "bytes": "13557" }, { "name": "JavaScript", "bytes": "67790" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "78209" }, { "name": "Shell", "bytes": "2261" } ], "symlink_target": "" }
import mock
from oslo_config import cfg
from oslo_log import log as logging
import requests
import testtools

from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests import base as test_base
from mistral.tests.unit.engine import base
from mistral.workflow import states

LOG = logging.getLogger(__name__)

# Use the set_default method to set value otherwise in certain test cases
# the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')


ENV = {
    '__actions': {
        'std.http': {
            'auth': 'librarian:password123',
            'timeout': 30,
        }
    }
}

EXPECTED_ENV_AUTH = ('librarian', 'password123')

WORKFLOW1 = """
---
version: "2.0"

wf1:
  type: direct

  tasks:
    task1:
      action: std.http url="https://api.library.org/books"
      publish:
        result: <% $ %>
"""

WORKFLOW2 = """
---
version: "2.0"

wf2:
  type: direct

  tasks:
    task1:
      action: std.http url="https://api.library.org/books" timeout=60
      publish:
        result: <% $ %>
"""

WORKFLOW1_WITH_ITEMS = """
---
version: "2.0"

wf1_with_items:
  type: direct

  input:
    - links

  tasks:
    task1:
      with-items: link in <% $.links %>
      action: std.http url=<% $.link %>
      publish:
        result: <% $ %>
"""

WORKFLOW2_WITH_ITEMS = """
---
version: "2.0"

wf2_with_items:
  type: direct

  input:
    - links

  tasks:
    task1:
      with-items: link in <% $.links %>
      action: std.http url=<% $.link %> timeout=60
      publish:
        result: <% $ %>
"""


class ActionDefaultTest(base.EngineTestCase):

    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_action_defaults_from_env(self):
        wf_service.create_workflows(WORKFLOW1)

        wf_ex = self.engine.start_workflow('wf1', None, env=ENV)

        self._await(lambda: self.is_execution_success(wf_ex.id))

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self._assert_single_item(wf_ex.task_executions, name='task1')

        requests.request.assert_called_with(
            'GET', 'https://api.library.org/books',
            params=None, data=None, headers=None, cookies=None,
            allow_redirects=None, proxies=None, verify=None,
            auth=EXPECTED_ENV_AUTH,
            timeout=ENV['__actions']['std.http']['timeout'])

    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    def test_action_defaults_from_env_not_applied(self):
        wf_service.create_workflows(WORKFLOW2)

        wf_ex = self.engine.start_workflow('wf2', None, env=ENV)

        self._await(lambda: self.is_execution_success(wf_ex.id))

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self._assert_single_item(wf_ex.task_executions, name='task1')

        requests.request.assert_called_with(
            'GET', 'https://api.library.org/books',
            params=None, data=None, headers=None, cookies=None,
            allow_redirects=None, proxies=None, verify=None,
            auth=EXPECTED_ENV_AUTH,
            timeout=60
        )

    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    @testtools.skip("Fix 'with-items'.")
    def test_with_items_action_defaults_from_env(self):
        wf_service.create_workflows(WORKFLOW1_WITH_ITEMS)

        wf_input = {
            'links': [
                'https://api.library.org/books',
                'https://api.library.org/authors'
            ]
        }

        wf_ex = self.engine.start_workflow(
            'wf1_with_items',
            wf_input,
            env=ENV
        )

        self._await(lambda: self.is_execution_success(wf_ex.id))

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self._assert_single_item(wf_ex.task_executions, name='task1')

        calls = [mock.call('GET', url, params=None, data=None,
                           headers=None, cookies=None,
                           allow_redirects=None, proxies=None,
                           auth=EXPECTED_ENV_AUTH, verify=None,
                           timeout=ENV['__actions']['std.http']['timeout'])
                 for url in wf_input['links']]

        requests.request.assert_has_calls(calls, any_order=True)

    @mock.patch.object(
        requests, 'request',
        mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
    @mock.patch.object(
        std_actions.HTTPAction, 'is_sync',
        mock.MagicMock(return_value=True))
    @testtools.skip("Fix 'with-items'.")
    def test_with_items_action_defaults_from_env_not_applied(self):
        wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)

        wf_input = {
            'links': [
                'https://api.library.org/books',
                'https://api.library.org/authors'
            ]
        }

        wf_ex = self.engine.start_workflow(
            'wf2_with_items',
            wf_input,
            env=ENV
        )

        self._await(lambda: self.is_execution_success(wf_ex.id))

        wf_ex = db_api.get_workflow_execution(wf_ex.id)

        self.assertEqual(states.SUCCESS, wf_ex.state)
        self._assert_single_item(wf_ex.task_executions, name='task1')

        calls = [mock.call('GET', url, params=None, data=None,
                           headers=None, cookies=None,
                           allow_redirects=None, proxies=None,
                           auth=EXPECTED_ENV_AUTH, verify=None,
                           timeout=60)
                 for url in wf_input['links']]

        requests.request.assert_has_calls(calls, any_order=True)
{ "content_hash": "d8d7cd0e5c005e89faed27f3ad12de13", "timestamp": "", "source": "github", "line_count": 215, "max_line_length": 79, "avg_line_length": 29.427906976744186, "alnum_prop": 0.5868500079026395, "repo_name": "dennybaa/mistral", "id": "5dbe8998aed2c4e617ce7a93f20bdd868cbd058b", "size": "6937", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "mistral/tests/unit/engine/test_action_defaults.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Mako", "bytes": "951" }, { "name": "Python", "bytes": "1037769" }, { "name": "Shell", "bytes": "18657" } ], "symlink_target": "" }
import hashlib
import os
import shutil
import sys
import tarfile
import imp

from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase

SIMDJS_ARCHIVE_REVISION = "07e2713e0c9ea19feb0732d5bd84770c87310d79"
SIMDJS_ARCHIVE_MD5 = "cf6bddf99f18800b68e782054268ee3c"
SIMDJS_URL = (
    "https://github.com/johnmccutchan/ecmascript_simd/archive/%s.tar.gz")

SIMDJS_SUITE_PATH = ["data", "src"]


class SimdJsTestSuite(testsuite.TestSuite):

    def __init__(self, name, root):
        super(SimdJsTestSuite, self).__init__(name, root)
        self.testroot = os.path.join(self.root, *SIMDJS_SUITE_PATH)
        self.ParseTestRecord = None

    def ListTests(self, context):
        tests = [
            testcase.TestCase(self, 'shell_test_runner'),
        ]
        for filename in os.listdir(os.path.join(self.testroot, 'benchmarks')):
            if (not filename.endswith('.js') or
                    filename in ['run.js', 'run_browser.js', 'base.js']):
                continue
            name = filename.rsplit('.')[0]
            tests.append(
                testcase.TestCase(self, 'benchmarks/' + name))
        return tests

    def GetFlagsForTestCase(self, testcase, context):
        return (testcase.flags + context.mode_flags +
                [os.path.join(self.root, "harness-adapt.js"),
                 "--harmony",
                 os.path.join(self.testroot, testcase.path + ".js"),
                 os.path.join(self.root, "harness-finish.js")])

    def GetSourceForTest(self, testcase):
        filename = os.path.join(self.testroot, testcase.path + ".js")
        with open(filename) as f:
            return f.read()

    def IsNegativeTest(self, testcase):
        return False

    def IsFailureOutput(self, output, testpath):
        if output.exit_code != 0:
            return True
        return "FAILED!" in output.stdout

    def DownloadData(self):
        revision = SIMDJS_ARCHIVE_REVISION
        archive_url = SIMDJS_URL % revision
        archive_name = os.path.join(
            self.root, "ecmascript_simd-%s.tar.gz" % revision)
        directory_name = os.path.join(self.root, "data")
        directory_old_name = os.path.join(self.root, "data.old")
        if not os.path.exists(archive_name):
            print "Downloading test data from %s ..." % archive_url
            utils.URLRetrieve(archive_url, archive_name)
            if os.path.exists(directory_name):
                if os.path.exists(directory_old_name):
                    shutil.rmtree(directory_old_name)
                os.rename(directory_name, directory_old_name)

        if not os.path.exists(directory_name):
            print "Extracting ecmascript_simd-%s.tar.gz ..." % revision
            md5 = hashlib.md5()
            with open(archive_name, "rb") as f:
                for chunk in iter(lambda: f.read(8192), ""):
                    md5.update(chunk)
            print "MD5 hash is %s" % md5.hexdigest()
            if md5.hexdigest() != SIMDJS_ARCHIVE_MD5:
                os.remove(archive_name)
                print "MD5 expected %s" % SIMDJS_ARCHIVE_MD5
                raise Exception("MD5 hash mismatch of test data file")
            archive = tarfile.open(archive_name, "r:gz")
            if sys.platform in ("win32", "cygwin"):
                # Magic incantation to allow longer path names on Windows.
                archive.extractall(u"\\\\?\\%s" % self.root)
            else:
                archive.extractall(self.root)
            os.rename(os.path.join(self.root, "ecmascript_simd-%s" % revision),
                      directory_name)


def GetSuite(name, root):
    return SimdJsTestSuite(name, root)
{ "content_hash": "50312ca0c3273b14f5a2ebac33ccacdb", "timestamp": "", "source": "github", "line_count": 96, "max_line_length": 74, "avg_line_length": 35, "alnum_prop": 0.655952380952381, "repo_name": "dawangjiaowolaixunshan/runtime", "id": "c0390afd65ea2f38da8f4bc1191863bcd38bd91e", "size": "3526", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "deps/v8/test/simdjs/testcfg.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Assembly", "bytes": "29659" }, { "name": "C", "bytes": "853" }, { "name": "C++", "bytes": "2139966" }, { "name": "JavaScript", "bytes": "508976" }, { "name": "Python", "bytes": "6276" }, { "name": "Shell", "bytes": "3888" } ], "symlink_target": "" }
from input_algorithms.errors import BadSpec, BadSpecValue
from delfick_error import DelfickError, ProgrammerError


class SalmError(DelfickError):
    pass

# Explicitly make these errors in this context
BadSpec = BadSpec
BadSpecValue = BadSpecValue
ProgrammerError = ProgrammerError


class BadYaml(SalmError):
    desc = "Invalid yaml file"


class BadConfiguration(SalmError):
    desc = "Bad configuration"


class BadOptionFormat(SalmError):
    desc = "Bad option format"


class NoSuchTask(SalmError):
    desc = "No such task"


class NoFunctionsSpecified(SalmError):
    desc = "No functions were found in the configuration"


class NoSuchGroup(SalmError):
    desc = "Couldn't find specified group"


class GroupNotSpecified(SalmError):
    desc = "Please specify a group with --group"
{ "content_hash": "5dce5b795361c356b909b853816e0355", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 57, "avg_line_length": 25.419354838709676, "alnum_prop": 0.766497461928934, "repo_name": "delfick/simple-aws-lambda-maker", "id": "c19b99f02841de5e53f4175a9ea05c6e518faee3", "size": "788", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "simple_aws_lambda_maker/errors.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "28280" } ], "symlink_target": "" }
import os

from enjoycms import create_app, db
from enjoycms.models import EnjoycmsUser
from flask.ext.script import Manager, Shell
from flask.ext.migrate import MigrateCommand  # needed for the 'db' command below

app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)


def make_shell_context():
    return dict(app=app, db=db, EnjoycmsUser=EnjoycmsUser)

manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)


@manager.command
def test():
    """Run the unit tests."""
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)


if __name__ == '__main__':
    manager.run()
{ "content_hash": "5f056b148dd56daa00012f10f76b450a", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 72, "avg_line_length": 28.130434782608695, "alnum_prop": 0.714064914992272, "repo_name": "dev-tao/Enjoy-CMS", "id": "09645ab3a7f826d4df183aff485b4f74c8a8096d", "size": "672", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "manage.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "650710" }, { "name": "HTML", "bytes": "101534" }, { "name": "JavaScript", "bytes": "314964" }, { "name": "Python", "bytes": "19383" } ], "symlink_target": "" }
from django.apps import AppConfig
from django.utils.translation import ugettext, ugettext_lazy


class LocalConfig(AppConfig):
    name = "mmg.jobtrak.public"
    # Translators: Admin Backend - Name of Public app (appears in the header bar)
    verbose_name = ugettext_lazy("Public")
{ "content_hash": "3f5d1aecdb348cbd25d819f2a50558a0", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 81, "avg_line_length": 35.875, "alnum_prop": 0.7456445993031359, "repo_name": "MarconiMediaGroup/JobTrak", "id": "c8a71f2d81b73bfc621e18b3a0ab7156478d44a2", "size": "287", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "web/code/mmg/jobtrak/public/apps.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "52495" }, { "name": "HTML", "bytes": "78825" }, { "name": "JavaScript", "bytes": "153517" }, { "name": "Python", "bytes": "80401" }, { "name": "Shell", "bytes": "1205" } ], "symlink_target": "" }
class Widget(object):
    def __init__(self, width, height):
        super(Widget, self).__init__()
        self.width = width
        self.height = height

    # Draw the widget
    # canvas: curses window object to draw the widget on
    # offset_{x,y}: offset of widget on window
    # min{x,y}, max{x,y}: coordinates of region of widget to redraw;
    #   minima are inclusive, maxima exclusive
    def draw(self, canvas, offsetx, offsety, minx, miny, maxx, maxy):
        pass

    def resize(self, width, height):
        self.width = width
        self.height = height

    def size(self):
        return (self.width, self.height)

    # Return whether we accepted the focus
    def onFocus(self):
        return False

    def offFocus(self):
        pass

    # Return whether the change was successful internally;
    # if not, the widget loses focus WITHOUT a call to offFocus.
    # The default implementation just calls offFocus and then returns False.
    def changeFocus(self):
        self.offFocus()
        return False

    def keyEvent(self, key):
        pass
{ "content_hash": "0b86cd74083f71a500015cd0c6aaf92f", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 71, "avg_line_length": 29.35135135135135, "alnum_prop": 0.6160220994475138, "repo_name": "mrngm/madmin", "id": "541736a065cd5d399085e4742f3a47d3bc169f95", "size": "1086", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "gui_lib/widget.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "184377" }, { "name": "Shell", "bytes": "740" } ], "symlink_target": "" }
from django import db
from django.utils import timezone
from django_rq import job

from metaci.cumulusci.models import ScratchOrgInstance


@job("short")
def prune_orgs():
    """An RQ task to mark expired orgs as deleted.

    We don't need to bother calling delete_org on each expired scratch org;
    we'll trust that the org expires, and just efficiently flip the bits in
    MetaCI so that they don't show up on list views anymore.
    """
    db.connection.close()
    pruning_qs = ScratchOrgInstance.expired.all()
    count = pruning_qs.update(
        deleted=True, time_deleted=timezone.now(), delete_error="Org is expired."
    )
    return f"pruned {count} orgs"
{ "content_hash": "ac80a2ba713154bbbf26c59b5359e0ca", "timestamp": "", "source": "github", "line_count": 21, "max_line_length": 81, "avg_line_length": 32.19047619047619, "alnum_prop": 0.7144970414201184, "repo_name": "SalesforceFoundation/mrbelvedereci", "id": "e969694ffde23f5a9da0d1040bab82e9bca1701e", "size": "676", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "metaci/cumulusci/tasks.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "2069" }, { "name": "HTML", "bytes": "123214" }, { "name": "JavaScript", "bytes": "3993" }, { "name": "Python", "bytes": "245560" }, { "name": "Shell", "bytes": "4590" } ], "symlink_target": "" }
from i3pystatus import IntervalModule
import psutil
import getpass


class MakeWatch(IntervalModule):
    """
    Watches for make jobs and notifies when they are completed.

    requires: psutil
    """

    settings = (
        ("name", "Listen for a job other than 'make' jobs"),
        ("running_color", "Text color while the job is running"),
        ("idle_color", "Text color while the job is not running"),
        "format",
    )

    running_color = "#FF0000"  # red
    idle_color = "#00FF00"  # green
    name = 'make'
    format = "{name}: {status}"

    def run(self):
        status = 'idle'
        for proc in psutil.process_iter():
            cur_proc = proc.as_dict(attrs=['name', 'username'])
            if getpass.getuser() in cur_proc['username']:
                if cur_proc['name'] == self.name:
                    status = proc.as_dict(attrs=['status'])['status']

        if status == 'idle':
            color = self.idle_color
        else:
            color = self.running_color

        cdict = {
            "name": self.name,
            "status": status
        }

        self.output = {
            "full_text": self.format.format(**cdict),
            "color": color
        }
{ "content_hash": "1d9a1680e79cead1f3004497435401ad", "timestamp": "", "source": "github", "line_count": 44, "max_line_length": 69, "avg_line_length": 27.636363636363637, "alnum_prop": 0.5304276315789473, "repo_name": "plumps/i3pystatus", "id": "be3bd0fd4e587e9ef7be05bcefeddd501b127524", "size": "1216", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "i3pystatus/makewatch.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "223202" }, { "name": "Shell", "bytes": "794" } ], "symlink_target": "" }
import os
import io
import re
import json
import math
import tarfile
import zipfile

import flask
import werkzeug.exceptions
from google.protobuf import text_format
try:
    import caffe_pb2
except ImportError:
    # See issue #32
    from caffe.proto import caffe_pb2
import caffe.draw

import digits
from digits.webapp import app, scheduler, autodoc
from digits.utils.routing import request_wants_json
import images.views
import images as model_images

NAMESPACE = '/models/'


@app.route(NAMESPACE + '<job_id>.json', methods=['GET'])
@app.route(NAMESPACE + '<job_id>', methods=['GET'])
@autodoc(['models', 'api'])
def models_show(job_id):
    """
    Show a ModelJob

    Returns JSON when requested:
        {id, name, directory, status, snapshots: [epoch,epoch,...]}
    """
    job = scheduler.get_job(job_id)

    if job is None:
        raise werkzeug.exceptions.NotFound('Job not found')

    if request_wants_json():
        return flask.jsonify(job.json_dict(True))
    else:
        if isinstance(job, model_images.ImageClassificationModelJob):
            return model_images.classification.views.show(job)
        elif isinstance(job, model_images.GenericImageModelJob):
            return model_images.generic.views.show(job)
        else:
            raise werkzeug.exceptions.BadRequest(
                'Invalid job type')


@app.route(NAMESPACE + 'customize', methods=['POST'])
@autodoc('models')
def models_customize():
    """
    Returns a customized file for the ModelJob based on completed form fields
    """
    network = flask.request.args['network']
    if not network:
        raise werkzeug.exceptions.BadRequest('network not provided')

    networks_dir = os.path.join(os.path.dirname(digits.__file__),
                                'standard-networks')
    for filename in os.listdir(networks_dir):
        path = os.path.join(networks_dir, filename)
        if os.path.isfile(path):
            match = re.match(r'%s.prototxt' % network, filename)
            if match:
                with open(path) as infile:
                    return json.dumps({'network': infile.read()})

    job = scheduler.get_job(network)
    if job is None:
        raise werkzeug.exceptions.NotFound('Job not found')

    snapshot = None
    try:
        epoch = int(flask.request.form['snapshot_epoch'])
        for filename, e in job.train_task().snapshots:
            if e == epoch:
                snapshot = job.path(filename)
                break
    except:
        pass

    return json.dumps({
        'network': text_format.MessageToString(job.train_task().network),
        'snapshot': snapshot
    })


@app.route(NAMESPACE + 'visualize-network', methods=['POST'])
@autodoc('models')
def models_visualize_network():
    """
    Returns a visualization of the custom network as a string of PNG data
    """
    net = caffe_pb2.NetParameter()
    text_format.Merge(flask.request.form['custom_network'], net)
    # Throws an error if name is None
    if not net.name:
        net.name = 'Network'
    return ('<image src="data:image/png;base64,' +
            caffe.draw.draw_net(net, 'UD').encode('base64') +
            '" style="max-width:100%" />')


@app.route(NAMESPACE + 'visualize-lr', methods=['POST'])
@autodoc('models')
def models_visualize_lr():
    """
    Returns a JSON object of data used to create the learning rate graph
    """
    policy = flask.request.form['lr_policy']
    lr = float(flask.request.form['learning_rate'])
    if policy == 'fixed':
        pass
    elif policy == 'step':
        step = int(flask.request.form['lr_step_size'])
        gamma = float(flask.request.form['lr_step_gamma'])
    elif policy == 'multistep':
        steps = [float(s) for s in
                 flask.request.form['lr_multistep_values'].split(',')]
        current_step = 0
        gamma = float(flask.request.form['lr_multistep_gamma'])
    elif policy == 'exp':
        gamma = float(flask.request.form['lr_exp_gamma'])
    elif policy == 'inv':
        gamma = float(flask.request.form['lr_inv_gamma'])
        power = float(flask.request.form['lr_inv_power'])
    elif policy == 'poly':
        power = float(flask.request.form['lr_poly_power'])
    elif policy == 'sigmoid':
        step = float(flask.request.form['lr_sigmoid_step'])
        gamma = float(flask.request.form['lr_sigmoid_gamma'])
    else:
        raise werkzeug.exceptions.BadRequest('Invalid policy')

    data = ['Learning Rate']
    for i in xrange(101):
        if policy == 'fixed':
            data.append(lr)
        elif policy == 'step':
            data.append(lr * math.pow(gamma, math.floor(float(i)/step)))
        elif policy == 'multistep':
            if current_step < len(steps) and i >= steps[current_step]:
                current_step += 1
            data.append(lr * math.pow(gamma, current_step))
        elif policy == 'exp':
            data.append(lr * math.pow(gamma, i))
        elif policy == 'inv':
            data.append(lr * math.pow(1.0 + gamma * i, -power))
        elif policy == 'poly':
            data.append(lr * math.pow(1.0 - float(i)/100, power))
        elif policy == 'sigmoid':
            data.append(lr / (1.0 + math.exp(gamma * (i - step))))

    return json.dumps({'data': {'columns': [data]}})


@app.route(NAMESPACE + '<job_id>/download',
           methods=['GET', 'POST'],
           defaults={'extension': 'tar.gz'})
@app.route(NAMESPACE + '<job_id>/download.<extension>',
           methods=['GET', 'POST'])
@autodoc('models')
def models_download(job_id, extension):
    """
    Return a tarball of all files required to run the model
    """
    job = scheduler.get_job(job_id)

    if job is None:
        raise werkzeug.exceptions.NotFound('Job not found')

    epoch = -1
    # GET ?epoch=n
    if 'epoch' in flask.request.args:
        epoch = float(flask.request.args['epoch'])
    # POST ?snapshot_epoch=n (from form)
    elif 'snapshot_epoch' in flask.request.form:
        epoch = float(flask.request.form['snapshot_epoch'])

    task = job.train_task()

    snapshot_filename = None
    if epoch == -1 and len(task.snapshots):
        epoch = task.snapshots[-1][1]
        snapshot_filename = task.snapshots[-1][0]
    else:
        for f, e in task.snapshots:
            if e == epoch:
                snapshot_filename = f
                break
    if not snapshot_filename:
        raise werkzeug.exceptions.BadRequest('Invalid epoch')

    b = io.BytesIO()
    if extension in ['tar', 'tar.gz', 'tgz', 'tar.bz2']:
        # tar file
        mode = ''
        if extension in ['tar.gz', 'tgz']:
            mode = 'gz'
        elif extension in ['tar.bz2']:
            mode = 'bz2'
        with tarfile.open(fileobj=b, mode='w:%s' % mode) as tf:
            for path, name in job.download_files(epoch):
                tf.add(path, arcname=name)
    elif extension in ['zip']:
        with zipfile.ZipFile(b, 'w') as zf:
            for path, name in job.download_files(epoch):
                zf.write(path, arcname=name)
    else:
        raise werkzeug.exceptions.BadRequest('Invalid extension')

    response = flask.make_response(b.getvalue())
    response.headers['Content-Disposition'] = (
        'attachment; filename=%s_epoch_%s.%s' % (job.id(), epoch, extension))
    return response
{ "content_hash": "25b5daf93ad777944d41c700511180f9", "timestamp": "", "source": "github", "line_count": 212, "max_line_length": 129, "avg_line_length": 33.90094339622642, "alnum_prop": 0.6070683177960206, "repo_name": "hycis/DIGITS", "id": "9f1658df872d0ee88ee3e527d77241f2d6312a51", "size": "7257", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "digits/model/views.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "710" }, { "name": "HTML", "bytes": "140312" }, { "name": "JavaScript", "bytes": "103357" }, { "name": "Python", "bytes": "431088" }, { "name": "Shell", "bytes": "1377" } ], "symlink_target": "" }
import logging
from functools import wraps

from tornado.gen import Return
from tornado.web import RequestHandler, HTTPError

try:
    import cPickle as pickle
except ImportError:
    import pickle

try:
    import ujson as json
except ImportError:
    import json


log = logging.getLogger(__name__)


class BaseHandler(RequestHandler):
    _NULL = object()
    THREAD_POOL = None
    STORAGE = None

    @property
    def thread_pool(self):
        return self.THREAD_POOL

    @staticmethod
    def _log_result(function):
        def logger(result):
            exc = result.exception()
            if exc:
                if not isinstance(exc, (Return, HTTPError)):
                    log.exception(exc)
            else:
                log.debug(
                    "Result of function %s is: %r",
                    function.__name__, result.result()
                )

        return logger

    @classmethod
    def threaded(cls, func):
        @wraps(func)
        def wrap(*args, **kwargs):
            f = cls.THREAD_POOL.submit(func, *args, **kwargs)
            f.add_done_callback(cls._log_result(func))
            return f

        return wrap

    def get_secure_cookie(self, name, value=None, max_age_days=31,
                          min_version=None):
        val = super(BaseHandler, self).get_secure_cookie(
            name, None,
            max_age_days=max_age_days,
            min_version=min_version
        )

        if val is None:
            return value

        return pickle.loads(val) if val else val

    def set_secure_cookie(self, name, value, expires_days=30, version=None,
                          **kwargs):
        return super(BaseHandler, self).set_secure_cookie(
            name,
            pickle.dumps(value, protocol=2),
            expires_days=expires_days,
            version=version,
            **kwargs
        )


threaded = BaseHandler.threaded
{ "content_hash": "8fe2549e9d69c1c2e1b1053dedfb2786", "timestamp": "", "source": "github", "line_count": 72, "max_line_length": 86, "avg_line_length": 25.27777777777778, "alnum_prop": 0.5846153846153846, "repo_name": "jgiannuzzi/pypi-server", "id": "fa361bd04d9c16d7f135897d8d1ea0c401160fcc", "size": "1838", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "pypi_server/handlers/base.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "1640" }, { "name": "HTML", "bytes": "13918" }, { "name": "JavaScript", "bytes": "11046" }, { "name": "Python", "bytes": "92740" } ], "symlink_target": "" }
import subprocess
import glob
import os
import io
import time
import copy


def check_queue_length():
    proc = subprocess.Popen(["squeue", "-u", "dgellis"], stdout=subprocess.PIPE)
    return len(list(io.TextIOWrapper(proc.stdout, encoding="utf-8")))


def wait_for_long_queue(sleeping_time=60, limit=1000):
    while check_queue_length() > limit:
        time.sleep(sleeping_time)


def main():
    skulls = glob.glob(
        "/work/aizenberg/dgellis/MICCAI_Implant_2020/training_set/complete_skull/*.nii.gz")
    cases1 = sorted([os.path.basename(s).split(".")[0] for s in skulls])
    cases2 = copy.copy(cases1)
    template = os.path.join(
        "/work/aizenberg/dgellis/MICCAI_Implant_2020/training_set/registrations",
        "augmented_{name}/sub-{case1}_space-{case2}_{name}.nii.gz")
    for i, case1 in enumerate(cases1):
        for case2 in cases2[(i + 1):]:
            outputs_exist = list()
            for name in ("defective_skull", "implant"):
                outputs_exist.append(os.path.exists(
                    template.format(case1=case1, case2=case2, name=name)))
                outputs_exist.append(os.path.exists(
                    template.format(case1=case2, case2=case1, name=name)))
            if not all(outputs_exist):
                wait_for_long_queue()
                print("Submitting:", case1, "to", case2)
                subprocess.call(
                    ["sbatch",
                     "/home/aizenberg/dgellis/fCNN/autoimplant/augmentation_script.sh",
                     case1, case2])
            else:
                print("Outputs already exist:", case1, "to", case2)


if __name__ == "__main__":
    main()
{ "content_hash": "6831ed87056caea4a0d48788262cfc8c", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 116, "avg_line_length": 38.97560975609756, "alnum_prop": 0.6132665832290363, "repo_name": "ellisdg/3DUnetCNN", "id": "1109b2a4a286441f88bc582b956c72e57a19386b", "size": "1598", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/autoimplant2020/augmentation/submit_augmentations.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "493190" } ], "symlink_target": "" }
""" minimal_window_string.py Created by Shengwei on 2014-07-06. """ # https://oj.leetcode.com/problems/minimum-window-substring/ # tags: hard, string, hashtable, minimal, edge cases, clarification (dups) """ Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n). For example, S = "ADOBECODEBANC" T = "ABC" Minimum window is "BANC". Note: If there is no such window in S that covers all characters in T, return the emtpy string "". If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S. """ """ Notes for 3 bugs while implementing: 1. should move left pointer when substring is set other than left > 0; alternatively, set left to -1 in the beginning and later check if it's > -1 2. should check counter == char_count only when counter just changed 3. should decrease counter only when lookup_dict entry is -1, which means it just dissatisfies the minimum requirements The last two are mainly due to the change of concept -- there can be dups in T. """ # TODO: # 1. refactor it using a queue # 2. do not need to store both index and the char class Solution: # @return a string def minWindow(self, S, T): """Think about a windown sliding through S.""" window, counter, char_count = [], 0, len(set(T)) left = right = 0 min_substring = '' # initialize lookup dictionary, with negative count of # each char in T; they are expected to be supplimented # by sliding the window on S lookup_dict = collections.defaultdict(int) for char in T: lookup_dict[char] -= 1 while right < len(S): if window and left > window[0][1]: # drop off the left most one from the window; # the count of such item in the window decreases to_be_removed = window.pop(0)[0] lookup_dict[to_be_removed] -= 1 # IMPORTANT! should check if it's -1 other than < 0 if lookup_dict[to_be_removed] == -1: # no such char is in the window now, decrease count counter -= 1 if S[right] in lookup_dict: # include S[right] in the window window.append((S[right], right)) lookup_dict[S[right]] += 1 if lookup_dict[S[right]] == 0: # requirement for S[right] has been just satisfied; # if there were more than enough S[right], no change counter += 1 # only check this when counter increases if counter == char_count: # all requirements for T have been satisfied; # shrink the window for mimimum substring while lookup_dict[window[0][0]] > 0: # the left most item in the window is # superfluous, drop it lookup_dict[window.pop(0)[0]] -= 1 # shrink the window to the tighter boundary left = window[0][1] # update minimum substring; after all chars in T have # been in the window, the window only shrinks or # at least has no change, so do not need comparison min_substring = S[left:right+1] # slide the window to the right right += 1 # move the left of the window after we've found a substring if min_substring: left += 1 return min_substring
{ "content_hash": "504a01db28462019c184cb86fdcfd52d", "timestamp": "", "source": "github", "line_count": 102, "max_line_length": 124, "avg_line_length": 38.09803921568628, "alnum_prop": 0.5499227997941328, "repo_name": "CodingVault/LeetCodeInPython", "id": "0b30eca73e05d38333630c2a9fecf1c1d67340eb", "size": "3926", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "minimal_window_string.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "313207" } ], "symlink_target": "" }
""" Django settings for my project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '6v7=8vr8*-%fdly)+-n)8q0v!^(1&+0=xz!5e81jb45h8da50n' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'filebrowser', 'app', ) MIDDLEWARE = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ "django.contrib.auth.context_processors.auth", "django.template.context_processors.request", "django.contrib.messages.context_processors.messages", ] }, }, ] DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' ROOT_URLCONF = 'my.urls' WSGI_APPLICATION = 'my.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ MEDIA_URL = '/media/' MEDIA_ROOT = "media" STATIC_URL = '/static/' STATIC_ROOT = "static" TEST_RUNNER = 'django.test.runner.DiscoverRunner' FILEBROWSER_MEDIA_ROOT = MEDIA_ROOT FILEBROWSER_DIRECTORY = '' FILEBROWSER_VERSIONS = { 'fb_thumb': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop'}, 'small_index': {'verbose_name': 'Smaill index', 'width': 80, 'height': 60, 'opts': 'upscale'}, 'small_ix_crop': {'verbose_name': 'Smaill index crop', 'width': 80, 'height': 60, 'opts': 'upscale crop'}, 'index': {'verbose_name': 'Index', 'width': 300, 'height': 225, 'opts': 'upscale'}, '_gal_ix': {'verbose_name': 'Gallery index', 'width': 150, 'height': 113, 'opts': ''}, 'cr_gal_ix': {'verbose_name': 'Gallery index cropped', 'width': 113, 'height': 113, 'opts': 'crop'}, 'fh_gal_ix': {'verbose_name': 'Gallery index full height', 'width': 113, 'height': 113, 'opts': ''}, 'gallery': {'verbose_name': 'Gallery item', 'width': 800, 'height': 800, 'opts': ''}, } FILEBROWSER_ADMIN_VERSIONS = ['small_index', 'small_ix_crop', 'index', 'cr_gal_ix', 'fh_gal_ix', '_gal_ix', 'gallery']
{ "content_hash": "7719cf23459e86f6d506e2a6f9582d66", "timestamp": "", "source": "github", "line_count": 122, "max_line_length": 118, "avg_line_length": 28.934426229508198, "alnum_prop": 0.6609065155807365, "repo_name": "agushuley/gu-django-filebrowser-no-grappelli-test", "id": "384f9dfdbe2cd5c545039b9b89e09c5cbda0520e", "size": "3530", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "my/settings.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "9054" } ], "symlink_target": "" }
from guicore.displayscreen import EventDispatch, NewMouse
from enum import Enum
from queue import Empty
from controlevents import CEvent, TimedGetEvent
import guicore.guiutils as guiutils
import debug
import logsupport
from logsupport import ConsoleWarning
import guicore.screenmgt as screenmgt
import guicore.switcher as switcher
import config
import screens.__screens as screens
import time
from screens import screen, maintscreen
import math
import utils.hw as hw

MouseStates = Enum('MouseStates', 'idle downwait upwait waitinguporlong swallowup')
mousestate = MouseStates.idle
longtaptime = 2
tapcount = 0
lastmeventtime = 0
lastmovetime = 0
mousemoved = False
pos = (0, 0)
motionpos = (0, 0)


def _MoveDist(x, y):
    return math.sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2)


screendiag = _MoveDist((hw.screenheight, hw.screenwidth), (0, 0))

dumptime = 0


def DumpEvent(event):
    global dumptime
    print('Interval: {} Event: {}'.format(event.mtime - dumptime, event))
    dumptime = event.mtime


def MouseDown(event):
    global mousestate, lastmeventtime, tapcount, pos, motionpos, lastmovetime, mousemoved
    DumpEvent(event)
    guiutils.HBEvents.Entry('MouseDown {} @ {}'.format(str(event.pos), event.mtime))
    debug.debugPrint('Touch', 'MouseDown' + str(event.pos) + repr(event))
    # screen touch events; this includes touches to non-sensitive areas of the screen
    screenmgt.SetActivityTimer(config.AS.DimTO, 'Screen touch')
    # refresh non-dimming in all cases including non-sensitive areas;
    # this refresh is redundant in some cases where the touch causes other activities

    if mousestate != MouseStates.idle:
        logsupport.Logs.Log('Initial Mouse Down when not idle: {}'.format(mousestate),
                            severity=ConsoleWarning)
        mousestate = MouseStates.idle

    if screenmgt.DimState() == 'Dim':
        # wake up the screen; if in a cover state go home and swallow the next Up
        config.sysStore.consolestatus = 'active'
        mousestate = MouseStates.swallowup
        if screenmgt.screenstate == 'Cover':
            switcher.SwitchScreen(screens.HomeScreen, 'Bright', 'Wake up from cover',
                                  newstate='Home')
        else:
            screenmgt.Brighten()  # if any other screen just brighten
        return  # wakeup touches are otherwise ignored

    # Screen was not Dim so the touch was meaningful
    pos = event.pos
    motionpos = pos
    mousemoved = False
    tapcount = 1
    mousestate = MouseStates.upwait
    lastmeventtime = time.time()
    lastmovetime = lastmeventtime
    maxtapinterval = config.sysStore.MultiTapTime / 1000
    while True:
        try:
            # print('Mousewait: {}'.format(maxtapinterval - (time.time() - lastmeventtime)))
            eventx = TimedGetEvent(
                maxtapinterval - (time.time() - lastmeventtime))  # if other events intervene this is technically wrong
        except Empty:
            if mousestate == MouseStates.upwait:
                mousestate = MouseStates.waitinguporlong
            elif mousestate == MouseStates.downwait:
                ProcessTap(tapcount, pos)
                mousestate = MouseStates.idle
            else:
                logsupport.Logs.Log('Weird mouse state {} in down timeout'.format(mousestate),
                                    severity=ConsoleWarning)
                mousestate = MouseStates.idle
            return
        # got a mouse event within the multi window; if up, ignore and await the next down
        if eventx.type == CEvent.MouseDown:
            DumpEvent(eventx)
            lastmeventtime = time.time()
            if mousestate == MouseStates.upwait:
                logsupport.Logs.Log('Got Mouse Down while waiting for Up',
                                    severity=ConsoleWarning)
            elif mousestate == MouseStates.downwait:
                # multi tap
                tapcount += 1
                mousestate = MouseStates.upwait
        elif eventx.type == CEvent.MouseUp:
            DumpEvent(eventx)
            lastmeventtime = time.time()
            if mousestate == MouseStates.upwait:
                # just swallow
                mousestate = MouseStates.downwait
            else:
                logsupport.Logs.Log('Got Mouse Up while waiting for Down',
                                    severity=ConsoleWarning)
        elif eventx.type == CEvent.MouseMotion:
            DumpEvent(eventx)
            lastmeventtime = time.time()
            CompressMotion(eventx)
            guiutils.HBEvents.Entry('Mouse Motion: {}'.format(repr(eventx)))
        elif eventx.type == CEvent.MouseIdle:
            DumpEvent(eventx)
        else:
            guiutils.HBEvents.Entry('Defer' + repr(eventx))
            guiutils.Deferrals.append(eventx)  # defer the event until after the clicks are sorted out


def MouseUp(event):
    global mousestate
    DumpEvent(event)
    if mousestate == MouseStates.swallowup:
        pass
    elif mousestate == MouseStates.waitinguporlong:
        if tapcount > 1 or time.time() - lastmeventtime < longtaptime:
            # either a multitap or a short single tap
            print('Got tap {}'.format(tapcount))
            ProcessTap(tapcount, pos)
        else:
            # long tap
            print('Got long')
            uppos = event.pos
            dist = _MoveDist(uppos, pos)
            print('Dn: {} Up: {} Dist: {} Diag:{} Pct: {}'.format(pos, uppos, dist, screendiag,
                                                                  dist / screendiag))
            ProcessTap(-1, pos)
    else:
        logsupport.Logs.Log('Weird mouse state in MouseUp {}'.format(mousestate),
                            severity=ConsoleWarning)
    mousestate = MouseStates.idle
    return


def CompressMotion(event):
    global motionpos, lastmovetime, mousemoved
    if _MoveDist(motionpos, event.pos) > 20 or time.time() - lastmovetime > 1:
        print('Reportmove: {}, dist {} time: {}'.format(motionpos, _MoveDist(motionpos, event.pos),
                                                        time.time() - lastmovetime))
        motionpos = event.pos
        lastmovetime = time.time()
        mousemoved = True
        if config.AS.WatchMotion:
            config.AS.Motion(event.pos)


def MouseMotion(event):
    DumpEvent(event)
    # logsupport.Logs.Log('Mouse motion while in state {} (Event: {})'.format(mousestate, event),
    #                     severity=ConsoleWarning)


def MouseIdle(event):
    DumpEvent(event)
    return


def GoToMaint():
    if screenmgt.screenstate == 'Maint':
        # ignore if already in Maint
        logsupport.Logs.Log('Maintenance taps ignored - already in Maint mode')
        return
    # Go to maintenance
    logsupport.Logs.Log('Entering Console Maintenance')
    screen.PushToScreen(maintscreen.MaintScreen, newstate='Maint', msg='Push to Maint')
    return


def ProcessTap(tapcnt, pos):
    global motionpos
    # print('Process {} {}'.format(tapcnt, pos))
    if tapcnt == 3:
        if screenmgt.screenstate == 'Maint':
            # ignore triple taps if in maintenance mode
            logsupport.Logs.Log('Secondary chain taps ignored - in Maint mode')
            return
        # Switch screen chains
        if screens.HomeScreen != screens.HomeScreen2:
            # only do if there is a real secondary chain
            if screenmgt.Chain == 0:
                screenmgt.Chain = 1
                switcher.SwitchScreen(screens.HomeScreen2, 'Bright', 'Chain switch to secondary',
                                      newstate='NonHome')
            else:
                screenmgt.Chain = 0
                switcher.SwitchScreen(screens.HomeScreen, 'Bright', 'Chain switch to main',
                                      newstate='Home')
        return
    elif 3 < tapcnt < 8:
        GoToMaint()
        return
    elif tapcnt >= 8:
        logsupport.Logs.Log('Runaway {} taps - likely hardware issue'.format(tapcnt),
                            severity=ConsoleWarning, hb=True)
        return

    if mousemoved:
        if _MoveDist(pos, (0, 0)) < 80 and _MoveDist(pos, motionpos) / screendiag > .75:
            print('Maint')
            GoToMaint()
            return
        else:
            print('Not diag {} {} {} {}'.format(_MoveDist(pos, (0, 0)), _MoveDist(pos, motionpos),
                                                pos, motionpos))
            return

    if config.AS.Keys is not None:
        for K in config.AS.Keys.values():
            if K.touched(pos):
                K.Pressed(tapcnt)
                return

    for K in config.AS.NavKeys.values():
        if K.touched(pos):
            K.Proc()
            return


if NewMouse:
    EventDispatch[CEvent.MouseDown] = MouseDown
    EventDispatch[CEvent.MouseUp] = MouseUp
    EventDispatch[CEvent.MouseMotion] = MouseMotion
    EventDispatch[CEvent.MouseIdle] = MouseIdle
{ "content_hash": "fa7e450d44088d20b61659dee90a1268", "timestamp": "", "source": "github", "line_count": 231, "max_line_length": 118, "avg_line_length": 32.06060606060606, "alnum_prop": 0.724007561436673, "repo_name": "kevinkahn/softconsole", "id": "ab515601f3f96a56544d5189394c4e319f0d0515", "size": "7406", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "FuturesEtc/mouseeventsinitial.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Euphoria", "bytes": "267" }, { "name": "Python", "bytes": "839903" }, { "name": "Shell", "bytes": "101927" } ], "symlink_target": "" }
import ctypes
import socket
import sys

if sys.platform == 'win32':
    import wmi

from nova.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class HostUtils(object):
    def __init__(self):
        if sys.platform == 'win32':
            self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2')

    def get_cpus_info(self):
        cpus = self._conn_cimv2.query("SELECT * FROM Win32_Processor "
                                      "WHERE ProcessorType = 3")
        cpus_list = []
        for cpu in cpus:
            cpu_info = {'Architecture': cpu.Architecture,
                        'Name': cpu.Name,
                        'Manufacturer': cpu.Manufacturer,
                        'NumberOfCores': cpu.NumberOfCores,
                        'NumberOfLogicalProcessors':
                            cpu.NumberOfLogicalProcessors}
            cpus_list.append(cpu_info)
        return cpus_list

    def is_cpu_feature_present(self, feature_key):
        return ctypes.windll.kernel32.IsProcessorFeaturePresent(feature_key)

    def get_memory_info(self):
        """Returns a tuple with total visible memory and free physical
        memory, expressed in kB.
        """
        mem_info = self._conn_cimv2.query("SELECT TotalVisibleMemorySize, "
                                          "FreePhysicalMemory "
                                          "FROM win32_operatingsystem")[0]
        return (long(mem_info.TotalVisibleMemorySize),
                long(mem_info.FreePhysicalMemory))

    def get_volume_info(self, drive):
        """Returns a tuple with total size and free space,
        expressed in bytes.
        """
        logical_disk = self._conn_cimv2.query("SELECT Size, FreeSpace "
                                              "FROM win32_logicaldisk "
                                              "WHERE DeviceID='%s'"
                                              % drive)[0]
        return (long(logical_disk.Size), long(logical_disk.FreeSpace))

    def get_windows_version(self):
        return self._conn_cimv2.Win32_OperatingSystem()[0].Version

    def get_local_ips(self):
        addr_info = socket.getaddrinfo(socket.gethostname(), None, 0, 0, 0)
        # Returns IPv4 and IPv6 addresses, ordered by protocol family
        addr_info.sort()
        return [a[4][0] for a in addr_info]
{ "content_hash": "c4d71f91e441b49a130b1e3b134d8ee4", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 76, "avg_line_length": 36.953125, "alnum_prop": 0.545031712473573, "repo_name": "maheshp/novatest", "id": "d28ce75a5e04c55fef92405f4853ab479c93fb7e", "size": "3049", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "nova/virt/hyperv/hostutils.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "JavaScript", "bytes": "7403" }, { "name": "Python", "bytes": "8947329" }, { "name": "Shell", "bytes": "17067" } ], "symlink_target": "" }
import unittest

from svtplay_dl.fetcher.dash import DASH
from svtplay_dl.fetcher.hls import HLS
from svtplay_dl.fetcher.http import HTTP
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils.parser import setup_defaults
from svtplay_dl.utils.stream import audio_role
from svtplay_dl.utils.stream import format_prio
from svtplay_dl.utils.stream import language_prio
from svtplay_dl.utils.stream import sort_quality
from svtplay_dl.utils.stream import subtitle_filter


class streamTest_sort(unittest.TestCase):
    def test_sort(self):
        data = [
            DASH(setup_defaults(), "http://example.com", 3000, None),
            HLS(setup_defaults(), "http://example.com", 2000, None),
            HTTP(setup_defaults(), "http://example.com", 3001, None),
        ]
        assert all(
            [
                a[0] == b.bitrate
                for a, b in zip(
                    sort_quality(data),
                    [
                        HTTP(setup_defaults(), "http://example.com", 3001, None),
                        DASH(setup_defaults(), "http://example.com", 3000, None),
                        HLS(setup_defaults(), "http://example.com", 2000, None),
                    ],
                )
            ],
        )


class streamTestLanguage(unittest.TestCase):
    def test_language_prio(self):
        config = setup_defaults()
        test_streams = [
            DASH(setup_defaults(), "http://example.com", 3000, None),
            DASH(setup_defaults(), "http://example.com", 3001, None),
            DASH(setup_defaults(), "http://example.com", 3002, None),
        ]
        streams = language_prio(config, test_streams)
        assert len(streams) == 3

    def test_language_prio_select(self):
        config = setup_defaults()
        config.set("audio_language", "en")
        test_streams = [
            DASH(setup_defaults(), "http://example.com", 3000, None, language="en"),
            DASH(setup_defaults(), "http://example.com", 3001, None),
            DASH(setup_defaults(), "http://example.com", 3002, None, language="sv"),
        ]
        streams = language_prio(config, test_streams)
        assert len(streams) == 1


class streamTestFormat(unittest.TestCase):
    def test_language_prio(self):
        test_streams = [
            DASH(setup_defaults(), "http://example.com", 3000, None),
            DASH(setup_defaults(), "http://example.com", 3001, None, channels="51"),
            DASH(setup_defaults(), "http://example.com", 3002, None),
        ]
        streams = format_prio(test_streams, ["h264-51"])
        assert len(streams) == 1

    def test_language_prio2(self):
        test_streams = [
            DASH(setup_defaults(), "http://example.com", 3000, None),
            DASH(setup_defaults(), "http://example.com", 3001, None, channels="51"),
            DASH(setup_defaults(), "http://example.com", 3001, None, codec="h264", channels="51"),
            DASH(setup_defaults(), "http://example.com", 3002, None),
        ]
        streams = format_prio(test_streams, ["h264"])
        assert len(streams) == 2

    def test_language_prio3(self):
        test_streams = [
            DASH(setup_defaults(), "http://example.com", 3000, None),
            DASH(setup_defaults(), "http://example.com", 3001, None, channels="51"),
            DASH(setup_defaults(), "http://example.com", 3002, None),
        ]
        streams = format_prio(test_streams, ["h26e4"])
        assert len(streams) == 0


class streamTestRole(unittest.TestCase):
    def test_language_prio(self):
        test_streams = [
            DASH(setup_defaults(), "http://example.com", 3000, None),
            DASH(setup_defaults(), "http://example.com", 3001, None),
            DASH(setup_defaults(), "http://example.com", 3002, None),
        ]
        streams = audio_role(setup_defaults(), test_streams)
        assert len(streams) == 3

    def test_language_prio2(self):
        test_streams = [
            DASH(setup_defaults(), "http://example.com", 3000, None),
            DASH(setup_defaults(), "http://example.com", 3001, None, role="x-sv"),
            DASH(setup_defaults(), "http://example.com", 3002, None),
        ]
        config = setup_defaults()
        config.set("audio_role", "x-sv")
        streams = audio_role(config, test_streams)
        assert len(streams) == 1

    def test_language_prio3(self):
        test_streams = [
            DASH(setup_defaults(), "http://example.com", 3000, None),
            DASH(setup_defaults(), "http://example.com", 3001, None, role="x-sv"),
            DASH(setup_defaults(), "http://example.com", 3002, None),
        ]
        config = setup_defaults()
        config.set("audio_role", "sv")
        streams = audio_role(config, test_streams)
        assert len(streams) == 0

    def test_language_prio4(self):
        test_streams = [
            DASH(setup_defaults(), "http://example.com", 3000, None),
            DASH(setup_defaults(), "http://example.com", 3001, None, role="x-sv"),
            DASH(setup_defaults(), "http://example.com", 3002, None),
        ]
        config = setup_defaults()
        config.set("audio_language", "sv")
        streams = audio_role(config, test_streams)
        assert len(streams) == 3

    def test_language_prio5(self):
        test_streams = [
            DASH(setup_defaults(), "http://example.com", 3000, None),
            DASH(setup_defaults(), "http://example.com", 3001, None, role="x-sv"),
            DASH(setup_defaults(), "http://example.com", 3002, None),
        ]
        config = setup_defaults()
        config.set("audio_role", "isii")
        config.set("audio_language", "sv")
        streams = audio_role(config, test_streams)
        assert len(streams) == 0


class streamSubtile(unittest.TestCase):
    def test_subtitleFilter(self):
        test_subs = [
            subtitle(setup_defaults(), "wrst", "http://example.com"),
            subtitle(setup_defaults(), "wrst", "http://example.com", "sv"),
            subtitle(setup_defaults(), "wrst", "http://example.com", "dk"),
            subtitle(setup_defaults(), "wrst", "http://example.com", "sv"),
        ]
        subs = subtitle_filter(test_subs)
        assert len(subs) == 3

    def test_subtitleFilter2(self):
        config = setup_defaults()
        config.set("get_all_subtitles", True)
        test_subs = [
            subtitle(config, "wrst", "http://example.com"),
            subtitle(config, "wrst", "http://example.com", subfix="sv"),
            subtitle(config, "wrst", "http://example.com", subfix="dk"),
            subtitle(config, "wrst", "http://example.com", subfix="no"),
        ]
        subs = subtitle_filter(test_subs)
        assert len(subs) == 4

    def test_subtitleFilter3(self):
        config = setup_defaults()
        config.set("subtitle_preferred", "sv")
        test_subs = [
            subtitle(config, "wrst", "http://example.com"),
            subtitle(config, "wrst", "http://example.com", subfix="sv"),
            subtitle(config, "wrst", "http://example.com", subfix="dk"),
            subtitle(config, "wrst", "http://example.com", subfix="no"),
        ]
        subs = subtitle_filter(test_subs)
        assert len(subs) == 1

    def test_subtitleFilter4(self):
        config = setup_defaults()
        config.set("subtitle_preferred", "gr")
        test_subs = [
            subtitle(config, "wrst", "http://example.com"),
            subtitle(config, "wrst", "http://example.com", subfix="sv"),
            subtitle(config, "wrst", "http://example.com", subfix="dk"),
            subtitle(config, "wrst", "http://example.com", subfix="no"),
        ]
        subs = subtitle_filter(test_subs)
        assert len(subs) == 0

    def test_subtitleFilter5(self):
        config = setup_defaults()
        config.set("get_all_subtitles", True)
        test_subs = [
            subtitle(config, "wrst", "http://example.com"),
            subtitle(config, "wrst", "http://example.com", subfix="sv"),
            subtitle(config, "wrst", "http://example.com", subfix="sv"),
            subtitle(config, "wrst", "http://example.com", subfix="no"),
        ]
        subs = subtitle_filter(test_subs)
        assert len(subs) == 3
{ "content_hash": "e97ba5e617a2f2d48e0b795ee076fa5a", "timestamp": "", "source": "github", "line_count": 203, "max_line_length": 98, "avg_line_length": 40.54187192118226, "alnum_prop": 0.5613608748481167, "repo_name": "spaam/svtplay-dl", "id": "61525bffa2cbfd9dc11e21dd10bec91d84891557", "size": "8230", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lib/svtplay_dl/tests/test_stream.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "372" }, { "name": "Makefile", "bytes": "2958" }, { "name": "Python", "bytes": "446163" }, { "name": "Shell", "bytes": "2423" } ], "symlink_target": "" }
import sys
import os

import rlglue.network.Network as Network
from ClientEnvironment import ClientEnvironment
# BEGIN: change made by: Akshay Narayan (05-01-2015:2243)
from types import Reward
# END: change made by: Akshay Narayan (05-01-2015:2243)
from rlglue.versions import get_svn_codec_version
from rlglue.versions import get_codec_version


def loadEnvironment(theEnvironment):
    theSVNVersion = get_svn_codec_version()
    theCodecVersion = get_codec_version()
    client = ClientEnvironment(theEnvironment)
    host = Network.kLocalHost
    port = Network.kDefaultPort

    hostString = os.getenv("RLGLUE_HOST")
    portString = os.getenv("RLGLUE_PORT")

    if (hostString != None):
        host = hostString

    try:
        port = int(portString)
    except TypeError:
        port = Network.kDefaultPort

    print("RL-Glue Python Environment Codec Version: " + theCodecVersion + " (Build " + theSVNVersion + ")")
    # BEGIN: change made by: Akshay Narayan (05-01-2015:2244)
    # str() guards against a non-string return value from getNumRewards();
    # concatenating an int here would raise a TypeError.
    print("Number of objectives/rewards: " + str(Reward.getNumRewards()))
    # END: change made by: Akshay Narayan (05-01-2015:2244)
    print("\tConnecting to " + host + " on port " + str(port) + "...")
    sys.stdout.flush()

    client.connect(host, port, Network.kRetryTimeout)
    print("\t Environment Codec Connected")
    client.runEnvironmentEventLoop()
    client.close()


def loadEnvironmentLikeScript():
    # Assumes you've already done the checking that the number of args and such is good
    envModule = __import__(sys.argv[1])
    envClass = getattr(envModule, sys.argv[1])
    env = envClass()
    loadEnvironment(env)
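A short usage sketch may help here; the environment name `SampleMinesEnvironment` and the port value are hypothetical, and a running RL-Glue server is assumed. It only illustrates how `loadEnvironmentLikeScript` resolves the class named on the command line (module name must equal class name) and which environment variables the loader reads:

```python
import os
import sys

# Same environment variables loadEnvironment() reads; values are placeholders.
os.environ.setdefault("RLGLUE_HOST", "127.0.0.1")
os.environ.setdefault("RLGLUE_PORT", "4096")

# loadEnvironmentLikeScript() imports the module named in sys.argv[1] and
# instantiates the class of the same name, so "SampleMinesEnvironment" assumes
# an importable SampleMinesEnvironment.py defining class SampleMinesEnvironment.
sys.argv = ["EnvironmentLoader.py", "SampleMinesEnvironment"]
loadEnvironmentLikeScript()  # connects to the RL-Glue server and runs the event loop
```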
{ "content_hash": "76859ccfc155b0b7dcf2afc95f39f1bc", "timestamp": "", "source": "github", "line_count": 50, "max_line_length": 98, "avg_line_length": 30.44, "alnum_prop": 0.7496714848883048, "repo_name": "okkhoy/mo-rlglue-python-codec", "id": "bfe352305a9ba1e847faff55914d6b4680dc4149", "size": "2385", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "rlglue/environment/EnvironmentLoader.py", "mode": "33261", "license": "mit", "language": [ { "name": "Python", "bytes": "108475" } ], "symlink_target": "" }
from traitsui.view import *
{ "content_hash": "d5d68f0b2a8b86b51af92c074eeb5c4c", "timestamp": "", "source": "github", "line_count": 1, "max_line_length": 27, "avg_line_length": 28, "alnum_prop": 0.7857142857142857, "repo_name": "enthought/etsproxy", "id": "63f81c111b4be00be4db9d878f0c59fa959f9c14", "size": "43", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "enthought/traits/ui/view.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "363714" } ], "symlink_target": "" }
"""Tests for plaso.output.pstorage.""" import os import unittest from plaso.lib import pfilter from plaso.lib import storage from plaso.output import interface from plaso.output import pstorage from tests import test_lib as shared_test_lib from tests.output import test_lib class PstorageTest(test_lib.OutputModuleTestCase): """Tests for the plaso storage outputter.""" def setUp(self): """Sets up the objects needed for this test.""" self.test_filename = os.path.join(u'test_data', u'psort_test.proto.plaso') # Show full diff results, part of TestCase so does not follow our naming # conventions. pfilter.TimeRangeCache.ResetTimeConstraints() def testOutput(self): with shared_test_lib.TempDirectory() as dirname: storage_file = os.path.join(dirname, u'plaso.plaso') # Copy events to pstorage dump. with storage.StorageFile(self.test_filename, read_only=True) as store: output_mediator = self._CreateOutputMediator(storage_object=store) output_module = pstorage.PlasoStorageOutputModule(output_mediator) output_module.SetFilePath(storage_file) with interface.EventBuffer( output_module, check_dedups=False) as output_buffer: event_object = store.GetSortedEntry() while event_object: output_buffer.Append(event_object) event_object = store.GetSortedEntry() # Make sure original and dump have the same events. original = storage.StorageFile(self.test_filename, read_only=True) dump = storage.StorageFile(storage_file, read_only=True) event_object_original = original.GetSortedEntry() event_object_dump = dump.GetSortedEntry() original_list = [] dump_list = [] while event_object_original: original_list.append(event_object_original.EqualityString()) dump_list.append(event_object_dump.EqualityString()) event_object_original = original.GetSortedEntry() event_object_dump = dump.GetSortedEntry() self.assertFalse(event_object_dump) for original_str, dump_str in zip( sorted(original_list), sorted(dump_list)): self.assertEqual(original_str, dump_str) if __name__ == '__main__': unittest.main()
{ "content_hash": "66d507a5c8b6fb4974d007a458a1a5e4", "timestamp": "", "source": "github", "line_count": 64, "max_line_length": 78, "avg_line_length": 35.234375, "alnum_prop": 0.6966740576496674, "repo_name": "8u1a/plaso", "id": "75e94f3819844ec6c715ae5a2e261358d7be85dd", "size": "2297", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/output/pstorage.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1276" }, { "name": "Makefile", "bytes": "1151" }, { "name": "Protocol Buffer", "bytes": "13930" }, { "name": "Python", "bytes": "3179107" }, { "name": "Shell", "bytes": "47305" } ], "symlink_target": "" }
import unittest

from _fields_basic_helpers import create_simple_method
from jsrn import validators
from jsrn.exceptions import ValidationError


VALIDATOR_TESTS = [
    (validators.MaxValueValidator(10), 10, None),
    (validators.MaxValueValidator(10), 1, None),
    (validators.MaxValueValidator(10), 11, ValidationError),
    (validators.MinValueValidator(10), 10, None),
    (validators.MinValueValidator(10), 1, ValidationError),
    (validators.MinValueValidator(10), 11, None),
    (validators.MaxLengthValidator(10), "1234567890", None),
    (validators.MaxLengthValidator(10), "12345", None),
    (validators.MaxLengthValidator(10), "12345678901", ValidationError),
    (validators.MinLengthValidator(10), "1234567890", None),
    (validators.MinLengthValidator(10), "12345", ValidationError),
    (validators.MinLengthValidator(10), "12345678901", None),
]


class ValidatorTestCase(unittest.TestCase):
    pass


for idx, (field, value, expected) in enumerate(VALIDATOR_TESTS):
    name, method = create_simple_method(field, None, value, expected, idx)
    setattr(ValidatorTestCase, name, method)
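The `_fields_basic_helpers` module is not shown here, so the following is only a hypothetical stand-in for `create_simple_method`, sketched to show the dynamic-test-generation pattern the loop above relies on (it assumes jsrn validators raise when called with an invalid value, Django-style):

```python
def create_simple_method(validator, _field, value, expected, idx):
    # Build one test method per (validator, value, expected) row; setattr on
    # the TestCase below makes unittest discover each one individually.
    def method(self):
        if expected is None:
            validator(value)  # should not raise
        else:
            self.assertRaises(expected, validator, value)
    name = 'test_validator_%02d' % idx
    method.__name__ = name
    return name, method
```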
{ "content_hash": "11dc166fc7140915ece434682b1ef47e", "timestamp": "", "source": "github", "line_count": 31, "max_line_length": 74, "avg_line_length": 35.83870967741935, "alnum_prop": 0.7407740774077408, "repo_name": "timsavage/jsrn", "id": "be339c683131495d40da64284a0e37544f22d56d", "size": "1135", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/validators.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "65600" }, { "name": "Shell", "bytes": "6820" } ], "symlink_target": "" }
import unittest

import lxml.etree as etree
import subprocess

from tests.common import validate, URL


class DescribeTest(unittest.TestCase):

    def setUp(self):
        self.url = URL + '?service=wps&request=describeprocess&version=1.0.0&identifier=all'
        self.schema_url = 'http://schemas.opengis.net/wps/1.0.0/wpsDescribeProcess_response.xsd'

    def test_valid(self):
        assert validate(self.url, self.schema_url)


def load_tests(loader=None, tests=None, pattern=None):
    if not loader:
        loader = unittest.TestLoader()
    suite_list = [
        loader.loadTestsFromTestCase(DescribeTest),
    ]
    return unittest.TestSuite(suite_list)
{ "content_hash": "ac086cd98a1adaa6107ddde5248c91bb", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 96, "avg_line_length": 28.91304347826087, "alnum_prop": 0.706766917293233, "repo_name": "PyWPS/pywps-4-demo", "id": "a6fc256adba866fdd7e51a04871998e28c5d2799", "size": "665", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "tests/test_describe.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "1228" }, { "name": "Python", "bytes": "39526" } ], "symlink_target": "" }
""" TCIA CTP preparation utilities. """ import sys import os import re from .ctp_config import ctp_collection_for_name from ..helpers.logging import logger from .staging_error import StagingError # OHSU - this is OHSU-specific. # TODO - make the format a config item. PROP_FMT = 'QIN-%s-OHSU.ID-LOOKUP.properties' """ The format for the Patient ID map file name specified by CTP. """ def map_ctp(collection, *subjects, **opts): """ Creates the TCIA patient id map. The map is written to a property file in the destination directory. The property file name is given by :meth:`property_filename`. :param collection: the image collection :param subjects: the subject names :param opts: the following keyword option: :keyword dest: the destination directory :return: the subject map file path """ # Make the CTP id map. ctp_map = CTPPatientIdMap() ctp_map.add_subjects(collection, *subjects) # Write the id map property file. dest = opts.get('dest') if dest: dest = os.path.abspath(dest) if not os.path.exists(dest): os.makedirs(dest) else: dest = os.getcwd() logger(__name__).debug("Creating the TCIA subject map in %s..." % dest) out_file = os.path.join(dest, property_filename(collection)) output = open(out_file, 'w') ctp_map.write(output) output.close() logger(__name__).debug("Created the TCIA subject map %s." % out_file) return out_file def property_filename(collection): """ Returns the CTP id map property file name for the given collection. The Sarcoma collection is capitalized in the file name, Breast is not. """ if collection == 'Sarcoma': return PROP_FMT % collection.upper() else: return PROP_FMT % collection class CTPPatientIdMap(dict): """ CTPPatientIdMap is a dictionary augmented with a :meth:`map_subjects` input method to build the map and a :meth:`write` output method to print the CTP map properties. """ SOURCE_PAT = re.compile(""" ([a-zA-Z]+) # The study name _? # An optional underscore delimiter (\d+)$ # The patient number """, re.VERBOSE) """ The input Patient ID pattern is the study name followed by a number, e.g. ``Breast10``. """ CTP_FMT = '%s-%04d' """ The CTP Patient ID format with arguments (CTP collection name, input Patient ID number). """ MAP_FMT = 'ptid/%s=%s' """ The ID lookup entry format with arguments (input Paitent ID, CTP patient id). """ MSG_FMT = 'Mapped the QIN patient id %s to the CTP subject id %s.' """ The log message format with arguments (input Paitent ID, CTP patient id). """ def add_subjects(self, collection, *patient_ids): """ Adds the input => CTP Patient ID association for the given input DICOM patient ids. :param collection: the image collection name :param patient_ids: the DICOM Patient IDs to map :raise StagingError: if an input patient id format is not the study followed by the patient number """ ctp_coll = ctp_collection_for_name(collection) for in_pt_id in patient_ids: match = CTPPatientIdMap.SOURCE_PAT.match(in_pt_id) if not match: raise StagingError("Unsupported input QIN patient id format:" " %s" % in_pt_id) pt_nbr = int(match.group(2)) ctp_id = CTPPatientIdMap.CTP_FMT % (ctp_coll, pt_nbr) self[in_pt_id] = ctp_id logger(__name__).debug( CTPPatientIdMap.MSG_FMT % (in_pt_id, ctp_id)) def write(self, dest=sys.stdout): """ Writes this id map in the standard CTP format. :param dest: the IO stream on which to write this map (default stdout) """ for qin_id in sorted(self.iterkeys()): print >> dest, CTPPatientIdMap.MAP_FMT % (qin_id, self[qin_id])
{ "content_hash": "b723a77dcde734e7b01c6bb9c007f077", "timestamp": "", "source": "github", "line_count": 127, "max_line_length": 78, "avg_line_length": 32.32283464566929, "alnum_prop": 0.6107186358099879, "repo_name": "ohsu-qin/qipipe", "id": "7272a100f05e7080790b8a67f3b8b3cbb41b5067", "size": "4105", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "qipipe/staging/map_ctp.py", "mode": "33261", "license": "bsd-2-clause", "language": [ { "name": "Python", "bytes": "528376" } ], "symlink_target": "" }
from twisted.internet import defer

from synapse.api.errors import SynapseError, Codes
from synapse.push import PusherConfigException
from .base import ClientV1RestServlet, client_path_pattern

import simplejson as json


class PusherRestServlet(ClientV1RestServlet):
    PATTERN = client_path_pattern("/pushers/set$")

    @defer.inlineCallbacks
    def on_POST(self, request):
        user, client = yield self.auth.get_user_by_req(request)

        content = _parse_json(request)

        pusher_pool = self.hs.get_pusherpool()

        if ('pushkey' in content and 'app_id' in content
                and 'kind' in content and
                content['kind'] is None):
            yield pusher_pool.remove_pusher(
                content['app_id'], content['pushkey'], user_name=user.to_string()
            )
            defer.returnValue((200, {}))

        reqd = ['profile_tag', 'kind', 'app_id', 'app_display_name',
                'device_display_name', 'pushkey', 'lang', 'data']
        missing = []
        for i in reqd:
            if i not in content:
                missing.append(i)
        if len(missing):
            raise SynapseError(400, "Missing parameters: " + ','.join(missing),
                               errcode=Codes.MISSING_PARAM)

        append = False
        if 'append' in content:
            append = content['append']

        if not append:
            yield pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
                app_id=content['app_id'],
                pushkey=content['pushkey'],
                not_user_id=user.to_string()
            )

        try:
            yield pusher_pool.add_pusher(
                user_name=user.to_string(),
                access_token=client.token_id,
                profile_tag=content['profile_tag'],
                kind=content['kind'],
                app_id=content['app_id'],
                app_display_name=content['app_display_name'],
                device_display_name=content['device_display_name'],
                pushkey=content['pushkey'],
                lang=content['lang'],
                data=content['data']
            )
        except PusherConfigException as pce:
            raise SynapseError(400, "Config Error: " + pce.message,
                               errcode=Codes.MISSING_PARAM)

        defer.returnValue((200, {}))

    def on_OPTIONS(self, _):
        return 200, {}


# XXX: C+ped from rest/room.py - surely this should be common?
def _parse_json(request):
    try:
        content = json.loads(request.content.read())
        if type(content) != dict:
            raise SynapseError(400, "Content must be a JSON object.",
                               errcode=Codes.NOT_JSON)
        return content
    except ValueError:
        raise SynapseError(400, "Content not JSON.",
                           errcode=Codes.NOT_JSON)


def register_servlets(hs, http_server):
    PusherRestServlet(hs).register(http_server)
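For reference, here is a sketch of a POST body that satisfies the handler's checks above. The required keys come straight from the `reqd` list in the code; the app_id, pushkey, and URL values are made up for illustration:

```python
# Sketch of a /pushers/set request body; field values are placeholders.
# Sending "kind": None instead would hit the early remove_pusher branch.
body = {
    "profile_tag": "",
    "kind": "http",
    "app_id": "org.example.app",
    "app_display_name": "Example App",
    "device_display_name": "Alice's Phone",
    "pushkey": "abc123",
    "lang": "en",
    "data": {"url": "https://push.example.com/notify"},
    "append": False,  # optional; False also prunes other users' pushers with this key
}
```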
{ "content_hash": "e95ac20ddda90e0ae50b9aae80bfceeb", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 81, "avg_line_length": 34.116279069767444, "alnum_prop": 0.5644171779141104, "repo_name": "howethomas/synapse", "id": "c83287c028d5956142972a77f5f8a89e040405f4", "size": "3537", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "synapse/rest/client/v1/pusher.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "1020" }, { "name": "HTML", "bytes": "1223" }, { "name": "JavaScript", "bytes": "172643" }, { "name": "Perl", "bytes": "31842" }, { "name": "Python", "bytes": "1571632" }, { "name": "Shell", "bytes": "3281" } ], "symlink_target": "" }
import pytest

from polygraphy.tools.args import ModelArgs, TfLoadArgs

from tests.models.meta import TF_MODELS
from tests.tools.args.helper import ArgGroupTestHelper

tf = pytest.importorskip("tensorflow")


class TestTfLoaderArgs:
    def test_load_graph(self):
        arg_group = ArgGroupTestHelper(TfLoadArgs(), deps=[ModelArgs()])
        arg_group.parse_args([TF_MODELS["identity"].path, "--model-type=frozen"])
        graph, outputs = arg_group.load_graph()

        assert isinstance(graph, tf.Graph)
        assert outputs == ["Identity_2:0"]
{ "content_hash": "12bd971937efa9d9e461d70a61fd0ed3", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 81, "avg_line_length": 34.5, "alnum_prop": 0.7119565217391305, "repo_name": "NVIDIA/TensorRT", "id": "dd2a4c2b5c11a3b15cd456d4146847a1ca3de872", "size": "1241", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "tools/Polygraphy/tests/tools/args/backend/tf/test_loader.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "804" }, { "name": "C", "bytes": "26267" }, { "name": "C++", "bytes": "174835683" }, { "name": "CMake", "bytes": "73882" }, { "name": "Cuda", "bytes": "713094" }, { "name": "Dockerfile", "bytes": "21378" }, { "name": "HTML", "bytes": "266" }, { "name": "Jupyter Notebook", "bytes": "2284036" }, { "name": "Makefile", "bytes": "9128" }, { "name": "PowerShell", "bytes": "162" }, { "name": "PureBasic", "bytes": "388" }, { "name": "Python", "bytes": "2541976" }, { "name": "Shell", "bytes": "20007" } ], "symlink_target": "" }
import datetime
import tempfile

from django.db import models
# Can't import as "forms" due to implementation details in the test suite (the
# current file is called "forms" and is already imported).
from django import forms as django_forms
from django.core.files.storage import FileSystemStorage

temp_storage_location = tempfile.mkdtemp()
temp_storage = FileSystemStorage(location=temp_storage_location)


class BoundaryModel(models.Model):
    positive_integer = models.PositiveIntegerField(null=True, blank=True)


class Defaults(models.Model):
    name = models.CharField(max_length=256, default='class default value')
    def_date = models.DateField(default=datetime.date(1980, 1, 1))
    value = models.IntegerField(default=42)


class ChoiceModel(models.Model):
    """For ModelChoiceField and ModelMultipleChoiceField tests."""
    name = models.CharField(max_length=10)


class FileModel(models.Model):
    file = models.FileField(storage=temp_storage, upload_to='tests')


class FileForm(django_forms.Form):
    file1 = django_forms.FileField()


__test__ = {'API_TESTS': """
>>> from django.forms.models import ModelForm
>>> from django.core.files.uploadedfile import SimpleUploadedFile

# FileModel with unicode filename and data #########################

>>> f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह')}, auto_id=False)
>>> f.is_valid()
True
>>> f.cleaned_data
{'file1': <SimpleUploadedFile: 我隻氣墊船裝滿晒鱔.txt (text/plain)>}
>>> m = FileModel.objects.create(file=f.cleaned_data['file1'])

# Boundary conditions on a PositiveIntegerField #########################

>>> class BoundaryForm(ModelForm):
...     class Meta:
...         model = BoundaryModel

>>> f = BoundaryForm({'positive_integer': 100})
>>> f.is_valid()
True
>>> f = BoundaryForm({'positive_integer': 0})
>>> f.is_valid()
True
>>> f = BoundaryForm({'positive_integer': -100})
>>> f.is_valid()
False

# Formfield initial values ########

If the model has default values for some fields, they are used as the
formfield initial values.

>>> class DefaultsForm(ModelForm):
...     class Meta:
...         model = Defaults

>>> DefaultsForm().fields['name'].initial
u'class default value'
>>> DefaultsForm().fields['def_date'].initial
datetime.date(1980, 1, 1)
>>> DefaultsForm().fields['value'].initial
42

In a ModelForm that is passed an instance, the initial values come from the
instance's values, not the model's defaults.

>>> foo_instance = Defaults(name=u'instance value', def_date=datetime.date(1969, 4, 4), value=12)
>>> instance_form = DefaultsForm(instance=foo_instance)
>>> instance_form.initial['name']
u'instance value'
>>> instance_form.initial['def_date']
datetime.date(1969, 4, 4)
>>> instance_form.initial['value']
12
"""}
{ "content_hash": "291976cbe0b49b22fa0110fbaa7284fc", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 138, "avg_line_length": 34.725, "alnum_prop": 0.7041036717062635, "repo_name": "chewable/django", "id": "86bfb4b920c0791860630b93f5e986b3b8f8f639", "size": "2904", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/regressiontests/forms/models.py", "mode": "33188", "license": "bsd-3-clause", "language": [], "symlink_target": "" }
""" Pretrained wizard of wikipedia end2end generative model. """ from parlai.core.build_data import download_models def download(datapath): opt = {'datapath': datapath} fnames = ['end2end_generator_0.tar.gz'] download_models( opt, fnames, 'wizard_of_wikipedia', version='v0.5', use_model_type=False )
{ "content_hash": "29c0fbeb9ad7193c0388cb791a66bc85", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 80, "avg_line_length": 25.23076923076923, "alnum_prop": 0.6859756097560976, "repo_name": "facebookresearch/ParlAI", "id": "4e36f136b9dd103fcacadf9c00855263656cc772", "size": "527", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "parlai/zoo/wizard_of_wikipedia/end2end_generator.py", "mode": "33188", "license": "mit", "language": [ { "name": "C++", "bytes": "2000" }, { "name": "CSS", "bytes": "38474" }, { "name": "Cuda", "bytes": "4118" }, { "name": "Dockerfile", "bytes": "1218" }, { "name": "HTML", "bytes": "645771" }, { "name": "JavaScript", "bytes": "405110" }, { "name": "Makefile", "bytes": "289" }, { "name": "Python", "bytes": "6802410" }, { "name": "Shell", "bytes": "26147" } ], "symlink_target": "" }
"""Stack object.""" from oslo_log import log as logging from oslo_versionedobjects import base from oslo_versionedobjects import fields import six from heat.common import exception from heat.common.i18n import _ from heat.common import identifier from heat.db.sqlalchemy import api as db_api from heat.objects import base as heat_base from heat.objects import fields as heat_fields from heat.objects import raw_template from heat.objects import stack_tag LOG = logging.getLogger(__name__) class Stack( heat_base.HeatObject, base.VersionedObjectDictCompat, base.ComparableVersionedObject, ): fields = { 'id': fields.StringField(), 'name': fields.StringField(), 'raw_template_id': fields.IntegerField(), 'backup': fields.BooleanField(), 'created_at': fields.DateTimeField(read_only=True), 'deleted_at': fields.DateTimeField(nullable=True), 'disable_rollback': fields.BooleanField(), 'nested_depth': fields.IntegerField(), 'owner_id': fields.StringField(nullable=True), 'stack_user_project_id': fields.StringField(nullable=True), 'tenant': fields.StringField(nullable=True), 'timeout': fields.IntegerField(nullable=True), 'updated_at': fields.DateTimeField(nullable=True), 'user_creds_id': fields.StringField(nullable=True), 'username': fields.StringField(nullable=True), 'action': fields.StringField(nullable=True), 'status': fields.StringField(nullable=True), 'status_reason': fields.StringField(nullable=True), 'raw_template_obj': fields.ObjectField('RawTemplate'), 'convergence': fields.BooleanField(), 'current_traversal': fields.StringField(), 'current_deps': heat_fields.JsonField(), 'prev_raw_template_id': fields.IntegerField(), 'prev_raw_template': fields.ObjectField('RawTemplate'), 'parent_resource_name': fields.StringField(nullable=True), } @staticmethod def _from_db_object(context, stack, db_stack): for field in stack.fields: if field == 'raw_template_obj': raw_template_obj = db_stack.__dict__.get('raw_template') if raw_template_obj is not None: # Object is already lazy loaded raw_template_obj = ( raw_template.RawTemplate.from_db_object( context, raw_template.RawTemplate(), raw_template_obj)) stack._raw_template = raw_template_obj else: stack[field] = db_stack.__dict__.get(field) stack._context = context stack.obj_reset_changes() return stack @property def raw_template(self): if hasattr(self, '_raw_template'): return self._raw_template LOG.warning('Loading a raw_template that should have been ' 'eagerly loaded for stack id %s' % self.id) self._raw_template = raw_template.RawTemplate.get_by_id( self._context, self['raw_template_id']) return self._raw_template @raw_template.setter def raw_template(self, value): self['raw_template_obj'] = value self._raw_template = value @classmethod def get_root_id(cls, context, stack_id): return db_api.stack_get_root_id(context, stack_id) @classmethod def get_by_id(cls, context, stack_id, **kwargs): db_stack = db_api.stack_get(context, stack_id, **kwargs) if not db_stack: return None stack = cls._from_db_object(context, cls(context), db_stack) return stack @classmethod def get_by_name_and_owner_id(cls, context, stack_name, owner_id): db_stack = db_api.stack_get_by_name_and_owner_id( context, six.text_type(stack_name), owner_id ) if not db_stack: return None stack = cls._from_db_object(context, cls(context), db_stack) return stack @classmethod def get_by_name(cls, context, stack_name): db_stack = db_api.stack_get_by_name(context, six.text_type(stack_name)) if not db_stack: return None stack = cls._from_db_object(context, cls(context), db_stack) return stack @classmethod def 
get_all(cls, context, limit=None, sort_keys=None, marker=None, sort_dir=None, filters=None, show_deleted=False, show_nested=False, show_hidden=False, tags=None, tags_any=None, not_tags=None, not_tags_any=None, eager_load=False): db_stacks = db_api.stack_get_all( context, limit=limit, sort_keys=sort_keys, marker=marker, sort_dir=sort_dir, filters=filters, show_deleted=show_deleted, show_nested=show_nested, show_hidden=show_hidden, tags=tags, tags_any=tags_any, not_tags=not_tags, not_tags_any=not_tags_any, eager_load=eager_load) for db_stack in db_stacks: try: yield cls._from_db_object(context, cls(context), db_stack) except exception.NotFound: pass @classmethod def get_all_by_owner_id(cls, context, owner_id): db_stacks = db_api.stack_get_all_by_owner_id(context, owner_id) for db_stack in db_stacks: try: yield cls._from_db_object(context, cls(context), db_stack) except exception.NotFound: pass @classmethod def get_all_by_root_owner_id(cls, context, root_owner_id): db_stacks = db_api.stack_get_all_by_root_owner_id(context, root_owner_id) for db_stack in db_stacks: try: yield cls._from_db_object(context, cls(context), db_stack) except exception.NotFound: pass @classmethod def count_all(cls, context, **kwargs): return db_api.stack_count_all(context, **kwargs) @classmethod def count_total_resources(cls, context, stack_id): return db_api.stack_count_total_resources(context, stack_id) @classmethod def create(cls, context, values): return cls._from_db_object(context, cls(context), db_api.stack_create(context, values)) @classmethod def update_by_id(cls, context, stack_id, values): """Update and return (boolean) if it was updated. Note: the underlying stack_update filters by current_traversal and stack_id. """ return db_api.stack_update(context, stack_id, values) @classmethod def select_and_update(cls, context, stack_id, values, exp_trvsl=None): """Update the stack by selecting on traversal ID. Uses UPDATE ... WHERE (compare and swap) to catch any concurrent update problem. If the stack is found with given traversal, it is updated. If there occurs a race while updating, only one will succeed and other will get return value of False. 
""" return db_api.stack_update(context, stack_id, values, exp_trvsl=exp_trvsl) @classmethod def persist_state_and_release_lock(cls, context, stack_id, engine_id, values): return db_api.persist_state_and_release_lock(context, stack_id, engine_id, values) @classmethod def delete(cls, context, stack_id): db_api.stack_delete(context, stack_id) def update_and_save(self, values): has_updated = self.__class__.update_by_id(self._context, self.id, values) if not has_updated: raise exception.NotFound(_('Attempt to update a stack with id: ' '%(id)s %(traversal)s %(msg)s') % { 'id': self.id, 'traversal': self.current_traversal, 'msg': 'that does not exist'}) def __eq__(self, another): self.refresh() # to make test object comparison work well return super(Stack, self).__eq__(another) def __ne__(self, other): return not self.__eq__(other) def refresh(self): db_stack = db_api.stack_get( self._context, self.id, show_deleted=True) if db_stack is None: message = _('No stack exists with id "%s"') % str(self.id) raise exception.NotFound(message) return self.__class__._from_db_object( self._context, self, db_stack ) @classmethod def encrypt_hidden_parameters(cls, tmpl): raw_template.RawTemplate.encrypt_hidden_parameters(tmpl) @classmethod def get_status(cls, context, stack_id): """Return action and status for the given stack.""" return db_api.stack_get_status(context, stack_id) def identifier(self): """Return an identifier for this stack.""" return identifier.HeatIdentifier(self.tenant, self.name, self.id) @property def tags(self): return stack_tag.StackTagList.get(self._context, self.id)
{ "content_hash": "2f019a12a14e377a15bb1be708914a5d", "timestamp": "", "source": "github", "line_count": 258, "max_line_length": 79, "avg_line_length": 36.968992248062015, "alnum_prop": 0.578842524638289, "repo_name": "noironetworks/heat", "id": "a6904865d3c6d86607dc9abc4733bedca302c97c", "size": "10111", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "heat/objects/stack.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "8804896" }, { "name": "Shell", "bytes": "64533" } ], "symlink_target": "" }
import sys
import os

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Get the project root dir, which is the parent dir of this cwd
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)

import nba_prediction

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'nba-prediction'
copyright = u"2017, Victor An"

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = nba_prediction.__version__
# The full version, including alpha/beta/rc tags.
release = nba_prediction.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'nba_predictiondoc'


# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'nba_prediction.tex',
     u'nba-prediction Documentation',
     u'Victor An', 'manual'),
]

# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'nba_prediction',
     u'nba-prediction Documentation',
     [u'Victor An'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'nba_prediction',
     u'nba-prediction Documentation',
     u'Victor An',
     'nba_prediction',
     'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
{ "content_hash": "54f183e3a4c2953886ca69e1374161fd", "timestamp": "", "source": "github", "line_count": 260, "max_line_length": 76, "avg_line_length": 30.834615384615386, "alnum_prop": 0.7031308469502308, "repo_name": "classtag/nba-prediction", "id": "31a196f73f6aaf24e49d2f9554e40bb9bd2361c0", "size": "8466", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "docs/conf.py", "mode": "33261", "license": "mit", "language": [ { "name": "Makefile", "bytes": "2323" }, { "name": "Python", "bytes": "13324" } ], "symlink_target": "" }
import glob
import gzip
import sys
import re
from math import sqrt, log


class GoogleNgrams:
    '''
    gzip_dir is the name of a directory containing the gzipped parts of the
    ngrams data.
    '''

    def __init__(self, gzip_dir):
        self.gzip_dir = gzip_dir
        # Index the start word of each file. Note: the range check in
        # find_files_with_word assumes the shards (and therefore self.words)
        # are in lexicographic order, which glob does not guarantee on all
        # platforms.
        self.files = glob.glob(self.gzip_dir + '/*.gz')
        self.words = []
        for f in self.files:
            start_word = self.get_start_word(f)
            self.words.append(start_word)

    def get_start_word(self, filename):
        with gzip.open(filename, 'r') as f:
            return f.readline().split('\t')[0]

    def find_files_with_word(self, query_word):
        # A file can contain the word if it starts at or before the word and
        # the next file starts at or after it (boundary words may match two
        # files, so both are returned).
        files_to_search = []
        for i in xrange(len(self.words)):
            if query_word >= self.words[i] and (i + 1 == len(self.words) or query_word <= self.words[i + 1]):
                files_to_search.append(self.files[i])
        return files_to_search

    def run_query(self, query_word):
        filenames = self.find_files_with_word(query_word)
        results = {}
        for filename in filenames:
            q = query_word + '\t'
            with gzip.open(filename, 'r') as f:
                for line in f:
                    if line.startswith(q):
                        parts = line.split('\t')
                        results[parts[1]] = int(parts[2])
        return results
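A minimal usage sketch (the shard directory name and query word are placeholders; Python 2 syntax matches the `xrange` and `iterkeys`-era code above):

```python
# Assumes a directory of .gz bigram shards whose names sort lexicographically.
ngrams = GoogleNgrams('googlebooks-2gram-shards')
for second_word, count in sorted(ngrams.run_query('hello').items()):
    print '%s\t%d' % (second_word, count)
```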
{ "content_hash": "dfe10fcd156c563d170b0b6be7b1ff4b", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 109, "avg_line_length": 30.434782608695652, "alnum_prop": 0.5464285714285714, "repo_name": "jayantk/pnp", "id": "dfa90659921598bef5aa9f756a68917c296197b4", "size": "1400", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "experiments/dipart/scripts/preprocess/ngrams.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "108611" }, { "name": "Scala", "bytes": "246577" }, { "name": "Shell", "bytes": "50174" } ], "symlink_target": "" }
""" .. _tut-inplace: Modifying data in-place ======================= Many of MNE-Python's data objects (`~mne.io.Raw`, `~mne.Epochs`, `~mne.Evoked`, etc) have methods that modify the data in-place (either optionally or obligatorily). This can be advantageous when working with large datasets because it reduces the amount of computer memory needed to perform the computations. However, it can lead to unexpected results if you're not aware that it's happening. This tutorial provides a few examples of in-place processing, and how and when to avoid it. As usual we'll start by importing the modules we need and loading some :ref:`example data <sample-dataset>`: """ # %% import os import mne sample_data_folder = mne.datasets.sample.data_path() sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample', 'sample_audvis_raw.fif') # the preload flag loads the data into memory now raw = mne.io.read_raw_fif(sample_data_raw_file, preload=True) raw.crop(tmax=10.) # raw.crop() always happens in-place # %% # Signal processing # ----------------- # # Most MNE-Python data objects have built-in methods for filtering, including # high-, low-, and band-pass filters (`~mne.io.Raw.filter`), band-stop filters # (`~mne.io.Raw.notch_filter`), # Hilbert transforms (`~mne.io.Raw.apply_hilbert`), # and even arbitrary or user-defined functions (`~mne.io.Raw.apply_function`). # These typically **always** modify data in-place, so if we want to preserve # the unprocessed data for comparison, we must first make a copy of it. For # example: original_raw = raw.copy() raw.apply_hilbert() print(f'original data type was {original_raw.get_data().dtype}, after ' f'apply_hilbert the data type changed to {raw.get_data().dtype}.') # %% # Channel picking # --------------- # # Another group of methods where data is modified in-place are the # channel-picking methods. For example: print(f'original data had {original_raw.info["nchan"]} channels.') original_raw.pick('eeg') # selects only the EEG channels print(f'after picking, it has {original_raw.info["nchan"]} channels.') # %% # Note also that when picking only EEG channels, projectors that affected only # the magnetometers were dropped, since there are no longer any magnetometer # channels. # # # The ``copy`` parameter # ---------------------- # # Above we saw an example of using the `~mne.io.Raw.copy` method to facilitate # comparing data before and after processing. This is not needed when using # certain MNE-Python *functions*, because they have a *function parameter* # where you can specify ``copy=True`` (return a modified copy of the data) or # ``copy=False`` (operate in-place). For example, `mne.set_eeg_reference` is # one such function; notice that here we plot ``original_raw`` *after* the # rereferencing has been done, but ``original_raw`` is unaffected because # we specified ``copy=True``: # sphinx_gallery_thumbnail_number=2 rereferenced_raw, ref_data = mne.set_eeg_reference(original_raw, ['EEG 003'], copy=True) original_raw.plot() rereferenced_raw.plot() # %% # Another example is the picking function `mne.pick_info`, which operates on # `mne.Info` dictionaries rather than on data objects. See # :ref:`tut-info-class` for details. # # # Summary # ------- # # Generally speaking, you should expect that *methods of data objects* will # operate in-place, and *functions that take a data object as a parameter* will # operate on a copy of the data (unless the function has a ``copy`` parameter # and it defaults to ``False`` or you specify ``copy=False``). 
# During the exploratory phase of your analysis, where you might want # to try out the effects of different data cleaning approaches, you should get # used to patterns like ``raw.copy().filter(...).plot()`` or # ``raw.copy().apply_proj().plot_psd()`` if you want to avoid having to re-load # data and repeat earlier steps each time you change a computation (see the # :ref:`sect-meth-chain` section for more info on method chaining).
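
# %%
# A minimal sketch of that chaining pattern (the 1-40 Hz band edges below are
# arbitrary, chosen only for illustration):

filtered = original_raw.copy().filter(l_freq=1., h_freq=40.)
print(original_raw is filtered)  # False: the filter ran on the copy only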
{ "content_hash": "bda4f1754a3f3e3b950d956a1b368849", "timestamp": "", "source": "github", "line_count": 105, "max_line_length": 79, "avg_line_length": 38.857142857142854, "alnum_prop": 0.7017156862745098, "repo_name": "mne-tools/mne-tools.github.io", "id": "6d6fa1dfaaef9b39dadd182d44ef3babe62d618e", "size": "4080", "binary": false, "copies": "3", "ref": "refs/heads/main", "path": "0.24/_downloads/ed0e89a2b14f0d8bc8b9521aed1b4063/15_inplace.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "708696" }, { "name": "Dockerfile", "bytes": "1820" }, { "name": "HTML", "bytes": "1526247783" }, { "name": "JavaScript", "bytes": "1323087" }, { "name": "Jupyter Notebook", "bytes": "24820047" }, { "name": "Python", "bytes": "18575494" } ], "symlink_target": "" }
from mock import MagicMock
from scrapy import signals
from twisted.internet.defer import Deferred
from twisted.trial import unittest

from scrapyrt.core import CrawlManager, ScrapyrtCrawlerProcess

from .spiders import MetaSpider
from .utils import get_settings


class CralwerProcessTestCase(unittest.TestCase):

    def _mock_method(self, obj, method):
        msg = "can't mock, class {} doesn't have method {}".format(
            obj.__class__.__name__, method)
        assert hasattr(obj, method), msg
        setattr(obj, method, MagicMock(spec=lambda: None))

    def test_signals(self):
        """Need to be sure that all signals are bound to appropriate handlers
        right after the crawler is created.

        """
        crawl_manager = CrawlManager('test', {'url': 'http://localhost'})
        signals_and_handlers = [
            ('item_scraped', 'get_item'),
            ('item_dropped', 'collect_dropped'),
            ('spider_idle', 'spider_idle'),
            ('spider_error', 'handle_spider_error'),
            ('request_scheduled', 'handle_scheduling'),
        ]
        for _, handler in signals_and_handlers:
            self._mock_method(crawl_manager, handler)
        settings = get_settings()
        crawler_process = ScrapyrtCrawlerProcess(settings, crawl_manager)
        dfd = crawler_process.crawl(MetaSpider)
        self.assertIsInstance(dfd, Deferred)
        crawler = crawl_manager.crawler
        for signal, handler in signals_and_handlers:
            crawler.signals.send_catch_log(
                signal=getattr(signals, signal), spider=crawler.spider)
            handler_mock = getattr(crawl_manager, handler)
            self.assertEquals(handler_mock.call_count, 1)
{ "content_hash": "53671ed66513a30ef43b3788d1e99b72", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 76, "avg_line_length": 38.15555555555556, "alnum_prop": 0.6423995340710542, "repo_name": "pawelmhm/scrapyrt", "id": "c418e17d6783d084dbcc7e5e17a43d92ce93d490", "size": "1741", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "tests/test_crawler_process.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Dockerfile", "bytes": "819" }, { "name": "HTML", "bytes": "471" }, { "name": "Python", "bytes": "104615" } ], "symlink_target": "" }
def <warning descr="Python version 3.7 does not allow 'async' and 'await' as names">a<caret>wait</warning>():
    pass
{ "content_hash": "1ad15413b9d2a47a15c81c9ff934c3b1", "timestamp": "", "source": "github", "line_count": 2, "max_line_length": 109, "avg_line_length": 59, "alnum_prop": 0.7033898305084746, "repo_name": "msebire/intellij-community", "id": "308f572d59a7c1813cd66e3c2244b3bb94ee34e0", "size": "118", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/testData/quickFixes/PyRenameElementQuickFixTest/renameAwaitFunctionInPy36.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
from file_system_image import FileSystemImage
from support_team import SupportTeam
from timeout import timeout
from subprocess import check_call


class Media:
    def __init__(self, id, url, status):
        self.id = id
        self.url = url
        self.status = status

    @timeout(30)
    def download(self):
        check_call(["rm", "-rf", "/tmp/image-downloading.jpg"])
        check_call(["wget", self.url, "--quiet", "-O", "/tmp/image-downloading.jpg"])
        check_call(["cp", "-f", "/tmp/image-downloading.jpg", "/tmp/image.jpg"])
        SupportTeam.notify("%s - downloaded to /tmp/image.jpg" % self.id)
        return FileSystemImage(self, "/tmp/image.jpg")

    def __str__(self):
        return "media(%s, %s, %s)" % (self.id, self.status, self.url)
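Hypothetical usage; the id, URL, and status values are made up, and `download()` shells out to `wget`, so it assumes `wget` is installed and `/tmp` is writable:

```python
photo = Media("12345", "http://example.com/photo.jpg", "new")
image = photo.download()  # raises if wget fails or the 30s timeout fires
print(photo)              # media(12345, new, http://example.com/photo.jpg)
```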
{ "content_hash": "412f2e1f96ded1104d1fb1a0ccdf45bf", "timestamp": "", "source": "github", "line_count": 22, "max_line_length": 85, "avg_line_length": 35, "alnum_prop": 0.6142857142857143, "repo_name": "camswords/raspberry-pi-instagram-printer", "id": "4c18cc226c41645ad6948b0904805f4278eaa1a1", "size": "770", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/lib/media.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "17368" }, { "name": "Shell", "bytes": "9030" } ], "symlink_target": "" }
import os, sys, subprocess

passed = []
ignore = ('fails_', 'giws_', 'unreal_', 'verilog', 'nuitka_', 'nim_', 'java_', 'custom_', 'cpython_', 'nodejs_')
ignoreosx = ['hello_cpython.md']

## rust is broken on fedora? Travis uses Debian.
TODO_FIX = (
    'async_channels_rust.md',  # rustc: error while loading shared libraries: librustc_driver-4e7c5e5c.so: cannot open shared object file: No such file or directory
    'hello_nim.md',
    'hello_verilog.md',
    'cpython_multithreaded.md',
    'cpython_multithreaded_raw_capi.md',
    'hello_threejs.md',
    'hello_java.md',
    'hello_caffe.md',
    'hello_nuitka.md',
    'hello_rapydscript.md',
    'hello_coffee.md',
    'hello_elm.md',
    'hello_fullstack.md',
)

files = os.listdir('../examples')
files.reverse()
for md in files:
    if md in TODO_FIX:
        print 'skip test: %s (TODO fix later)' % md
        continue
    elif not md.endswith('.md'):
        continue
    elif sys.platform == 'darwin' and md in ignoreosx:
        continue
    print md
    if md.startswith(ignore):
        continue
    subprocess.check_call([
        'python',
        '../rusthon.py',
        os.path.join('../examples', md)
    ])
    passed.append(md)

print 'TESTS PASSED:'
for md in passed:
    print '  %s' % md
{ "content_hash": "5755f111f3203dabc7e13d0579e6cad4", "timestamp": "", "source": "github", "line_count": 47, "max_line_length": 160, "avg_line_length": 24.46808510638298, "alnum_prop": 0.668695652173913, "repo_name": "rootfs/Rusthon", "id": "1f8744160d6ac96199f3b7ed3a007ccc0d04286d", "size": "1150", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "regtests/test-markdowns.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C++", "bytes": "23667" }, { "name": "HTML", "bytes": "44433" }, { "name": "Perl", "bytes": "66040" }, { "name": "Python", "bytes": "360782" }, { "name": "Shell", "bytes": "1033" } ], "symlink_target": "" }
""" RFC 3744 (WebDAV Access Control Protocol) XML Elements This module provides XML element definitions for use with WebDAV. See RFC 3744: http://www.ietf.org/rfc/rfc3744.txt """ __all__ = [] from txdav.xml.base import WebDAVElement, PCDATAElement from txdav.xml.base import WebDAVEmptyElement, WebDAVTextElement from txdav.xml.element import dav_namespace, registerElement, registerElementClass ## # Section 3 (Privileges) ## @registerElement @registerElementClass class Read (WebDAVEmptyElement): """ Privilege which controls methods that return information about the state of a resource, including the resource's properties. (RFC 3744, section 3.1) """ name = "read" # For DAV:write element (RFC 3744, section 3.2) see Write class in # rfc2518.py. @registerElement @registerElementClass class WriteProperties (WebDAVEmptyElement): """ Privilege which controls methods that modify the dead properties of a resource. (RFC 3744, section 3.3) """ name = "write-properties" @registerElement @registerElementClass class WriteContent (WebDAVEmptyElement): """ Privilege which controls methods that modify the content of an existing resource. (RFC 3744, section 3.4) """ name = "write-content" @registerElement @registerElementClass class Unlock (WebDAVEmptyElement): """ Privilege which controls the use of the UNLOCK method by a principal other than the lock owner. (RFC 3744, section 3.5) """ name = "unlock" @registerElement @registerElementClass class ReadACL (WebDAVEmptyElement): """ Privilege which controls the use of the PROPFIND method to retrieve the DAV:acl property of a resource. (RFC 3744, section 3.6) """ name = "read-acl" @registerElement @registerElementClass class ReadCurrentUserPrivilegeSet (WebDAVEmptyElement): """ Privilege which controls the use of the PROPFIND method to retrieve the DAV:current-user-privilege-set property of a resource. (RFC 3744, section 3.7) """ name = "read-current-user-privilege-set" @registerElement @registerElementClass class WriteACL (WebDAVEmptyElement): """ Privilege which controls the use of the ACL method to modify the DAV:acl property of a resource. (RFC 3744, section 3.8) """ name = "write-acl" @registerElement @registerElementClass class Bind (WebDAVEmptyElement): """ Privilege which allows a method to add a new member URL from the a collection resource. (RFC 3744, section 3.9) """ name = "bind" @registerElement @registerElementClass class Unbind (WebDAVEmptyElement): """ Privilege which allows a method to remove a member URL from the a collection resource. (RFC 3744, section 3.10) """ name = "unbind" @registerElement @registerElementClass class All (WebDAVEmptyElement): """ Aggregate privilege that contains the entire set of privileges that can be applied to a resource. (RFC 3744, section 3.11) Principal which matches all users. (RFC 3744, section 5.5.1) """ name = "all" ## # Section 4 (Principal Properties) ## @registerElement @registerElementClass class Principal (WebDAVElement): """ Indicates a principal resource type. (RFC 3744, section 4) Identifies the principal to which an ACE applies. 
(RFC 3744, section 5.5.1) """ name = "principal" allowed_children = { (dav_namespace, "href"): (0, 1), (dav_namespace, "all"): (0, 1), (dav_namespace, "authenticated"): (0, 1), (dav_namespace, "unauthenticated"): (0, 1), (dav_namespace, "property"): (0, 1), (dav_namespace, "self"): (0, 1), } def validate(self): super(Principal, self).validate() if len(self.children) > 1: raise ValueError( "Exactly one of DAV:href, DAV:all, DAV:authenticated, " "DAV:unauthenticated, DAV:property or DAV:self is required for " "{0}, got: {1!r}".format( self.sname(), self.children ) ) @registerElement @registerElementClass class AlternateURISet (WebDAVElement): """ Property which contains the URIs of network resources with additional descriptive information about the principal. (RFC 3744, section 4.1) """ name = "alternate-URI-set" hidden = True protected = True allowed_children = {(dav_namespace, "href"): (0, None)} @registerElement @registerElementClass class PrincipalURL (WebDAVElement): """ Property which contains the URL that must be used to identify this principal in an ACL request. (RFC 3744, section 4.2) """ name = "principal-URL" hidden = True protected = True allowed_children = {(dav_namespace, "href"): (0, 1)} @registerElement @registerElementClass class GroupMemberSet (WebDAVElement): """ Property which identifies the principals that are direct members of a group principal. (RFC 3744, section 4.3) """ name = "group-member-set" hidden = True allowed_children = {(dav_namespace, "href"): (0, None)} @registerElement @registerElementClass class GroupMembership (WebDAVElement): """ Property which identifies the group principals in which a principal is directly a member. (RFC 3744, section 4.4) """ name = "group-membership" hidden = True protected = True allowed_children = {(dav_namespace, "href"): (0, None)} ## # Section 5 (Access Control Properties) ## # For DAV:owner element (RFC 3744, section 5.1) see Owner class in # rfc2518.py. @registerElement @registerElementClass class Group (WebDAVElement): """ Property which identifies a particular principal as being the group principal of a resource. (RFC 3744, section 5.2) """ name = "group" hidden = True protected = True # may be protected, per RFC 3744, section 5.2 allowed_children = {(dav_namespace, "href"): (0, 1)} @registerElement @registerElementClass class SupportedPrivilegeSet (WebDAVElement): """ Property which identifies the privileges defined for a resource. (RFC 3744, section 5.3) """ name = "supported-privilege-set" hidden = True protected = True allowed_children = {(dav_namespace, "supported-privilege"): (0, None)} @registerElement @registerElementClass class SupportedPrivilege (WebDAVElement): """ Identifies a privilege defined for a resource. (RFC 3744, section 5.3) """ name = "supported-privilege" allowed_children = { (dav_namespace, "privilege"): (1, 1), (dav_namespace, "abstract"): (0, 1), (dav_namespace, "description"): (1, 1), (dav_namespace, "supported-privilege"): (0, None), } @registerElement @registerElementClass class Privilege (WebDAVElement): """ Identifies a privilege. (RFC 3744, sections 5.3 and 5.5.1) """ name = "privilege" allowed_children = {WebDAVElement: (0, None)} def isAggregateOf(self, subprivilege, supportedPrivileges): """ Check whether this privilege is an aggregate of another. @param subprivilege: a L{Privilege} @param supportedPrivileges: a L{SupportedPrivilegeSet} @return: C{True} is this privilege is an aggregate of C{subprivilege} according to C{supportedPrivileges}. 
""" # DAV: all is an aggregate of all privileges if len(self.children) == 1 and self.children[0].qname() == (dav_namespace, "all"): return True def isAggregate(supportedPrivilege): sp = supportedPrivilege.childOfType(Privilege) if sp == self: def find(supportedPrivilege): if supportedPrivilege.childOfType(Privilege) == subprivilege: return True for child in supportedPrivilege.childrenOfType(SupportedPrivilege): if find(child): return True else: return False return find(supportedPrivilege) else: for child in supportedPrivilege.childrenOfType(SupportedPrivilege): if isAggregate(child): return True else: return False for supportedPrivilege in supportedPrivileges.children: if isAggregate(supportedPrivilege): return True else: return False def expandAggregate(self, supportedPrivileges): """ Expand this privilege into the set of privileges aggregated under it based on the structure of the given supported privileges. If this privilege is not an aggregate, just return it as-is. @param supportedPrivileges: a L{SupportedPrivilegeSet} @return: the list of expanded L{Privileges} """ # Find ourselves in supported privileges def find(supportedPrivilege): """ Find the supportPrivilege which matches this privilege. """ if supportedPrivilege.childOfType(Privilege) == self: return supportedPrivilege for child in supportedPrivilege.childrenOfType(SupportedPrivilege): result = find(child) if result is not None: return result else: return None for supportedPrivilege in supportedPrivileges.children: result = find(supportedPrivilege) if result is not None: break else: return [self] # Now add sub-privileges recursively aggregates = [] def add(supportedPrivilege): """ Add all sub-privileges to the list. """ aggregates.append(supportedPrivilege.childOfType(Privilege)) for child in supportedPrivilege.childrenOfType(SupportedPrivilege): add(child) add(result) return aggregates @registerElement @registerElementClass class Abstract (WebDAVElement): """ Identifies a privilege as abstract. (RFC 3744, section 5.3) """ name = "abstract" @registerElement @registerElementClass class Description (WebDAVTextElement): """ A human-readable description of what privilege controls access to. (RFC 3744, sections 5.3 and 9.5) """ name = "description" allowed_attributes = {"xml:lang": True} @registerElement @registerElementClass class CurrentUserPrivilegeSet (WebDAVElement): """ Property which contains the exact set of privileges (as computer by the server) granted to the currently authenticated HTTP user. (RFC 3744, section 5.4) """ name = "current-user-privilege-set" hidden = True protected = True allowed_children = {(dav_namespace, "privilege"): (0, None)} # For DAV:privilege element (RFC 3744, section 5.4) see Privilege class above. @registerElement @registerElementClass class ACL (WebDAVElement): """ Property which specifies the list of access control entries which define what privileges are granted to which users for a resource. (RFC 3744, section 5.5) """ name = "acl" hidden = True protected = True allowed_children = {(dav_namespace, "ace"): (0, None)} @registerElement @registerElementClass class ACE (WebDAVElement): """ Specifies the list of access control entries which define what privileges are granted to which users for a resource. 
(RFC 3744, section 5.5) """ name = "ace" allowed_children = { (dav_namespace, "principal"): (0, 1), (dav_namespace, "invert"): (0, 1), (dav_namespace, "grant"): (0, 1), (dav_namespace, "deny"): (0, 1), (dav_namespace, "protected"): (0, 1), (dav_namespace, "inherited"): (0, 1), } def __init__(self, *children, **attributes): super(ACE, self).__init__(*children, **attributes) self.principal = None self.invert = None self.allow = None self.privileges = None self.inherited = None self.protected = False my_children = [] for child in self.children: namespace, name = child.qname() if isinstance(child, PCDATAElement): continue if (namespace == dav_namespace): if name in ("principal", "invert"): if self.principal is not None: raise ValueError( "Only one of DAV:principal or DAV:invert allowed in {0}, got: {1}".format( self.sname(), self.children ) ) if name == "invert": self.invert = True self.principal = child.children[0] else: self.invert = False self.principal = child elif name in ("grant", "deny"): if self.allow is not None: raise ValueError( "Only one of DAV:grant or DAV:deny allowed in {0}, got: {1}".format( self.sname(), self.children ) ) self.allow = (name == "grant") self.privileges = child.children elif name == "inherited": self.inherited = str(child.children[0]) elif name == "protected": self.protected = True my_children.append(child) self.children = tuple(my_children) if self.principal is None: raise ValueError( "One of DAV:principal or DAV:invert is required in {0}, got: {1}".format( self.sname(), self.children ) ) assert self.invert is not None if self.allow is None: raise ValueError( "One of DAV:grant or DAV:deny is required in {0}, got: {1}".format( self.sname(), self.children ) ) assert self.privileges is not None # For DAV:principal element (RFC 3744, section 5.5.1) see Principal # class above. # For DAV:all element (RFC 3744, section 5.5.1) see All class above. @registerElement @registerElementClass class Authenticated (WebDAVEmptyElement): """ Principal which matches authenticated users. (RFC 3744, section 5.5.1) """ name = "authenticated" @registerElement @registerElementClass class Unauthenticated (WebDAVEmptyElement): """ Principal which matches unauthenticated users. (RFC 3744, section 5.5.1) """ name = "unauthenticated" # For DAV:property element (RFC 3744, section 5.5.1) see Property # class above. @registerElement @registerElementClass class Self (WebDAVEmptyElement): """ Principal which matches a user if a resource is a principal and the user matches the resource. (RFC 3744, sections 5.5.1 and 9.3) """ name = "self" @registerElement @registerElementClass class Invert (WebDAVElement): """ Principal which matches a user if the user does not match the principal contained by this principal. (RFC 3744, section 5.5.1) """ name = "invert" allowed_children = {(dav_namespace, "principal"): (1, 1)} @registerElement @registerElementClass class Grant (WebDAVElement): """ Grants the contained privileges to a principal. (RFC 3744, section 5.5.2) """ name = "grant" allowed_children = {(dav_namespace, "privilege"): (1, None)} @registerElement @registerElementClass class Deny (WebDAVElement): """ Denies the contained privileges to a principal. (RFC 3744, section 5.5.2) """ name = "deny" allowed_children = {(dav_namespace, "privilege"): (1, None)} # For DAV:privilege element (RFC 3744, section 5.5.2) see Privilege # class above. @registerElement @registerElementClass class Protected (WebDAVEmptyElement): """ Identifies an ACE as protected. 
(RFC 3744, section 5.5.3) """ name = "protected" @registerElement @registerElementClass class Inherited (WebDAVElement): """ Indicates that an ACE is inherited from the resource identified by the contained DAV:href element. (RFC 3744, section 5.5.4) """ name = "inherited" allowed_children = {(dav_namespace, "href"): (1, 1)} @registerElement @registerElementClass class ACLRestrictions (WebDAVElement): """ Property which defines the types of ACLs supported by this server, to avoid clients needlessly getting errors. (RFC 3744, section 5.6) """ name = "acl-restrictions" hidden = True protected = True allowed_children = { (dav_namespace, "grant-only"): (0, 1), (dav_namespace, "no-invert"): (0, 1), (dav_namespace, "deny-before-grant"): (0, 1), (dav_namespace, "required-principal"): (0, 1), } @registerElement @registerElementClass class GrantOnly (WebDAVEmptyElement): """ Indicates that ACEs with deny clauses are not allowed. (RFC 3744, section 5.6.1) """ name = "grant-only" @registerElement @registerElementClass class NoInvert (WebDAVEmptyElement): """ Indicates that ACEs with the DAV:invert element are not allowed. (RFC 3744, section 5.6.2) """ name = "no-invert" @registerElement @registerElementClass class DenyBeforeGrant (WebDAVEmptyElement): """ Indicates that all deny ACEs must precede all grant ACEs. (RFC 3744, section 5.6.3) """ name = "deny-before-grant" @registerElement @registerElementClass class RequiredPrincipal (WebDAVElement): """ Indicates which principals must have an ACE defined in an ACL. (RFC 3744, section 5.6.4) """ name = "required-principal" allowed_children = { (dav_namespace, "all"): (0, 1), (dav_namespace, "authenticated"): (0, 1), (dav_namespace, "unauthenticated"): (0, 1), (dav_namespace, "self"): (0, 1), (dav_namespace, "href"): (0, None), (dav_namespace, "property"): (0, None), } def validate(self): super(RequiredPrincipal, self).validate() type = None for child in self.children: if type is None: type = child.qname() elif child.qname() != type: raise ValueError( "Only one of DAV:all, DAV:authenticated, DAV:unauthenticated, " "DAV:self, DAV:href or DAV:property allowed for {0}, got: {1}".format( self.sname(), self.children ) ) @registerElement @registerElementClass class InheritedACLSet (WebDAVElement): """ Property which contains a set of URLs that identify other resources that also control the access to this resource. (RFC 3744, section 5.7) """ name = "inherited-acl-set" hidden = True protected = True allowed_children = {(dav_namespace, "href"): (0, None)} @registerElement @registerElementClass class PrincipalCollectionSet (WebDAVElement): """ Property which contains a set of URLs that identify the root collections that contain the principals that are available on the server that implements a resource. (RFC 3744, section 5.8) """ name = "principal-collection-set" hidden = True protected = True allowed_children = {(dav_namespace, "href"): (0, None)} ## # Section 7 (Access Control and existing methods) ## @registerElement @registerElementClass class NeedPrivileges (WebDAVElement): """ Error which indicates insufficient privileges. (RFC 3744, section 7.1.1) """ name = "need-privileges" allowed_children = {(dav_namespace, "resource"): (0, None)} @registerElement @registerElementClass class Resource (WebDAVElement): """ Identifies which resource had insufficient privileges. 
    (RFC 3744, section 7.1.1)
    """
    name = "resource"

    allowed_children = {
        (dav_namespace, "href"): (1, 1),
        (dav_namespace, "privilege"): (1, 1),
    }


##
# Section 9 (Access Control Reports)
##


@registerElement
@registerElementClass
class ACLPrincipalPropSet (WebDAVElement):
    """
    Report which returns, for all principals in the DAV:acl property (of the
    resource identified by the Request-URI) that are identified by http(s)
    URLs or by a DAV:property principal, the value of the properties specified
    in the REPORT request body.
    (RFC 3744, section 9.2)
    """
    name = "acl-principal-prop-set"

    allowed_children = {WebDAVElement: (0, None)}

    def validate(self):
        super(ACLPrincipalPropSet, self).validate()

        prop = False
        for child in self.children:
            if child.qname() == (dav_namespace, "prop"):
                if prop:
                    raise ValueError(
                        "Only one DAV:prop allowed for {0}, got: {1}".format(
                            self.sname(), self.children
                        )
                    )
                prop = True


@registerElement
@registerElementClass
class PrincipalMatch (WebDAVElement):
    """
    Report used to identify all members (at any depth) of the collection
    identified by the Request-URI that are principals and that match the
    current user.
    (RFC 3744, section 9.3)
    """
    name = "principal-match"

    allowed_children = {
        (dav_namespace, "principal-property"): (0, 1),
        (dav_namespace, "self"): (0, 1),
        (dav_namespace, "prop"): (0, 1),
    }

    def validate(self):
        super(PrincipalMatch, self).validate()

        # This element can be empty when used in a supported-report-set
        if not len(self.children):
            return

        principalPropertyOrSelf = False

        for child in self.children:
            namespace, name = child.qname()

            if (namespace == dav_namespace) and name in ("principal-property", "self"):
                if principalPropertyOrSelf:
                    raise ValueError(
                        "Only one of DAV:principal-property or DAV:self allowed in {0}, got: {1}".format(
                            self.sname(), self.children
                        )
                    )
                principalPropertyOrSelf = True

        if not principalPropertyOrSelf:
            raise ValueError(
                "One of DAV:principal-property or DAV:self is required in {0}, got: {1}".format(
                    self.sname(), self.children
                )
            )


@registerElement
@registerElementClass
class PrincipalProperty (WebDAVElement):
    """
    Identifies a property.
    (RFC 3744, section 9.3)
    """
    name = "principal-property"

    allowed_children = {WebDAVElement: (0, None)}


# For DAV:self element (RFC 3744, section 9.3) see Self class above.


@registerElement
@registerElementClass
class PrincipalPropertySearch (WebDAVElement):
    """
    Report which performs a search for all principals whose properties contain
    character data that matches the search criteria specified in the request.
    (RFC 3744, section 9.4)
    """
    name = "principal-property-search"

    allowed_children = {
        # Required, but must be empty when used in a supported-report-set
        (dav_namespace, "property-search"): (0, None),
        (dav_namespace, "prop"): (0, 1),
        (dav_namespace, "apply-to-principal-collection-set"): (0, 1),
    }
    allowed_attributes = {"test": False}


@registerElement
@registerElementClass
class PropertySearch (WebDAVElement):
    """
    Contains a DAV:prop element enumerating the properties to be searched and
    a DAV:match element, containing the search string.
    (RFC 3744, section 9.4)
    """
    name = "property-search"

    allowed_children = {
        (dav_namespace, "prop"): (1, 1),
        (dav_namespace, "match"): (1, 1),
    }


@registerElement
@registerElementClass
class Match (WebDAVTextElement):
    """
    Contains a search string.
    (RFC 3744, section 9.4)
    """
    name = "match"


@registerElement
@registerElementClass
class PrincipalSearchPropertySet (WebDAVElement):
    """
    Report which identifies those properties that may be searched using the
    DAV:principal-property-search report.
(RFC 3744, section 9.5) """ name = "principal-search-property-set" allowed_children = {(dav_namespace, "principal-search-property"): (0, None)} @registerElement @registerElementClass class PrincipalSearchProperty (WebDAVElement): """ Contains exactly one searchable property, and a description of the property. (RFC 3744, section 9.5) """ name = "principal-search-property" allowed_children = { (dav_namespace, "prop"): (1, 1), (dav_namespace, "description"): (1, 1), } @registerElement @registerElementClass class NumberOfMatchesWithinLimits (WebDAVEmptyElement): """ Error which indicates too many results """ name = "number-of-matches-within-limits" # For DAV:description element (RFC 3744, section 9.5) see Description # class above.
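# A rough, untested usage sketch of the aggregation helpers above (it only
# runs when this module is executed directly). It assumes the All and Read
# privilege classes defined in the section 3 part of this module, value-based
# equality on WebDAVElement, and that Description's required xml:lang
# attribute is supplied as shown:
if __name__ == "__main__":
    lang = {"xml:lang": "en"}
    supported = SupportedPrivilegeSet(
        SupportedPrivilege(
            Privilege(All()),
            Description("All privileges", **lang),
            SupportedPrivilege(
                Privilege(Read()),
                Description("Read the resource", **lang),
            ),
        ),
    )
    all_priv = Privilege(All())
    # DAV:all aggregates every other privilege in the tree above
    assert all_priv.isAggregateOf(Privilege(Read()), supported)
    assert len(all_priv.expandAggregate(supported)) == 2  # [all, read]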
{ "content_hash": "f90a5025c5ab537a32e5a9a0e7758792", "timestamp": "", "source": "github", "line_count": 976, "max_line_length": 131, "avg_line_length": 26.045081967213115, "alnum_prop": 0.6212037765538946, "repo_name": "trevor/calendarserver", "id": "d793651d5e8ab7c9ba78e5b1656d2dcd4f85ddcf", "size": "26542", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "txdav/xml/rfc3744.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "4214" }, { "name": "D", "bytes": "13143" }, { "name": "JavaScript", "bytes": "76566" }, { "name": "Python", "bytes": "9260291" }, { "name": "Shell", "bytes": "78964" } ], "symlink_target": "" }
import inspect import warnings from pandas.util._exceptions import find_stack_level from pandas._testing import * # noqa:F401,F403,PDF014 warnings.warn( ( "pandas.util.testing is deprecated. Use the functions in the " "public API at pandas.testing instead." ), FutureWarning, stacklevel=find_stack_level(inspect.currentframe()), )
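# What importers of this shim observe, in a fresh interpreter (self-contained
# check; assumes pandas is installed, and only runs when executed directly):
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        import pandas.util.testing  # noqa: F401  -- emits the FutureWarning above
    assert any(issubclass(w.category, FutureWarning) for w in caught)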
{ "content_hash": "35c1908d2daa882372c4f9564655415d", "timestamp": "", "source": "github", "line_count": 15, "max_line_length": 70, "avg_line_length": 24.466666666666665, "alnum_prop": 0.7084468664850136, "repo_name": "datapythonista/pandas", "id": "5585ea0b5862844b8aa4c52adf74a3bae0141a8a", "size": "367", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "pandas/util/testing.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "131" }, { "name": "C", "bytes": "355524" }, { "name": "CSS", "bytes": "1662" }, { "name": "Cython", "bytes": "1178139" }, { "name": "Dockerfile", "bytes": "1933" }, { "name": "HTML", "bytes": "456449" }, { "name": "Makefile", "bytes": "505" }, { "name": "Python", "bytes": "19048364" }, { "name": "Shell", "bytes": "10511" }, { "name": "Smarty", "bytes": "8486" }, { "name": "XSLT", "bytes": "1196" } ], "symlink_target": "" }
"""Download handlers for http and https schemes """ from twisted.internet import reactor from pyrake.utils.misc import load_object class HTTP10DownloadHandler(object): def __init__(self, settings): self.HTTPClientFactory = load_object(settings['DOWNLOADER_HTTPCLIENTFACTORY']) self.ClientContextFactory = load_object(settings['DOWNLOADER_CLIENTCONTEXTFACTORY']) def download_request(self, request, spider): """Return a deferred for the HTTP download""" factory = self.HTTPClientFactory(request) self._connect(factory) return factory.deferred def _connect(self, factory): host, port = factory.host, factory.port if factory.scheme == 'https': return reactor.connectSSL(host, port, factory, self.ClientContextFactory()) else: return reactor.connectTCP(host, port, factory)
{ "content_hash": "b5cc3bcdcb9dc597d70e148f5fcc404f", "timestamp": "", "source": "github", "line_count": 25, "max_line_length": 92, "avg_line_length": 36.84, "alnum_prop": 0.6634093376764386, "repo_name": "elkingtowa/pyrake", "id": "80607dfb1a0edc82161156778ef754f451a30455", "size": "921", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "build/lib/pyrake/core/downloader/handlers/http10.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "9681" }, { "name": "Perl", "bytes": "1311" }, { "name": "Python", "bytes": "1950905" }, { "name": "Shell", "bytes": "3209" } ], "symlink_target": "" }
import numpy as np from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import skip_if_32bit from sklearn import datasets from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso from sklearn.svm import LinearSVC from sklearn.feature_selection import SelectFromModel from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import PassiveAggressiveClassifier iris = datasets.load_iris() data, y = iris.data, iris.target rng = np.random.RandomState(0) def test_invalid_input(): clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True, random_state=None, tol=None) for threshold in ["gobbledigook", ".5 * gobbledigook"]: model = SelectFromModel(clf, threshold=threshold) model.fit(data, y) assert_raises(ValueError, model.transform, data) def test_input_estimator_unchanged(): # Test that SelectFromModel fits on a clone of the estimator. est = RandomForestClassifier() transformer = SelectFromModel(estimator=est) transformer.fit(data, y) assert_true(transformer.estimator is est) def test_feature_importances(): X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) est = RandomForestClassifier(n_estimators=50, random_state=0) for threshold, func in zip(["mean", "median"], [np.mean, np.median]): transformer = SelectFromModel(estimator=est, threshold=threshold) transformer.fit(X, y) assert_true(hasattr(transformer.estimator_, 'feature_importances_')) X_new = transformer.transform(X) assert_less(X_new.shape[1], X.shape[1]) importances = transformer.estimator_.feature_importances_ feature_mask = np.abs(importances) > func(importances) assert_array_almost_equal(X_new, X[:, feature_mask]) def test_sample_weight(): # Ensure sample weights are passed to underlying estimator X, y = datasets.make_classification( n_samples=100, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # Check with sample weights sample_weight = np.ones(y.shape) sample_weight[y == 1] *= 100 est = LogisticRegression(random_state=0, fit_intercept=False) transformer = SelectFromModel(estimator=est) transformer.fit(X, y, sample_weight=None) mask = transformer._get_support_mask() transformer.fit(X, y, sample_weight=sample_weight) weighted_mask = transformer._get_support_mask() assert not np.all(weighted_mask == mask) transformer.fit(X, y, sample_weight=3 * sample_weight) reweighted_mask = transformer._get_support_mask() assert np.all(weighted_mask == reweighted_mask) def test_coef_default_threshold(): X, y = datasets.make_classification( n_samples=100, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # For the Lasso and related models, the threshold defaults to 1e-5 transformer = SelectFromModel(estimator=Lasso(alpha=0.1)) transformer.fit(X, y) X_new = transformer.transform(X) mask = np.abs(transformer.estimator_.coef_) > 1e-5 assert_array_almost_equal(X_new, X[:, mask]) @skip_if_32bit def test_2d_coef(): X, y = datasets.make_classification( n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, 
        shuffle=False, random_state=0, n_classes=4)

    est = LogisticRegression()
    for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
        for order in [1, 2, np.inf]:
            # Fit SelectFromModel on a multi-class problem
            transformer = SelectFromModel(estimator=LogisticRegression(),
                                          threshold=threshold,
                                          norm_order=order)
            transformer.fit(X, y)
            assert_true(hasattr(transformer.estimator_, 'coef_'))
            X_new = transformer.transform(X)
            assert_less(X_new.shape[1], X.shape[1])

            # Manually check that the norm is correctly performed
            est.fit(X, y)
            importances = np.linalg.norm(est.coef_, axis=0, ord=order)
            feature_mask = importances > func(importances)
            assert_array_almost_equal(X_new, X[:, feature_mask])


def test_partial_fit():
    est = PassiveAggressiveClassifier(random_state=0, shuffle=False,
                                      max_iter=5, tol=None)
    transformer = SelectFromModel(estimator=est)
    transformer.partial_fit(data, y, classes=np.unique(y))
    old_model = transformer.estimator_
    transformer.partial_fit(data, y, classes=np.unique(y))
    new_model = transformer.estimator_
    assert_true(old_model is new_model)

    X_transform = transformer.transform(data)
    transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
    assert_array_almost_equal(X_transform, transformer.transform(data))

    # check that if est doesn't have partial_fit, neither does SelectFromModel
    transformer = SelectFromModel(estimator=RandomForestClassifier())
    assert_false(hasattr(transformer, "partial_fit"))


def test_calling_fit_reinitializes():
    est = LinearSVC(random_state=0)
    transformer = SelectFromModel(estimator=est)
    transformer.fit(data, y)
    transformer.set_params(estimator__C=100)
    transformer.fit(data, y)
    assert_equal(transformer.estimator_.C, 100)


def test_prefit():
    # Test all possible combinations of the prefit parameter.

    # Passing a prefit parameter with the selected model
    # and fitting an unfit model with prefit=False should give same results.
    clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True,
                        random_state=0, tol=None)
    model = SelectFromModel(clf)
    model.fit(data, y)
    X_transform = model.transform(data)
    clf.fit(data, y)
    model = SelectFromModel(clf, prefit=True)
    assert_array_almost_equal(model.transform(data), X_transform)

    # Check that the model is rewritten if prefit=False and a fitted model is
    # passed
    model = SelectFromModel(clf, prefit=False)
    model.fit(data, y)
    assert_array_almost_equal(model.transform(data), X_transform)

    # Check that prefit=True and calling fit raises a ValueError
    model = SelectFromModel(clf, prefit=True)
    assert_raises(ValueError, model.fit, data, y)


def test_threshold_string():
    est = RandomForestClassifier(n_estimators=50, random_state=0)
    model = SelectFromModel(est, threshold="0.5*mean")
    model.fit(data, y)
    X_transform = model.transform(data)

    # Calculate the threshold from the estimator directly.
    est.fit(data, y)
    threshold = 0.5 * np.mean(est.feature_importances_)
    mask = est.feature_importances_ > threshold
    assert_array_almost_equal(X_transform, data[:, mask])


def test_threshold_without_refitting():
    # Test that the threshold can be set without refitting the model.
    clf = SGDClassifier(alpha=0.1, max_iter=10, shuffle=True,
                        random_state=0, tol=None)
    model = SelectFromModel(clf, threshold="0.1 * mean")
    model.fit(data, y)
    X_transform = model.transform(data)

    # Set a higher threshold to filter out more features.
    model.threshold = "1.0 * mean"
    assert_greater(X_transform.shape[1], model.transform(data).shape[1])
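# The "0.5*mean" threshold string exercised in test_threshold_string() reduces
# to plain numpy arithmetic on the fitted importances; a self-contained sketch
# with made-up numbers:
def _threshold_string_sketch():
    importances = np.array([0.05, 0.40, 0.15, 0.40])
    threshold = 0.5 * np.mean(importances)   # "0.5*mean" -> 0.125
    mask = importances > threshold           # [False, True, True, True]
    return mask                              # transform() keeps the True columns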
{ "content_hash": "f56f7b23b75905bddaadd2fe7ae48a25", "timestamp": "", "source": "github", "line_count": 200, "max_line_length": 78, "avg_line_length": 39.435, "alnum_prop": 0.6845441866362368, "repo_name": "clemkoa/scikit-learn", "id": "6efec43dce37bbe947dc4d5f377eee5e383336b4", "size": "7887", "binary": false, "copies": "21", "ref": "refs/heads/master", "path": "sklearn/feature_selection/tests/test_from_model.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "3366" }, { "name": "C", "bytes": "451996" }, { "name": "C++", "bytes": "140322" }, { "name": "Makefile", "bytes": "1512" }, { "name": "PowerShell", "bytes": "17042" }, { "name": "Python", "bytes": "7322224" }, { "name": "Shell", "bytes": "20749" } ], "symlink_target": "" }
from __future__ import print_function
import sys
sys.path.append('../')
sys.path.append('../../')
import argparse
import logging
import time
import os
import glob
import pickle
from progressbar import ProgressBar
import xml.etree.ElementTree as ET
import csv

import numpy as np
from chainer import optimizers, cuda, Variable
import cv2

from train_new import extract_data, load_data
from cnn1 import syuwa_cnn
from scripts.draw_image import draw_filters, draw_filters_sq, draw_image
from scripts.draw_loss import draw_loss_curve
from scripts.draw_histogram import draw_histogram
from scripts.confmat import save_confmat_fig, print_confmat

xp = np


def create_result_dir(args):
    if args.restart_from is None:
        result_dir = 'results/cnn_only'
        result_dir += '_' + time.strftime('%Y-%m-%d_%H-%M-%S_')
        result_dir += str(time.time()).replace('.', '')
        if not os.path.exists(result_dir):
            os.makedirs(result_dir)
        log_fn = '%s/log.txt' % result_dir
        logging.basicConfig(
            format='%(asctime)s [%(levelname)s] %(message)s',
            filename=log_fn, level=logging.DEBUG)
        logging.info(args)
    else:
        result_dir = '.'
        log_fn = 'log.txt'
        logging.basicConfig(
            format='%(asctime)s [%(levelname)s] %(message)s',
            filename=log_fn, level=logging.DEBUG)
        logging.info(args)

    return log_fn, result_dir


def get_optimizer(opt):
    # prepare optimizer
    if opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=args.lr, momentum=0.7)
    elif opt == 'Adam':
        optimizer = optimizers.Adam(alpha=args.alpha)
    elif opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=args.lr)
    else:
        raise Exception('No optimizer is selected')

    return optimizer


def train(train_vis, train_dep, train_labels, N, num_label, model, opt, args):
    pbar = ProgressBar(N)
    cnn_correct_cnt = 0
    sum_frame = 0
    total_loss = np.array(0, dtype=np.float32)
    opt.setup(model)
    conf_array = np.zeros((num_label, num_label), dtype=np.int32)
    for i in range(0, N):
        x_vis_batch = xp.asarray(train_vis[i], dtype=np.float32)
        x_dep_batch = xp.asarray(train_dep[i], dtype=np.float32)
        y_batch = xp.asarray(train_labels[i], dtype=np.int32).reshape(-1)
        sum_frame += y_batch.shape[0]

        opt.zero_grads()
        loss, pred = model.forward(
            x_vis_batch, x_dep_batch, y_batch)
        pred = xp.argmax(pred.data, axis=1)
        if args.gpu >= 0:
            cnn_correct_cnt += xp.asnumpy(xp.sum(pred == y_batch))
        else:
            cnn_correct_cnt += np.sum(pred == y_batch)

        if y_batch.size == 1:
            conf_array[y_batch, pred] += 1
        else:
            for j in xrange(y_batch.size):
                conf_array[y_batch[j], pred[j]] += 1

        loss.backward()
        opt.update()
        if args.opt in ['AdaGrad', 'MomentumSGD']:
            opt.weight_decay(decay=args.weight_decay)

        pbar.update(i + 1 if (i + 1) < N else N)

        if args.gpu >= 0:
            import cupy
            total_loss += cupy.asnumpy(loss.data)
        else:
            total_loss += loss.data

    """ Compute the accuracy """
    cnn_accuracy = cnn_correct_cnt / float(sum_frame)
    return total_loss / sum_frame, cnn_accuracy, conf_array


"""
Normalize the input.
Here we apply Local Contrast Normalization over the whole video.
Input: ndarray(T, width, height)
"""


def norm(x):
    if not x.dtype == np.float32:
        x = x.astype(np.float32)
    x = (x - np.mean(x)) / (np.std(x) + np.finfo(np.float32).eps)
    return x


def validate(test_vis, test_dep, test_labels, N_test, num_label, model, args):
    # validate
    pbar = ProgressBar(N_test)
    cnn_correct_cnt = 0
    sum_frame = 0
    pred_list = []
    total_loss = np.array(0.0, dtype=np.float32)
    conf_array = np.zeros((num_label, num_label), dtype=np.int32)
    for i in range(0, N_test):
        # shape(T, width, height)
        x_vis_batch = xp.asarray(test_vis[i], dtype=np.float32)
        x_dep_batch = xp.asarray(test_dep[i], dtype=np.float32)
        y_batch = xp.asarray(test_labels[i], dtype=np.int32).reshape(-1)
        sum_frame += y_batch.shape[0]

        loss, pred = model.forward(
            x_vis_batch, x_dep_batch, y_batch)
        pred = xp.argmax(pred.data, axis=1)
        if args.gpu >= 0:
            cnn_correct_cnt += xp.asnumpy(xp.sum(pred == y_batch))
        else:
            cnn_correct_cnt += np.sum(pred == y_batch)

        if y_batch.size == 1:
            conf_array[y_batch, pred] += 1
        else:
            for j in xrange(y_batch.size):
                conf_array[y_batch[j], pred[j]] += 1

        if args.gpu >= 0:
            import cupy
            total_loss += cupy.asnumpy(loss.data)
        else:
            total_loss += loss.data

        pred_list.append(pred.tolist())
        pbar.update(i + 1 if (i + 1) < N_test else N_test)

    """ Compute the accuracy """
    cnn_accuracy = cnn_correct_cnt / float(sum_frame)
    return total_loss / sum_frame, cnn_accuracy, pred_list, conf_array


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Originally meant for model selection; only the name is used here
    parser.add_argument('--model', type=str, default='cnn_only')
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--device_num', type=int, default=0)
    parser.add_argument('--epoch', type=int, default=100)
    parser.add_argument('--batchsize', type=int, default=10)
    parser.add_argument('--snapshot', type=int, default=5)
    parser.add_argument('--visualize', type=int, default=10)
    # Resume from a previously trained model
    parser.add_argument('--restart_from', type=str)
    parser.add_argument('--epoch_offset', type=int, default=0)
    # Directory that holds the training data
    parser.add_argument('--datadir', type=str, default='../../data/syuwa_minimum')
    parser.add_argument('--dataset_split', type=str, default='tse4')
    parser.add_argument('--size', type=int, default=28)
    parser.add_argument('--norm', type=int, default=0)
    # Training hyperparameters
    parser.add_argument('--opt', type=str, default='MomentumSGD',
                        choices=['MomentumSGD', 'Adam', 'AdaGrad'])
    parser.add_argument('--weight_decay', type=float, default=0.0005)
    parser.add_argument('--alpha', type=float, default=0.001)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--lr_decay_freq', type=int, default=10)
    parser.add_argument('--lr_decay_ratio', type=float, default=0.5)
    parser.add_argument('--seed', type=int, default=1701)
    parser.add_argument('--no_opencv', type=int, default=0)
    args = parser.parse_args()

    # Set the random seed
    np.random.seed(args.seed)

    global xp
    xp = cuda.cupy if args.gpu >= 0 else np

    # create result dir
    log_fn, result_dir = create_result_dir(args)
    print("Result directory: %s" % result_dir)

    logging.info('Preparing Dataset...')

    # Load the dataset
    train_vis, train_dep, train_labels, test_vis, test_dep, test_labels = load_data(
        args.datadir, "%s/label.csv" % args.datadir, args.dataset_split, args.no_opencv)

    if args.no_opencv == 1:
        f = open("%s/train_vis.npy" % args.datadir, "r")
        train_vis = pickle.load(f)
        f.close()
        f = open("%s/test_vis.npy" % args.datadir, "r")
        test_vis = pickle.load(f)
        f.close()

    def return_max(labels):
        result = 0
        for i in xrange(len(labels)):
            for j in xrange(len(labels[i])):
                if labels[i][j] > result:
                    result = labels[i][j]
        return result

    # The number of output units is determined from the data
    num_labels = max(return_max(train_labels), return_max(test_labels)) + 1

    print("Train data loaded: %d" % len(train_vis))
    print("Test data loaded: %d" % len(test_vis))
    print("num of labels: %d" % num_labels)
    logging.info("Train data loaded: %d" % len(train_vis))
    logging.info("Test data loaded: %d" % len(test_vis))
    logging.info("num of labels: %d" % num_labels)

    # prepare model
    model = syuwa_cnn(num_labels)
    if args.restart_from is not None:
        model = pickle.load(open(args.restart_from, 'rb'))
    if args.gpu >= 0:
        import cupy
        cuda.check_cuda_available()
        cuda.get_device(args.device_num).use()
        model.to_gpu()

    def xparray(data):
        if args.gpu >= 0:
            return cupy.asnumpy(data)
        else:
            return data

    opt = get_optimizer(args.opt)
    opt.setup(model)

    train_vis = [np.asarray(x).astype(np.float32) for x in train_vis]
    train_dep = [np.asarray(x).astype(np.float32) for x in train_dep]
    test_vis = [np.asarray(x).astype(np.float32) for x in test_vis]
    test_dep = [np.asarray(x).astype(np.float32) for x in test_dep]

    if args.norm == 1:
        train_vis = map(norm, train_vis)
        train_dep = map(norm, train_dep)
        test_vis = map(norm, test_vis)
        test_dep = map(norm, test_dep)

    logging.info('start training...')

    N = len(train_vis)
    N_test = len(test_vis)

    # Training loop
    n_epoch = args.epoch
    # Number of videos to train on per epoch
    num_per_epoch = args.batchsize
    for epoch in range(1, n_epoch + 1):
        # train
        if args.opt == 'MomentumSGD':
            print('learning rate:', opt.lr)
            if epoch % args.lr_decay_freq == 0:
                opt.lr *= args.lr_decay_ratio
                print('learning rate:', opt.lr)
                logging.info('learning rate: %f' % opt.lr)

        perm = np.random.permutation(N)
        train_vis_epoch = [x for x in np.array(train_vis)[perm[0:min(num_per_epoch, N)]]]
        train_dep_epoch = [x for x in np.array(train_dep)[perm[0:min(num_per_epoch, N)]]]
        train_labels_epoch = [np.asarray([x]).astype(np.int32)
                              for x in np.array(train_labels)[perm[0:min(num_per_epoch, N)]]]

        mean_loss, train_accuracy, conf_array_train = train(
            train_vis_epoch, train_dep_epoch, train_labels_epoch,
            min(num_per_epoch, N), num_labels, model, opt, args)
        msg = 'epoch:{:02d}\ttrain loss={}\ttrain accuracy={}'.format(
            epoch + args.epoch_offset, mean_loss, train_accuracy)
        logging.info(msg)
        print('\n%s' % msg)

        perm = np.random.permutation(len(test_vis))
        # Maximum number of videos to test at once; when taking a snapshot,
        # measure test accuracy on the full test set
        num_test_per_epoch = N_test if epoch % args.snapshot == 0 else min(50, N_test)
        test_vis_epoch = [x for x in np.array(test_vis)[perm[0:num_test_per_epoch]]]
        test_dep_epoch = [x for x in np.array(test_dep)[perm[0:num_test_per_epoch]]]
        test_labels_epoch = [np.asarray([x]).astype(np.int32)
                             for x in np.array(test_labels)[perm[0:num_test_per_epoch]]]

        # validate
        mean_loss, test_accuracy, pred, conf_array_test = validate(
            test_vis_epoch, test_dep_epoch, test_labels_epoch,
            num_test_per_epoch, num_labels, model, args)
        msg = 'epoch:{:02d}\ttest loss={}\ttest accuracy={}'.format(
            epoch + args.epoch_offset, mean_loss, test_accuracy)
        logging.info(msg)
        print('\n%s' % msg)
        print('Prediction:\n{0}'.format(xparray(pred)))

        # Dump the model every few epochs
        if epoch == 1 or epoch % args.snapshot == 0:
            model_fn = '%s/%s_epoch_%d.chainermodel' % (
                result_dir, args.model, epoch + args.epoch_offset)
            pickle.dump(model, open(model_fn, 'wb'), -1)

        # Visualize the CNN layers
        # Filters of layers 1 and 2
        if epoch % args.visualize == 0:
            draw_filters(xparray(model.conv11.W),
                         '%s/log_conv11_epoch_%d.jpg' % (result_dir, epoch))
            draw_filters(xparray(model.conv21.W),
                         '%s/log_conv21_epoch_%d.jpg' % (result_dir, epoch))
            draw_filters_sq(xparray(model.conv12.W),
                            '%s/log_conv12_epoch_%d.jpg' % (result_dir, epoch), 16)
            draw_filters_sq(xparray(model.conv22.W),
                            '%s/log_conv22_epoch_%d.jpg' % (result_dir, epoch), 16)

            # Feed frames from a randomly chosen test file
            video_num = int(np.random.random() * len(test_vis))
            s_image_vis = xp.asarray(test_vis[video_num]).astype(np.float32)
            s_image_dep = xp.asarray(test_dep[video_num]).astype(np.float32)

            # Dump the activations after the first pooling layer
            frame_num = int(s_image_vis.shape[0] / 2)
            draw_image(xparray(model.extract_pool11(s_image_vis[frame_num, :, :]).data),
                       '%s/sample_vis_pool1_%d.jpg' % (result_dir, epoch))
            draw_image(xparray(model.extract_pool21(s_image_dep[frame_num, :, :]).data),
                       '%s/sample_dep_pool1_%d.jpg' % (result_dir, epoch))

        # Plot the learning curve
        draw_loss_curve(log_fn, '%s/log.jpg' % result_dir)

        print("Confusion Matrix for train data:")
        print_confmat(conf_array_train)
        print("Confusion Matrix for test data:")
        print_confmat(conf_array_test)
        np.savetxt('%s/confmat_train_epoch_%d.csv' % (result_dir, epoch),
                   conf_array_train, delimiter=',', fmt='%d')
        np.savetxt('%s/confmat_test_epoch_%d.csv' % (result_dir, epoch),
                   conf_array_test, delimiter=',', fmt='%d')

        # Dump the predictions on the test set
        f = open('%s/pred_test_epoch_%d.csv' % (result_dir, epoch), 'w')
        writer = csv.writer(f)
        for i in range(len(pred)):
            writer.writerow(test_labels_epoch[i].tolist())
            writer.writerow(pred[i])
        f.close()
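# A small sanity check of the norm() helper above: after local contrast
# normalization a clip has (approximately) zero mean and unit variance.
def _check_norm():
    clip = np.random.rand(16, 28, 28).astype(np.float32)  # (T, width, height)
    normed = norm(clip)
    assert abs(float(normed.mean())) < 1e-3
    assert abs(float(normed.std()) - 1.0) < 1e-2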
{ "content_hash": "4ef9ce1dbb7e443a34931ba2d728194c", "timestamp": "", "source": "github", "line_count": 390, "max_line_length": 164, "avg_line_length": 35.45897435897436, "alnum_prop": 0.5714802227203702, "repo_name": "TakuTsuzuki/Hackathon2015", "id": "55d00f1e61a3841fe151dcddcef5987cc021bff6", "size": "14362", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "Doi/cnn_only/train.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "36349" }, { "name": "HTML", "bytes": "59" }, { "name": "JavaScript", "bytes": "30754" }, { "name": "Jupyter Notebook", "bytes": "39079" }, { "name": "PHP", "bytes": "17783" }, { "name": "Python", "bytes": "536916" }, { "name": "Shell", "bytes": "220" } ], "symlink_target": "" }
from panda3d.direct import WaitInterval from panda3d.core import Vec3 from direct.interval.IntervalGlobal import Sequence, Parallel, Wait, Func from direct.interval.IntervalGlobal import LerpScaleInterval from direct.interval.IntervalGlobal import WaitInterval, ActorInterval, FunctionInterval from direct.task.Task import Task from direct.directnotify import DirectNotifyGlobal from direct.fsm import StateData from toontown.minigame.OrthoWalk import OrthoWalk from toontown.minigame.MinigameRulesPanel import MinigameRulesPanel from toontown.parties import PartyGlobals from direct.fsm import ClassicFSM, State class PartyCatchActivityToonSD(StateData.StateData): notify = DirectNotifyGlobal.directNotify.newCategory('PartyCatchActivityToonSD') FallBackAnim = 'slip-backward' FallFwdAnim = 'slip-forward' CatchNeutralAnim = 'catch-neutral' CatchRunAnim = 'catch-run' EatNeutralAnim = 'catch-eatneutral' EatNRunAnim = 'catch-eatnrun' animList = [FallBackAnim, FallFwdAnim, CatchNeutralAnim, CatchRunAnim, EatNeutralAnim, EatNRunAnim] def __init__(self, avId, activity): PartyCatchActivityToonSD.notify.debug('init : avId = %s, activity = %s ' % (avId, activity)) self.avId = avId self.activity = activity self.isLocal = avId == base.localAvatar.doId self.toon = self.activity.getAvatar(self.avId) self.unexpectedExit = False self.fsm = ClassicFSM.ClassicFSM('CatchActivityAnimFSM-%s' % self.avId, [State.State('init', self.enterInit, self.exitInit, ['notPlaying', 'normal', 'rules']), State.State('notPlaying', self.enterNotPlaying, self.exitNotPlaying, ['normal', 'rules', 'cleanup']), State.State('rules', self.enterRules, self.exitRules, ['normal', 'cleanup']), State.State('normal', self.enterNormal, self.exitNormal, ['eatFruit', 'fallBack', 'fallForward', 'notPlaying']), State.State('eatFruit', self.enterEatFruit, self.exitEatFruit, ['normal', 'fallBack', 'fallForward', 'eatFruit', 'notPlaying']), State.State('fallBack', self.enterFallBack, self.exitFallBack, ['normal', 'notPlaying']), State.State('fallForward', self.enterFallForward, self.exitFallForward, ['normal', 'notPlaying']), State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'init', 'cleanup') self.enteredAlready = False def load(self): self.setAnimState('off', 1.0) for anim in self.animList: self.toon.pose(anim, 0) def unload(self): del self.fsm def enter(self): if not self.enteredAlready: self.enteredAlready = True self.fsm.enterInitialState() self._exiting = False def exit(self, unexpectedExit = False): if self._exiting: return self._exiting = True self.unexpectedExit = unexpectedExit if not self.unexpectedExit: self.fsm.requestFinalState() del self._exiting def enterInit(self): self.notify.debug('enterInit') self.toon.startBlink() self.toon.stopLookAround() if self.isLocal: self.activity.initOrthoWalk() self.dropShadow = self.toon.dropShadow self.origDropShadowColor = self.dropShadow.getColor() c = self.origDropShadowColor alpha = 0.35 self.dropShadow.setColor(c[0], c[1], c[2], alpha) def exitInit(self): pass def enterNotPlaying(self): self.toon.stopBlink() self.toon.startLookAround() self.setAnimState('neutral', 1.0) if self.isLocal: self.activity.orthoWalk.stop() self.dropShadow.setColor(self.origDropShadowColor) def exitNotPlaying(self): self.dropShadow = self.toon.dropShadow self.origDropShadowColor = self.dropShadow.getColor() c = self.origDropShadowColor alpha = 0.35 self.dropShadow.setColor(c[0], c[1], c[2], alpha) def enterRules(self): if self.isLocal: self.notify.debug('enterNormal') self.setAnimState('Catching', 1.0) 
self.activity.orthoWalk.stop() self.accept(self.activity.rulesDoneEvent, self.handleRulesDone) self.rulesPanel = MinigameRulesPanel('PartyRulesPanel', self.activity.getTitle(), self.activity.getInstructions(), self.activity.rulesDoneEvent, PartyGlobals.DefaultRulesTimeout) base.setCellsAvailable(base.bottomCells + [base.leftCells[0], base.rightCells[1]], False) self.rulesPanel.load() self.rulesPanel.enter() else: self.fsm.request('normal') def handleRulesDone(self): self.fsm.request('normal') def exitRules(self): self.setAnimState('off', 1.0) self.ignore(self.activity.rulesDoneEvent) if hasattr(self, 'rulesPanel'): self.rulesPanel.exit() self.rulesPanel.unload() del self.rulesPanel base.setCellsAvailable(base.bottomCells + [base.leftCells[0], base.rightCells[1]], True) def enterNormal(self): self.notify.debug('enterNormal') self.setAnimState('Catching', 1.0) if self.isLocal: self.activity.orthoWalk.start() self.toon.lerpLookAt(Vec3.forward() + Vec3.up(), time=0.2, blink=0) def exitNormal(self): self.setAnimState('off', 1.0) if self.isLocal: self.activity.orthoWalk.stop() self.toon.lerpLookAt(Vec3.forward(), time=0.2, blink=0) def eatFruit(self, fruitModel, handNode): if self.fsm.getCurrentState().getName() == 'eatFruit': self.fsm.request('normal') self.fsm.request('eatFruit', [fruitModel, handNode]) def enterEatFruit(self, fruitModel, handNode): self.notify.debug('enterEatFruit') self.setAnimState('CatchEating', 1.0) if self.isLocal: self.activity.orthoWalk.start() self.fruitModel = fruitModel renderScale = fruitModel.getScale(render) fruitModel.reparentTo(handNode) fruitModel.setScale(render, renderScale) duration = self.toon.getDuration('catch-eatneutral') self.eatIval = Sequence(Parallel(WaitInterval(duration), Sequence(LerpScaleInterval(fruitModel, duration / 2.0, fruitModel.getScale() * 0.5, blendType='easeInOut'), Func(fruitModel.hide))), Func(self.fsm.request, 'normal'), name=self.toon.uniqueName('eatingIval')) self.eatIval.start() def exitEatFruit(self): self.eatIval.pause() del self.eatIval self.fruitModel.reparentTo(hidden) self.fruitModel.removeNode() del self.fruitModel self.setAnimState('off', 1.0) if self.isLocal: self.activity.orthoWalk.stop() def enterFallBack(self): self.notify.debug('enterFallBack') if self.isLocal: base.playSfx(self.activity.sndOof) duration = 1.0 animName = self.FallBackAnim startFrame = 12 totalFrames = self.toon.getNumFrames(animName) frames = totalFrames - 1 - startFrame frameRate = self.toon.getFrameRate(animName) newRate = frames / duration playRate = newRate / frameRate def resume(self = self): self.fsm.request('normal') self.fallBackIval = Sequence(ActorInterval(self.toon, animName, startTime=startFrame / newRate, endTime=totalFrames / newRate, playRate=playRate), FunctionInterval(resume)) self.fallBackIval.start() def exitFallBack(self): self.fallBackIval.pause() del self.fallBackIval def enterFallForward(self): self.notify.debug('enterFallForward') if self.isLocal: base.playSfx(self.activity.sndOof) duration = 2.0 animName = self.FallFwdAnim startFrame = 12 totalFrames = self.toon.getNumFrames(animName) frames = totalFrames - 1 - startFrame pauseFrame = 19 frameRate = self.toon.getFrameRate(animName) newRate = frames / (duration * 0.5) playRate = newRate / frameRate def resume(self = self): self.fsm.request('normal') self.fallFwdIval = Sequence(ActorInterval(self.toon, animName, startTime=startFrame / newRate, endTime=pauseFrame / newRate, playRate=playRate), WaitInterval(duration / 2.0), ActorInterval(self.toon, animName, startTime=pauseFrame 
/ newRate, endTime=totalFrames / newRate, playRate=playRate), FunctionInterval(resume)) self.fallFwdIval.start() def exitFallForward(self): self.fallFwdIval.pause() del self.fallFwdIval def enterCleanup(self): self.notify.debug('enterCleanup') self.toon.stopBlink() self.toon.startLookAround() if self.isLocal: self.activity.orthoWalk.stop() self.activity.destroyOrthoWalk() self.dropShadow.setColor(self.origDropShadowColor) def exitCleanup(self): pass def setAnimState(self, newState, playRate): if not self.unexpectedExit: self.toon.setAnimState(newState, playRate) else: self.notify.debug('setAnimState(): Toon unexpectedExit flag is set.')
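# The state graph wired up in __init__ above, reduced to a minimal standalone
# sketch (assumes Panda3D's `direct` package is importable, i.e. the same
# ClassicFSM/State imports used at the top of this file; enter/exit callbacks
# take no arguments, matching the methods above):
def _fsm_sketch():
    fsm = ClassicFSM.ClassicFSM(
        'demo',
        [State.State('init', lambda: None, lambda: None, ['normal']),
         State.State('normal', lambda: None, lambda: None, ['cleanup']),
         State.State('cleanup', lambda: None, lambda: None, [])],
        'init', 'cleanup')
    fsm.enterInitialState()  # -> 'init'
    fsm.request('normal')    # allowed: listed in 'init's transitions
    fsm.request('cleanup')   # final state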
{ "content_hash": "637a336de5a5a413a03466d43ab7f892", "timestamp": "", "source": "github", "line_count": 235, "max_line_length": 326, "avg_line_length": 40.702127659574465, "alnum_prop": 0.6382645060115003, "repo_name": "DedMemez/ODS-August-2017", "id": "425bea5f605b0741992cb82e3c67574a3a2a3ecf", "size": "9667", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "parties/PartyCatchActivityToonSD.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "10152014" }, { "name": "Shell", "bytes": "707" } ], "symlink_target": "" }
""" Example usage of 1-D stack plot widget """ # imports to future-proof the code from __future__ import (absolute_import, division, print_function, unicode_literals) from six.moves import zip import sys import numpy as np from collections import OrderedDict from xray_vision import QtCore, QtGui from xray_vision.qt_widgets import Stack1DMainWindow from xray_vision.messenger.mpl.stack_1d import Stack1DMessenger import logging logger = logging.getLogger(__name__) def data_gen(num_sets=1, phase_shift=0.1, vert_shift=0.1, horz_shift=0.1): """ Generate some data Parameters ---------- num_sets: int number of 1-D data sets to generate Returns ------- x : np.ndarray x-coordinates y : list of np.ndarray y-coordinates """ x_axis = np.arange(0, 25, .01) x = [] y = [] for idx in range(num_sets): x.append(x_axis + horz_shift) y.append(np.sin(x_axis + idx * phase_shift) + idx * vert_shift) return x, y class demo_1d(QtGui.QMainWindow): def __init__(self, parent=None): QtGui.QMainWindow.__init__(self, parent) # Generate data num_sets = 100 x_data, y_data = data_gen(num_sets=num_sets, phase_shift=0, horz_shift=0, vert_shift=0) data_list = [] key_list = [] for (lbl, x, y) in zip(range(num_sets), x_data, y_data): data_list.append((x, y)) key_list.append(lbl) # init the 1d stack main window self.setWindowTitle('OneDimStack Example') self._main_window = Stack1DMainWindow(data_list=data_list, key_list=key_list) self._main_window.setFocus() self.setCentralWidget(self._main_window) # add the demo buttons # declare button to generate data for testing/example purposes btn_datagen = QtGui.QPushButton("add data set", parent=self._main_window._ctrl_widget) # declare button to append data to existing data set btn_append = QtGui.QPushButton("append data", parent=self._main_window._ctrl_widget) btn_datagen.clicked.connect(self.datagen) btn_append.clicked.connect(self.append_data) ctl_box = self._main_window._ctrl_widget demo_box = ctl_box.create_container('demo box') demo_box._add_widget('append', btn_append) demo_box._add_widget('gen', btn_datagen) # connect signals to test harness self.sig_append_demo_data.connect( self._main_window._messenger.sl_append_data) self.sig_add_demo_data.connect( self._main_window._messenger.sl_add_data) # Qt Signals for Demo sig_append_demo_data = QtCore.Signal(list, list, list) sig_add_demo_data = QtCore.Signal(list, list, list) @QtCore.Slot() def append_data(self): num_sets = 7 # get some fake data x, y = data_gen(num_sets, phase_shift=0, horz_shift=25, vert_shift=0) # emit the signal self.sig_append_demo_data.emit(range(num_sets), x, y) @QtCore.Slot() def datagen(self): num_data = 10 names = np.random.random_integers(10000, size=num_data).tolist() self.sig_add_demo_data.emit(names, *data_gen(num_data, phase_shift=0, horz_shift=0, vert_shift=0)) if __name__ == "__main__": app = QtGui.QApplication(sys.argv) tt = demo_1d() tt.show() sys.exit(app.exec_())
{ "content_hash": "fa45dbe1b8014fc54dffc75f781b516d", "timestamp": "", "source": "github", "line_count": 117, "max_line_length": 78, "avg_line_length": 31.153846153846153, "alnum_prop": 0.5835390946502058, "repo_name": "licode/xray-vision", "id": "57e1698e993be9ad368485c43b38716b8645fb0a", "size": "6127", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "qt_apps/DEMO_plot1D.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "211396" }, { "name": "Shell", "bytes": "39" } ], "symlink_target": "" }
schools_list = "https://www.lectio.dk/lectio/login_list.aspx?showall=1" assigment_list = "https://www.lectio.dk/lectio/{{SCHOOL_ID}}/OpgaverElev.aspx?elevid={{STUDENT_ID}}" login_url = "https://www.lectio.dk/lectio/{{SCHOOL_ID}}/login.aspx?lecafdeling={{BRANCH_ID}}" picture_url = "https://www.lectio.dk/lectio/{{SCHOOL_ID}}/GetImage.aspx?pictureid={{PICTURE_ID}}" front_page_url = "https://www.lectio.dk/lectio/{{SCHOOL_ID}}/forside.aspx"
{ "content_hash": "635f5e043e552dc6f0f23788410338a4", "timestamp": "", "source": "github", "line_count": 5, "max_line_length": 100, "avg_line_length": 87.8, "alnum_prop": 0.7198177676537585, "repo_name": "boh1996/lectio-assignments-google-calendar", "id": "06e12ac7cd29987fcab7d85574ca95d43fd02896", "size": "439", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "lectioapi/urls.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "21118" } ], "symlink_target": "" }
import logging import dbus from nirikshak.common import plugins from nirikshak.workers import base LOG = logging.getLogger(__name__) @plugins.register('systemd_enabled') class SystemdEnabledWorker(base.Worker): @base.match_expected_output @base.validate(required=('service',), optional=('status',)) def work(self, **kwargs): k = kwargs['input']['args'] sysbus = dbus.SystemBus() systemd1 = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1') manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager') service = k['service'] try: status = str(manager.GetUnitFileState(service)) except Exception: status = None LOG.error("Error in retrieving enabled status for %s service", service, exc_info=True) LOG.info("%s service is %s", k['service'], status) return status
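# The same D-Bus query as a standalone sketch (requires a systemd host and the
# python `dbus` bindings imported above; GetUnitFileState returns strings such
# as "enabled", "disabled" or "static" -- 'sshd.service' is just an example):
def _query_unit_state(service='sshd.service'):
    sysbus = dbus.SystemBus()
    systemd1 = sysbus.get_object('org.freedesktop.systemd1',
                                 '/org/freedesktop/systemd1')
    manager = dbus.Interface(systemd1, 'org.freedesktop.systemd1.Manager')
    return str(manager.GetUnitFileState(service))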
{ "content_hash": "e5d8c422e66c24e3ba8f0ae6675bd81c", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 78, "avg_line_length": 32.7, "alnum_prop": 0.6167176350662589, "repo_name": "thenakliman/nirikshak", "id": "be8e0fddaf716812deb73f04664e928f187caf7e", "size": "1570", "binary": false, "copies": "1", "ref": "refs/heads/basic_framework", "path": "nirikshak/workers/systemd/enabled.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "150354" }, { "name": "Shell", "bytes": "3573" } ], "symlink_target": "" }
import OpenPNM import scipy as sp print('-----> Using OpenPNM version: '+OpenPNM.__version__) pn = OpenPNM.Network.Cubic(shape=[10,10,40],spacing=0.0001) pn.add_boundaries() Ps = pn.pores('boundary',mode='not') Ts = pn.find_neighbor_throats(pores=Ps,mode='intersection',flatten=True) geom = OpenPNM.Geometry.Toray090(network=pn,pores=Ps,throats=Ts) Ps = pn.pores('boundary') Ts = pn.find_neighbor_throats(pores=Ps,mode='not_intersection') boun = OpenPNM.Geometry.Boundary(network=pn,pores=Ps,throats=Ts) air = OpenPNM.Phases.Air(network=pn) #--------------------------------------------------------------------------------------------- Ps = pn.pores() Ts = pn.throats() phys_air = OpenPNM.Physics.Standard(network=pn,phase=air,pores=Ps,throats=Ts) #Add some additional models to phys_air phys_air['pore.item1'] = 0.5e-13 phys_air['pore.item2'] = 1.5 phys_air['pore.item3'] = 2.5e-14 phys_air['pore.item4'] = 0.9e-13 phys_air['pore.item5'] = -4e-14 phys_air.add_model(model=OpenPNM.Physics.models.generic_source_term.power_law, propname='pore.blah1', A1='pore.item1', A2='pore.item2', A3='pore.item3') phys_air.add_model(model=OpenPNM.Physics.models.generic_source_term.linear, propname='pore.blah2', A1='pore.item4', A2='pore.item5') #------------------------------------------------------------------------------ '''Perform Fickian Diffusion''' #------------------------------------------------------------------------------ alg = OpenPNM.Algorithms.FickianDiffusion(network=pn,phase=air) # Assign Dirichlet boundary conditions to top and bottom surface pores BC1_pores = pn.pores('right_boundary') alg.set_boundary_conditions(bctype='Dirichlet', bcvalue=0.6, pores=BC1_pores) BC2_pores = pn.pores('left_boundary') alg.set_boundary_conditions(bctype='Neumann', bcvalue=0.2e-13, pores=BC2_pores) alg.set_source_term(source_name='pore.blah1',pores=sp.r_[500:700],tol=1e-9) alg.set_source_term(source_name='pore.blah2',pores=sp.r_[800:900],maxiter=0) alg.setup() alg.solve(iterative_solver='cg',tol=1e-20) alg.return_results() print('--------------------------------------------------------------') try: print('steps: ',alg._steps) print('tol_reached: ',alg._tol_reached) except: pass print('--------------------------------------------------------------') print('reaction from the physics for pores [500:700]:',\ sp.sum(0.5e-13*air['pore.mole_fraction'][sp.r_[500:700]]**1.5+2.5e-14)) print('rate from the physics for pores [500:700]:',\ alg.rate(sp.r_[500:700])[0]) print('--------------------------------------------------------------') print('reaction from the physics for pores [800:900]:',\ sp.sum(0.9e-13*air['pore.mole_fraction'][sp.r_[800:900]]-4e-14)) print('rate from the physics for pores [800:900]:',\ alg.rate(sp.r_[800:900])[0])
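# The power_law source term used above evaluates r = A1 * x**A2 + A3 per pore,
# which is exactly the expression the 'reaction from the physics' checks sum;
# a scalar example with the coefficients assigned earlier:
A1, A2, A3 = 0.5e-13, 1.5, 2.5e-14
x = 0.6  # an example mole fraction
print('power-law rate for one pore:', A1 * x**A2 + A3)  # ~4.82e-14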
{ "content_hash": "317b2c7bf0da4efc1b205b092f30556e", "timestamp": "", "source": "github", "line_count": 68, "max_line_length": 94, "avg_line_length": 42.98529411764706, "alnum_prop": 0.5706465959630517, "repo_name": "stadelmanma/OpenPNM", "id": "715775fb46893f7247c4818c275d4ad41cdf73ff", "size": "2923", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "test/integration/test_source3.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "980018" } ], "symlink_target": "" }
"""The test for the bayesian sensor platform.""" import unittest from homeassistant.setup import setup_component from homeassistant.components.binary_sensor import bayesian from tests.common import get_test_home_assistant class TestBayesianBinarySensor(unittest.TestCase): """Test the threshold sensor.""" def setup_method(self, method): """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() def teardown_method(self, method): """Stop everything that was started.""" self.hass.stop() def test_sensor_numeric_state(self): """Test sensor on numeric state platform observations.""" config = { 'binary_sensor': { 'platform': 'bayesian', 'name': 'Test_Binary', 'observations': [{ 'platform': 'numeric_state', 'entity_id': 'sensor.test_monitored', 'below': 10, 'above': 5, 'prob_given_true': 0.6 }, { 'platform': 'numeric_state', 'entity_id': 'sensor.test_monitored1', 'below': 7, 'above': 5, 'prob_given_true': 0.9, 'prob_given_false': 0.1 }], 'prior': 0.2, } } assert setup_component(self.hass, 'binary_sensor', config) self.hass.states.set('sensor.test_monitored', 4) self.hass.block_till_done() state = self.hass.states.get('binary_sensor.test_binary') assert [] == state.attributes.get('observations') assert 0.2 == state.attributes.get('probability') assert state.state == 'off' self.hass.states.set('sensor.test_monitored', 6) self.hass.block_till_done() self.hass.states.set('sensor.test_monitored', 4) self.hass.block_till_done() self.hass.states.set('sensor.test_monitored', 6) self.hass.states.set('sensor.test_monitored1', 6) self.hass.block_till_done() state = self.hass.states.get('binary_sensor.test_binary') assert [{ 'prob_false': 0.4, 'prob_true': 0.6 }, { 'prob_false': 0.1, 'prob_true': 0.9 }] == state.attributes.get('observations') assert round(abs(0.77-state.attributes.get('probability')), 7) == 0 assert state.state == 'on' self.hass.states.set('sensor.test_monitored', 6) self.hass.states.set('sensor.test_monitored1', 0) self.hass.block_till_done() self.hass.states.set('sensor.test_monitored', 4) self.hass.block_till_done() state = self.hass.states.get('binary_sensor.test_binary') assert 0.2 == state.attributes.get('probability') assert state.state == 'off' self.hass.states.set('sensor.test_monitored', 15) self.hass.block_till_done() state = self.hass.states.get('binary_sensor.test_binary') assert state.state == 'off' def test_sensor_state(self): """Test sensor on state platform observations.""" config = { 'binary_sensor': { 'name': 'Test_Binary', 'platform': 'bayesian', 'observations': [{ 'platform': 'state', 'entity_id': 'sensor.test_monitored', 'to_state': 'off', 'prob_given_true': 0.8, 'prob_given_false': 0.4 }], 'prior': 0.2, 'probability_threshold': 0.32, } } assert setup_component(self.hass, 'binary_sensor', config) self.hass.states.set('sensor.test_monitored', 'on') state = self.hass.states.get('binary_sensor.test_binary') assert [] == state.attributes.get('observations') assert 0.2 == state.attributes.get('probability') assert state.state == 'off' self.hass.states.set('sensor.test_monitored', 'off') self.hass.block_till_done() self.hass.states.set('sensor.test_monitored', 'on') self.hass.block_till_done() self.hass.states.set('sensor.test_monitored', 'off') self.hass.block_till_done() state = self.hass.states.get('binary_sensor.test_binary') assert [{ 'prob_true': 0.8, 'prob_false': 0.4 }] == state.attributes.get('observations') assert round(abs(0.33-state.attributes.get('probability')), 7) == 0 assert state.state == 'on' 
self.hass.states.set('sensor.test_monitored', 'off') self.hass.block_till_done() self.hass.states.set('sensor.test_monitored', 'on') self.hass.block_till_done() state = self.hass.states.get('binary_sensor.test_binary') assert round(abs(0.2-state.attributes.get('probability')), 7) == 0 assert state.state == 'off' def test_threshold(self): """Test sensor on probabilty threshold limits.""" config = { 'binary_sensor': { 'name': 'Test_Binary', 'platform': 'bayesian', 'observations': [{ 'platform': 'state', 'entity_id': 'sensor.test_monitored', 'to_state': 'on', 'prob_given_true': 1.0, }], 'prior': 0.5, 'probability_threshold': 1.0, } } assert setup_component(self.hass, 'binary_sensor', config) self.hass.states.set('sensor.test_monitored', 'on') self.hass.block_till_done() state = self.hass.states.get('binary_sensor.test_binary') assert round(abs(1.0-state.attributes.get('probability')), 7) == 0 assert state.state == 'on' def test_multiple_observations(self): """Test sensor with multiple observations of same entity.""" config = { 'binary_sensor': { 'name': 'Test_Binary', 'platform': 'bayesian', 'observations': [{ 'platform': 'state', 'entity_id': 'sensor.test_monitored', 'to_state': 'blue', 'prob_given_true': 0.8, 'prob_given_false': 0.4 }, { 'platform': 'state', 'entity_id': 'sensor.test_monitored', 'to_state': 'red', 'prob_given_true': 0.2, 'prob_given_false': 0.4 }], 'prior': 0.2, 'probability_threshold': 0.32, } } assert setup_component(self.hass, 'binary_sensor', config) self.hass.states.set('sensor.test_monitored', 'off') state = self.hass.states.get('binary_sensor.test_binary') assert [] == state.attributes.get('observations') assert 0.2 == state.attributes.get('probability') assert state.state == 'off' self.hass.states.set('sensor.test_monitored', 'blue') self.hass.block_till_done() self.hass.states.set('sensor.test_monitored', 'off') self.hass.block_till_done() self.hass.states.set('sensor.test_monitored', 'blue') self.hass.block_till_done() state = self.hass.states.get('binary_sensor.test_binary') assert [{ 'prob_true': 0.8, 'prob_false': 0.4 }] == state.attributes.get('observations') assert round(abs(0.33-state.attributes.get('probability')), 7) == 0 assert state.state == 'on' self.hass.states.set('sensor.test_monitored', 'blue') self.hass.block_till_done() self.hass.states.set('sensor.test_monitored', 'red') self.hass.block_till_done() state = self.hass.states.get('binary_sensor.test_binary') assert round(abs(0.11-state.attributes.get('probability')), 7) == 0 assert state.state == 'off' def test_probability_updates(self): """Test probability update function.""" prob_true = [0.3, 0.6, 0.8] prob_false = [0.7, 0.4, 0.2] prior = 0.5 for pt, pf in zip(prob_true, prob_false): prior = bayesian.update_probability(prior, pt, pf) assert round(abs(0.720000-prior), 7) == 0 prob_true = [0.8, 0.3, 0.9] prob_false = [0.6, 0.4, 0.2] prior = 0.7 for pt, pf in zip(prob_true, prob_false): prior = bayesian.update_probability(prior, pt, pf) assert round(abs(0.9130434782608695-prior), 7) == 0
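# update_probability() implements a single Bayes step:
#     P(A|B) = P(B|A) * P(A) / (P(B|A) * P(A) + P(B|~A) * (1 - P(A)))
# Worked once with the first observation of test_probability_updates():
def _bayes_step_example():
    prior, p_true, p_false = 0.5, 0.3, 0.7
    posterior = (p_true * prior) / (p_true * prior + p_false * (1 - prior))
    assert round(posterior, 7) == 0.3  # chaining all three steps gives the 0.72 asserted above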
{ "content_hash": "a49b2fb65f68901966929343879dd053", "timestamp": "", "source": "github", "line_count": 271, "max_line_length": 75, "avg_line_length": 33.210332103321036, "alnum_prop": 0.5158888888888888, "repo_name": "tinloaf/home-assistant", "id": "b52459ec47dbe91e565ef18457133cc3d77c8f98", "size": "9000", "binary": false, "copies": "4", "ref": "refs/heads/dev", "path": "tests/components/binary_sensor/test_bayesian.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "1175" }, { "name": "Dockerfile", "bytes": "1099" }, { "name": "Python", "bytes": "13135313" }, { "name": "Ruby", "bytes": "745" }, { "name": "Shell", "bytes": "17137" } ], "symlink_target": "" }
"""Tracks the latency of a host by sending ICMP echo requests (ping).""" from datetime import timedelta import logging import re import subprocess import sys import voluptuous as vol from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorDevice from homeassistant.const import CONF_HOST, CONF_NAME import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) ATTR_ROUND_TRIP_TIME_AVG = "round_trip_time_avg" ATTR_ROUND_TRIP_TIME_MAX = "round_trip_time_max" ATTR_ROUND_TRIP_TIME_MDEV = "round_trip_time_mdev" ATTR_ROUND_TRIP_TIME_MIN = "round_trip_time_min" CONF_PING_COUNT = "count" DEFAULT_NAME = "Ping Binary sensor" DEFAULT_PING_COUNT = 5 DEFAULT_DEVICE_CLASS = "connectivity" SCAN_INTERVAL = timedelta(minutes=5) PING_MATCHER = re.compile( r"(?P<min>\d+.\d+)\/(?P<avg>\d+.\d+)\/(?P<max>\d+.\d+)\/(?P<mdev>\d+.\d+)" ) PING_MATCHER_BUSYBOX = re.compile( r"(?P<min>\d+.\d+)\/(?P<avg>\d+.\d+)\/(?P<max>\d+.\d+)" ) WIN32_PING_MATCHER = re.compile(r"(?P<min>\d+)ms.+(?P<max>\d+)ms.+(?P<avg>\d+)ms") PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PING_COUNT, default=DEFAULT_PING_COUNT): cv.positive_int, } ) def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the Ping Binary sensor.""" name = config.get(CONF_NAME) host = config.get(CONF_HOST) count = config.get(CONF_PING_COUNT) add_entities([PingBinarySensor(name, PingData(host, count))], True) class PingBinarySensor(BinarySensorDevice): """Representation of a Ping Binary sensor.""" def __init__(self, name, ping): """Initialize the Ping Binary sensor.""" self._name = name self.ping = ping @property def name(self): """Return the name of the device.""" return self._name @property def device_class(self): """Return the class of this sensor.""" return DEFAULT_DEVICE_CLASS @property def is_on(self): """Return true if the binary sensor is on.""" return self.ping.available @property def device_state_attributes(self): """Return the state attributes of the ICMP checo request.""" if self.ping.data is not False: return { ATTR_ROUND_TRIP_TIME_AVG: self.ping.data["avg"], ATTR_ROUND_TRIP_TIME_MAX: self.ping.data["max"], ATTR_ROUND_TRIP_TIME_MDEV: self.ping.data["mdev"], ATTR_ROUND_TRIP_TIME_MIN: self.ping.data["min"], } def update(self): """Get the latest data.""" self.ping.update() class PingData: """The Class for handling the data retrieval.""" def __init__(self, host, count): """Initialize the data object.""" self._ip_address = host self._count = count self.data = {} self.available = False if sys.platform == "win32": self._ping_cmd = [ "ping", "-n", str(self._count), "-w", "1000", self._ip_address, ] else: self._ping_cmd = [ "ping", "-n", "-q", "-c", str(self._count), "-W1", self._ip_address, ] def ping(self): """Send ICMP echo request and return details if success.""" pinger = subprocess.Popen( self._ping_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) try: out = pinger.communicate() _LOGGER.debug("Output is %s", str(out)) if sys.platform == "win32": match = WIN32_PING_MATCHER.search(str(out).split("\n")[-1]) rtt_min, rtt_avg, rtt_max = match.groups() return {"min": rtt_min, "avg": rtt_avg, "max": rtt_max, "mdev": ""} if "max/" not in str(out): match = PING_MATCHER_BUSYBOX.search(str(out).split("\n")[-1]) rtt_min, rtt_avg, rtt_max = match.groups() return {"min": rtt_min, "avg": rtt_avg, "max": rtt_max, "mdev": ""} match = PING_MATCHER.search(str(out).split("\n")[-1]) rtt_min, 
rtt_avg, rtt_max, rtt_mdev = match.groups() return {"min": rtt_min, "avg": rtt_avg, "max": rtt_max, "mdev": rtt_mdev} except (subprocess.CalledProcessError, AttributeError): return False def update(self): """Retrieve the latest details from the host.""" self.data = self.ping() self.available = bool(self.data)
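# A quick way to see what PingData.ping() extracts: run the module's PING_MATCHER over a
# typical summary line from `ping -q` on Linux. The sample line below is illustrative,
# not captured from a real host.
import re

PING_MATCHER = re.compile(
    r"(?P<min>\d+.\d+)\/(?P<avg>\d+.\d+)\/(?P<max>\d+.\d+)\/(?P<mdev>\d+.\d+)"
)

sample = "rtt min/avg/max/mdev = 0.043/0.085/0.173/0.049 ms"
print(PING_MATCHER.search(sample).groupdict())
# {'min': '0.043', 'avg': '0.085', 'max': '0.173', 'mdev': '0.049'}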
{ "content_hash": "7e8695db48dd7f8a164e157c59ba5a82", "timestamp": "", "source": "github", "line_count": 151, "max_line_length": 86, "avg_line_length": 31.688741721854306, "alnum_prop": 0.5650992685475444, "repo_name": "leppa/home-assistant", "id": "4d9a99c678e6c3348fa8ba3ccc9996e09829ce1c", "size": "4785", "binary": false, "copies": "3", "ref": "refs/heads/dev", "path": "homeassistant/components/ping/binary_sensor.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "18957740" }, { "name": "Shell", "bytes": "6846" } ], "symlink_target": "" }
import os import mock from django.test import TestCase from waliki.models import Page from .factories import PageFactory from waliki.settings import deep_update from waliki import settings rst = """ Title ===== some rst markup """ rst_html = """\n <h2>Title</h2>\n <p>some rst markup</p>\n""" md = """ # Hi I'm Markdown """ md_html = """<h2 id="hi">Hi</h2>\n<p>I'm Markdown</p>\n""" class TestPage(TestCase): def test_content_saved_on_attribute_set(self): page = Page(path='test.rst') page.raw = rst path = os.path.join(settings.WALIKI_DATA_DIR, 'test.rst') self.assertEqual(page.abspath, path) self.assertTrue(os.path.exists(path)) content = open(path).read() self.assertEqual(content, rst) def test_raw_empty_if_file_doesnt_exist(self): page = Page(path='test3.rst') assert not os.path.exists(page.abspath) self.assertEqual(page.raw, "") def test_path_populated_from_slug_if_not_given(self): page = Page(slug='some/slug') page.save() self.assertEqual(page.path, 'some/slug.rst') def test_slug_strip_slashes(self): page = Page(slug='/some/slug/') page.save() self.assertEqual(page.slug, 'some/slug') def test_update_extension(self): page = PageFactory(raw='lala') assert page.markup == 'reStructuredText' old_path = page.abspath assert os.path.exists(old_path) page.markup = 'Markdown' page.update_extension() self.assertTrue(os.path.exists(page.abspath)) self.assertFalse(os.path.exists(old_path)) self.assertTrue(page.path.endswith('.md')) self.assertEqual(page.raw, 'lala') class TestRestructuredText(TestCase): def test_body(self): page = PageFactory(path='test-rst.rst', raw=rst) self.assertEqual(page.body, rst_html) def test_preview(self): self.assertEqual(Page.preview('reStructuredText', rst), rst_html) def test_link_explicit(self): with mock.patch('waliki._markups.get_url') as get_url: get_url.return_value = 'xxx' html = Page.preview('reStructuredText', 'a link_') self.assertEqual(html, '\n <p>a <a href="xxx">link</a></p>\n') def test_missing_text(self): html = Page.preview('reStructuredText', '`***`_') self.assertIn('problematic', html) class TestMarkdown(TestCase): def test_body(self): page = PageFactory(path='test.md', markup='Markdown', raw=md) self.assertEqual(page.body, md_html) def test_preview(self): self.assertEqual(Page.preview('Markdown', md), md_html) def test_link_explicit(self): s = settings.WALIKI_MARKUPS_SETTINGS.get('Markdown') s['extension_configs']['wikilinks']['build_url'] = mock.Mock(return_value='xxx') with mock.patch('waliki.models.settings') as s_mock: s_mock.WALIKI_MARKUPS_SETTINGS.get.return_value = s html = Page.preview('Markdown', 'a [[Link]]') self.assertEqual(html, '<p>a <a class="wikilink" href="xxx">Link</a></p>\n') class TestMarkupSettings(TestCase): def test_deep_update(self): d = {'reStructuredText': { 'settings_overrides': { # noqa 'initial_header_level': 2, 'record_dependencies': True} }, 'Markdown': ['...'] } u = {'reStructuredText': { 'settings_overrides': { # noqa 'initial_header_level': 1 }}} expected = {'reStructuredText': { 'settings_overrides': { # noqa 'initial_header_level': 1, 'record_dependencies': True} }, 'Markdown': ['...'] } self.assertEqual(deep_update(d, u), expected)
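# test_deep_update above only pins down the behaviour of waliki.settings.deep_update.
# A minimal recursive-merge sketch that satisfies that assertion (the real implementation
# in waliki may differ) looks like this:
def deep_update(original, update):
    """Recursively merge `update` into `original` and return `original`."""
    for key, value in update.items():
        if isinstance(value, dict) and isinstance(original.get(key), dict):
            deep_update(original[key], value)
        else:
            original[key] = value
    return original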
{ "content_hash": "b560ab18368430878284be866a1b52d9", "timestamp": "", "source": "github", "line_count": 125, "max_line_length": 88, "avg_line_length": 31.552, "alnum_prop": 0.577079107505071, "repo_name": "beres/waliki", "id": "443e4e8ed863d1b918d65d345d7cee9e6fcfd5cc", "size": "3990", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tests/test_models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "29217" }, { "name": "HTML", "bytes": "53966" }, { "name": "JavaScript", "bytes": "49372" }, { "name": "Makefile", "bytes": "1245" }, { "name": "Python", "bytes": "213630" }, { "name": "XSLT", "bytes": "3542" } ], "symlink_target": "" }
import gcloudutils
import sys
import requests
import argparse
from infoblox_client import connector
from infoblox_client import objects
from requests.auth import HTTPBasicAuth
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from oauth2client.client import GoogleCredentials
from googleapiclient import discovery

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)

parser = argparse.ArgumentParser(
    description='Delete a number of VMs in Google Compute Engine. Google Cloud SDK must be installed and configured (gcloud init) and google-api-python-client and infoblox-client Python libraries must be installed.')
parser.add_argument('name', nargs='+', help='List of FQDNs for VMs to delete separated by spaces')
args = parser.parse_args()

niosip = '10.60.27.4'
niosuser = 'admin'
niospw = 'infoblox'
project = 'mythic-brook-146218'
zone = 'us-west1-a'
name = args.name[0]

opts = {'host': niosip, 'username': niosuser, 'password': niospw}
conn = connector.Connector(opts)

for name in args.name:
    splitname = name.split('.', 1)
    hostname = splitname[0]
    domain = splitname[1]
    print(gcloudutils.delete_instance(compute, project, zone, hostname))
    hr = objects.HostRecord.search(conn, name=name)
    objects.HostRecord.delete(hr)
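# The gcloudutils.delete_instance helper is not shown in this file. Based on the public
# Compute Engine v1 API it presumably wraps a call like the following (a hedged sketch,
# not the project's actual helper):
def delete_instance(compute, project, zone, name):
    # Issue instances.delete and return the resulting zone operation.
    return compute.instances().delete(
        project=project, zone=zone, instance=name).execute()

# Example invocation of the script itself, with illustrative FQDNs:
#   python delete_vm.py web01.example.com web02.example.com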
{ "content_hash": "ec343e2bb47a4009322e1502ee90f9a2", "timestamp": "", "source": "github", "line_count": 37, "max_line_length": 220, "avg_line_length": 37.648648648648646, "alnum_prop": 0.7875089734386217, "repo_name": "brampling/infoblox-gcp-poc", "id": "9ec893d1fc7877dfe5d112a05ddf7abd911d2f6d", "size": "1411", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "delete_vm.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "13399" } ], "symlink_target": "" }
'''Get an object of type: whitelisted_url and save the object to a report file'''
__author__ = 'Jim Olsen <jim.olsen@tanium.com>'
__version__ = '2.1.5'

import os
import sys

sys.dont_write_bytecode = True

my_file = os.path.abspath(sys.argv[0])
my_name = os.path.splitext(os.path.basename(my_file))[0]
my_dir = os.path.dirname(my_file)
parent_dir = os.path.dirname(my_dir)
lib_dir = os.path.join(parent_dir, 'lib')
path_adds = [lib_dir]
[sys.path.append(aa) for aa in path_adds if aa not in sys.path]

import pytan
import pytan.binsupport

if __name__ == "__main__":
    pytan.binsupport.version_check(reqver=__version__)

    parser = pytan.binsupport.setup_get_object_argparser(obj='whitelisted_url', doc=__doc__)
    pytan.binsupport.add_get_object_report_argparser(parser=parser)

    args = parser.parse_args()

    handler = pytan.binsupport.process_handler_args(parser=parser, args=args)
    response = pytan.binsupport.process_get_object_args(
        parser=parser, handler=handler, obj='whitelisted_url', args=args,
    )
{ "content_hash": "05e4b4cb62efd48a0065aaa7b3979070", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 92, "avg_line_length": 34.36666666666667, "alnum_prop": 0.7032007759456838, "repo_name": "tanium/pytan", "id": "c09c2928ce9d68562612b6874ffd4eb0d8009d90", "size": "1198", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "bin/get_whitelisted_url.py", "mode": "33261", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "13251" }, { "name": "CSS", "bytes": "32442" }, { "name": "HTML", "bytes": "1232764" }, { "name": "JavaScript", "bytes": "375167" }, { "name": "Makefile", "bytes": "4287" }, { "name": "Python", "bytes": "2541262" }, { "name": "Shell", "bytes": "3194" } ], "symlink_target": "" }
from contextlib import closing
import os
import socket
import sys

import httpmsg
import urlparse

# The default port for HTTP.
DEFAULT_PORT = 80


def _read_socket(sock):
    '''
    Reads all available data from a socket.
    '''
    msg = bytes()
    read_size = 8192
    while True:
        read = sock.recv(read_size)
        if len(read) > 0:
            msg += read
        else:
            # End of data.
            return msg


def send_request(host, port, msg):
    '''
    Sends a request on a new network connection and receives a response.

    Performs the following:
    - Creates a TCP network connection to 'port' on 'host'.
    - Once connected, sends 'msg'.
    - Reads data from the connection.
    - Closes the network connection.

    @type msg bytes
    @return The response message.
    @rtype bytes
    '''
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.connect((host, port))
        # sendall() keeps writing until the whole request has been transmitted;
        # a bare send() may write only part of the message.
        sock.sendall(msg)
        return _read_socket(sock)


def get(url):
    '''
    Requests a URL and returns its contents.
    '''
    # First parse the URL.
    parts = urlparse.parse(url)
    host = parts.authority  # Assume the authority is the server hostname.
    port = DEFAULT_PORT
    path = parts.full_path  # This includes the path, query, and fragment.
    # If no path was specified, assume the default path, /.
    if path == '':
        path = '/'

    # Build a GET request with only the 'host' header, and an empty body.
    header = {'Host': host}
    request = httpmsg.Request('GET', path, header, b'')
    req_msg = httpmsg.format_request(request)

    # Send the request and get the response.
    resp_msg = send_request(host, port, req_msg)

    # Parse the response.
    response = httpmsg.parse_response(resp_msg)
    if response.status == 200:
        # Success.
        return response.body
    else:
        # Not successful; don't return data.
        raise RuntimeError(
            "GET of " + url + " returned " + str(response.status) + ": " +
            response.reason)


#-------------------------------------------------------------------------------

if __name__ == "__main__":
    # Assume the command line consists of a single URL.
    _, url = sys.argv

    # Get web page.
    data = get(url)

    # Print the raw data, without decoding.
    os.write(1, data)
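# Programmatic use of the module above, assuming the course's httpmsg and urlparse
# helpers are importable and the file is saved as wget.py:
from wget import get

body = get("http://example.org/")   # returns the response body as bytes
with open("page.html", "wb") as f:  # save without decoding
    f.write(body)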
{ "content_hash": "22650a502a8c753720063fdc7216f6c1", "timestamp": "", "source": "github", "line_count": 94, "max_line_length": 80, "avg_line_length": 25.26595744680851, "alnum_prop": 0.5810526315789474, "repo_name": "alexhsamuel/codex", "id": "0f0003b9c44a137b536dfee7e4f2cb6b09b92503", "size": "2375", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "solutions/wget.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "31311" } ], "symlink_target": "" }
from a10sdk.common.A10BaseClass import A10BaseClass


class Fragmentation(A10BaseClass):

    """Class Description::
    DS-Lite fragmentation parameters.

    Class fragmentation supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.

    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/cgnv6/ds-lite/fragmentation`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "fragmentation"
        self.a10_url = "/axapi/v3/cgnv6/ds-lite/fragmentation"
        self.DeviceProxy = ""
        self.inbound = {}
        self.outbound = {}

        for keys, value in kwargs.items():
            setattr(self, keys, value)
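# Minimal instantiation sketch: keyword arguments are copied onto the instance by
# __init__ via setattr. The payload key below is illustrative only; DeviceProxy setup
# (see common/device_proxy.py) is omitted.
frag = Fragmentation(inbound={"example-option": 1})
print(frag.b_key)    # "fragmentation"
print(frag.a10_url)  # "/axapi/v3/cgnv6/ds-lite/fragmentation"
print(frag.inbound)  # {"example-option": 1}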
{ "content_hash": "7a2090f75a7d0898dad741181027724c", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 116, "avg_line_length": 25.857142857142858, "alnum_prop": 0.6386740331491713, "repo_name": "a10networks/a10sdk-python", "id": "7d066abe1c4bad082943d39886a487e39a059b31", "size": "905", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "a10sdk/core/cgnv6/cgnv6_ds_lite_fragmentation.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "6956372" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('violations', '0002_violationtype_readable_description'),
    ]

    operations = [
        migrations.AlterField(
            model_name='violationtype',
            name='readable_description',
            field=models.CharField(help_text='The readable description for this type of violation.This description will be used if available.', max_length=200, null=True, verbose_name='readable description', blank=True),
        ),
    ]
{ "content_hash": "88cb9e9fa52d46d80fa3a73a333502bd", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 220, "avg_line_length": 32.666666666666664, "alnum_prop": 0.6768707482993197, "repo_name": "ebrelsford/django-phillydata", "id": "d47eaffb7896786e9e7cc1bd6fa8e77e2235948d", "size": "612", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "phillydata/violations/migrations/0003_auto_20141015_1947.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "359" }, { "name": "Python", "bytes": "150372" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Electricity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateField()),
                ('hour', models.PositiveSmallIntegerField()),
                ('usage', models.DecimalField(max_digits=10, decimal_places=2)),
            ],
            options={
                'ordering': ['date', 'hour'],
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='electricity',
            unique_together=set([('date', 'hour')]),
        ),
    ]
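# For reference, the model this initial migration creates corresponds roughly to the
# following models.py definition (reconstructed from the migration; the app's actual
# model file may differ):
from django.db import models


class Electricity(models.Model):
    date = models.DateField()
    hour = models.PositiveSmallIntegerField()
    usage = models.DecimalField(max_digits=10, decimal_places=2)

    class Meta:
        ordering = ['date', 'hour']
        unique_together = (('date', 'hour'),)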
{ "content_hash": "4f80d4ea32438d491860c858bd28d2be", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 95, "avg_line_length": 29.533333333333335, "alnum_prop": 0.5022573363431151, "repo_name": "ojarva/home-info-display", "id": "37e1bc0887d21b63d29bbe0cd8246f483805b14b", "size": "910", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "homedisplay/info_electricity/migrations/0001_initial.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "22171" }, { "name": "CoffeeScript", "bytes": "115283" }, { "name": "HTML", "bytes": "51598" }, { "name": "JavaScript", "bytes": "9902" }, { "name": "Python", "bytes": "310675" }, { "name": "Shell", "bytes": "1617" } ], "symlink_target": "" }
""" Cancer Research Institute's Table of Tumor Antigens Resulting from Mutations http://cancerimmunity.org/peptide/mutations/ """ from os.path import join import pandas as pd import numpy as np from static_data import DATA_DIR def load_dataframe( mhc_class = None, hla_type = None): path = join(DATA_DIR, 'cri_mutations.csv') df = pd.read_csv(path) mhc2 = df.HLA.str.startswith('D') if mhc_class == 1: return df.ix[~mhc2] elif mhc_class == 2: return df.ix[mhc2] elif hla_type: return df.ix[df.HLA == hla_type] else: return df def load_peptides(*args, **kwargs): df = load_dataframe(*args, **kwargs) return set(df.Peptide)
{ "content_hash": "6c5871d78a82ba996274bf9aea3eea66", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 48, "avg_line_length": 23.633333333333333, "alnum_prop": 0.6389280677009873, "repo_name": "cpcloud/pepdata", "id": "cc001cfd64d05cf4cf983cde0c6301e3287d7423", "size": "1309", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pepdata/cri_tumor_antigens.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "114025" } ], "symlink_target": "" }
import unittest


class Test_entry_from_resource(unittest.TestCase):

    @staticmethod
    def _call_fut(resource, client, loggers):
        from google.cloud.logging._helpers import entry_from_resource

        return entry_from_resource(resource, client, loggers)

    def test_unknown_type(self):
        with self.assertRaises(ValueError):
            self._call_fut({}, None, {})

    def _payload_helper(self, key, class_name):
        import mock

        resource = {key: 'yup'}
        client = object()
        loggers = {}
        mock_class = EntryMock()

        name = 'google.cloud.logging._helpers.' + class_name
        with mock.patch(name, new=mock_class):
            result = self._call_fut(resource, client, loggers)

        self.assertIs(result, mock_class.sentinel)
        self.assertEqual(mock_class.called, (resource, client, loggers))

    def test_text_payload(self):
        self._payload_helper('textPayload', 'TextEntry')

    def test_json_payload(self):
        self._payload_helper('jsonPayload', 'StructEntry')

    def test_proto_payload(self):
        self._payload_helper('protoPayload', 'ProtobufEntry')


class EntryMock(object):

    def __init__(self):
        self.sentinel = object()
        self.called = None

    def from_api_repr(self, resource, client, loggers):
        self.called = (resource, client, loggers)
        return self.sentinel
{ "content_hash": "a32ef120b00d28c497afd7d2ac2e94a1", "timestamp": "", "source": "github", "line_count": 48, "max_line_length": 72, "avg_line_length": 29.020833333333332, "alnum_prop": 0.6331658291457286, "repo_name": "Fkawala/gcloud-python", "id": "8f8e43f367343ffc1300322fb7395e48b8668f3c", "size": "1970", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "logging/unit_tests/test__helpers.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "3366" }, { "name": "PowerShell", "bytes": "7195" }, { "name": "Protocol Buffer", "bytes": "89702" }, { "name": "Python", "bytes": "3403274" }, { "name": "Shell", "bytes": "7548" } ], "symlink_target": "" }
from copy import deepcopy from typing import Any, TYPE_CHECKING from azure.core.rest import HttpRequest, HttpResponse from azure.mgmt.core import ARMPipelineClient from . import models from ._configuration import CostManagementClientConfiguration from ._serialization import Deserializer, Serializer from .operations import ( AlertsOperations, BenefitRecommendationsOperations, BenefitUtilizationSummariesOperations, DimensionsOperations, ExportsOperations, ForecastOperations, GenerateCostDetailsReportOperations, GenerateDetailedCostReportOperationResultsOperations, GenerateDetailedCostReportOperationStatusOperations, GenerateDetailedCostReportOperations, GenerateReservationDetailsReportOperations, Operations, PriceSheetOperations, QueryOperations, ScheduledActionsOperations, ViewsOperations, ) if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials import TokenCredential class CostManagementClient: # pylint: disable=client-accepts-api-version-keyword,too-many-instance-attributes """CostManagement management client provides access to CostManagement resources for Azure Enterprise Subscriptions. :ivar operations: Operations operations :vartype operations: azure.mgmt.costmanagement.operations.Operations :ivar views: ViewsOperations operations :vartype views: azure.mgmt.costmanagement.operations.ViewsOperations :ivar alerts: AlertsOperations operations :vartype alerts: azure.mgmt.costmanagement.operations.AlertsOperations :ivar forecast: ForecastOperations operations :vartype forecast: azure.mgmt.costmanagement.operations.ForecastOperations :ivar dimensions: DimensionsOperations operations :vartype dimensions: azure.mgmt.costmanagement.operations.DimensionsOperations :ivar query: QueryOperations operations :vartype query: azure.mgmt.costmanagement.operations.QueryOperations :ivar generate_reservation_details_report: GenerateReservationDetailsReportOperations operations :vartype generate_reservation_details_report: azure.mgmt.costmanagement.operations.GenerateReservationDetailsReportOperations :ivar exports: ExportsOperations operations :vartype exports: azure.mgmt.costmanagement.operations.ExportsOperations :ivar generate_cost_details_report: GenerateCostDetailsReportOperations operations :vartype generate_cost_details_report: azure.mgmt.costmanagement.operations.GenerateCostDetailsReportOperations :ivar generate_detailed_cost_report: GenerateDetailedCostReportOperations operations :vartype generate_detailed_cost_report: azure.mgmt.costmanagement.operations.GenerateDetailedCostReportOperations :ivar generate_detailed_cost_report_operation_results: GenerateDetailedCostReportOperationResultsOperations operations :vartype generate_detailed_cost_report_operation_results: azure.mgmt.costmanagement.operations.GenerateDetailedCostReportOperationResultsOperations :ivar generate_detailed_cost_report_operation_status: GenerateDetailedCostReportOperationStatusOperations operations :vartype generate_detailed_cost_report_operation_status: azure.mgmt.costmanagement.operations.GenerateDetailedCostReportOperationStatusOperations :ivar price_sheet: PriceSheetOperations operations :vartype price_sheet: azure.mgmt.costmanagement.operations.PriceSheetOperations :ivar scheduled_actions: ScheduledActionsOperations operations :vartype scheduled_actions: azure.mgmt.costmanagement.operations.ScheduledActionsOperations :ivar benefit_recommendations: BenefitRecommendationsOperations operations :vartype benefit_recommendations: 
azure.mgmt.costmanagement.operations.BenefitRecommendationsOperations :ivar benefit_utilization_summaries: BenefitUtilizationSummariesOperations operations :vartype benefit_utilization_summaries: azure.mgmt.costmanagement.operations.BenefitUtilizationSummariesOperations :param credential: Credential needed for the client to connect to Azure. Required. :type credential: ~azure.core.credentials.TokenCredential :param base_url: Service URL. Default value is "https://management.azure.com". :type base_url: str :keyword api_version: Api Version. Default value is "2022-10-01". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. """ def __init__( self, credential: "TokenCredential", base_url: str = "https://management.azure.com", **kwargs: Any ) -> None: self._config = CostManagementClientConfiguration(credential=credential, **kwargs) self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) self._serialize.client_side_validation = False self.operations = Operations(self._client, self._config, self._serialize, self._deserialize) self.views = ViewsOperations(self._client, self._config, self._serialize, self._deserialize) self.alerts = AlertsOperations(self._client, self._config, self._serialize, self._deserialize) self.forecast = ForecastOperations(self._client, self._config, self._serialize, self._deserialize) self.dimensions = DimensionsOperations(self._client, self._config, self._serialize, self._deserialize) self.query = QueryOperations(self._client, self._config, self._serialize, self._deserialize) self.generate_reservation_details_report = GenerateReservationDetailsReportOperations( self._client, self._config, self._serialize, self._deserialize ) self.exports = ExportsOperations(self._client, self._config, self._serialize, self._deserialize) self.generate_cost_details_report = GenerateCostDetailsReportOperations( self._client, self._config, self._serialize, self._deserialize ) self.generate_detailed_cost_report = GenerateDetailedCostReportOperations( self._client, self._config, self._serialize, self._deserialize ) self.generate_detailed_cost_report_operation_results = GenerateDetailedCostReportOperationResultsOperations( self._client, self._config, self._serialize, self._deserialize ) self.generate_detailed_cost_report_operation_status = GenerateDetailedCostReportOperationStatusOperations( self._client, self._config, self._serialize, self._deserialize ) self.price_sheet = PriceSheetOperations(self._client, self._config, self._serialize, self._deserialize) self.scheduled_actions = ScheduledActionsOperations( self._client, self._config, self._serialize, self._deserialize ) self.benefit_recommendations = BenefitRecommendationsOperations( self._client, self._config, self._serialize, self._deserialize ) self.benefit_utilization_summaries = BenefitUtilizationSummariesOperations( self._client, self._config, self._serialize, self._deserialize ) def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse: """Runs the network request through the client's chained policies. 
>>> from azure.core.rest import HttpRequest >>> request = HttpRequest("GET", "https://www.example.org/") <HttpRequest [GET], url: 'https://www.example.org/'> >>> response = client._send_request(request) <HttpResponse: 200 OK> For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request :param request: The network request you want to make. Required. :type request: ~azure.core.rest.HttpRequest :keyword bool stream: Whether the response payload will be streamed. Defaults to False. :return: The response of your network call. Does not do error handling on your response. :rtype: ~azure.core.rest.HttpResponse """ request_copy = deepcopy(request) request_copy.url = self._client.format_url(request_copy.url) return self._client.send_request(request_copy, **kwargs) def close(self): # type: () -> None self._client.close() def __enter__(self): # type: () -> CostManagementClient self._client.__enter__() return self def __exit__(self, *exc_details): # type: (Any) -> None self._client.__exit__(*exc_details)
{ "content_hash": "afbf72e22a980baf2034e22c85d53d7f", "timestamp": "", "source": "github", "line_count": 167, "max_line_length": 116, "avg_line_length": 52.778443113772454, "alnum_prop": 0.746879963694123, "repo_name": "Azure/azure-sdk-for-python", "id": "4ba98e38ed4312b46dd5cf25e384df420d59500f", "size": "9282", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/_cost_management_client.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
import logging

from colorama import Fore, Back, Style

LOG_TEMPLATE = Back.BLACK + \
    Fore.WHITE + \
    '%(asctime)s %(name)s %(levelname)s: ' + \
    Style.RESET_ALL + \
    Back.BLACK + \
    Style.BRIGHT + \
    '{message_color}' + \
    '%(message)s' + \
    Style.RESET_ALL


class MyFormatter(logging.Formatter):
    def format(self, record):
        if record.levelno >= logging.ERROR:
            msgcolor = Fore.RED
        elif record.levelno >= logging.WARNING:
            msgcolor = Fore.YELLOW
        else:
            msgcolor = Fore.GREEN
        return logging.Formatter(LOG_TEMPLATE.format(message_color=msgcolor),
                                 datefmt='%Y-%m-%d %H:%M:%S %z').format(record)


def loggy(name=""):
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(MyFormatter())
    mylogger = logging.getLogger(name)
    mylogger.addHandler(ch)
    mylogger.setLevel(logging.DEBUG)
    return mylogger
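# Usage example. Note that each call to loggy() attaches a new StreamHandler, so calling
# it twice with the same name will duplicate output.
log = loggy("wrangle")
log.info("loaded incidents file")       # message text rendered in green
log.warning("missing precinct codes")   # yellow
log.error("could not parse date")       # red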
{ "content_hash": "cccd5d7bc22c217feb25e478b460b3b0", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 99, "avg_line_length": 28.526315789473685, "alnum_prop": 0.5359778597785978, "repo_name": "helloworlddata/nypd-major-felony-incidents", "id": "6577b387f66dc7b38052578231e67259dd0aabd0", "size": "1084", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wrangle/scripts/loggy.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "6622" }, { "name": "Ruby", "bytes": "3424" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('fundraisers', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='fundraiser',
            name='owner',
            field=models.ForeignKey(help_text='Campaigner', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='campaigner'),
        ),
    ]
{ "content_hash": "a6d9dff45d49383bc8137be70aa4aeda", "timestamp": "", "source": "github", "line_count": 23, "max_line_length": 161, "avg_line_length": 27.652173913043477, "alnum_prop": 0.6682389937106918, "repo_name": "onepercentclub/bluebottle", "id": "cf55fb81a19e1125c262a34872f41668be3c5798", "size": "708", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "bluebottle/fundraisers/migrations/0002_fundraiser_owner.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "41694" }, { "name": "HTML", "bytes": "246695" }, { "name": "Handlebars", "bytes": "63" }, { "name": "JavaScript", "bytes": "139123" }, { "name": "PHP", "bytes": "35" }, { "name": "PLpgSQL", "bytes": "1369882" }, { "name": "PostScript", "bytes": "2927" }, { "name": "Python", "bytes": "4983116" }, { "name": "Rich Text Format", "bytes": "39109" }, { "name": "SCSS", "bytes": "99555" }, { "name": "Shell", "bytes": "3068" }, { "name": "Smarty", "bytes": "3814" } ], "symlink_target": "" }
from common import PrimeGenerator
import functools
import operator


def calculate_factors(number, prime_generator):
    factors = []
    prime_generator = iter(prime_generator)
    while number != 1:
        prime = next(prime_generator)
        if number % prime == 0:
            number = number // prime
            factors.append(prime)
            prime_generator = iter(prime_generator)  # start from 2 again
    return factors


def solve():
    prime_generator = PrimeGenerator()
    triangle_number = 0
    n = 1
    while True:
        triangle_number += n
        n += 1
        factors = calculate_factors(triangle_number, prime_generator)
        if len(factors) == 0:
            continue
        counts = {}
        for p in factors:
            counts[p] = counts.get(p, 0) + 1
        # The product of (exponent + 1) already counts every divisor,
        # including 1 and triangle_number itself.
        divisor_count = functools.reduce(operator.mul, (c + 1 for c in counts.values()))
        if divisor_count > 500:
            return triangle_number


if __name__ == '__main__':
    print(__file__ + ": %d" % solve())
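# The divisor count relies on the standard identity: if n = p1**a1 * p2**a2 * ..., then
# n has (a1 + 1) * (a2 + 1) * ... divisors. Worked example for the triangle number
# 28 = 2**2 * 7:
counts = {2: 2, 7: 1}
divisor_count = 1
for exponent in counts.values():
    divisor_count *= exponent + 1
print(divisor_count)  # 6 divisors: 1, 2, 4, 7, 14, 28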
{ "content_hash": "e75cdad6e5271b288d4b4000894517dc", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 122, "avg_line_length": 31.6875, "alnum_prop": 0.6074950690335306, "repo_name": "mytram/learning-python", "id": "3d3be1385ecfa708da707302c0112d930fc28af4", "size": "1621", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "projecteuler.net/problem_012.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "74577" }, { "name": "Shell", "bytes": "153" } ], "symlink_target": "" }
import inspect import IECore import Gaffer import GafferTest class ProcessMessageHandlerTest( GafferTest.TestCase ) : def testMessageOutSideProcessIsForwardedUnmodified( self ) : capturingMessageHandler = IECore.CapturingMessageHandler() messageHandler = Gaffer.ProcessMessageHandler( capturingMessageHandler ) # if we log a message outside a compute or hash Process then we only get the original message messageHandler.handle(IECore.MessageHandler.Level.Debug, "sending out an SOS", "message in a bottle") self.assertEqual(len( capturingMessageHandler.messages ), 1 ) self.assertEqual(capturingMessageHandler.messages[0].level, IECore.MessageHandler.Level.Debug) self.assertEqual(capturingMessageHandler.messages[0].context, "sending out an SOS") self.assertEqual(capturingMessageHandler.messages[0].message, "message in a bottle") def testMessageInProcessGetExtraDebugInfo( self ) : capturingMessageHandler = IECore.CapturingMessageHandler() messageHandler = Gaffer.ProcessMessageHandler( capturingMessageHandler ) scriptNode = Gaffer.ScriptNode() expression = Gaffer.Expression( "Expression" ) node = Gaffer.Node( "Node" ) node["user"].addChild( Gaffer.IntPlug( "test", defaultValue = 0, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ) ) scriptNode.addChild(expression) scriptNode.addChild(node) expression.setExpression( inspect.cleandoc( """ import IECore IECore.MessageHandler.output( IECore.MessageHandler.Level.Error, "testA", "testB" ) parent["Node"]["user"]["test"] = len( context.get( "scene:path", [] ) ) """ ) ) with Gaffer.Context() as context : with messageHandler : self.assertEqual( node['user']['test'].getValue(), 0 ) self.assertEqual( len( capturingMessageHandler.messages ), 2 ) self.assertEqual( capturingMessageHandler.messages[0].level, IECore.MessageHandler.Level.Error ) self.assertEqual( capturingMessageHandler.messages[0].context, "testA" ) self.assertEqual( capturingMessageHandler.messages[0].message, "testB" ) self.assertEqual( capturingMessageHandler.messages[1].level, IECore.MessageHandler.Level.Debug ) self.assertEqual( capturingMessageHandler.messages[1].context, "Gaffer::Process" ) self.assertEqual( capturingMessageHandler.messages[1].message, "[ plug: 'ScriptNode.Expression.__execute', frame: 1 ]" ) del capturingMessageHandler.messages[:] context["scene:path"] = IECore.InternedStringVectorData( [ "a", "b" ] ) self.assertEqual( node['user']['test'].getValue(), 2 ) self.assertEqual( len( capturingMessageHandler.messages ), 2 ) self.assertEqual( capturingMessageHandler.messages[0].level, IECore.MessageHandler.Level.Error ) self.assertEqual( capturingMessageHandler.messages[0].context, "testA" ) self.assertEqual( capturingMessageHandler.messages[0].message, "testB" ) self.assertEqual( capturingMessageHandler.messages[1].level, IECore.MessageHandler.Level.Debug ) self.assertEqual( capturingMessageHandler.messages[1].context, "Gaffer::Process" ) self.assertEqual( capturingMessageHandler.messages[1].message, "[ plug: 'ScriptNode.Expression.__execute', frame: 1, path: '/a/b' ]" ) del capturingMessageHandler.messages[:] del context["frame"] context["scene:path"] = IECore.InternedStringVectorData( [ "a", "b", "c" ] ) self.assertEqual( node['user']['test'].getValue(), 3 ) self.assertEqual( len( capturingMessageHandler.messages ), 2 ) self.assertEqual( capturingMessageHandler.messages[0].level, IECore.MessageHandler.Level.Error ) self.assertEqual( capturingMessageHandler.messages[0].context, "testA" ) self.assertEqual( capturingMessageHandler.messages[0].message, "testB" ) 
self.assertEqual( capturingMessageHandler.messages[1].level, IECore.MessageHandler.Level.Debug ) self.assertEqual( capturingMessageHandler.messages[1].context, "Gaffer::Process" ) self.assertEqual( capturingMessageHandler.messages[1].message, "[ plug: 'ScriptNode.Expression.__execute', path: '/a/b/c' ]" ) if __name__ == "__main__": unittest.main()
{ "content_hash": "a5bd3887dc6025baec91029e9e1e5d65", "timestamp": "", "source": "github", "line_count": 96, "max_line_length": 138, "avg_line_length": 42.770833333333336, "alnum_prop": 0.7471992206527034, "repo_name": "johnhaddon/gaffer", "id": "da69ef92542fdfdc5aa31c4de0ebe75456db6b9c", "size": "5921", "binary": false, "copies": "8", "ref": "refs/heads/main", "path": "python/GafferTest/ProcessMessageHandlerTest.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "5790" }, { "name": "C", "bytes": "61993" }, { "name": "C++", "bytes": "9571062" }, { "name": "CMake", "bytes": "85201" }, { "name": "GLSL", "bytes": "6208" }, { "name": "Python", "bytes": "10271481" }, { "name": "Ruby", "bytes": "419" }, { "name": "Shell", "bytes": "14389" } ], "symlink_target": "" }