content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
from datetime import datetime from typing import Optional, Dict, List, Union from schema import Schema, Or from src.monitorables.nodes.node import Node from src.utils.exceptions import InvalidDictSchemaException def get_int_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing int metrics. """ int_prometheus_metric_attributes = \ self.get_int_prometheus_metric_attributes() return [*int_prometheus_metric_attributes] def get_float_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing float metrics. """ float_prometheus_metric_attributes = \ self.get_float_prometheus_metric_attributes() return [*float_prometheus_metric_attributes] def get_dict_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing dict metrics. """ dict_prometheus_metric_attributes = \ self.get_dict_prometheus_metric_attributes() return [*dict_prometheus_metric_attributes] def get_str_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing str metrics. """ str_prometheus_metric_attributes = \ self.get_str_prometheus_metric_attributes() return [*str_prometheus_metric_attributes] def get_all_metric_attributes(self) -> List[str]: """ :return: A list of all variable names representing metrics """ prometheus_metric_attributes = \ self.get_all_prometheus_metric_attributes() return [*prometheus_metric_attributes] def set_prometheus_as_down(self, downtime: Optional[float]) -> None: """ This function sets the node's prometheus interface as down. It sets the time that the interface was initially down to the parameter 'downtime' if it is not None, otherwise it sets it to the current timestamp. :param downtime: :return: """ if downtime is None: self.set_went_down_at_prometheus(datetime.now().timestamp()) else: self.set_went_down_at_prometheus(downtime) def set_prometheus_as_up(self) -> None: """ This function sets a node's prometheus interface as up. 
A node's interface is said to be up if went_down_at_prometheus is None. :return: None """ self.set_went_down_at_prometheus(None) def set_current_gas_price_info(self, new_percentile: Optional[float], new_price: Optional[float]) -> None: """ This method sets the current_gas_price_info dict based on the new percentile and price. This is done in this way to protect the Dict schema. :param new_percentile: The new percentile to be stored :param new_price: The new gas to be stored :return: None """ self._current_gas_price_info['percentile'] = new_percentile self._current_gas_price_info['price'] = new_price def reset(self) -> None: """ This method resets all metrics to their initial state :return: None """ self.set_went_down_at_prometheus(None) self.set_current_height(None) self.set_total_block_headers_received(None) self.set_max_pending_tx_delay(None) self.set_process_start_time_seconds(None) self.set_total_gas_bumps(None) self.set_total_gas_bumps_exceeds_limit(None) self.set_no_of_unconfirmed_txs(None) self.set_total_errored_job_runs(None) self.set_current_gas_price_info(None, None) self.set_eth_balance_info({}) self.set_last_prometheus_source_used(None) self.set_last_monitored_prometheus(None)
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 19720, 1330, 32233, 11, 360, 713, 11, 7343, 11, 4479, 198, 198, 6738, 32815, 1330, 10011, 2611, 11, 1471, 198, 198, 6738, 12351, 13, 41143, 2977, 13, 77, 4147, 13, 17440, 1330, 19081, 198...
2.378279
1,639
import pickle import numpy as np import os def _analyze_query_point_assignment( query_data_dict: dict, init_Rdata_dict: dict, init_Edata_dict: dict, num_R: int, query_point_assignment_array: np.ndarray, root: str, n_points_to_copy=50, ): """ Analyzes and visualizes qDCA results. :param query_data_dict: raw query data. :param init_Rdata_dict: raw R data. :param init_Edata_dict: raw E data. :param num_R: total number of R points. :param query_point_assignment_array: query point assignments results. :param root: root directory of the experiment. :param n_points_to_copy: number of images to save. :return: accuracy of qDCA assignments; list of (R, query) points with same label; list of (R, query) points with different label """ true_query_data_labels = query_data_dict["labels"] assigned_R = query_point_assignment_array[ query_point_assignment_array[:, 1] < num_R, 1 ] assigned_E = query_point_assignment_array[ query_point_assignment_array[:, 1] >= num_R, 1 ] assigned_R_labels = init_Rdata_dict["labels"][assigned_R] assigned_E_labels = init_Edata_dict["labels"][assigned_E - num_R] assigned_query_data_labels = np.empty( shape=query_point_assignment_array.shape[0] ).astype(np.int32) assigned_query_data_labels[ query_point_assignment_array[:, 1] < num_R ] = assigned_R_labels assigned_query_data_labels[ query_point_assignment_array[:, 1] >= num_R ] = assigned_E_labels accuracy = ( true_query_data_labels == assigned_query_data_labels ).sum() / assigned_query_data_labels.shape[0] same_label_idx = np.where(true_query_data_labels == assigned_query_data_labels)[0] wrong_label_idx = np.where(true_query_data_labels != assigned_query_data_labels)[0] correct_pairs = [] for i in query_point_assignment_array[same_label_idx]: query_idx, init_idx = i if init_idx < num_R: correct_pairs.append( [ query_data_dict["paths"].astype(object)[query_idx], init_Rdata_dict["paths"].astype(object)[init_idx], query_data_dict["labels"][query_idx], init_Rdata_dict["labels"][init_idx], ] ) else: 
correct_pairs.append( [ query_data_dict["paths"].astype(object)[query_idx], init_Edata_dict["paths"].astype(object)[init_idx - num_R], query_data_dict["labels"][query_idx], init_Edata_dict["labels"][init_idx - num_R], ] ) wrong_pairs = [] for i in query_point_assignment_array[wrong_label_idx]: query_idx, init_idx = i if init_idx < num_R: wrong_pairs.append( [ query_data_dict["paths"].astype(object)[query_idx], init_Rdata_dict["paths"].astype(object)[init_idx], query_data_dict["labels"][query_idx], init_Rdata_dict["labels"][init_idx], ] ) else: wrong_pairs.append( [ query_data_dict["paths"].astype(object)[query_idx], init_Edata_dict["paths"].astype(object)[init_idx - num_R], query_data_dict["labels"][query_idx], init_Edata_dict["labels"][init_idx - num_R], ] ) with open( os.path.join(root, "logs", "analyzed_query_point_assignments.pkl"), "wb" ) as f: pickle.dump( { "accuracy": accuracy, "same_label_idx": same_label_idx, "wrong_label_idx": wrong_label_idx, "correct_pairs": correct_pairs, "wrong_pairs": wrong_pairs, "query_point_assignment_array": query_point_assignment_array, }, f, ) same_label_image_path = os.path.join(root, "visualization", "same_label_images") wrong_label_image_path = os.path.join(root, "visualization", "wrong_label_images") if not os.path.exists(wrong_label_image_path): os.mkdir(wrong_label_image_path) if not os.path.exists(same_label_image_path): os.mkdir(same_label_image_path) for i in range(n_points_to_copy): query_image_path, init_image_path, query_label, init_label = correct_pairs[i] path_to_copy = os.path.join( same_label_image_path, "i{0}_init_image_querylabel{1}_initlabel{2}.png".format( str(i), str(query_label), str(init_label) ), ) os.system("cp {0} {1}".format(init_image_path, path_to_copy)) path_to_copy2 = os.path.join( same_label_image_path, "i{0}_query_image_querylabel{1}_initlabel{2}.png".format( str(i), str(query_label), str(init_label) ), ) os.system("cp {0} {1}".format(query_image_path, path_to_copy2)) ( w_query_image_path, 
w_init_image_path, w_query_label, w_init_label, ) = wrong_pairs[i] path_to_copy_w = os.path.join( wrong_label_image_path, "i{0}_init_image_querylabel{1}_initlabel{2}.png".format( str(i), str(w_query_label), str(w_init_label) ), ) os.system("cp {0} {1}".format(w_init_image_path, path_to_copy_w)) path_to_copy_w2 = os.path.join( wrong_label_image_path, "i{0}_query_image_querylabel{1}_initlabel{2}.png".format( i, w_query_label, w_init_label ), ) os.system("cp {0} {1}".format(w_query_image_path, path_to_copy_w2)) return accuracy, correct_pairs, wrong_pairs def _generate_query_sets(version: str, N: int = 5000): """ Generates query sets for qDCA experiment in Section 4.3. :param version: either version1 (dogs vs kitchen utils) or version2 (random). :param N: number of points to sample for R used in DCA. """ with open(f"representations/vgg16/{version}/Rfeatures.pkl", "rb") as f: Rdata_v1 = pickle.load(f) with open(f"representations/vgg16/{version}/Efeatures.pkl", "rb") as f: Edata_v1 = pickle.load(f) init_Ridxs = np.random.choice( np.arange(len(Rdata_v1["feat_lin1"])), size=N, replace=False ) query_Ridxs = np.setdiff1d(np.arange(len(Rdata_v1["feat_lin1"])), init_Ridxs) init_Eidxs = np.random.choice( np.arange(len(Edata_v1["feat_lin1"])), size=N, replace=False ) query_Eidxs = np.setdiff1d(np.arange(len(Edata_v1["feat_lin1"])), init_Eidxs) with open(f"representations/vgg16/{version}/sampled_Rfeatures.pkl", "wb") as f: pickle.dump( { "feat_lin1": Rdata_v1["feat_lin1"][init_Ridxs], "feat_lin2": Rdata_v1["feat_lin2"][init_Ridxs], "labels": Rdata_v1["labels"][init_Ridxs], "paths": np.array(Rdata_v1["paths"])[init_Ridxs], "init_Ridx": init_Ridxs, "query_Ridx": query_Ridxs, }, f, ) with open(f"representations/vgg16/{version}/sampled_Efeatures.pkl", "wb") as f: pickle.dump( { "feat_lin1": Edata_v1["feat_lin1"][init_Eidxs], "feat_lin2": Edata_v1["feat_lin2"][init_Eidxs], "labels": Edata_v1["labels"][init_Eidxs], "paths": np.array(Edata_v1["paths"])[init_Eidxs], "init_Eidx": 
init_Eidxs, "query_Eidx": query_Eidxs, }, f, ) with open(f"representations/vgg16/{version}/query_features.pkl", "wb") as f: pickle.dump( { "feat_lin1": np.concatenate( [ Rdata_v1["feat_lin1"][query_Ridxs], Edata_v1["feat_lin1"][query_Eidxs], ] ), "feat_lin2": np.concatenate( [ Rdata_v1["feat_lin2"][query_Ridxs], Edata_v1["feat_lin2"][query_Eidxs], ] ), "labels": np.concatenate( [Rdata_v1["labels"][query_Ridxs], Edata_v1["labels"][query_Eidxs]] ), "paths": np.concatenate( [ np.array(Rdata_v1["paths"])[query_Ridxs], np.array(Edata_v1["paths"])[query_Eidxs], ] ), "init_Eidxs": init_Eidxs, "query_Eidxs": query_Eidxs, "init_Ridxs": init_Ridxs, "query_Ridxs": query_Ridxs, }, f, )
[ 11748, 2298, 293, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 28686, 628, 198, 4299, 4808, 38200, 2736, 62, 22766, 62, 4122, 62, 562, 16747, 7, 198, 220, 220, 220, 12405, 62, 7890, 62, 11600, 25, 8633, 11, 198, 220, 220, 220, 23...
1.8247
4,923
from .models import * from decorator import * from app_goods.views import getGoodsByID #
[ 6738, 764, 27530, 1330, 1635, 198, 6738, 11705, 1352, 1330, 1635, 198, 198, 6738, 598, 62, 11274, 82, 13, 33571, 1330, 651, 10248, 82, 3886, 2389, 198, 198, 2, 220, 628, 628, 628, 628, 628, 628 ]
2.861111
36
#!/usr/bin/env python3 ############################################################################### # # Category Summaries # # ############################################################################### import datetime import io import json import logging import pprint import sys from typing import Dict, Any from dateutil import tz # set logging logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) # create handler c_handler = logging.StreamHandler() c_handler.setLevel(logging.INFO) # Create formatters and add it to handlers LOG_FORMAT = "[%(asctime)s - %(levelname)-8s - %(module)s:%(name)s ] %(message)s" c_format = logging.Formatter(LOG_FORMAT) c_handler.setFormatter(c_format) # Add handlers to the logger logger.addHandler(c_handler) DATE_FORMAT = "%Y%m%dT%H%M%SZ" # TODO: Convert to defaultdict # https://www.accelebrate.com/blog/using-defaultdict-python # https://stackoverflow.com/questions/9358983/dictionaries-and-default-values # https://docs.python.org/2/library/collections.html#collections.defaultdict CATEGORIES: dict = { "PT": "Personal Time", "PW": "Planned Work", "UW": "Unplanned Work", "OW": "Other Work", } def format_seconds(seconds: int) -> str: """ Convert seconds to a formatted string Convert seconds: 3661 To formatted: " 1:01:01" """ # print(seconds, type(seconds)) hours = seconds // 3600 minutes = seconds % 3600 // 60 seconds = seconds % 60 return f"{hours:4d}:{minutes:02d}:{seconds:02d}" def print_dotted_line(width: int = 72): """Print a dotted (rather 'dashed') line""" print("-" * width) if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 29113, 29113, 7804, 4242, 21017, 198, 2, 198, 2, 21743, 5060, 76, 3166, 198, 2, 198, 2, 198, 29113, 29113, 7804, 4242, 21017, 198, 198, 11748, 4818, 8079, 198, 11748, 33245, ...
2.852041
588
from typing import ParamSpecArgs from flask_restful import Resource, reqparse from models.hotel import HotelModel from flask_jwt_extended import jwt_required from models.site import SiteModel from resources.filtros import * import sqlite3 path_params = reqparse.RequestParser() path_params.add_argument('cidade', type=str) path_params.add_argument('estrelas_min', type=float) path_params.add_argument('estrelas_max', type=float) path_params.add_argument('diaria_min', type=float) path_params.add_argument('diaria_max', type=float) path_params.add_argument('limit', type=float) path_params.add_argument('offset', type=float)
[ 6738, 19720, 1330, 25139, 22882, 42035, 198, 6738, 42903, 62, 2118, 913, 1330, 20857, 11, 43089, 29572, 198, 6738, 4981, 13, 8940, 417, 1330, 12696, 17633, 198, 6738, 42903, 62, 73, 46569, 62, 2302, 1631, 1330, 474, 46569, 62, 35827, 19...
3.187817
197
from __future__ import absolute_import, print_function, unicode_literals if __name__ == "__main__": from .cli import cli cli.wormhole() else: # raise ImportError('this module should not be imported') pass
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 11, 3601, 62, 8818, 11, 28000, 1098, 62, 17201, 874, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 422, 764, 44506, 1330, 537, 72, 198, 220, 220, 22...
2.986486
74
import networkx as nx import utils.connectivity_metrics as connectivity_metric from platypus import NSGAII, EpsMOEA, NSGAIII, EpsNSGAII, Problem, Dominance, Subset, TournamentSelector, \ HypervolumeFitnessEvaluator, Archive import statistics import multiprocessing as mp G = nx.read_adjlist("input/Ventresca/BarabasiAlbert_n500m1.txt") k = 50 num_of_tests = 10 def get_critical_nodes(): algorithm = NSGAII(CNDP(), selector=TournamentSelector(dominance=BergeDominance()), archive=BergeArchive()) algorithm.run(1000) fitness = algorithm.result[0].objectives[0] print(fitness) return fitness if __name__ == '__main__': pool = mp.Pool(mp.cpu_count()) samples = pool.starmap_async(get_critical_nodes, [() for _ in range(num_of_tests)]).get() pool.close() avg = sum(samples) / len(samples) stdev = statistics.stdev(samples) print(f"Average: {avg}") print(f"Standard Deviation: {stdev}")
[ 11748, 3127, 87, 355, 299, 87, 198, 11748, 3384, 4487, 13, 8443, 3458, 62, 4164, 10466, 355, 19843, 62, 4164, 1173, 198, 6738, 40315, 4464, 385, 1330, 10896, 9273, 3978, 11, 43427, 11770, 16412, 11, 10896, 9273, 10855, 11, 43427, 8035, ...
2.672316
354
# Generated by Django 3.2.2 on 2021-09-02 15:10 from django.db import migrations, models import django.db.models.deletion
[ 2, 2980, 515, 416, 37770, 513, 13, 17, 13, 17, 319, 33448, 12, 2931, 12, 2999, 1315, 25, 940, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 198, 11748, 42625, 14208, 13, 9945, 13, 27530, 13, 2934, 1616, 295, ...
2.818182
44
import numpy as np from mcerp import * from uncertainties.core import AffineScalarFunc
[ 11748, 299, 32152, 355, 45941, 198, 6738, 285, 2189, 79, 1330, 1635, 198, 6738, 36553, 13, 7295, 1330, 6708, 500, 3351, 282, 283, 37, 19524, 198 ]
3.346154
26
import requests import re from bs4 import BeautifulSoup
[ 11748, 7007, 198, 11748, 302, 198, 6738, 275, 82, 19, 1330, 23762, 50, 10486, 198 ]
3.733333
15
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, relationship # TODO: db_uri # dialect+driver://username:password@host:port/database?charset=utf8 DB_URI = 'mysql+pymysql://root:root123@127.0.0.1:3300/first_sqlalchemy?charset=utf8' engine = create_engine(DB_URI) Base = declarative_base(bind=engine) session = sessionmaker(bind=engine)() # TODO: User # TODO: Article # TODO: # Base.metadata.drop_all() # TODO: # Base.metadata.create_all() # # user = User(name='zhiliao') # article1 = Article(title='python') # article2 = Article(title='flask') # # user.articles.append(article1) # user.articles.append(article2) # TODO: # session.add(user) # session.commit() # TODO: 1.session.delete`nullable=False` # TODO: 2.session.delete`nullable=False` user = session.query(User).first() print(user) session.delete(user) session.commit()
[ 6738, 44161, 282, 26599, 1330, 2251, 62, 18392, 11, 29201, 11, 34142, 11, 10903, 11, 8708, 9218, 198, 6738, 44161, 282, 26599, 13, 2302, 13, 32446, 283, 876, 1330, 2377, 283, 876, 62, 8692, 198, 6738, 44161, 282, 26599, 13, 579, 1330,...
2.756447
349
# coding: utf-8 from __future__ import unicode_literals from ...lemmatizer import read_index, read_exc import pytest
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 2644, 293, 3020, 265, 7509, 1330, 1100, 62, 9630, 11, 1100, 62, 41194, 198, 198, 11748, 12972, 9288, 628, 628, 628, 628...
2.930233
43
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- try: from ._models_py3 import AcquiredPhoneNumbers from ._models_py3 import CommunicationError from ._models_py3 import CommunicationErrorResponse from ._models_py3 import PhoneNumberCapabilities from ._models_py3 import PhoneNumberCapabilitiesRequest from ._models_py3 import PhoneNumberCost from ._models_py3 import PhoneNumberOperation from ._models_py3 import PhoneNumberPurchaseRequest from ._models_py3 import PhoneNumberSearchRequest from ._models_py3 import PhoneNumberSearchResult from ._models_py3 import PurchasedPhoneNumber except (SyntaxError, ImportError): from ._models import AcquiredPhoneNumbers # type: ignore from ._models import CommunicationError # type: ignore from ._models import CommunicationErrorResponse # type: ignore from ._models import PhoneNumberCapabilities # type: ignore from ._models import PhoneNumberCapabilitiesRequest # type: ignore from ._models import PhoneNumberCost # type: ignore from ._models import PhoneNumberOperation # type: ignore from ._models import PhoneNumberPurchaseRequest # type: ignore from ._models import PhoneNumberSearchRequest # type: ignore from ._models import PhoneNumberSearchResult # type: ignore from ._models import PurchasedPhoneNumber # type: ignore from ._phone_numbers_client_enums import ( BillingFrequency, PhoneNumberAssignmentType, PhoneNumberCapabilityType, PhoneNumberOperationStatus, PhoneNumberOperationType, PhoneNumberType, ) __all__ = [ 'AcquiredPhoneNumbers', 'CommunicationError', 'CommunicationErrorResponse', 'PhoneNumberCapabilities', 
'PhoneNumberCapabilitiesRequest', 'PhoneNumberCost', 'PhoneNumberOperation', 'PhoneNumberPurchaseRequest', 'PhoneNumberSearchRequest', 'PhoneNumberSearchResult', 'PurchasedPhoneNumber', 'BillingFrequency', 'PhoneNumberAssignmentType', 'PhoneNumberCapabilityType', 'PhoneNumberOperationStatus', 'PhoneNumberOperationType', 'PhoneNumberType', ]
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 16529, 35937, 198, 2, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 49962, 739, 262, 17168, 13789, 13, 4091, 13789, 13, 14116, 287, 262, 1628, 6808, 329, 5964, 1321, 13, 19...
3.666667
690
import numpy as np from keras.models import Sequential from keras.layers import LSTM, Dense, Dropout def visualize_training_results(results): """ Plots the loss and accuracy for the training and testing data """ history = results.history plt.figure(figsize=(12,4)) plt.plot(history['val_loss']) plt.plot(history['loss']) plt.legend(['val_loss', 'loss']) plt.title('Loss') plt.xlabel('Epochs') plt.ylabel('Loss') plt.show() plt.figure(figsize=(12,4)) plt.plot(history['val_accuracy']) plt.plot(history['accuracy']) plt.legend(['val_accuracy', 'accuracy']) plt.title('Accuracy') plt.xlabel('Epochs') plt.ylabel('Accuracy') plt.show() def split_sequence(seq, n_steps_in, n_steps_out): """ Splits the univariate time sequence """ X, y = [], [] for i in range(len(seq)): end = i + n_steps_in out_end = end + n_steps_out if out_end > len(seq): break seq_x, seq_y = seq[i:end], seq[end:out_end] X.append(seq_x) y.append(seq_y) return np.array(X), np.array(y) def layer_maker(n_layers, n_nodes, activation, drop=None, d_rate=.5): """ Create a specified number of hidden layers for an RNN Optional: Adds regularization option, dropout layer to prevent potential overfitting if necessary """ model = Sequential() # Creating the specified number of hidden layers with the specified number of nodes for x in range(1,n_layers+1): model.add(LSTM(n_nodes, activation=activation, return_sequences=True)) # Adds a Dropout layer after every Nth hidden layer (the 'drop' variable) try: if x % drop == 0: model.add(Dropout(d_rate)) except: pass
[ 11748, 299, 32152, 355, 45941, 198, 6738, 41927, 292, 13, 27530, 1330, 24604, 1843, 198, 6738, 41927, 292, 13, 75, 6962, 1330, 406, 2257, 44, 11, 360, 1072, 11, 14258, 448, 198, 198, 4299, 38350, 62, 34409, 62, 43420, 7, 43420, 2599, ...
2.222892
830
import torch import torch.nn.functional as F self_attn_func = SelfAttnFunc.apply
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 628, 198, 198, 944, 62, 1078, 77, 62, 20786, 796, 12189, 8086, 77, 37, 19524, 13, 39014, 198 ]
2.896552
29
from datetime import datetime from app import db from app.utils import misc
[ 6738, 4818, 8079, 1330, 4818, 8079, 201, 198, 6738, 598, 1330, 20613, 201, 198, 6738, 598, 13, 26791, 1330, 12747, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198, 201, 198 ]
2.78125
32
import torch.nn as nn import torch from utils import Flatten , Unflatten , weights_init , down_conv , up_conv
[ 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 198, 6738, 3384, 4487, 1330, 1610, 41769, 837, 791, 2704, 41769, 837, 19590, 62, 15003, 837, 866, 62, 42946, 837, 510, 62, 42946 ]
3.30303
33
"""This module contains helper functions and utilities for nelpy.""" __all__ = ['spatial_information', 'frange', 'swap_cols', 'swap_rows', 'pairwise', 'is_sorted', 'linear_merge', 'PrettyDuration', 'ddt_asa', 'get_contiguous_segments', 'get_events_boundaries', 'get_threshold_crossing_epochs', '_bst_get_bins'] import numpy as np import logging from itertools import tee, repeat from collections import namedtuple from math import floor from scipy.signal import hilbert import scipy.ndimage.filters #import gaussian_filter1d, gaussian_filter from numpy import log, ceil import copy import sys import ctypes from multiprocessing import Array, cpu_count from multiprocessing.pool import Pool import pdb from . import core # so that core.RegularlySampledAnalogSignalArray is exposed from . import auxiliary # so that auxiliary.TuningCurve1D is epxosed from . import filtering from .utils_.decorators import keyword_deprecation # def sub2ind(array_shape, rows, cols): # ind = rows*array_shape[1] + cols # ind[ind < 0] = -1 # ind[ind >= array_shape[0]*array_shape[1]] = -1 # return ind # def ind2sub(array_shape, ind): # # see also np.unravel_index(ind, array.shape) # ind[ind < 0] = -1 # ind[ind >= array_shape[0]*array_shape[1]] = -1 # rows = (ind.astype('int') / array_shape[1]) # cols = ind % array_shape[1] # return (rows, cols) def ragged_array(arr): """Takes a list of arrays, and returns a ragged array. See https://github.com/numpy/numpy/issues/12468 """ n_elem = len(arr) out = np.array(n_elem*[None]) for ii in range(out.shape[0]): out[ii] = arr[ii] return out def asa_indices_within_epochs(asa, intervalarray): """Return indices of ASA within epochs. [[start, stop] ... [start, stop]] so that data can be associated with asa._data[:,start:stop] for each epoch. 
""" indices = [] intervalarray = intervalarray[asa.support] for interval in intervalarray.merge().data: a_start = interval[0] a_stop = interval[1] frm, to = np.searchsorted(asa._abscissa_vals, (a_start, a_stop)) indices.append((frm, to)) indices = np.array(indices, ndmin=2) return indices def frange(start, stop, step): """arange with floating point step""" # TODO: this function is not very general; we can extend it to work # for reverse (stop < start), empty, and default args, etc. # there are also many edge cases where this is weird. # see https://stackoverflow.com/questions/7267226/range-for-floats # for better alternatives. num_steps = int(np.floor((stop-start)/step)) return np.linspace(start, stop, num=num_steps, endpoint=False) def spatial_information(ratemap): """Compute the spatial information and firing sparsity... The specificity index examines the amount of information (in bits) that a single spike conveys about the animal's location (i.e., how well cell firing predicts the animal's location).The spatial information content of cell discharge was calculated using the formula: information content = \Sum P_i(R_i/R)log_2(R_i/R) where i is the bin number, P_i, is the probability for occupancy of bin i, R_i, is the mean firing rate for bin i, and R is the overall mean firing rate. In order to account for the effects of low firing rates (with fewer spikes there is a tendency toward higher information content) or random bursts of firing, the spike firing time-series was randomly offset in time from the rat location time-series, and the information content was calculated. A distribution of the information content based on 100 such random shifts was obtained and was used to compute a standardized score (Zscore) of information content for that cell. 
While the distribution is not composed of independent samples, it was nominally normally distributed, and a Z value of 2.29 was chosen as a cut-off for significance (the equivalent of a one-tailed t-test with P = 0.01 under a normal distribution). Reference(s) ------------ Markus, E. J., Barnes, C. A., McNaughton, B. L., Gladden, V. L., and Skaggs, W. E. (1994). "Spatial information content and reliability of hippocampal CA1 neurons: effects of visual input", Hippocampus, 4(4), 410-421. Parameters ---------- ratemap : array of shape (n_units, n_bins) Rate map in Hz. Returns ------- si : array of shape (n_units,) spatial information (in bits) per unit """ ratemap = copy.deepcopy(ratemap) # ensure that the ratemap always has nonzero firing rates, # otherwise the spatial information might return NaNs: bkg_rate = ratemap[ratemap>0].min() ratemap[ratemap < bkg_rate] = bkg_rate number_of_spatial_bins = np.prod(ratemap.shape[1:]) weight_per_bin = 1/number_of_spatial_bins Pi = 1 if len(ratemap.shape) == 3: # we have 2D tuning curve, (n_units, n_x, n_y) R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate Ri = np.transpose(ratemap, (2,1,0)) si = np.sum(np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1), axis=1) elif len(ratemap.shape) == 2: # we have 1D tuning curve, (n_units, n_x) R = ratemap.mean(axis=1) # mean firing rate Ri = ratemap.T si = np.sum((Pi*((Ri / R)*np.log2(Ri / R)).T), axis=1) else: raise TypeError("rate map shape not supported / understood!") return si/number_of_spatial_bins def spatial_sparsity(ratemap): """Compute the firing sparsity... 
The specificity index examines the amount of information (in bits) that a single spike conveys about the animal's location (i.e., how well cell firing predicts the animal's location).The spatial information content of cell discharge was calculated using the formula: information content = \Sum P_i(R_i/R)log_2(R_i/R) where i is the bin number, P_i, is the probability for occupancy of bin i, R_i, is the mean firing rate for bin i, and R is the overall mean firing rate. In order to account for the effects of low firing rates (with fewer spikes there is a tendency toward higher information content) or random bursts of firing, the spike firing time-series was randomly offset in time from the rat location time-series, and the information content was calculated. A distribution of the information content based on 100 such random shifts was obtained and was used to compute a standardized score (Zscore) of information content for that cell. While the distribution is not composed of independent samples, it was nominally normally distributed, and a Z value of 2.29 was chosen as a cut-off for significance (the equivalent of a one-tailed t-test with P = 0.01 under a normal distribution). Reference(s) ------------ Markus, E. J., Barnes, C. A., McNaughton, B. L., Gladden, V. L., and Skaggs, W. E. (1994). "Spatial information content and reliability of hippocampal CA1 neurons: effects of visual input", Hippocampus, 4(4), 410-421. Parameters ---------- occupancy : array of shape (n_bins,) Occupancy of the animal. ratemap : array of shape (n_units, n_bins) Rate map in Hz. 
Returns ------- si : array of shape (n_units,) spatial information (in bits) per unit sparsity: array of shape (n_units,) sparsity (in percent) for each unit """ number_of_spatial_bins = np.prod(ratemap.shape[1:]) weight_per_bin = 1/number_of_spatial_bins Pi = 1 if len(ratemap.shape) == 3: # we have 2D tuning curve, (n_units, n_x, n_y) R = ratemap.mean(axis=1).mean(axis=1) # mean firing rate Ri = ratemap sparsity = np.sum(np.sum((Ri*Pi), axis=1), axis=1)/(R**2) elif len(ratemap.shape) == 2: # we have 1D tuning curve, (n_units, n_x) R = ratemap.mean(axis=1) # mean firing rate Ri = ratemap.T sparsity = np.sum((Pi*Ri.T), axis=1)/(R**2) else: raise TypeError("rate map shape not supported / understood!") return sparsity/number_of_spatial_bins def _bst_get_bins_inside_interval(interval, ds, w=1): """(np.array) Return bin edges entirely contained inside an interval. Bin edges always start at interval.start, and continue for as many bins as would fit entirely inside the interval. NOTE 1: there are (n+1) bin edges associated with n bins. WARNING: if an interval is smaller than ds, then no bin will be associated with the particular interval. NOTE 2: nelpy uses half-open intervals [a,b), but if the bin width divides b-a, then the bins will cover the entire range. For example, if interval = [0,2) and ds = 1, then bins = [0,1,2], even though [0,2] is not contained in [0,2). There might be numerical precision deviations from this? Parameters ---------- interval : EpochArray EpochArray containing a single interval with a start, and stop ds : float Time bin width, in seconds. w : number of bins to use in a sliding window mode. Default is 1 (no sliding window). For example, 40 ms bins, with a stride of 5 ms, can be achieved by using (ds=0.005, w=8) For now, w has to be an integer, and therefore 5 second bins, with a stride of 2 seconds are not supported within this framework. 
Returns ------- bins : array Bin edges in an array of shape (n+1,) where n is the number of bins centers : array Bin centers in an array of shape (n,) where n is the number of bins """ if interval.length < ds: return None, None n_bins = int(np.floor(interval.length / ds)) # number of bins # linspace is better than arange for non-integral steps bins = np.linspace(interval.start, interval.start + n_bins*ds, n_bins+1) if w > 1: wn_bins = np.max((1, n_bins - w + 1)) wn_bins = bins[:wn_bins+1] + w/2*ds - ds/2 bins = wn_bins centers = bins[:-1] + (ds / 2) return bins, centers def _bst_get_bins(intervalArray, ds, w=1): """ Docstring goes here. TBD. For use with bins that are contained wholly inside the intervals. """ b = [] # bin list c = [] # centers list left_edges = [] right_edges = [] counter = 0 for interval in intervalArray: bins, centers = _bst_get_bins_inside_interval(interval=interval, ds=ds, w=w) if bins is not None: left_edges.append(counter) counter += len(centers) - 1 right_edges.append(counter) counter += 1 b.extend(bins.tolist()) c.extend(centers.tolist()) bins = np.array(b) bin_centers = np.array(c) le = np.array(left_edges) le = le[:, np.newaxis] re = np.array(right_edges) re = re[:, np.newaxis] binned_support = np.hstack((le, re)) lengths = np.atleast_1d((binned_support[:,1] - binned_support[:,0] + 1).squeeze()) support_starts = bins[np.insert(np.cumsum(lengths+1),0,0)[:-1]] support_stops = bins[np.insert(np.cumsum(lengths+1)-1,0,0)[1:]] supportdata = np.vstack([support_starts, support_stops]).T support = type(intervalArray)(supportdata) # set support to TRUE bin support return bins, bin_centers, binned_support, support def is_odd(n): """Returns True if n is odd, and False if n is even. Assumes integer. 
""" return bool(n & 1) def swap_cols(arr, frm, to): """swap columns of a 2D np.array""" if arr.ndim > 1: arr[:,[frm, to]] = arr[:,[to, frm]] else: arr[frm], arr[to] = arr[to], arr[frm] def swap_rows(arr, frm, to): """swap rows of a 2D np.array""" if arr.ndim > 1: arr[[frm, to],:] = arr[[to, frm],:] else: arr[frm], arr[to] = arr[to], arr[frm] def pairwise(iterable): """returns a zip of all neighboring pairs. This is used as a helper function for is_sorted. Example ------- >>> mylist = [2, 3, 6, 8, 7] >>> list(pairwise(mylist)) [(2, 3), (3, 6), (6, 8), (8, 7)] """ a, b = tee(iterable) next(b, None) return zip(a, b) def is_sorted_general(iterable, key=lambda a, b: a <= b): """Check to see if iterable is monotonic increasing (sorted).""" return all(key(a, b) for a, b in pairwise(iterable)) def is_sorted(x, chunk_size=None): """Returns True if iterable is monotonic increasing (sorted). NOTE: intended for 1D array, list or tuple. Will not work on more than 1D This function works in-core with memory footrpint XXX. chunk_size = 100000 is probably a good choice. """ if not isinstance(x, (tuple, list, np.ndarray)): raise TypeError("Unsupported type {}".format(type(x))) x = np.atleast_1d(np.array(x).squeeze()) if x.ndim > 1: raise ValueError("Input x must be 1-dimensional") if chunk_size is None: chunk_size = 500000 stop = x.size for chunk_start in range(0, stop, chunk_size): chunk_stop = int(min(stop, chunk_start + chunk_size + 1)) chunk = x[chunk_start:chunk_stop] if not np.all(chunk[:-1] <= chunk[1:]): return False return True def get_mua_events(mua, fs=None, minLength=None, maxLength=None, PrimaryThreshold=None, minThresholdLength=None, SecondaryThreshold=None): """Determine MUA/PBEs from multiunit activity. MUA : multiunit activity PBE : population burst event Parameters ---------- mua : AnalogSignalArray AnalogSignalArray with one signal, namely the multiunit firing rate [in Hz]. fs : float, optional Sampling frequency of mua, in Hz. 
If not specified, it will be inferred from mua.fs minLength : float, optional maxLength : float, optional PrimaryThreshold : float, optional SecondaryThreshold : float, optional minThresholdLength : float, optional Returns ------- mua_epochs : EpochArray EpochArray containing all the MUA events / PBEs. Example ------- mua = get_mua(spiketrain) mua_epochs = get_mua_events(mua) PBEs = get_PBEs(spiketrain, min_active=5) = get_PBEs(get_mua_events(get_mua(*)), spiketrain, min_active=5) """ if fs is None: fs = mua.fs if fs is None: raise ValueError("fs must either be specified, or must be contained in mua!") if PrimaryThreshold is None: PrimaryThreshold = mua.mean() + 3*mua.std() if SecondaryThreshold is None: SecondaryThreshold = mua.mean() if minLength is None: minLength = 0.050 # 50 ms minimum event duration if maxLength is None: maxLength = 0.750 # 750 ms maximum event duration if minThresholdLength is None: minThresholdLength = 0.0 # determine MUA event bounds: mua_bounds_idx, maxes, _ = get_events_boundaries( x = mua.data, PrimaryThreshold = PrimaryThreshold, SecondaryThreshold = SecondaryThreshold, minThresholdLength = minThresholdLength, minLength = minLength, maxLength = maxLength, ds = 1/fs ) if len(mua_bounds_idx) == 0: logging.warning("no mua events detected") return core.EpochArray(empty=True) # store MUA bounds in an EpochArray mua_epochs = core.EpochArray(mua.time[mua_bounds_idx]) return mua_epochs def get_contiguous_segments(data, *, step=None, assume_sorted=None, in_core=True, index=False, inclusive=False, fs=None, sort=None, in_memory=None): """Compute contiguous segments (seperated by step) in a list. Note! This function requires that a sorted list is passed. It first checks if the list is sorted O(n), and only sorts O(n log(n)) if necessary. But if you know that the list is already sorted, you can pass assume_sorted=True, in which case it will skip the O(n) check. 
Returns an array of size (n_segments, 2), with each row being of the form ([start, stop]) [inclusive, exclusive]. NOTE: when possible, use assume_sorted=True, and step=1 as explicit arguments to function call. WARNING! Step is robustly computed in-core (i.e., when in_core is True), but is assumed to be 1 when out-of-core. Example ------- >>> data = [1,2,3,4,10,11,12] >>> get_contiguous_segments(data) ([1,5], [10,13]) >>> get_contiguous_segments(data, index=True) ([0,4], [4,7]) Parameters ---------- data : array-like 1D array of sequential data, typically assumed to be integral (sample numbers). step : float, optional Expected step size for neighboring samples. Default uses numpy to find the median, but it is much faster and memory efficient to explicitly pass in step=1. assume_sorted : bool, optional If assume_sorted == True, then data is not inspected or re-ordered. This can be significantly faster, especially for out-of-core computation, but it should only be used when you are confident that the data is indeed sorted, otherwise the results from get_contiguous_segments will not be reliable. in_core : bool, optional If True, then we use np.diff which requires all the data to fit into memory simultaneously, otherwise we use groupby, which uses a generator to process potentially much larger chunks of data, but also much slower. index : bool, optional If True, the indices of segment boundaries will be returned. Otherwise, the segment boundaries will be returned in terms of the data itself. Default is False. inclusive : bool, optional If True, the boundaries are returned as [(inclusive idx, inclusive idx)] Default is False, and can only be used when index==True. Deprecated ---------- in_memory : bool, optional This is equivalent to the new 'in-core'. 
sort : bool, optional This is equivalent to the new 'assume_sorted' fs : sampling rate (Hz) used to extend half-open interval support by 1/fs """ # handle deprecated API calls: if in_memory: in_core = in_memory logging.warning("'in_memory' has been deprecated; use 'in_core' instead") if sort: assume_sorted = sort logging.warning("'sort' has been deprecated; use 'assume_sorted' instead") if fs: step = 1/fs logging.warning("'fs' has been deprecated; use 'step' instead") if inclusive: assert index, "option 'inclusive' can only be used with 'index=True'" if in_core: data = np.asarray(data) if not assume_sorted: if not is_sorted(data): data = np.sort(data) # algorithm assumes sorted list if step is None: step = np.median(np.diff(data)) # assuming that data(t1) is sampled somewhere on [t, t+1/fs) we have a 'continuous' signal as long as # data(t2 = t1+1/fs) is sampled somewhere on [t+1/fs, t+2/fs). In the most extreme case, it could happen # that t1 = t and t2 = t + 2/fs, i.e. a difference of 2 steps. 
if np.any(np.diff(data) < step): logging.warning("some steps in the data are smaller than the requested step size.") breaks = np.argwhere(np.diff(data)>=2*step) starts = np.insert(breaks+1, 0, 0) stops = np.append(breaks, len(data)-1) bdries = np.vstack((data[starts], data[stops] + step)).T if index: if inclusive: indices = np.vstack((starts, stops)).T else: indices = np.vstack((starts, stops + 1)).T return indices else: from itertools import groupby from operator import itemgetter if not assume_sorted: if not is_sorted(data): # data = np.sort(data) # algorithm assumes sorted list raise NotImplementedError("out-of-core sorting has not been implemented yet...") if step is None: step = 1 bdries = [] if not index: for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])): f = itemgetter(1) gen = (f(x) for x in g) start = next(gen) stop = start for stop in gen: pass bdries.append([start, stop + step]) else: counter = 0 for k, g in groupby(enumerate(data), lambda ix: (ix[0] - ix[1])): f = itemgetter(1) gen = (f(x) for x in g) _ = next(gen) start = counter stop = start for _ in gen: stop +=1 if inclusive: bdries.append([start, stop]) else: bdries.append([start, stop + 1]) counter = stop + 1 return np.asarray(bdries) def get_direction(asa, *, sigma=None): """Return epochs during which an animal was running left to right, or right to left. Parameters ---------- asa : AnalogSignalArray 1D AnalogSignalArray containing the 1D position data. sigma : float, optional Smoothing to apply to position (x) before computing gradient estimate. Default is 0. Returns ------- l2r, r2l : EpochArrays EpochArrays corresponding to left-to-right and right-to-left movement. """ if sigma is None: sigma = 0 if not isinstance(asa, core.AnalogSignalArray): raise TypeError('AnalogSignalArray expected!') assert asa.n_signals == 1, "1D AnalogSignalArray expected!" 
direction = dxdt_AnalogSignalArray(asa.smooth(sigma=sigma), rectify=False).data direction[direction>=0] = 1 direction[direction<0] = -1 direction = direction.squeeze() l2r = get_contiguous_segments(np.argwhere(direction>0).squeeze(), step=1) l2r[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive] l2r = core.EpochArray(asa.abscissa_vals[l2r]) r2l = get_contiguous_segments(np.argwhere(direction<0).squeeze(), step=1) r2l[:,1] -= 1 # change bounds from [inclusive, exclusive] to [inclusive, inclusive] r2l = core.EpochArray(asa.abscissa_vals[r2l]) return l2r, r2l def __radd__(self, other): """b + a""" return self.__add__(other) def __sub__(self, other): """a - b""" return PrettyDuration(self.duration - other) def __rsub__(self, other): """b - a""" return other - self.duration def __mul__(self, other): """a * b""" return PrettyDuration(self.duration * other) def __rmul__(self, other): """b * a""" return self.__mul__(other) def __truediv__(self, other): """a / b""" return PrettyDuration(self.duration / other) def shrinkMatColsTo(mat, numCols): """ Docstring goes here Shrinks a NxM1 matrix down to an NxM2 matrix, where M2 <= M1""" import scipy.ndimage numCells = mat.shape[0] numColsMat = mat.shape[1] a = np.zeros((numCells, numCols)) for row in np.arange(numCells): niurou = scipy.ndimage.interpolation.zoom(input=mat[row,:], zoom=(numCols/numColsMat), order = 1) a[row,:] = niurou return a def find_threshold_crossing_events(x, threshold, *, mode='above'): """Find threshold crossing events. 
INCLUSIVE Parameters ---------- x : numpy array Input data threshold : float The value whose crossing triggers an event mode : string, optional in ['above', 'below']; default 'above' event triggering above, or below threshold Returns ------- eventlist : list List containing the indices corresponding to threshold crossings eventmax : list List containing the maximum value of each event """ from itertools import groupby from operator import itemgetter if mode == 'below': cross_threshold = np.where(x <= threshold, 1, 0) elif mode == 'above': cross_threshold = np.where(x >= threshold, 1, 0) else: raise NotImplementedError( "mode {} not understood for find_threshold_crossing_events".format(str(mode))) eventlist = [] eventmax = [] for k,v in groupby(enumerate(cross_threshold),key=itemgetter(1)): if k: v = list(v) eventlist.append([v[0][0],v[-1][0]]) try : eventmax.append(x[v[0][0]:(v[-1][0]+1)].max()) except : print(v, x[v[0][0]:v[-1][0]]) eventmax = np.asarray(eventmax) eventlist = np.asarray(eventlist) return eventlist, eventmax def get_events_boundaries(x, *, PrimaryThreshold=None, SecondaryThreshold=None, minThresholdLength=None, minLength=None, maxLength=None, ds=None, mode='above'): """get event boundaries such that event.max >= PrimaryThreshold and the event extent is defined by SecondaryThreshold. Note that when PrimaryThreshold==SecondaryThreshold, then this is a simple threshold crossing algorithm. NB. minLength and maxLength are applied to the SecondaryThreshold events, whereas minThresholdLength is applied to the PrimaryThreshold events. 
Parameters ---------- x : numpy array Input data mode : string, optional in ['above', 'below']; default 'above' event triggering above, or below threshold PrimaryThreshold : float, optional If mode=='above', requires that event.max >= PrimaryThreshold If mode=='below', requires that event.min <= PrimaryThreshold SecondaryThreshold : float, optional The value that defines the event extent minThresholdLength : float, optional Minimum duration for which the PrimaryThreshold is crossed minLength : float, optional Minimum duration for which the SecondaryThreshold is crossed maxLength : float, optional Maximum duration for which the SecondaryThreshold is crossed ds : float, optional Time step of the input data x Returns ------- returns bounds, maxes, events where bounds <==> SecondaryThreshold to SecondaryThreshold, inclusive maxes <==> maximum value during each event events <==> PrimaryThreshold to PrimaryThreshold, inclusive """ # TODO: x must be a numpy array # TODO: ds is often used, but we have no default, and no check for when # it is left as None. # TODO: the Docstring should equally be improved. x = x.squeeze() if x.ndim > 1: raise TypeError("multidimensional arrays not supported!") if PrimaryThreshold is None: # by default, threshold is 3 SDs above mean of x PrimaryThreshold = np.mean(x) + 3*np.std(x) if SecondaryThreshold is None: # by default, revert back to mean of x SecondaryThreshold = np.mean(x) # + 0*np.std(x) events, _ = \ find_threshold_crossing_events(x=x, threshold=PrimaryThreshold, mode=mode) # apply minThresholdLength criterion: if minThresholdLength is not None and len(events) > 0: durations = (events[:,1] - events[:,0] + 1) * ds events = events[[durations >= minThresholdLength]] if len(events) == 0: bounds, maxes, events = [], [], [] logging.warning("no events satisfied criteria") return bounds, maxes, events # Find periods where value is > SecondaryThreshold; note that the previous periods should be within these! 
if mode == 'above': assert SecondaryThreshold <= PrimaryThreshold, \ "Secondary Threshold by definition should include more data than Primary Threshold" elif mode == 'below': assert SecondaryThreshold >= PrimaryThreshold, \ "Secondary Threshold by definition should include more data than Primary Threshold" else: raise NotImplementedError( "mode {} not understood for find_threshold_crossing_events".format(str(mode))) bounds, broader_maxes = \ find_threshold_crossing_events(x=x, threshold=SecondaryThreshold, mode=mode) # Find corresponding big windows for potential events # Specifically, look for closest left edge that is just smaller outer_boundary_indices = np.searchsorted(bounds[:,0], events[:,0], side='right') # searchsorted finds the index after, so subtract one to get index before outer_boundary_indices = outer_boundary_indices - 1 # Find extended boundaries for events by pairing to larger windows # (Note that there may be repeats if the larger window contains multiple > 3SD sections) bounds = bounds[outer_boundary_indices,:] maxes = broader_maxes[outer_boundary_indices] if minLength is not None and len(events) > 0: durations = (bounds[:,1] - bounds[:,0] + 1) * ds # TODO: refactor [durations <= maxLength] but be careful about edge cases bounds = bounds[[durations >= minLength]] maxes = maxes[[durations >= minLength]] events = events[[durations >= minLength]] if maxLength is not None and len(events) > 0: durations = (bounds[:,1] - bounds[:,0] + 1) * ds # TODO: refactor [durations <= maxLength] but be careful about edge cases bounds = bounds[[durations <= maxLength]] maxes = maxes[[durations <= maxLength]] events = events[[durations <= maxLength]] if len(events) == 0: bounds, maxes, events = [], [], [] logging.warning("no events satisfied criteria") return bounds, maxes, events # Now, since all that we care about are the larger windows, so we should get rid of repeats _, unique_idx = np.unique(bounds[:,0], return_index=True) bounds = bounds[unique_idx,:] # 
SecondaryThreshold to SecondaryThreshold maxes = maxes[unique_idx] # maximum value during event events = events[unique_idx,:] # PrimaryThreshold to PrimaryThreshold return bounds, maxes, events def signal_envelope1D(data, *, sigma=None, fs=None): logging.warnings("'signal_envelope1D' is deprecated; use 'signal_envelope_1d' instead!") return signal_envelope_1d(data, sigma=sigma, fs=fs) def signal_envelope_1d(data, *, sigma=None, fs=None): """Finds the signal envelope by taking the absolute value of the Hilbert transform Parameters ---------- data : numpy array, list, or RegularlySampledAnalogSignalArray Input data If data is a numpy array, it is expected to have shape (n_signals, n_samples) If data is a list, it is expected to have length n_signals, where each sublist has length n_samples, i.e. data is not jagged sigma : float, optional Standard deviation of the Gaussian kernel used to smooth the envelope after applying the Hilbert transform. Units of seconds. Default is 4 ms fs : float, optional Sampling rate of the signal Returns ------- out : same type as the input object An object containing the signal envelope TODO: this is not yet epoch-aware! UPDATE: this is actually epoch-aware by now! """ if sigma is None: sigma = 0.004 # 4 ms standard deviation if fs is None: if isinstance(data, (np.ndarray, list)): raise ValueError("sampling frequency must be specified!") elif isinstance(data, core.RegularlySampledAnalogSignalArray): fs = data.fs if isinstance(data, (np.ndarray, list)): data_array = np.array(data) n_dims = np.array(data).ndim assert n_dims <= 2, "Only 1D signals supported!" 
if n_dims == 1: input_data = data_array.reshape((1, data_array.size)) else: input_data = data_array n_signals, n_samples = input_data.shape # Compute number of samples to compute fast FFTs padlen = nextfastpower(n_samples) - n_samples # Pad data paddeddata = np.hstack( (input_data, np.zeros((n_signals, padlen))) ) # Use hilbert transform to get an envelope envelope = np.absolute(hilbert(paddeddata, axis=-1)) # free up memory del paddeddata # Truncate results back to original length envelope = envelope[..., :n_samples] if sigma: # Smooth envelope with a gaussian (sigma = 4 ms default) EnvelopeSmoothingSD = sigma*fs smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD, mode='constant', axis=-1) envelope = smoothed_envelope if isinstance(data, list): envelope = envelope.tolist() return envelope elif isinstance(data, core.RegularlySampledAnalogSignalArray): # Only ASA data of shape (n_signals, n_timepoints) -> 2D currently supported assert data.data.ndim == 2 cum_lengths = np.insert(np.cumsum(data.lengths), 0, 0) newasa = data.copy() # for segment in data: for idx in range(data.n_epochs): # print('hilberting epoch {}/{}'.format(idx+1, data.n_epochs)) segment_data = data._data[:,cum_lengths[idx]:cum_lengths[idx+1]] n_signals, n_samples = segment_data.shape # Compute number of samples to compute fast FFTs: padlen = nextfastpower(n_samples) - n_samples # Pad data paddeddata = np.hstack( (segment_data, np.zeros((n_signals, padlen))) ) # Use hilbert transform to get an envelope envelope = np.absolute(hilbert(paddeddata, axis=-1)) # free up memory del paddeddata # Truncate results back to original length envelope = envelope[..., :n_samples] if sigma: # Smooth envelope with a gaussian (sigma = 4 ms default) EnvelopeSmoothingSD = sigma*fs smoothed_envelope = scipy.ndimage.filters.gaussian_filter1d(envelope, EnvelopeSmoothingSD, mode='constant', axis=-1) envelope = smoothed_envelope newasa._data[:,cum_lengths[idx]:cum_lengths[idx+1]] = 
np.atleast_2d(envelope) return newasa def nextpower(n, base=2.0): """Return the next integral power of two greater than the given number. Specifically, return m such that m >= n m == 2**x where x is an integer. Use base argument to specify a base other than 2. This is useful for ensuring fast FFT sizes. From https://gist.github.com/bhawkins/4479607 (Brian Hawkins) """ x = base**ceil (log (n) / log (base)) if type(n) == np.ndarray: return np.asarray (x, dtype=int) else: return int (x) def nextfastpower(n): """Return the next integral power of small factors greater than the given number. Specifically, return m such that m >= n m == 2**x * 3**y * 5**z where x, y, and z are integers. This is useful for ensuring fast FFT sizes. From https://gist.github.com/bhawkins/4479607 (Brian Hawkins) See also http://scipy.github.io/devdocs/generated/scipy.fftpack.next_fast_len.html """ if n < 7: return max (n, 1) # x, y, and z are all bounded from above by the formula of nextpower. # Compute all possible combinations for powers of 3 and 5. # (Not too many for reasonable FFT sizes.) n35 = np.outer (power_series (n, 3.0), power_series (n, 5.0)) n35 = n35[n35<=n] # Lump the powers of 3 and 5 together and solve for the powers of 2. n2 = nextpower (n / n35) return int (min (n2 * n35)) def get_threshold_crossing_epochs(asa, t1=None, t2=None, mode='above'): """Return epochs where a signal crosses a compound threshold specified by t1 and t2. Parameters ---------- asa : AnalogSignalArray AnalogSignalArray containing a single channel t1 : float, optional Primary threshold. Minimum signal value that has to be reached / exceeded during an event. Default is 3 standard deviations above signal mean. t2 : float, optional Secondary threshold. Signal value that defines the event boundaries. Default is signal mean. mode : string, optional Mode of operation. One of ['above', 'below']. 
If 'above', then return epochs where the signal exceeds the compound threshold, and if 'below', then return epochs where the signal falls below the compound threshold. Default is 'above'. Returns ------- epochs : EpochArray EpochArray with all the epochs where the signal satisfied the criteria. """ if asa.n_signals > 1: raise TypeError("multidimensional AnalogSignalArrays not supported!") x = asa.data.squeeze() if t1 is None: # by default, threshold is 3 SDs above mean of x t1 = np.mean(x) + 3*np.std(x) if t2 is None: # by default, revert back to mean of x t2 = np.mean(x) # compute periods where signal exceeds compound threshold epoch_bounds, _, _ = get_events_boundaries( x=x, PrimaryThreshold=t1, SecondaryThreshold=t2, mode=mode ) # convert bounds to time in seconds epoch_bounds = asa.time[epoch_bounds] if len(epoch_bounds) == 0: return type(asa._abscissa.support)(empty=True) # add 1/fs to stops for open interval epoch_bounds[:,1] += 1/asa.fs # create EpochArray with threshould exceeding bounds epochs = type(asa._abscissa.support)(epoch_bounds) return epochs def get_run_epochs(speed, v1=10, v2=8): """Return epochs where animal is running at least as fast as specified by v1 and v2. Parameters ---------- speed : AnalogSignalArray AnalogSignalArray containing single channel speed, in units/sec v1 : float, optional Minimum speed (in same units as speed) that has to be reached / exceeded during an event. Default is 10 [units/sec] v2 : float, optional Speed that defines the event boundaries. Default is 8 [units/sec] Returns ------- run_epochs : EpochArray EpochArray with all the epochs where speed satisfied the criteria. """ run_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='above') return run_epochs def get_inactive_epochs(speed, v1=5, v2=7): """Return epochs where animal is running no faster than specified by v1 and v2. 
Parameters ---------- speed : AnalogSignalArray AnalogSignalArray containing single channel speed, in units/sec v1 : float, optional Minimum speed (in same units as speed) that has to be reached / exceeded during an event. Default is 10 [units/sec] v2 : float, optional Speed that defines the event boundaries. Default is 8 [units/sec] Returns ------- inactive_epochs : EpochArray EpochArray with all the epochs where speed satisfied the criteria. """ inactive_epochs = get_threshold_crossing_epochs(asa=speed, t1=v1, t2=v2, mode='below') return inactive_epochs def spiketrain_union(st1, st2): """Join two spiketrains together. WARNING! This function should be improved a lot! """ assert st1.n_units == st2.n_units support = st1.support.join(st2.support) newdata = [] for unit in range(st1.n_units): newdata.append(np.append(st1.time[unit], st2.time[unit])) fs = None if st1.fs == st2.fs: fs = st1.fs return core.SpikeTrainArray(newdata, support=support, fs=fs) ######################################################################## # uncurated below this line! ######################################################################## def find_nearest_idx(array, val): """Finds nearest index in array to value. Parameters ---------- array : np.array val : float Returns ------- Index into array that is closest to val TODO: this is a better version that should be incorporated: # Based on answer here: http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array def find_nearest(array,values): right_idxs = np.searchsorted(array, values, side="left") left_idxs = np.where(right_idxs > 0, right_idxs-1, right_idxs) right_idxs = np.where(right_idxs == len(array), len(array)-1, right_idxs) closest_idx = np.where(np.abs(values - array[right_idxs]) < np.abs(values - array[left_idxs]), right_idxs, left_idxs) return closest_idx """ return (np.abs(array-val)).argmin() def find_nearest_indices(array, vals): """Finds nearest index in array to value. 
Parameters ---------- array : np.array This is the array you wish to index into. vals : np.array This is the array that you are getting your indices from. Returns ------- Indices into array that is closest to vals. Notes ----- Wrapper around find_nearest_idx(). """ return np.array([find_nearest_idx(array, val) for val in vals], dtype=int) def get_sort_idx(tuning_curves): """Finds indices to sort neurons by max firing in tuning curve. Parameters ---------- tuning_curves : list of lists Where each inner list is the tuning curves for an individual neuron. Returns ------- sorted_idx : list List of integers that correspond to the neuron in sorted order. """ tc_max_loc = [] for i, neuron_tc in enumerate(tuning_curves): tc_max_loc.append((i, np.where(neuron_tc == np.max(neuron_tc))[0][0])) sorted_by_tc = sorted(tc_max_loc, key=lambda x: x[1]) sorted_idx = [] for idx in sorted_by_tc: sorted_idx.append(idx[0]) return sorted_idx def collapse_time(obj, gap=0): """Collapse all epochs in a SpikeTrainArray and collapse them into a single, contiguous SpikeTrainArray""" # TODO: redo SpikeTrainArray so as to keep the epochs separate!, and to support gaps! # We'll have to ajust all the spikes per epoch... and we'll have to compute a new support. Also set a flag! # If it's a SpikeTrainArray, then we left-shift the spike times. If it's an AnalogSignalArray, then we # left-shift the time and tdata. # Also set a new attribute, with the boundaries in seconds. 
if isinstance(obj, core.RegularlySampledAnalogSignalArray): new_obj = type(obj)(empty=True) new_obj._data = obj._data durations = obj.support.durations starts = np.insert(np.cumsum(durations + gap),0,0)[:-1] stops = starts + durations newsupport = type(obj._abscissa.support)(np.vstack((starts, stops)).T) new_obj._support = newsupport new_time = obj.time.astype(float) # fast copy time_idx = np.insert(np.cumsum(obj.lengths),0,0) new_offset = 0 for epidx in range(obj.n_epochs): if epidx > 0: new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset + gap new_offset += durations[epidx] + gap else: new_time[time_idx[epidx]:time_idx[epidx+1]] = new_time[time_idx[epidx]:time_idx[epidx+1]] - obj.time[time_idx[epidx]] + new_offset new_offset += durations[epidx] new_obj._time = new_time new_obj._fs = obj._fs elif isinstance(obj, core.EventArray): if gap > 0: raise ValueError("gaps not supported for SpikeTrainArrays yet!") new_obj = type(obj)(empty=True) new_time = [[] for _ in range(obj.n_series)] duration = 0 for st_ in obj: le = st_.support.start for unit_ in range(obj.n_series): new_time[unit_].extend(st_._data[unit_] - le + duration) duration += st_.support.duration new_time = np.asanyarray([np.asanyarray(unittime) for unittime in new_time]) new_obj._data = new_time new_obj.support = type(obj._abscissa.support)([0, duration]) new_obj._series_ids = obj._series_ids new_obj._series_labels = obj._series_labels new_obj._series_tags = obj._series_tags elif isinstance(obj, core.BinnedEventArray): raise NotImplementedError("BinnedEventArrays are not yet supported, but bst.data is essentially already collapsed!") else: raise TypeError("unsupported type for collapse_time") return new_obj def cartesian(xcenters, ycenters): """Finds every combination of elements in two arrays. Parameters ---------- xcenters : np.array ycenters : np.array Returns ------- cartesian : np.array With shape(n_sample, 2). 
""" return np.transpose([np.tile(xcenters, len(ycenters)), np.repeat(ycenters, len(xcenters))])
[ 37811, 1212, 8265, 4909, 31904, 5499, 290, 20081, 329, 299, 417, 9078, 526, 15931, 198, 198, 834, 439, 834, 796, 37250, 2777, 34961, 62, 17018, 3256, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 705, 8310, 858, 3256, 198, ...
2.436577
18,968
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import logging import numpy as np import sys import os import paddle from paddle.fluid import dygraph, core, framework from paddle.fluid.executor import Executor from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX from paddle.nn import Linear, Conv2D, Conv2DTranspose, MaxPool2D, MaxPool1D, BatchNorm1D, BatchNorm2D, BatchNorm3D from paddle.fluid.dygraph.nn import BatchNorm, Pool2D from paddle.fluid.io import load_inference_model, save_inference_model from paddle.nn.layer.activation import ReLU, LeakyReLU, Sigmoid, ReLU6, Tanh, Softmax, PReLU, Swish from paddle.fluid.log_helper import get_logger from . import quant_nn from .. import quantization_pass __all__ = ['ImperativeQuantAware', 'ImperativeCalcOutScale'] _logger = get_logger( __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s') _op_real_in_out_name = { "conv2d": [["Input", "Filter"], ["Output"]], "conv2d_transpose": [["Input", "Filter"], ["Output"]], "pool2d": [["X"], ["Out"]], "elementwise_add": [["X", "Y"], ["Out"]], "softmax": [["X"], ["Out"]], "relu": [["X"], ["Out"]], "relu6": [["X"], ["Out"]], "leaky_relu": [["X"], ["Out"]], "prelu": [["X"], ["Out"]], "tanh": [["X"], ["Out"]], "batch_norm": [["X"], ["Y"]], "sigmoid": [["X"], ["Out"]], "swish": [["X"], ["Out"]], }
[ 2, 220, 220, 15069, 357, 66, 8, 12131, 350, 37382, 47, 37382, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, ...
2.780591
711
import pandas as pd import sqlite3 from pandas import DataFrame n_conn = sqlite3.connect('northwind_small.sqlite3') n_curs = n_conn.cursor() # What are the ten most expensive items (per unit price) in the database? query = """ SELECT ProductName, UnitPrice FROM Product ORDER BY UnitPrice DESC LIMIT 10 """ n_curs.execute(query) print(n_curs.fetchall()) # What is the average age of an employee at the time of their hiring? (Hint: a # lot of arithmetic works with dates.) query = """ SELECT AVG(HireDate-BirthDate) FROM Employee """ n_curs.execute(query) print(n_curs.fetchall()) # answer: 37.22 # (*Stretch*) How does the average age of employee at hire vary by city? query = """SELECT City, AVG(HireDate-BirthDate) FROM Employee GROUP BY City """ n_curs.execute(query) print(n_curs.fetchall()) # What are the ten most expensive items (per unit price) # in the database *and* their suppliers? query = """ SELECT ProductName, UnitPrice, CompanyName FROM Product as p JOIN Supplier as s ON p.SupplierID = s.ID ORDER BY UnitPrice DESC LIMIT 10 """ n_curs.execute(query) print(n_curs.fetchall()) # What is the largest category (by number of unique products in it)? query = """ SELECT CategoryName, COUNT(CategoryName) FROM Category as c JOIN Product as p ON c.ID=p.CategoryID GROUP BY CategoryName ORDER by COUNT(CategoryName) DESC """ n_curs.execute(query) print(n_curs.fetchall()) # largest category is Confections 13 # (*Stretch*) Who's the employee with the most territories? Use `TerritoryId` # (not name, region, or other fields) as the unique identifier for territories. # EMPLOYEE ID 7 query = """ SELECT EmployeeId, TerritoryId, COUNT(DISTINCT TerritoryId) FROM EmployeeTerritory GROUP BY EmployeeId ORDER BY COUNT(DISTINCT TerritoryId) DESC """ n_curs.execute(query) print(n_curs.fetchall())
[ 11748, 19798, 292, 355, 279, 67, 198, 11748, 44161, 578, 18, 198, 6738, 19798, 292, 1330, 6060, 19778, 220, 198, 198, 77, 62, 37043, 796, 44161, 578, 18, 13, 8443, 10786, 43588, 7972, 62, 17470, 13, 25410, 578, 18, 11537, 198, 77, 6...
2.990115
607
from django.contrib import admin from django.urls import path from .models import BookLoan, Library from .views import CustomView
[ 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 198, 198, 6738, 764, 27530, 1330, 4897, 43, 24611, 11, 10074, 198, 6738, 764, 33571, 1330, 8562, 7680, 628, 628 ]
3.621622
37
import os import datetime import logging import json import uuid from installed_clients.WorkspaceClient import Workspace as Workspace from installed_clients.KBaseReportClient import KBaseReport from installed_clients.annotation_ontology_apiServiceClient import annotation_ontology_api import MergeMetabolicAnnotations.utils.functions as f
[ 11748, 28686, 198, 11748, 4818, 8079, 198, 11748, 18931, 198, 11748, 33918, 198, 11748, 334, 27112, 198, 198, 6738, 6589, 62, 565, 2334, 13, 23044, 10223, 11792, 1330, 10933, 10223, 355, 10933, 10223, 198, 6738, 6589, 62, 565, 2334, 13, ...
3.931034
87
from models.Model import Player, Group, Session, engine
[ 6738, 4981, 13, 17633, 1330, 7853, 11, 4912, 11, 23575, 11, 3113, 198 ]
4.307692
13
from unittest.mock import Mock, patch import numpy as np from game.models import ValuePolicyModel
[ 6738, 555, 715, 395, 13, 76, 735, 1330, 44123, 11, 8529, 198, 198, 11748, 299, 32152, 355, 45941, 198, 198, 6738, 983, 13, 27530, 1330, 11052, 36727, 17633, 628 ]
3.482759
29
# -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/en/latest/topics/items.html from scrapy import Item, Field def main(): item = TopicItem() pass if __name__ == '__main__': main()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 2896, 500, 994, 262, 4981, 329, 534, 15881, 276, 3709, 198, 2, 198, 2, 4091, 10314, 287, 25, 198, 2, 2638, 1378, 15390, 13, 1416, 2416, 88, 13, 2398, 14, 2...
2.601942
103
import json import logging import dateutil.parser from datetime import datetime # Our imports from emission.core.get_database import get_profile_db, get_client_db, get_pending_signup_db import emission.clients.common
[ 11748, 33918, 198, 11748, 18931, 198, 11748, 3128, 22602, 13, 48610, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 2, 3954, 17944, 198, 6738, 25592, 13, 7295, 13, 1136, 62, 48806, 1330, 651, 62, 13317, 62, 9945, 11, 651, 62, 1636...
3.460317
63
import setuptools setuptools.setup(name='advent_of_code')
[ 11748, 900, 37623, 10141, 198, 2617, 37623, 10141, 13, 40406, 7, 3672, 11639, 324, 1151, 62, 1659, 62, 8189, 11537 ]
2.85
20
#!/usr/bin/env python3 """ sanity check script """ import vpp_papi
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 37811, 34182, 2198, 4226, 37227, 198, 11748, 410, 381, 62, 79, 15042, 198 ]
2.833333
24
#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: from __future__ import print_function # Python 2/3 compatibility __doc__ = """ Examples of design matrices specification and and computation (event-related design, FIR design, etc) Requires matplotlib Author : Bertrand Thirion: 2009-2010 """ print(__doc__) import numpy as np try: import matplotlib.pyplot as plt except ImportError: raise RuntimeError("This script needs the matplotlib library") from nipy.modalities.fmri.design_matrix import make_dmtx from nipy.modalities.fmri.experimental_paradigm import (EventRelatedParadigm, BlockParadigm) # frame times tr = 1.0 nscans = 128 frametimes = np.linspace(0, (nscans - 1) * tr, nscans) # experimental paradigm conditions = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c3', 'c3', 'c3'] onsets = [30, 70, 100, 10, 30, 90, 30, 40, 60] hrf_model = 'canonical' motion = np.cumsum(np.random.randn(128, 6), 0) add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz'] #event-related design matrix paradigm = EventRelatedParadigm(conditions, onsets) X1 = make_dmtx( frametimes, paradigm, drift_model='polynomial', drift_order=3, add_regs=motion, add_reg_names=add_reg_names) # block design matrix duration = 7 * np.ones(9) paradigm = BlockParadigm(con_id=conditions, onset=onsets, duration=duration) X2 = make_dmtx(frametimes, paradigm, drift_model='polynomial', drift_order=3) # FIR model paradigm = EventRelatedParadigm(conditions, onsets) hrf_model = 'FIR' X3 = make_dmtx(frametimes, paradigm, hrf_model='fir', drift_model='polynomial', drift_order=3, fir_delays=np.arange(1, 6)) # plot the results fig = plt.figure(figsize=(10, 6)) ax = plt.subplot(1, 3, 1) X1.show(ax=ax) ax.set_title('Event-related design matrix', fontsize=12) ax = plt.subplot(1, 3, 2) X2.show(ax=ax) ax.set_title('Block design matrix', fontsize=12) ax = plt.subplot(1, 3, 3) X3.show(ax=ax) ax.set_title('FIR design matrix', fontsize=12) 
plt.subplots_adjust(top=0.9, bottom=0.25) plt.show()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 795, 16436, 25, 532, 9, 12, 4235, 25, 21015, 26, 12972, 12, 521, 298, 12, 28968, 25, 604, 26, 33793, 12, 8658, 82, 12, 14171, 25, 18038, 532, 9, 12, 198, 2, 25357, 25, 900, ...
2.328374
941
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from collections import defaultdict from typing import List, Optional, Tuple from unittest.mock import MagicMock, call, patch from fbpcp.entity.container_instance import ContainerInstance, ContainerInstanceStatus from fbpcp.service.mpc import MPCInstanceStatus, MPCParty, MPCService from fbpcp.service.onedocker import OneDockerService from fbpcs.common.entity.pcs_mpc_instance import PCSMPCInstance from fbpcs.data_processing.lift_id_combiner.lift_id_spine_combiner_cpp import ( CppLiftIdSpineCombinerService, ) from fbpcs.data_processing.sharding.sharding_cpp import CppShardingService from fbpcs.onedocker_binary_config import OneDockerBinaryConfig from fbpcs.onedocker_binary_names import OneDockerBinaryNames from fbpcs.onedocker_service_config import OneDockerServiceConfig from fbpcs.pcf.tests.async_utils import to_sync from fbpcs.pid.entity.pid_instance import ( PIDInstance, PIDInstanceStatus, PIDProtocol, PIDRole, ) from fbpcs.pid.service.pid_service.pid import PIDService from fbpcs.private_computation.entity.private_computation_instance import ( PrivateComputationGameType, PrivateComputationInstance, PrivateComputationInstanceStatus, PrivateComputationRole, UnionedPCInstance, ) from fbpcs.private_computation.entity.private_computation_stage_type import ( PrivateComputationStageType, ) from fbpcs.private_computation.repository.private_computation_game import GameNames from fbpcs.private_computation.service.errors import ( PrivateComputationServiceValidationError, ) from fbpcs.private_computation.service.private_computation import ( PrivateComputationService, NUM_NEW_SHARDS_PER_FILE, DEFAULT_K_ANONYMITY_THRESHOLD, ) from fbpcs.private_computation.service.private_computation_stage_service import ( PrivateComputationStageService, ) # TODO T94666166: libfb 
won't work in OSS from libfb.py.asyncio.mock import AsyncMock from libfb.py.testutil import data_provider from fbpcs.private_computation.service.utils import ( create_and_start_mpc_instance, gen_mpc_game_args_to_retry, map_private_computation_role_to_mpc_party, DEFAULT_CONTAINER_TIMEOUT_IN_SEC, ) def test_map_private_computation_role_to_mpc_party(self): self.assertEqual( MPCParty.SERVER, map_private_computation_role_to_mpc_party(PrivateComputationRole.PUBLISHER), ) self.assertEqual( MPCParty.CLIENT, map_private_computation_role_to_mpc_party(PrivateComputationRole.PARTNER), ) def test_get_status_from_stage(self): # Test get status from an MPC stage mpc_instance = PCSMPCInstance.create_instance( instance_id="test_mpc_id", game_name=GameNames.SHARD_AGGREGATOR.value, mpc_party=MPCParty.SERVER, num_workers=2, status=MPCInstanceStatus.FAILED, ) self.assertEqual( PrivateComputationInstanceStatus.AGGREGATION_FAILED, self.private_computation_service._get_status_from_stage(mpc_instance), ) # Test get status from the PID stage pid_instance = PIDInstance( instance_id="test_pid_id", protocol=PIDProtocol.UNION_PID, pid_role=PIDRole.PUBLISHER, num_shards=4, input_path="input", output_path="output", stages_containers={}, stages_status={}, status=PIDInstanceStatus.COMPLETED, ) self.assertEqual( PrivateComputationInstanceStatus.ID_MATCHING_COMPLETED, self.private_computation_service._get_status_from_stage(pid_instance), ) def test_prepare_data(self): private_computation_instance = self.create_sample_instance( status=PrivateComputationInstanceStatus.CREATED, ) self.private_computation_service.instance_repository.read = MagicMock( return_value=private_computation_instance ) with patch.object( CppLiftIdSpineCombinerService, "combine_on_container_async", ) as mock_combine, patch.object( CppShardingService, "shard_on_container_async", ) as mock_shard: # call prepare_data self.private_computation_service.prepare_data( instance_id=self.test_private_computation_id, dry_run=True, ) 
binary_config = self.onedocker_binary_config_map[ OneDockerBinaryNames.LIFT_ID_SPINE_COMBINER.value ] mock_combine.assert_called_once_with( spine_path=private_computation_instance.pid_stage_output_spine_path, data_path=private_computation_instance.pid_stage_output_data_path, output_path=private_computation_instance.data_processing_output_path + "_combine", num_shards=self.test_num_containers, onedocker_svc=self.onedocker_service, binary_version=binary_config.binary_version, tmp_directory=binary_config.tmp_directory, ) mock_shard.assert_called() def test_prepare_data_tasks_skipped(self): private_computation_instance = self.create_sample_instance( status=PrivateComputationInstanceStatus.COMPUTATION_FAILED, ) private_computation_instance.partial_container_retry_enabled = True self.private_computation_service.instance_repository.read = MagicMock( return_value=private_computation_instance ) with patch.object( CppLiftIdSpineCombinerService, "combine_on_container_async", ) as mock_combine, patch.object( CppShardingService, "shard_on_container_async", ) as mock_shard: # call prepare_data self.private_computation_service.prepare_data( instance_id=self.test_private_computation_id, ) # expect combining and sharding skipped because this private_computation_instance has # status PrivateComputationInstanceStatus.COMPUTATION_FAILED, so this run # is to recover from a previous compute metrics failure, meaning data # preparation should have been done mock_combine.assert_not_called() mock_shard.assert_not_called() def test_validate_metrics_results_doesnt_match(self): self.private_computation_service.pid_svc.storage_svc.read = MagicMock() self.private_computation_service.pid_svc.storage_svc.read.side_effect = [ 
'{"subGroupMetrics":[],"metrics":{"controlClicks":1,"testSpend":0,"controlImpressions":0,"testImpressions":0,"controlMatchCount":0,"testMatchCount":0,"controlNumConvSquared":0,"testNumConvSquared":0,"testValueSquared":0,"controlValue":0,"testValue":0,"testConverters":0,"testConversions":0,"testPopulation":0,"controlClickers":0,"testClickers":0,"controlReach":0,"testReach":0,"controlSpend":0,"testClicks":0,"controlValueSquared":0,"controlConverters":0,"controlConversions":0,"controlPopulation":0}}', '{"subGroupMetrics":[],"metrics":{"testSpend":0,"controlClicks":0,"controlImpressions":0,"testImpressions":0,"controlMatchCount":0,"testMatchCount":0,"controlNumConvSquared":0,"testNumConvSquared":0,"testValueSquared":0,"controlValue":0,"testValue":0,"testConverters":0,"testConversions":0,"testPopulation":0,"controlClickers":0,"testClickers":0,"controlReach":0,"testReach":0,"controlSpend":0,"testClicks":0,"controlValueSquared":0,"controlConverters":0,"controlConversions":0,"controlPopulation":0}}', ] with self.assertRaises(PrivateComputationServiceValidationError): self.private_computation_service.validate_metrics( instance_id="test_id", aggregated_result_path="aggregated_result_path", expected_result_path="expected_result_path", ) def test_cancel_current_stage(self): test_mpc_id = self.test_private_computation_id + "_compute_metrics" test_game_name = GameNames.LIFT.value test_mpc_party = MPCParty.CLIENT # prepare the pl instance that will be read in to memory from the repository # at the beginning of the cancel_current_stage function mpc_instance_started = PCSMPCInstance.create_instance( instance_id=test_mpc_id, game_name=test_game_name, mpc_party=test_mpc_party, num_workers=self.test_num_containers, status=MPCInstanceStatus.STARTED, ) private_computation_instance = self.create_sample_instance( status=PrivateComputationInstanceStatus.COMPUTATION_STARTED, role=PrivateComputationRole.PARTNER, instances=[mpc_instance_started], ) 
self.private_computation_service.instance_repository.read = MagicMock( return_value=private_computation_instance ) # prepare the mpc instance that's returned from mpc_service.stop_instance() mpc_instance_canceled = PCSMPCInstance.create_instance( instance_id=test_mpc_id, game_name=test_game_name, mpc_party=test_mpc_party, num_workers=self.test_num_containers, status=MPCInstanceStatus.CANCELED, ) self.private_computation_service.mpc_svc.stop_instance = MagicMock( return_value=mpc_instance_canceled ) self.private_computation_service.mpc_svc.instance_repository.read = MagicMock( return_value=mpc_instance_canceled ) # call cancel, expect no exception private_computation_instance = ( self.private_computation_service.cancel_current_stage( instance_id=self.test_private_computation_id, ) ) # assert the pl instance returned has the correct status self.assertEqual( PrivateComputationInstanceStatus.COMPUTATION_FAILED, private_computation_instance.status, ) def test_gen_game_args_to_retry(self): test_input = "test_input_retry" mpc_instance = PCSMPCInstance.create_instance( instance_id="mpc_instance", game_name=GameNames.LIFT.value, mpc_party=MPCParty.SERVER, num_workers=2, status=MPCInstanceStatus.FAILED, containers=[ ContainerInstance( instance_id="container_instance_0", status=ContainerInstanceStatus.FAILED, ), ContainerInstance( instance_id="container_instance_1", status=ContainerInstanceStatus.COMPLETED, ), ], game_args=[ { "input_filenames": test_input, }, { "input_filenames": "input_filenames", }, ], ) private_computation_instance = self.create_sample_instance( status=PrivateComputationInstanceStatus.COMPUTATION_FAILED, instances=[mpc_instance], ) game_args = gen_mpc_game_args_to_retry( private_computation_instance ) self.assertEqual(1, len(game_args)) # only 1 failed container self.assertEqual(test_input, game_args[0]["input_filenames"]) def create_sample_instance( self, status: PrivateComputationInstanceStatus, role: PrivateComputationRole = 
PrivateComputationRole.PUBLISHER, instances: Optional[List[UnionedPCInstance]] = None, ) -> PrivateComputationInstance: return PrivateComputationInstance( instance_id=self.test_private_computation_id, role=role, instances=instances or [], status=status, status_update_ts=1600000000, num_pid_containers=self.test_num_containers, num_mpc_containers=self.test_num_containers, concurrency=self.test_concurrency, num_files_per_mpc_container=NUM_NEW_SHARDS_PER_FILE, game_type=PrivateComputationGameType.LIFT, input_path=self.test_input_path, output_dir=self.test_output_dir, fail_fast=True, k_anonymity_threshold=DEFAULT_K_ANONYMITY_THRESHOLD, )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 2, 15069, 357, 66, 8, 3203, 11, 3457, 13, 290, 663, 29116, 13, 198, 2, 198, 2, 770, 2723, 2438, 318, 11971, 739, 262, 17168, 5964, 1043, 287, 262, 198, 2, 38559, 24290, 2393, ...
2.242109
5,671
from app import app from app.database.db import Database if __name__ == "__main__": db = Database() db.create_tables() db.create_admin() app.run(debug=True)
[ 6738, 598, 1330, 598, 198, 6738, 598, 13, 48806, 13, 9945, 1330, 24047, 628, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 20613, 796, 24047, 3419, 198, 220, 220, 220, 20613, 13, 17953, 62, 83, 29...
2.636364
66
# -*- coding: utf-8 -*- # flake8: noqa from flask import Flask from flask_themes2 import Themes import config from util.auth import is_admin from util.converter import RegexConverter from util.csrf import generate_csrf_token app = Flask(__name__.split('.')[0]) app.secret_key = config.SECRET_KEY app.url_map.converters['regex'] = RegexConverter app.jinja_env.globals['config'] = config app.jinja_env.globals['csrf_token'] = generate_csrf_token app.jinja_env.globals['is_admin'] = is_admin Themes(app, app_identifier='yelplove') # if debug property is present, let's use it try: app.debug = config.DEBUG except AttributeError: app.debug = False import views
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 781, 539, 23, 25, 645, 20402, 198, 6738, 42903, 1330, 46947, 198, 6738, 42903, 62, 1169, 6880, 17, 1330, 383, 6880, 198, 198, 11748, 4566, 198, 6738, 7736, 13, 1843...
2.666667
255
""" This is how I'm gonna schedule hours IDEA: import the format example file that I'm using and is saved in the same directory """ import csv import pprint from tkinter import * from tkinter.filedialog import askopenfilename import StringProcessing """ Receives a file location, opens the csv The format looks like this: CLASS STARTS,Class name (optional),MON,TUES,WED,THURS,FRI,,CLASS ENDS,MON,TUES,WED,THURS,FRI 1, Stats, 10:20:00 AM,,10:20:00 AM,,10:20:00 AM,,,11:15:00 AM,,11:15:00 AM,,11:15:00 AM 2,,,09:35:00 AM,,09:35:00 AM,,,,,10:55:00 AM,,10:55:00 AM, 3,,,11:30:00 AM,11:30:00 AM,11:30:00 AM,11:30:00 AM,,,,12:25:00 PM,12:25:00 PM,12:25:00 PM,12:25:00 PM 4,,,,,,09:10:00 AM,,,,,,,10:05:00 AM 5,,12:00:00 PM,01:00:00 PM,01:00:00 PM,01:00:00 PM,01:00:00 PM,,,,04:30:00 PM,04:30:00 PM,04:30:00 PM,04:30:00 PM 6,,,,,,,,,,,,, 7,,,,,,,,,,,,, 8,,,,,,,,,,,,, 9,,,,,,,,,,,,, 10,,,,,,,,,,,,, 11,,,,,,,,,,,,, 12,,,,,,,,,,,,, 13,,,,,,,,,,,,, 14,,,,,,,,,,,,, 15,,,,,,,,,,,,, """
[ 37811, 198, 1212, 318, 703, 314, 1101, 8066, 7269, 2250, 198, 198, 14114, 32, 25, 1330, 262, 5794, 1672, 2393, 326, 314, 1101, 1262, 290, 318, 7448, 287, 262, 976, 8619, 628, 198, 37811, 198, 198, 11748, 269, 21370, 198, 11748, 279, ...
2.305239
439
from pathlib import Path from bsmu.bone_age.models import constants IMAGE_DIR = Path('C:/MyDiskBackup/Projects/BoneAge/Data/SmallImages500_NoPads') TRAIN_DATA_CSV_PATH = constants.TRAIN_DATA_CSV_PATH VALID_DATA_CSV_PATH = constants.VALID_DATA_CSV_PATH TEST_DATA_CSV_PATH = constants.TEST_DATA_CSV_PATH BATCH_SIZE = 7 MODEL_NAME_PREFIX = 'DenseNet169' MODEL_NAME_POSTFIX = 'AllImages3_MoreAugments'
[ 6738, 3108, 8019, 1330, 10644, 201, 198, 201, 198, 6738, 275, 5796, 84, 13, 15992, 62, 496, 13, 27530, 1330, 38491, 201, 198, 201, 198, 3955, 11879, 62, 34720, 796, 10644, 10786, 34, 14079, 3666, 40961, 7282, 929, 14, 16775, 82, 14, ...
2.357955
176
from matplotlib import pyplot as plt def nisa_projection(years=30, annual_deposit=80, initial_budget=100): """ This is a function to plot deposit of TSUMITATE NISA Parameters: --------------- years: integer How many years are you going to continue? annual_depoist: integer Annual deposit into the NISA account. initial_budget: integer The initial budget. Returns: -------------- matplotlib figure """ for j in [1.00,1.01, 1.02, 1.03, 1.04, 1.05]: original = initial_budget ganbon = [] box = [] for i in range(0,years): if i == 0: box.append(original) ganbon.append(original) gan = ganbon[-1] + annual_deposit original = original * j + annual_deposit if i > 0: box.append(original) ganbon.append(gan) plt.scatter(list(range(0,years)), box) plt.legend(["0%", "1%", "2%", "3%", "4%", "5%"]) plt.xlabel("Years") plt.ylabel("Money (Man yen)") # Reference: https://plotly.com/python/figure-labels/ import pandas as pd import plotly.graph_objects as go def nisa_projection_plotly(years=30, annual_deposit=80, initial_budget=100): """ This is a function to plot deposit of TSUMITATE NISA Parameters: --------------- years: integer How many years are you going to continue? annual_depoist: integer Annual deposit into the NISA account. initial_budget: integer The initial budget. Returns: -------------- plotly figures. """ dic_ = {} for j in [1.00,1.01, 1.02, 1.03, 1.04, 1.05]: original = initial_budget ganbon = [] box = [] for i in range(0,years): if i == 0: box.append(original) ganbon.append(original) gan = ganbon[-1] + annual_deposit original = original * j + annual_deposit if i > 0: box.append(original) ganbon.append(gan) dic_["{} %".format(str(j)[-1])] = box df = pd.DataFrame(dic_) fig = go.Figure() for i in df.columns: fig.add_trace(go.Scatter(x=df.index, y=df[i],name=i)) fig.update_layout( title="NISA PLOT", xaxis_title="Years", yaxis_title="Man Yen", width=500, height=400, ) fig.show() nisa_projection(30, 80, 100) nisa_projection_plotly(30, 80, 100)
[ 6738, 2603, 29487, 8019, 1330, 12972, 29487, 355, 458, 83, 198, 198, 4299, 299, 9160, 62, 16302, 295, 7, 19002, 28, 1270, 11, 5079, 62, 10378, 7434, 28, 1795, 11, 4238, 62, 37315, 28, 3064, 2599, 198, 220, 37227, 198, 220, 770, 318,...
2.288386
1,016
"""Python wrappers around TensorFlow ops. This file is MACHINE GENERATED! Do not edit. Original C++ source file: math_ops.cc """ import collections as _collections import six as _six from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow from tensorflow.python.eager import context as _context from tensorflow.python.eager import core as _core from tensorflow.python.eager import execute as _execute from tensorflow.python.framework import dtypes as _dtypes from tensorflow.python.framework import errors as _errors from tensorflow.python.framework import tensor_shape as _tensor_shape from tensorflow.core.framework import op_def_pb2 as _op_def_pb2 # Needed to trigger the call to _set_call_cpp_shape_fn. from tensorflow.python.framework import common_shapes as _common_shapes from tensorflow.python.framework import op_def_registry as _op_def_registry from tensorflow.python.framework import ops as _ops from tensorflow.python.framework import op_def_library as _op_def_library from tensorflow.python.util.tf_export import tf_export def _abs(x, name=None): r"""Computes the absolute value of a tensor. Given a tensor `x`, this operation returns a tensor containing the absolute value of each element in `x`. For example, if x is an input element and y is an output element, this operation computes \\(y = |x|\\). Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Abs", x=x, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Abs", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Abs", name, _ctx._post_execution_callbacks, x) return _result except _core._FallbackException: return _abs_eager_fallback( x, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def _abs_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function _abs """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Abs", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Abs", _inputs_flat, _attrs, _result, name) _result, = _result return _result def accumulate_nv2(inputs, shape, name=None): r"""Returns the element-wise sum of a list of tensors. `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not wait for all of its inputs to be ready before beginning to sum. This can save memory if inputs are ready at different times, since minimum temporary storage is proportional to the output size rather than the inputs size. Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. Returns a `Tensor` of same shape and type as the elements of `inputs`. 
Args: inputs: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. A list of `Tensor` objects, each with same shape and type. shape: A `tf.TensorShape` or list of `ints`. Shape of elements of `inputs`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `inputs`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if not isinstance(inputs, (list, tuple)): raise TypeError( "Expected list for 'inputs' argument to " "'accumulate_nv2' Op, not %r." % inputs) _attr_N = len(inputs) shape = _execute.make_shape(shape, "shape") _, _, _op = _op_def_lib._apply_op_helper( "AccumulateNV2", inputs=inputs, shape=shape, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "shape", _op.get_attr("shape")) _execute.record_gradient( "AccumulateNV2", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "AccumulateNV2", name, _ctx._post_execution_callbacks, inputs, "shape", shape) return _result except _core._FallbackException: return accumulate_nv2_eager_fallback( inputs, shape=shape, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def accumulate_nv2_eager_fallback(inputs, shape, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function accumulate_nv2 """ _ctx = ctx if ctx else _context.context() if not isinstance(inputs, (list, tuple)): raise TypeError( "Expected list for 'inputs' argument to " "'accumulate_nv2' Op, not %r." 
% inputs) _attr_N = len(inputs) shape = _execute.make_shape(shape, "shape") _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), _ctx) _inputs_flat = list(inputs) _attrs = ("N", _attr_N, "T", _attr_T, "shape", shape) _result = _execute.execute(b"AccumulateNV2", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "AccumulateNV2", _inputs_flat, _attrs, _result, name) _result, = _result return _result def acos_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function acos """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Acos", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Acos", _inputs_flat, _attrs, _result, name) _result, = _result return _result def acosh_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function acosh """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Acosh", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Acosh", _inputs_flat, _attrs, _result, name) _result, = _result return _result def add_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function add """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Add", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Add", _inputs_flat, _attrs, _result, name) _result, = _result return _result def add_n(inputs, name=None): r"""Add all input tensors element wise. 
  Args:
    inputs: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `variant`.
      Must all be the same size and shape.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `inputs`.
  """
  _ctx = _context._context
  # Graph mode: validate the list argument, build an AddN node via the
  # op-def library, and record the gradient.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(inputs, (list, tuple)):
      raise TypeError(
          "Expected list for 'inputs' argument to "
          "'add_n' Op, not %r." % inputs)
    _attr_N = len(inputs)
    _, _, _op = _op_def_lib._apply_op_helper(
        "AddN", inputs=inputs, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "AddN", _inputs_flat, _attrs, _result, name)
    # Single-output op: unpack the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; on _FallbackException fall
    # back to the Python slow path below.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "AddN", name,
        _ctx._post_execution_callbacks, inputs)
      return _result
    except _core._FallbackException:
      return add_n_eager_fallback(
          inputs, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, appending the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def add_n_eager_fallback(inputs, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function add_n
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'add_n' Op, not %r."
        % inputs)
  _attr_N = len(inputs)
  # Coerce all elements to one matching dtype before executing the op.
  _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), _ctx)
  _inputs_flat = list(inputs)
  _attrs = ("N", _attr_N, "T", _attr_T)
  _result = _execute.execute(b"AddN", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "AddN", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def add_v2(x, y, name=None):
  r"""Returns x + y element-wise.

  *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode: build an AddV2 node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "AddV2", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "AddV2", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager fast path with Python slow-path fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "AddV2", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      return add_v2_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def add_v2_eager_fallback(x, y, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function add_v2
  """
  _ctx = ctx if ctx else _context.context()
  # Promote x and y to a common dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"AddV2", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "AddV2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def _all(input, axis, keep_dims=False, name=None):
  r"""Computes the "logical and" of elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  Args:
    input: A `Tensor` of type `bool`. The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _, _, _op = _op_def_lib._apply_op_helper( "All", input=input, reduction_indices=axis, keep_dims=keep_dims, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("keep_dims", _op.get_attr("keep_dims"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "All", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "All", name, _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims) return _result except _core._FallbackException: return _all_eager_fallback( input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def _all_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function _all """ _ctx = ctx if ctx else _context.context() if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32) input = _ops.convert_to_tensor(input, _dtypes.bool) _inputs_flat = [input, axis] _attrs = ("keep_dims", keep_dims, "Tidx", _attr_Tidx) _result = _execute.execute(b"All", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "All", _inputs_flat, _attrs, _result, name) _result, = _result return _result def angle(input, Tout=_dtypes.float32, name=None): r"""Returns the argument of a complex number. 
Given a tensor `input` of complex numbers, this operation returns a tensor of type `float` that is the argument of each element in `input`. All elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part. The argument returned by this operation is of the form \\(atan2(b, a)\\). For example: ``` # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.angle(input) ==> [2.0132, 1.056] ``` @compatibility(numpy) Equivalent to np.angle. @end_compatibility Args: input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`. Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`. name: A name for the operation (optional). Returns: A `Tensor` of type `Tout`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if Tout is None: Tout = _dtypes.float32 Tout = _execute.make_type(Tout, "Tout") _, _, _op = _op_def_lib._apply_op_helper( "Angle", input=input, Tout=Tout, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout")) _execute.record_gradient( "Angle", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Angle", name, _ctx._post_execution_callbacks, input, "Tout", Tout) return _result except _core._FallbackException: return angle_eager_fallback( input, Tout=Tout, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def angle_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function angle """ _ctx = ctx if ctx else _context.context() if Tout is None: Tout = _dtypes.float32 Tout = _execute.make_type(Tout, "Tout") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64) _inputs_flat = [input] _attrs = ("T", _attr_T, "Tout", Tout) _result = _execute.execute(b"Angle", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Angle", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _any(input, axis, keep_dims=False, name=None): r"""Computes the "logical or" of elements across dimensions of a tensor. Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. Args: input: A `Tensor` of type `bool`. The tensor to reduce. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. Must be in the range `[-rank(input), rank(input))`. keep_dims: An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. name: A name for the operation (optional). Returns: A `Tensor` of type `bool`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _, _, _op = _op_def_lib._apply_op_helper( "Any", input=input, reduction_indices=axis, keep_dims=keep_dims, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("keep_dims", _op.get_attr("keep_dims"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "Any", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Any", name, _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims) return _result except _core._FallbackException: return _any_eager_fallback( input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def _any_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function _any """ _ctx = ctx if ctx else _context.context() if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32) input = _ops.convert_to_tensor(input, _dtypes.bool) _inputs_flat = [input, axis] _attrs = ("keep_dims", keep_dims, "Tidx", _attr_Tidx) _result = _execute.execute(b"Any", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Any", _inputs_flat, _attrs, _result, name) _result, = _result return _result def approximate_equal(x, y, tolerance=1e-05, name=None): r"""Returns the truth value of abs(x-y) < tolerance element-wise. Args: x: A `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. y: A `Tensor`. Must have the same type as `x`. tolerance: An optional `float`. Defaults to `1e-05`. name: A name for the operation (optional). Returns: A `Tensor` of type `bool`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if tolerance is None: tolerance = 1e-05 tolerance = _execute.make_float(tolerance, "tolerance") _, _, _op = _op_def_lib._apply_op_helper( "ApproximateEqual", x=x, y=y, tolerance=tolerance, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "tolerance", _op.get_attr("tolerance")) _execute.record_gradient( "ApproximateEqual", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ApproximateEqual", name, _ctx._post_execution_callbacks, x, y, "tolerance", tolerance) return _result except _core._FallbackException: return approximate_equal_eager_fallback( x, y, tolerance=tolerance, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def approximate_equal_eager_fallback(x, y, tolerance=1e-05, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function approximate_equal """ _ctx = ctx if ctx else _context.context() if tolerance is None: tolerance = 1e-05 tolerance = _execute.make_float(tolerance, "tolerance") _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T, "tolerance", tolerance) _result = _execute.execute(b"ApproximateEqual", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "ApproximateEqual", _inputs_flat, _attrs, _result, name) _result, = _result return _result def arg_max(input, dimension, output_type=_dtypes.int64, name=None): r"""Returns the index with the largest value across dimensions of a tensor. Note that in case of ties the identity of the return value is not guaranteed. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. dimension: A `Tensor`. Must be one of the following types: `int32`, `int64`. int32 or int64, must be in the range `[-rank(input), rank(input))`. Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0. output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. name: A name for the operation (optional). Returns: A `Tensor` of type `output_type`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if output_type is None: output_type = _dtypes.int64 output_type = _execute.make_type(output_type, "output_type") _, _, _op = _op_def_lib._apply_op_helper( "ArgMax", input=input, dimension=dimension, output_type=output_type, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"), "output_type", _op.get_attr("output_type")) _execute.record_gradient( "ArgMax", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ArgMax", name, _ctx._post_execution_callbacks, input, dimension, "output_type", output_type) return _result except _core._FallbackException: return arg_max_eager_fallback( input, dimension, output_type=output_type, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def arg_max_eager_fallback(input, dimension, output_type=_dtypes.int64, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function arg_max """ _ctx = ctx if ctx else _context.context() if output_type is None: output_type = _dtypes.int64 output_type = _execute.make_type(output_type, "output_type") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _attr_Tidx, (dimension,) = _execute.args_to_matching_eager([dimension], _ctx, _dtypes.int32) _inputs_flat = [input, dimension] _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "output_type", output_type) _result = _execute.execute(b"ArgMax", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "ArgMax", _inputs_flat, _attrs, _result, name) _result, = _result return _result def arg_min(input, dimension, output_type=_dtypes.int64, name=None): r"""Returns the index with the smallest value across dimensions of a tensor. Note that in case of ties the identity of the return value is not guaranteed. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. dimension: A `Tensor`. Must be one of the following types: `int32`, `int64`. int32 or int64, must be in the range `[-rank(input), rank(input))`. Describes which dimension of the input Tensor to reduce across. For vectors, use dimension = 0. output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`. name: A name for the operation (optional). Returns: A `Tensor` of type `output_type`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if output_type is None: output_type = _dtypes.int64 output_type = _execute.make_type(output_type, "output_type") _, _, _op = _op_def_lib._apply_op_helper( "ArgMin", input=input, dimension=dimension, output_type=output_type, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"), "output_type", _op.get_attr("output_type")) _execute.record_gradient( "ArgMin", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ArgMin", name, _ctx._post_execution_callbacks, input, dimension, "output_type", output_type) return _result except _core._FallbackException: return arg_min_eager_fallback( input, dimension, output_type=output_type, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def arg_min_eager_fallback(input, dimension, output_type=_dtypes.int64, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function arg_min
  """
  _ctx = ctx if ctx else _context.context()
  if output_type is None:
    output_type = _dtypes.int64
  output_type = _execute.make_type(output_type, "output_type")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  # `dimension` defaults to int32 when its dtype cannot be inferred.
  _attr_Tidx, (dimension,) = _execute.args_to_matching_eager([dimension], _ctx, _dtypes.int32)
  _inputs_flat = [input, dimension]
  _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "output_type", output_type)
  _result = _execute.execute(b"ArgMin", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "ArgMin", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def asin_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function asin
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Asin", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Asin", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def asinh_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function asinh
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Asinh", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Asinh", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def atan_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function atan
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Atan", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Atan", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def atan2_eager_fallback(y, x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function atan2
  """
  _ctx = ctx if ctx else _context.context()
  # Promote y and x to a common dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([y, x], _ctx)
  (y, x) = _inputs_T
  _inputs_flat = [y, x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Atan2", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Atan2", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def atanh_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function atanh
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Atanh", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Atanh", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def batch_mat_mul(x, y, adj_x=False, adj_y=False, name=None):
  r"""Multiplies slices of two tensors in batches.

  Multiplies all slices of `Tensor` `x` and `y` (each slice can be
  viewed as an element of a batch), and arranges the individual results
  in a single output tensor of the same batch size. Each of the
  individual slices can optionally be adjointed (to adjoint a matrix
  means to transpose and conjugate it) before multiplication by setting
  the `adj_x` or `adj_y` flag to `True`, which are by default `False`.

  The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
  and `[..., r_y, c_y]`.

  The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:

      r_o = c_x if adj_x else r_x
      c_o = r_y if adj_y else c_y

  It is computed as:

      output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
      2-D or higher with shape `[..., r_x, c_x]`.
    y: A `Tensor`. Must have the same type as `x`.
      2-D or higher with shape `[..., r_y, c_y]`.
    adj_x: An optional `bool`. Defaults to `False`.
      If `True`, adjoint the slices of `x`. Defaults to `False`.
    adj_y: An optional `bool`. Defaults to `False`.
      If `True`, adjoint the slices of `y`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode: normalize the adjoint flags and build a BatchMatMul node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if adj_x is None:
      adj_x = False
    adj_x = _execute.make_bool(adj_x, "adj_x")
    if adj_y is None:
      adj_y = False
    adj_y = _execute.make_bool(adj_y, "adj_y")
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchMatMul", x=x, y=y, adj_x=adj_x, adj_y=adj_y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "adj_x", _op.get_attr("adj_x"), "adj_y",
              _op.get_attr("adj_y"))
    _execute.record_gradient(
      "BatchMatMul", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager fast path with Python slow-path fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchMatMul",
        name, _ctx._post_execution_callbacks, x, y, "adj_x", adj_x, "adj_y",
        adj_y)
      return _result
    except _core._FallbackException:
      return batch_mat_mul_eager_fallback(
          x, y, adj_x=adj_x, adj_y=adj_y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def batch_mat_mul_eager_fallback(x, y, adj_x=False, adj_y=False, name=None,
                                 ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function batch_mat_mul
  """
  _ctx = ctx if ctx else _context.context()
  if adj_x is None:
    adj_x = False
  adj_x = _execute.make_bool(adj_x, "adj_x")
  if adj_y is None:
    adj_y = False
  adj_y = _execute.make_bool(adj_y, "adj_y")
  # Promote x and y to a common dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T, "adj_x", adj_x, "adj_y", adj_y)
  _result = _execute.execute(b"BatchMatMul", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BatchMatMul", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def bessel_i0e(x, name=None):
  r"""Computes the Bessel i0e function of `x` element-wise.

  Exponentially scaled modified Bessel function of order 0 defined as
  `bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.

  This function is faster and numerically stabler than `bessel_i0(x)`.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode: build a BesselI0e node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BesselI0e", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "BesselI0e", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager fast path with Python slow-path fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BesselI0e",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      return bessel_i0e_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def bessel_i0e_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function bessel_i0e
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BesselI0e", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BesselI0e", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def bessel_i1e(x, name=None):
  r"""Computes the Bessel i1e function of `x` element-wise.

  Exponentially scaled modified Bessel function of order 1 defined as
  `bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.

  This function is faster and numerically stabler than `bessel_i1(x)`.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode: build a BesselI1e node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "BesselI1e", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "BesselI1e", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager fast path with Python slow-path fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BesselI1e",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      return bessel_i1e_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def bessel_i1e_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function bessel_i1e
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BesselI1e", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "BesselI1e", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def betainc_eager_fallback(a, b, x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function betainc
  """
  _ctx = ctx if ctx else _context.context()
  # Promote a, b and x to a common dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a, b, x], _ctx)
  (a, b, x) = _inputs_T
  _inputs_flat = [a, b, x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Betainc", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Betainc", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def bincount(arr, size, weights, name=None):
  r"""Counts the number of occurrences of each value in an integer array.
  Outputs a vector with length `size` and the same dtype as `weights`. If
  `weights` are empty, then index `i` stores the number of times the value `i` is
  counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
  the value in `weights` at each index where the corresponding value in `arr` is
  `i`.

  Values in `arr` outside of the range [0, size) are ignored.

  Args:
    arr: A `Tensor` of type `int32`. int32 `Tensor`.
    size: A `Tensor` of type `int32`. non-negative int32 scalar `Tensor`.
    weights: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
      is an int32, int64, float32, or float64 `Tensor` with the same
      shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
      equal to 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `weights`.
  """
  _ctx = _context._context
  # Graph mode: build a Bincount node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Bincount", arr=arr, size=size, weights=weights, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Bincount", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager fast path with Python slow-path fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Bincount",
        name, _ctx._post_execution_callbacks, arr, size, weights)
      return _result
    except _core._FallbackException:
      return bincount_eager_fallback(
          arr, size, weights, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def bincount_eager_fallback(arr, size, weights, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function bincount
  """
  _ctx = ctx if ctx else _context.context()
  # Only `weights` determines the T attr; arr and size are fixed int32.
  _attr_T, (weights,) = _execute.args_to_matching_eager([weights], _ctx)
  arr = _ops.convert_to_tensor(arr, _dtypes.int32)
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  _inputs_flat = [arr, size, weights]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Bincount", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Bincount", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def bucketize(input, boundaries, name=None):
  r"""Bucketizes 'input' based on 'boundaries'.

  For example, if the inputs are
      boundaries = [0, 10, 100]
      input = [[-5, 10000]
        [150, 10]
        [5, 100]]

  then the output will be
      output = [[0, 3]
        [3, 2]
        [1, 3]]

  Args:
    input: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
      Any shape of Tensor contains with int or float type.
    boundaries: A list of `floats`.
      A sorted list of floats gives the boundary of the buckets.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  # Graph mode: validate and normalize the boundaries list attr, then
  # build a Bucketize node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if not isinstance(boundaries, (list, tuple)):
      raise TypeError(
          "Expected list for 'boundaries' argument to "
          "'bucketize' Op, not %r."
          % boundaries)
    boundaries = [_execute.make_float(_f, "boundaries") for _f in boundaries]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Bucketize", input=input, boundaries=boundaries, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "boundaries",
              _op.get_attr("boundaries"))
    _execute.record_gradient(
      "Bucketize", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager fast path with Python slow-path fallback.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Bucketize",
        name, _ctx._post_execution_callbacks, input, "boundaries", boundaries)
      return _result
    except _core._FallbackException:
      return bucketize_eager_fallback(
          input, boundaries=boundaries, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def bucketize_eager_fallback(input, boundaries, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function bucketize
  """
  _ctx = ctx if ctx else _context.context()
  if not isinstance(boundaries, (list, tuple)):
    raise TypeError(
        "Expected list for 'boundaries' argument to "
        "'bucketize' Op, not %r."
        % boundaries)
  boundaries = [_execute.make_float(_f, "boundaries") for _f in boundaries]
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "boundaries", boundaries)
  _result = _execute.execute(b"Bucketize", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Bucketize", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def cast(x, DstT, name=None):
  r"""Cast x of type SrcT to y of DstT.

  Args:
    x: A `Tensor`.
    DstT: A `tf.DType`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `DstT`.
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: DstT = _execute.make_type(DstT, "DstT") _, _, _op = _op_def_lib._apply_op_helper( "Cast", x=x, DstT=DstT, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("SrcT", _op.get_attr("SrcT"), "DstT", _op.get_attr("DstT")) _execute.record_gradient( "Cast", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Cast", name, _ctx._post_execution_callbacks, x, "DstT", DstT) return _result except _core._FallbackException: return cast_eager_fallback( x, DstT=DstT, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def cast_eager_fallback(x, DstT, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function cast """ _ctx = ctx if ctx else _context.context() DstT = _execute.make_type(DstT, "DstT") _attr_SrcT, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("SrcT", _attr_SrcT, "DstT", DstT) _result = _execute.execute(b"Cast", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Cast", _inputs_flat, _attrs, _result, name) _result, = _result return _result def ceil_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function ceil """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Ceil", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Ceil", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _clip_by_value(t, clip_value_min, clip_value_max, name=None): r"""Clips tensor values to a specified min and max. Given a tensor `t`, this operation returns a tensor of the same type and shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. Any values less than `clip_value_min` are set to `clip_value_min`. Any values greater than `clip_value_max` are set to `clip_value_max`. Args: t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. A `Tensor`. clip_value_min: A `Tensor`. Must have the same type as `t`. A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape as `t`. The minimum value to clip by. clip_value_max: A `Tensor`. Must have the same type as `t`. A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape as `t`. The maximum value to clip by. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `t`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "ClipByValue", t=t, clip_value_min=clip_value_min, clip_value_max=clip_value_max, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "ClipByValue", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ClipByValue", name, _ctx._post_execution_callbacks, t, clip_value_min, clip_value_max) return _result except _core._FallbackException: return _clip_by_value_eager_fallback( t, clip_value_min, clip_value_max, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def _clip_by_value_eager_fallback(t, clip_value_min, clip_value_max, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function _clip_by_value """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([t, clip_value_min, clip_value_max], _ctx) (t, clip_value_min, clip_value_max) = _inputs_T _inputs_flat = [t, clip_value_min, clip_value_max] _attrs = ("T", _attr_T) _result = _execute.execute(b"ClipByValue", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "ClipByValue", _inputs_flat, _attrs, _result, name) _result, = _result return _result def compare_and_bitpack(input, threshold, name=None): r"""Compare values of `input` to `threshold` and pack resulting bits into a `uint8`. Each comparison returns a boolean `true` (if `input_value > threshold`) or and `false` otherwise. 
This operation is useful for Locality-Sensitive-Hashing (LSH) and other algorithms that use hashing approximations of cosine and `L2` distances; codes can be generated from an input via: ```python codebook_size = 50 codebook_bits = codebook_size * 32 codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits], dtype=x.dtype, initializer=tf.orthogonal_initializer()) codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.) codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32 # now codes has shape x.shape[:-1] + [codebook_size] ``` **NOTE**: Currently, the innermost dimension of the tensor must be divisible by 8. Given an `input` shaped `[s0, s1, ..., s_n]`, the output is a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`. Args: input: A `Tensor`. Must be one of the following types: `bool`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`. Values to compare against `threshold` and bitpack. threshold: A `Tensor`. Must have the same type as `input`. Threshold to compare against. name: A name for the operation (optional). Returns: A `Tensor` of type `uint8`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "CompareAndBitpack", input=input, threshold=threshold, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "CompareAndBitpack", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "CompareAndBitpack", name, _ctx._post_execution_callbacks, input, threshold) return _result except _core._FallbackException: return compare_and_bitpack_eager_fallback( input, threshold, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def compare_and_bitpack_eager_fallback(input, threshold, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function compare_and_bitpack """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([input, threshold], _ctx) (input, threshold) = _inputs_T _inputs_flat = [input, threshold] _attrs = ("T", _attr_T) _result = _execute.execute(b"CompareAndBitpack", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "CompareAndBitpack", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _complex(real, imag, Tout=_dtypes.complex64, name=None): r"""Converts two real numbers to a complex number. Given a tensor `real` representing the real part of a complex number, and a tensor `imag` representing the imaginary part of a complex number, this operation returns complex numbers elementwise of the form \\(a + bj\\), where *a* represents the `real` part and *b* represents the `imag` part. The input tensors `real` and `imag` must have the same shape. 
For example: ``` # tensor 'real' is [2.25, 3.25] # tensor `imag` is [4.75, 5.75] tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] ``` Args: real: A `Tensor`. Must be one of the following types: `float32`, `float64`. imag: A `Tensor`. Must have the same type as `real`. Tout: An optional `tf.DType` from: `tf.complex64, tf.complex128`. Defaults to `tf.complex64`. name: A name for the operation (optional). Returns: A `Tensor` of type `Tout`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if Tout is None: Tout = _dtypes.complex64 Tout = _execute.make_type(Tout, "Tout") _, _, _op = _op_def_lib._apply_op_helper( "Complex", real=real, imag=imag, Tout=Tout, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout")) _execute.record_gradient( "Complex", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Complex", name, _ctx._post_execution_callbacks, real, imag, "Tout", Tout) return _result except _core._FallbackException: return _complex_eager_fallback( real, imag, Tout=Tout, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def _complex_eager_fallback(real, imag, Tout=_dtypes.complex64, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function _complex """ _ctx = ctx if ctx else _context.context() if Tout is None: Tout = _dtypes.complex64 Tout = _execute.make_type(Tout, "Tout") _attr_T, _inputs_T = _execute.args_to_matching_eager([real, imag], _ctx, _dtypes.float32) (real, imag) = _inputs_T _inputs_flat = [real, imag] _attrs = ("T", _attr_T, "Tout", Tout) _result = _execute.execute(b"Complex", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Complex", _inputs_flat, _attrs, _result, name) _result, = _result return _result def complex_abs(x, Tout=_dtypes.float32, name=None): r"""Computes the complex absolute value of a tensor. Given a tensor `x` of complex numbers, this operation returns a tensor of type `float` or `double` that is the absolute value of each element in `x`. All elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute value is computed as \\( \sqrt{a^2 + b^2}\\). Args: x: A `Tensor`. Must be one of the following types: `complex64`, `complex128`. Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`. name: A name for the operation (optional). Returns: A `Tensor` of type `Tout`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if Tout is None: Tout = _dtypes.float32 Tout = _execute.make_type(Tout, "Tout") _, _, _op = _op_def_lib._apply_op_helper( "ComplexAbs", x=x, Tout=Tout, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout")) _execute.record_gradient( "ComplexAbs", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ComplexAbs", name, _ctx._post_execution_callbacks, x, "Tout", Tout) return _result except _core._FallbackException: return complex_abs_eager_fallback( x, Tout=Tout, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def complex_abs_eager_fallback(x, Tout=_dtypes.float32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function complex_abs """ _ctx = ctx if ctx else _context.context() if Tout is None: Tout = _dtypes.float32 Tout = _execute.make_type(Tout, "Tout") _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx, _dtypes.complex64) _inputs_flat = [x] _attrs = ("T", _attr_T, "Tout", Tout) _result = _execute.execute(b"ComplexAbs", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "ComplexAbs", _inputs_flat, _attrs, _result, name) _result, = _result return _result def conj(input, name=None): r"""Returns the complex conjugate of a complex number. Given a tensor `input` of complex numbers, this operation returns a tensor of complex numbers that are the complex conjugate of each element in `input`. The complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part. 
The complex conjugate returned by this operation is of the form \\(a - bj\\). For example: ``` # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] ``` Args: input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`, `variant`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Conj", input=input, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Conj", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Conj", name, _ctx._post_execution_callbacks, input) return _result except _core._FallbackException: return conj_eager_fallback( input, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def conj_eager_fallback(input, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function conj """ _ctx = ctx if ctx else _context.context() _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64) _inputs_flat = [input] _attrs = ("T", _attr_T) _result = _execute.execute(b"Conj", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Conj", _inputs_flat, _attrs, _result, name) _result, = _result return _result def cos_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function cos """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Cos", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Cos", _inputs_flat, _attrs, _result, name) _result, = _result return _result def cosh_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function cosh """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Cosh", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Cosh", _inputs_flat, _attrs, _result, name) _result, = _result return _result def cross_eager_fallback(a, b, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function cross """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([a, b], _ctx) (a, b) = _inputs_T _inputs_flat = [a, b] _attrs = ("T", _attr_T) _result = _execute.execute(b"Cross", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Cross", _inputs_flat, _attrs, _result, name) _result, = _result return _result def cumprod(x, axis, exclusive=False, reverse=False, name=None): r"""Compute the cumulative product of the tensor `x` along `axis`. 
By default, this op performs an inclusive cumprod, which means that the first element of the input is identical to the first element of the output: ```python tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] ``` By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed instead: ```python tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] ``` By setting the `reverse` kwarg to `True`, the cumprod is performed in the opposite direction: ```python tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] ``` This is more efficient than using separate `tf.reverse` ops. The `reverse` and `exclusive` kwargs can also be combined: ```python tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] ``` Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. A `Tensor` of type `int32` (default: 0). Must be in the range `[-rank(x), rank(x))`. exclusive: An optional `bool`. Defaults to `False`. If `True`, perform exclusive cumprod. reverse: An optional `bool`. Defaults to `False`. A `bool` (default: False). name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if exclusive is None: exclusive = False exclusive = _execute.make_bool(exclusive, "exclusive") if reverse is None: reverse = False reverse = _execute.make_bool(reverse, "reverse") _, _, _op = _op_def_lib._apply_op_helper( "Cumprod", x=x, axis=axis, exclusive=exclusive, reverse=reverse, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("exclusive", _op.get_attr("exclusive"), "reverse", _op.get_attr("reverse"), "T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "Cumprod", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Cumprod", name, _ctx._post_execution_callbacks, x, axis, "exclusive", exclusive, "reverse", reverse) return _result except _core._FallbackException: return cumprod_eager_fallback( x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def cumprod_eager_fallback(x, axis, exclusive=False, reverse=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function cumprod """ _ctx = ctx if ctx else _context.context() if exclusive is None: exclusive = False exclusive = _execute.make_bool(exclusive, "exclusive") if reverse is None: reverse = False reverse = _execute.make_bool(reverse, "reverse") _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32) _inputs_flat = [x, axis] _attrs = ("exclusive", exclusive, "reverse", reverse, "T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"Cumprod", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Cumprod", _inputs_flat, _attrs, _result, name) _result, = _result return _result def cumsum(x, axis, exclusive=False, reverse=False, name=None): r"""Compute the cumulative sum of the tensor `x` along `axis`. By default, this op performs an inclusive cumsum, which means that the first element of the input is identical to the first element of the output: ```python tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] ``` By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed instead: ```python tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] ``` By setting the `reverse` kwarg to `True`, the cumsum is performed in the opposite direction: ```python tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] ``` This is more efficient than using separate `tf.reverse` ops. The `reverse` and `exclusive` kwargs can also be combined: ```python tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] ``` Args: x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`. 
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. A `Tensor` of type `int32` (default: 0). Must be in the range `[-rank(x), rank(x))`. exclusive: An optional `bool`. Defaults to `False`. If `True`, perform exclusive cumsum. reverse: An optional `bool`. Defaults to `False`. A `bool` (default: False). name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if exclusive is None: exclusive = False exclusive = _execute.make_bool(exclusive, "exclusive") if reverse is None: reverse = False reverse = _execute.make_bool(reverse, "reverse") _, _, _op = _op_def_lib._apply_op_helper( "Cumsum", x=x, axis=axis, exclusive=exclusive, reverse=reverse, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("exclusive", _op.get_attr("exclusive"), "reverse", _op.get_attr("reverse"), "T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "Cumsum", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Cumsum", name, _ctx._post_execution_callbacks, x, axis, "exclusive", exclusive, "reverse", reverse) return _result except _core._FallbackException: return cumsum_eager_fallback( x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def cumsum_eager_fallback(x, axis, exclusive=False, reverse=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function cumsum """ _ctx = ctx if ctx else _context.context() if exclusive is None: exclusive = False exclusive = _execute.make_bool(exclusive, "exclusive") if reverse is None: reverse = False reverse = _execute.make_bool(reverse, "reverse") _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32) _inputs_flat = [x, axis] _attrs = ("exclusive", exclusive, "reverse", reverse, "T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"Cumsum", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Cumsum", _inputs_flat, _attrs, _result, name) _result, = _result return _result def digamma_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function digamma """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Digamma", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Digamma", _inputs_flat, _attrs, _result, name) _result, = _result return _result def div(x, y, name=None): r"""Returns x / y element-wise. *NOTE*: `Div` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`. y: A `Tensor`. Must have the same type as `x`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Div", x=x, y=y, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Div", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Div", name, _ctx._post_execution_callbacks, x, y) return _result except _core._FallbackException: return div_eager_fallback( x, y, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def div_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function div """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Div", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Div", _inputs_flat, _attrs, _result, name) _result, = _result return _result def equal_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function equal """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Equal", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Equal", _inputs_flat, _attrs, _result, name) _result, = _result return _result def erf(x, name=None): r"""Computes the Gauss error function of `x` element-wise. Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. 
name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Erf", x=x, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Erf", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Erf", name, _ctx._post_execution_callbacks, x) return _result except _core._FallbackException: return erf_eager_fallback( x, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def erf_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function erf """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Erf", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Erf", _inputs_flat, _attrs, _result, name) _result, = _result return _result def erfc_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function erfc """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Erfc", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Erfc", _inputs_flat, _attrs, _result, name) _result, = _result return _result def exp_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function exp """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Exp", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Exp", _inputs_flat, _attrs, _result, name) _result, = _result return _result def expm1_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function expm1 """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Expm1", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Expm1", _inputs_flat, _attrs, _result, name) _result, = _result return _result def floor_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function floor """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Floor", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Floor", _inputs_flat, _attrs, _result, name) _result, = _result return _result def floor_div(x, y, name=None): r"""Returns x // y element-wise. *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`. y: A `Tensor`. Must have the same type as `x`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "FloorDiv", x=x, y=y, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "FloorDiv", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "FloorDiv", name, _ctx._post_execution_callbacks, x, y) return _result except _core._FallbackException: return floor_div_eager_fallback( x, y, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def floor_div_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function floor_div """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"FloorDiv", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "FloorDiv", _inputs_flat, _attrs, _result, name) _result, = _result return _result def floor_mod(x, y, name=None): r"""Returns element-wise remainder of division. When `x < 0` xor `y < 0` is true, this follows Python semantics in that the result here is consistent with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. *NOTE*: `FloorMod` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) Args: x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`. y: A `Tensor`. Must have the same type as `x`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "FloorMod", x=x, y=y, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "FloorMod", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "FloorMod", name, _ctx._post_execution_callbacks, x, y) return _result except _core._FallbackException: return floor_mod_eager_fallback( x, y, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def floor_mod_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function floor_mod """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"FloorMod", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "FloorMod", _inputs_flat, _attrs, _result, name) _result, = _result return _result def greater_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function greater """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Greater", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Greater", _inputs_flat, _attrs, _result, name) _result, = _result return _result def greater_equal_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function greater_equal """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"GreaterEqual", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "GreaterEqual", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _histogram_fixed_width(values, value_range, nbins, dtype=_dtypes.int32, name=None): r"""Return histogram of values. Given the tensor `values`, this operation returns a rank 1 histogram counting the number of entries in `values` that fall into every bin. The bins are equal width and determined by the arguments `value_range` and `nbins`. ```python # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) nbins = 5 value_range = [0.0, 5.0] new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] with tf.get_default_session() as sess: hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) variables.global_variables_initializer().run() sess.run(hist) => [2, 1, 1, 0, 2] ``` Args: values: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`. Numeric `Tensor`. value_range: A `Tensor`. Must have the same type as `values`. Shape [2] `Tensor` of same `dtype` as `values`. values <= value_range[0] will be mapped to hist[0], values >= value_range[1] will be mapped to hist[-1]. nbins: A `Tensor` of type `int32`. Scalar `int32 Tensor`. Number of histogram bins. dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`. name: A name for the operation (optional). Returns: A `Tensor` of type `dtype`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if dtype is None: dtype = _dtypes.int32 dtype = _execute.make_type(dtype, "dtype") _, _, _op = _op_def_lib._apply_op_helper( "HistogramFixedWidth", values=values, value_range=value_range, nbins=nbins, dtype=dtype, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "dtype", _op.get_attr("dtype")) _execute.record_gradient( "HistogramFixedWidth", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "HistogramFixedWidth", name, _ctx._post_execution_callbacks, values, value_range, nbins, "dtype", dtype) return _result except _core._FallbackException: return _histogram_fixed_width_eager_fallback( values, value_range, nbins, dtype=dtype, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def _histogram_fixed_width_eager_fallback(values, value_range, nbins, dtype=_dtypes.int32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function _histogram_fixed_width """ _ctx = ctx if ctx else _context.context() if dtype is None: dtype = _dtypes.int32 dtype = _execute.make_type(dtype, "dtype") _attr_T, _inputs_T = _execute.args_to_matching_eager([values, value_range], _ctx) (values, value_range) = _inputs_T nbins = _ops.convert_to_tensor(nbins, _dtypes.int32) _inputs_flat = [values, value_range, nbins] _attrs = ("T", _attr_T, "dtype", dtype) _result = _execute.execute(b"HistogramFixedWidth", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "HistogramFixedWidth", _inputs_flat, _attrs, _result, name) _result, = _result return _result def igamma_eager_fallback(a, x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function igamma """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx) (a, x) = _inputs_T _inputs_flat = [a, x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Igamma", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Igamma", _inputs_flat, _attrs, _result, name) _result, = _result return _result def igamma_grad_a(a, x, name=None): r"""Computes the gradient of `igamma(a, x)` wrt `a`. Args: a: A `Tensor`. Must be one of the following types: `float32`, `float64`. x: A `Tensor`. Must have the same type as `a`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `a`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "IgammaGradA", a=a, x=x, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "IgammaGradA", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "IgammaGradA", name, _ctx._post_execution_callbacks, a, x) return _result except _core._FallbackException: return igamma_grad_a_eager_fallback( a, x, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def igamma_grad_a_eager_fallback(a, x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function igamma_grad_a """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx) (a, x) = _inputs_T _inputs_flat = [a, x] _attrs = ("T", _attr_T) _result = _execute.execute(b"IgammaGradA", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "IgammaGradA", _inputs_flat, _attrs, _result, name) _result, = _result return _result def igammac_eager_fallback(a, x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function igammac """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx) (a, x) = _inputs_T _inputs_flat = [a, x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Igammac", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Igammac", _inputs_flat, _attrs, _result, name) _result, = _result return _result def imag(input, Tout=_dtypes.float32, name=None): r"""Returns the imaginary part of a complex number. 
Given a tensor `input` of complex numbers, this operation returns a tensor of type `float` that is the imaginary part of each element in `input`. All elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real part and *b* is the imaginary part returned by this operation. For example: ``` # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.imag(input) ==> [4.75, 5.75] ``` Args: input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`. Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`. name: A name for the operation (optional). Returns: A `Tensor` of type `Tout`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if Tout is None: Tout = _dtypes.float32 Tout = _execute.make_type(Tout, "Tout") _, _, _op = _op_def_lib._apply_op_helper( "Imag", input=input, Tout=Tout, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout")) _execute.record_gradient( "Imag", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Imag", name, _ctx._post_execution_callbacks, input, "Tout", Tout) return _result except _core._FallbackException: return imag_eager_fallback( input, Tout=Tout, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def imag_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function imag """ _ctx = ctx if ctx else _context.context() if Tout is None: Tout = _dtypes.float32 Tout = _execute.make_type(Tout, "Tout") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64) _inputs_flat = [input] _attrs = ("T", _attr_T, "Tout", Tout) _result = _execute.execute(b"Imag", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Imag", _inputs_flat, _attrs, _result, name) _result, = _result return _result def inv(x, name=None): r"""Computes the reciprocal of x element-wise. I.e., \\(y = 1 / x\\). Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Inv", x=x, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Inv", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Inv", name, _ctx._post_execution_callbacks, x) return _result except _core._FallbackException: return inv_eager_fallback( x, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def inv_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function inv """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Inv", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Inv", _inputs_flat, _attrs, _result, name) _result, = _result return _result def inv_grad(y, dy, name=None): r"""Computes the gradient for the inverse of `x` wrt its input. Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` is the corresponding input gradient. Args: y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. dy: A `Tensor`. Must have the same type as `y`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `y`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "InvGrad", y=y, dy=dy, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "InvGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "InvGrad", name, _ctx._post_execution_callbacks, y, dy) return _result except _core._FallbackException: return inv_grad_eager_fallback( y, dy, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def inv_grad_eager_fallback(y, dy, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function inv_grad """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx) (y, dy) = _inputs_T _inputs_flat = [y, dy] _attrs = ("T", _attr_T) _result = _execute.execute(b"InvGrad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "InvGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result def is_finite_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function is_finite """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"IsFinite", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "IsFinite", _inputs_flat, _attrs, _result, name) _result, = _result return _result def is_inf_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function is_inf """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"IsInf", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "IsInf", _inputs_flat, _attrs, _result, name) _result, = _result return _result def is_nan_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function is_nan """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"IsNan", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "IsNan", _inputs_flat, _attrs, _result, name) _result, = _result return _result def less_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function less """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Less", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Less", _inputs_flat, _attrs, _result, name) _result, = _result return _result def less_equal_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function less_equal """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"LessEqual", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "LessEqual", _inputs_flat, _attrs, _result, name) _result, = _result return _result def lgamma_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function lgamma """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Lgamma", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Lgamma", _inputs_flat, _attrs, _result, name) _result, = _result return _result def lin_space_eager_fallback(start, stop, num, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function lin_space """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([start, stop], _ctx) (start, stop) = _inputs_T _attr_Tidx, (num,) = _execute.args_to_matching_eager([num], _ctx, _dtypes.int32) _inputs_flat = [start, stop, num] _attrs = ("T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"LinSpace", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "LinSpace", _inputs_flat, _attrs, _result, name) _result, = _result return _result def log_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function log """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Log", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Log", _inputs_flat, _attrs, _result, name) _result, = _result return _result def log1p_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function log1p """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Log1p", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Log1p", _inputs_flat, _attrs, _result, name) _result, = _result return _result def logical_and_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function logical_and """ _ctx = ctx if ctx else _context.context() x = _ops.convert_to_tensor(x, _dtypes.bool) y = _ops.convert_to_tensor(y, _dtypes.bool) _inputs_flat = [x, y] _attrs = None _result = _execute.execute(b"LogicalAnd", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "LogicalAnd", _inputs_flat, _attrs, _result, name) _result, = _result return _result def logical_not_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function logical_not """ _ctx = ctx if ctx else _context.context() x = _ops.convert_to_tensor(x, _dtypes.bool) _inputs_flat = [x] _attrs = None _result = _execute.execute(b"LogicalNot", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "LogicalNot", _inputs_flat, _attrs, _result, name) _result, = _result return _result def logical_or_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function logical_or """ _ctx = ctx if ctx else _context.context() x = _ops.convert_to_tensor(x, _dtypes.bool) y = _ops.convert_to_tensor(y, _dtypes.bool) _inputs_flat = [x, y] _attrs = None _result = _execute.execute(b"LogicalOr", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "LogicalOr", _inputs_flat, _attrs, _result, name) _result, = _result return _result def mat_mul(a, b, transpose_a=False, transpose_b=False, name=None): r"""Multiply the matrix "a" by the matrix "b". The inputs must be two-dimensional matrices and the inner dimension of "a" (after being transposed if transpose_a is true) must match the outer dimension of "b" (after being transposed if transposed_b is true). *Note*: The default kernel implementation for MatMul on GPUs uses cublas. Args: a: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `complex64`, `complex128`. b: A `Tensor`. Must have the same type as `a`. 
transpose_a: An optional `bool`. Defaults to `False`. If true, "a" is transposed before multiplication. transpose_b: An optional `bool`. Defaults to `False`. If true, "b" is transposed before multiplication. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `a`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if transpose_a is None: transpose_a = False transpose_a = _execute.make_bool(transpose_a, "transpose_a") if transpose_b is None: transpose_b = False transpose_b = _execute.make_bool(transpose_b, "transpose_b") _, _, _op = _op_def_lib._apply_op_helper( "MatMul", a=a, b=b, transpose_a=transpose_a, transpose_b=transpose_b, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("transpose_a", _op.get_attr("transpose_a"), "transpose_b", _op.get_attr("transpose_b"), "T", _op.get_attr("T")) _execute.record_gradient( "MatMul", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "MatMul", name, _ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a, "transpose_b", transpose_b) return _result except _core._FallbackException: return mat_mul_eager_fallback( a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def mat_mul_eager_fallback(a, b, transpose_a=False, transpose_b=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function mat_mul """ _ctx = ctx if ctx else _context.context() if transpose_a is None: transpose_a = False transpose_a = _execute.make_bool(transpose_a, "transpose_a") if transpose_b is None: transpose_b = False transpose_b = _execute.make_bool(transpose_b, "transpose_b") _attr_T, _inputs_T = _execute.args_to_matching_eager([a, b], _ctx) (a, b) = _inputs_T _inputs_flat = [a, b] _attrs = ("transpose_a", transpose_a, "transpose_b", transpose_b, "T", _attr_T) _result = _execute.execute(b"MatMul", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "MatMul", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _max(input, axis, keep_dims=False, name=None): r"""Computes the maximum of elements across dimensions of a tensor. Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. The tensor to reduce. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. Must be in the range `[-rank(input), rank(input))`. keep_dims: An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _, _, _op = _op_def_lib._apply_op_helper( "Max", input=input, reduction_indices=axis, keep_dims=keep_dims, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "Max", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Max", name, _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims) return _result except _core._FallbackException: return _max_eager_fallback( input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def _max_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function _max """ _ctx = ctx if ctx else _context.context() if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32) _inputs_flat = [input, axis] _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"Max", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Max", _inputs_flat, _attrs, _result, name) _result, = _result return _result def maximum_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function maximum """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Maximum", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Maximum", _inputs_flat, _attrs, _result, name) _result, = _result return _result def mean(input, axis, keep_dims=False, name=None): r"""Computes the mean of elements across dimensions of a tensor. Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. The tensor to reduce. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. Must be in the range `[-rank(input), rank(input))`. keep_dims: An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _, _, _op = _op_def_lib._apply_op_helper( "Mean", input=input, reduction_indices=axis, keep_dims=keep_dims, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "Mean", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Mean", name, _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims) return _result except _core._FallbackException: return mean_eager_fallback( input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def mean_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function mean """ _ctx = ctx if ctx else _context.context() if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32) _inputs_flat = [input, axis] _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"Mean", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Mean", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _min(input, axis, keep_dims=False, name=None): r"""Computes the minimum of elements across dimensions of a tensor. Reduces `input` along the dimensions given in `axis`. 
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. The tensor to reduce. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. Must be in the range `[-rank(input), rank(input))`. keep_dims: An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _, _, _op = _op_def_lib._apply_op_helper( "Min", input=input, reduction_indices=axis, keep_dims=keep_dims, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "Min", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Min", name, _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims) return _result except _core._FallbackException: return _min_eager_fallback( input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def _min_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function _min """ _ctx = ctx if ctx else _context.context() if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32) _inputs_flat = [input, axis] _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"Min", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Min", _inputs_flat, _attrs, _result, name) _result, = _result return _result def minimum_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function minimum """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Minimum", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Minimum", _inputs_flat, _attrs, _result, name) _result, = _result return _result def mod(x, y, name=None): r"""Returns element-wise remainder of division. This emulates C semantics in that the result here is consistent with a truncating divide. E.g. `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. *NOTE*: `Mod` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) Args: x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `half`, `half`, `bfloat16`, `float32`, `float64`. y: A `Tensor`. Must have the same type as `x`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Mod", x=x, y=y, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Mod", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Mod", name, _ctx._post_execution_callbacks, x, y) return _result except _core._FallbackException: return mod_eager_fallback( x, y, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def mod_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function mod """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Mod", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Mod", _inputs_flat, _attrs, _result, name) _result, = _result return _result def mul(x, y, name=None): r"""Returns x * y element-wise. *NOTE*: `Multiply` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`. y: A `Tensor`. Must have the same type as `x`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Mul", x=x, y=y, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Mul", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Mul", name, _ctx._post_execution_callbacks, x, y) return _result except _core._FallbackException: return mul_eager_fallback( x, y, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def mul_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function mul """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Mul", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Mul", _inputs_flat, _attrs, _result, name) _result, = _result return _result def neg(x, name=None): r"""Computes numerical negative value element-wise. I.e., \\(y = -x\\). Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Neg", x=x, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Neg", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Neg", name, _ctx._post_execution_callbacks, x) return _result except _core._FallbackException: return neg_eager_fallback( x, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def neg_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function neg """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Neg", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Neg", _inputs_flat, _attrs, _result, name) _result, = _result return _result def not_equal_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function not_equal """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"NotEqual", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "NotEqual", _inputs_flat, _attrs, _result, name) _result, = _result return _result def polygamma_eager_fallback(a, x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function polygamma
  """
  _ctx = ctx if ctx else _context.context()
  # Promote both operands to a single common dtype "T" before executing.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx)
  (a, x) = _inputs_T
  _inputs_flat = [a, x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Polygamma", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Polygamma", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def _pow(x, y, name=None):
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y)  ==>  [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "Pow" node to the current graph via the op-def
    # library and record it for gradient computation.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Pow", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Pow", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path (C++); raises _FallbackException to request the
      # Python slow path below.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Pow", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      return _pow_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def _pow_eager_fallback(x, y, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
This is for function _pow """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Pow", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Pow", _inputs_flat, _attrs, _result, name) _result, = _result return _result def prod(input, axis, keep_dims=False, name=None): r"""Computes the product of elements across dimensions of a tensor. Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. If `keep_dims` is true, the reduced dimensions are retained with length 1. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. The tensor to reduce. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. Must be in the range `[-rank(input), rank(input))`. keep_dims: An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _, _, _op = _op_def_lib._apply_op_helper( "Prod", input=input, reduction_indices=axis, keep_dims=keep_dims, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "Prod", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Prod", name, _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims) return _result except _core._FallbackException: return prod_eager_fallback( input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def prod_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function prod """ _ctx = ctx if ctx else _context.context() if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx) _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32) _inputs_flat = [input, axis] _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"Prod", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Prod", _inputs_flat, _attrs, _result, name) _result, = _result return _result _quantize_down_and_shrink_range_outputs = ["output", "output_min", "output_max"] _QuantizeDownAndShrinkRangeOutput = _collections.namedtuple( "QuantizeDownAndShrinkRange", _quantize_down_and_shrink_range_outputs) def quantize_down_and_shrink_range(input, input_min, input_max, out_type, name=None): r"""Convert the quantized 'input' tensor into a lower-precision 'output', using the actual distribution of the values to maximize the usage of the lower bit depth and adjusting the output min and max ranges accordingly. [input_min, input_max] are scalar floats that specify the range for the float interpretation of the 'input' data. For example, if input_min is -1.0f and input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. This operator tries to squeeze as much precision as possible into an output with a lower bit depth by calculating the actual min and max values found in the data. For example, maybe that quint16 input has no values lower than 16,384 and none higher than 49,152. That means only half the range is actually needed, all the float interpretations are between -0.5f and 0.5f, so if we want to compress the data into a quint8 output, we can use that range rather than the theoretical -1.0f to 1.0f that is suggested by the input min and max. 
In practice, this is most useful for taking output from operations like QuantizedMatMul that can produce higher bit-depth outputs than their inputs and may have large potential output ranges, but in practice have a distribution of input values that only uses a small fraction of the possible range. By feeding that output into this operator, we can reduce it from 32 bits down to 8 with minimal loss of accuracy. Args: input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. input_min: A `Tensor` of type `float32`. The float value that the minimum quantized input value represents. input_max: A `Tensor` of type `float32`. The float value that the maximum quantized input value represents. out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. The type of the output. Should be a lower bit depth than Tinput. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (output, output_min, output_max). output: A `Tensor` of type `out_type`. output_min: A `Tensor` of type `float32`. output_max: A `Tensor` of type `float32`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: out_type = _execute.make_type(out_type, "out_type") _, _, _op = _op_def_lib._apply_op_helper( "QuantizeDownAndShrinkRange", input=input, input_min=input_min, input_max=input_max, out_type=out_type, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("Tinput", _op.get_attr("Tinput"), "out_type", _op.get_attr("out_type")) _execute.record_gradient( "QuantizeDownAndShrinkRange", _inputs_flat, _attrs, _result, name) _result = _QuantizeDownAndShrinkRangeOutput._make(_result) return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "QuantizeDownAndShrinkRange", name, _ctx._post_execution_callbacks, input, input_min, input_max, "out_type", out_type) _result = _QuantizeDownAndShrinkRangeOutput._make(_result) return _result except _core._FallbackException: return quantize_down_and_shrink_range_eager_fallback( input, input_min, input_max, out_type=out_type, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def quantize_down_and_shrink_range_eager_fallback(input, input_min, input_max, out_type, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
  This is for function quantize_down_and_shrink_range
  """
  _ctx = ctx if ctx else _context.context()
  out_type = _execute.make_type(out_type, "out_type")
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  # The range limits are always plain float32 scalars, regardless of the
  # quantized input dtype.
  input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
  input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
  _inputs_flat = [input, input_min, input_max]
  _attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
  _result = _execute.execute(b"QuantizeDownAndShrinkRange", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "QuantizeDownAndShrinkRange", _inputs_flat, _attrs, _result, name)
  # Wrap the three raw outputs (output, output_min, output_max) in the
  # namedtuple result type.
  _result = _QuantizeDownAndShrinkRangeOutput._make(_result)
  return _result


_quantized_add_outputs = ["z", "min_z", "max_z"]
_QuantizedAddOutput = _collections.namedtuple(
    "QuantizedAdd", _quantized_add_outputs)


def quantized_add(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32,
                  name=None):
  r"""Returns x + y element-wise, working on quantized buffers.

  Args:
    x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    y: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_x: A `Tensor` of type `float32`.
      The float value that the lowest quantized `x` value represents.
    max_x: A `Tensor` of type `float32`.
      The float value that the highest quantized `x` value represents.
    min_y: A `Tensor` of type `float32`.
      The float value that the lowest quantized `y` value represents.
    max_y: A `Tensor` of type `float32`.
      The float value that the highest quantized `y` value represents.
    Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (z, min_z, max_z).

    z: A `Tensor` of type `Toutput`.
    min_z: A `Tensor` of type `float32`.
    max_z: A `Tensor` of type `float32`.
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if Toutput is None: Toutput = _dtypes.qint32 Toutput = _execute.make_type(Toutput, "Toutput") _, _, _op = _op_def_lib._apply_op_helper( "QuantizedAdd", x=x, y=y, min_x=min_x, max_x=max_x, min_y=min_y, max_y=max_y, Toutput=Toutput, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput", _op.get_attr("Toutput")) _execute.record_gradient( "QuantizedAdd", _inputs_flat, _attrs, _result, name) _result = _QuantizedAddOutput._make(_result) return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "QuantizedAdd", name, _ctx._post_execution_callbacks, x, y, min_x, max_x, min_y, max_y, "Toutput", Toutput) _result = _QuantizedAddOutput._make(_result) return _result except _core._FallbackException: return quantized_add_eager_fallback( x, y, min_x, max_x, min_y, max_y, Toutput=Toutput, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def quantized_add_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function quantized_add """ _ctx = ctx if ctx else _context.context() if Toutput is None: Toutput = _dtypes.qint32 Toutput = _execute.make_type(Toutput, "Toutput") _attr_T1, (x,) = _execute.args_to_matching_eager([x], _ctx) _attr_T2, (y,) = _execute.args_to_matching_eager([y], _ctx) min_x = _ops.convert_to_tensor(min_x, _dtypes.float32) max_x = _ops.convert_to_tensor(max_x, _dtypes.float32) min_y = _ops.convert_to_tensor(min_y, _dtypes.float32) max_y = _ops.convert_to_tensor(max_y, _dtypes.float32) _inputs_flat = [x, y, min_x, max_x, min_y, max_y] _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput) _result = _execute.execute(b"QuantizedAdd", 3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "QuantizedAdd", _inputs_flat, _attrs, _result, name) _result = _QuantizedAddOutput._make(_result) return _result _quantized_mat_mul_outputs = ["out", "min_out", "max_out"] _QuantizedMatMulOutput = _collections.namedtuple( "QuantizedMatMul", _quantized_mat_mul_outputs) def quantized_mat_mul(a, b, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, Tactivation=_dtypes.quint8, name=None): r"""Perform a quantized matrix multiplication of `a` by the matrix `b`. The inputs must be two-dimensional matrices and the inner dimension of `a` (after being transposed if `transpose_a` is non-zero) must match the outer dimension of `b` (after being transposed if `transposed_b` is non-zero). Args: a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. Must be a two-dimensional tensor. b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. Must be a two-dimensional tensor. min_a: A `Tensor` of type `float32`. The float value that the lowest quantized `a` value represents. max_a: A `Tensor` of type `float32`. The float value that the highest quantized `a` value represents. min_b: A `Tensor` of type `float32`. 
The float value that the lowest quantized `b` value represents. max_b: A `Tensor` of type `float32`. The float value that the highest quantized `b` value represents. Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. transpose_a: An optional `bool`. Defaults to `False`. If true, `a` is transposed before multiplication. transpose_b: An optional `bool`. Defaults to `False`. If true, `b` is transposed before multiplication. Tactivation: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`. The type of output produced by activation function following this operation. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (out, min_out, max_out). out: A `Tensor` of type `Toutput`. min_out: A `Tensor` of type `float32`. max_out: A `Tensor` of type `float32`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if Toutput is None: Toutput = _dtypes.qint32 Toutput = _execute.make_type(Toutput, "Toutput") if transpose_a is None: transpose_a = False transpose_a = _execute.make_bool(transpose_a, "transpose_a") if transpose_b is None: transpose_b = False transpose_b = _execute.make_bool(transpose_b, "transpose_b") if Tactivation is None: Tactivation = _dtypes.quint8 Tactivation = _execute.make_type(Tactivation, "Tactivation") _, _, _op = _op_def_lib._apply_op_helper( "QuantizedMatMul", a=a, b=b, min_a=min_a, max_a=max_a, min_b=min_b, max_b=max_b, Toutput=Toutput, transpose_a=transpose_a, transpose_b=transpose_b, Tactivation=Tactivation, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput", _op.get_attr("Toutput"), "transpose_a", _op.get_attr("transpose_a"), "transpose_b", _op.get_attr("transpose_b"), "Tactivation", _op.get_attr("Tactivation")) _execute.record_gradient( "QuantizedMatMul", _inputs_flat, _attrs, _result, 
name) _result = _QuantizedMatMulOutput._make(_result) return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "QuantizedMatMul", name, _ctx._post_execution_callbacks, a, b, min_a, max_a, min_b, max_b, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "Tactivation", Tactivation) _result = _QuantizedMatMulOutput._make(_result) return _result except _core._FallbackException: return quantized_mat_mul_eager_fallback( a, b, min_a, max_a, min_b, max_b, Toutput=Toutput, transpose_a=transpose_a, transpose_b=transpose_b, Tactivation=Tactivation, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def quantized_mat_mul_eager_fallback(a, b, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, Tactivation=_dtypes.quint8, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function quantized_mat_mul """ _ctx = ctx if ctx else _context.context() if Toutput is None: Toutput = _dtypes.qint32 Toutput = _execute.make_type(Toutput, "Toutput") if transpose_a is None: transpose_a = False transpose_a = _execute.make_bool(transpose_a, "transpose_a") if transpose_b is None: transpose_b = False transpose_b = _execute.make_bool(transpose_b, "transpose_b") if Tactivation is None: Tactivation = _dtypes.quint8 Tactivation = _execute.make_type(Tactivation, "Tactivation") _attr_T1, (a,) = _execute.args_to_matching_eager([a], _ctx) _attr_T2, (b,) = _execute.args_to_matching_eager([b], _ctx) min_a = _ops.convert_to_tensor(min_a, _dtypes.float32) max_a = _ops.convert_to_tensor(max_a, _dtypes.float32) min_b = _ops.convert_to_tensor(min_b, _dtypes.float32) max_b = _ops.convert_to_tensor(max_b, _dtypes.float32) _inputs_flat = [a, b, min_a, max_a, min_b, max_b] _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput, "transpose_a", transpose_a, "transpose_b", transpose_b, "Tactivation", Tactivation) _result = _execute.execute(b"QuantizedMatMul", 3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "QuantizedMatMul", _inputs_flat, _attrs, _result, name) _result = _QuantizedMatMulOutput._make(_result) return _result _quantized_mul_outputs = ["z", "min_z", "max_z"] _QuantizedMulOutput = _collections.namedtuple( "QuantizedMul", _quantized_mul_outputs) def quantized_mul(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None): r"""Returns x * y element-wise, working on quantized buffers. Args: x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. y: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. min_x: A `Tensor` of type `float32`. The float value that the lowest quantized `x` value represents. max_x: A `Tensor` of type `float32`. The float value that the highest quantized `x` value represents. 
min_y: A `Tensor` of type `float32`. The float value that the lowest quantized `y` value represents. max_y: A `Tensor` of type `float32`. The float value that the highest quantized `y` value represents. Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (z, min_z, max_z). z: A `Tensor` of type `Toutput`. min_z: A `Tensor` of type `float32`. max_z: A `Tensor` of type `float32`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if Toutput is None: Toutput = _dtypes.qint32 Toutput = _execute.make_type(Toutput, "Toutput") _, _, _op = _op_def_lib._apply_op_helper( "QuantizedMul", x=x, y=y, min_x=min_x, max_x=max_x, min_y=min_y, max_y=max_y, Toutput=Toutput, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput", _op.get_attr("Toutput")) _execute.record_gradient( "QuantizedMul", _inputs_flat, _attrs, _result, name) _result = _QuantizedMulOutput._make(_result) return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "QuantizedMul", name, _ctx._post_execution_callbacks, x, y, min_x, max_x, min_y, max_y, "Toutput", Toutput) _result = _QuantizedMulOutput._make(_result) return _result except _core._FallbackException: return quantized_mul_eager_fallback( x, y, min_x, max_x, min_y, max_y, Toutput=Toutput, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def quantized_mul_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function quantized_mul """ _ctx = ctx if ctx else _context.context() if Toutput is None: Toutput = _dtypes.qint32 Toutput = _execute.make_type(Toutput, "Toutput") _attr_T1, (x,) = _execute.args_to_matching_eager([x], _ctx) _attr_T2, (y,) = _execute.args_to_matching_eager([y], _ctx) min_x = _ops.convert_to_tensor(min_x, _dtypes.float32) max_x = _ops.convert_to_tensor(max_x, _dtypes.float32) min_y = _ops.convert_to_tensor(min_y, _dtypes.float32) max_y = _ops.convert_to_tensor(max_y, _dtypes.float32) _inputs_flat = [x, y, min_x, max_x, min_y, max_y] _attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput) _result = _execute.execute(b"QuantizedMul", 3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "QuantizedMul", _inputs_flat, _attrs, _result, name) _result = _QuantizedMulOutput._make(_result) return _result def _range(start, limit, delta, name=None): r"""Creates a sequence of numbers. This operation creates a sequence of numbers that begins at `start` and extends by increments of `delta` up to but not including `limit`. For example: ``` # 'start' is 3 # 'limit' is 18 # 'delta' is 3 tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] ``` Args: start: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `float64`, `int32`, `int64`. 0-D (scalar). First entry in the sequence. limit: A `Tensor`. Must have the same type as `start`. 0-D (scalar). Upper limit of sequence, exclusive. delta: A `Tensor`. Must have the same type as `start`. 0-D (scalar). Optional. Default is 1. Number that increments `start`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `start`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Range", start=start, limit=limit, delta=delta, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "Range", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Range", name, _ctx._post_execution_callbacks, start, limit, delta) return _result except _core._FallbackException: return _range_eager_fallback( start, limit, delta, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def _range_eager_fallback(start, limit, delta, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function _range """ _ctx = ctx if ctx else _context.context() _attr_Tidx, _inputs_Tidx = _execute.args_to_matching_eager([start, limit, delta], _ctx, _dtypes.int32) (start, limit, delta) = _inputs_Tidx _inputs_flat = [start, limit, delta] _attrs = ("Tidx", _attr_Tidx) _result = _execute.execute(b"Range", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Range", _inputs_flat, _attrs, _result, name) _result, = _result return _result def real(input, Tout=_dtypes.float32, name=None): r"""Returns the real part of a complex number. Given a tensor `input` of complex numbers, this operation returns a tensor of type `float` that is the real part of each element in `input`. All elements in `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real part returned by this operation and *b* is the imaginary part. 
For example: ``` # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] tf.real(input) ==> [-2.25, 3.25] ``` Args: input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`. Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`. name: A name for the operation (optional). Returns: A `Tensor` of type `Tout`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if Tout is None: Tout = _dtypes.float32 Tout = _execute.make_type(Tout, "Tout") _, _, _op = _op_def_lib._apply_op_helper( "Real", input=input, Tout=Tout, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout")) _execute.record_gradient( "Real", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Real", name, _ctx._post_execution_callbacks, input, "Tout", Tout) return _result except _core._FallbackException: return real_eager_fallback( input, Tout=Tout, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def real_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function real """ _ctx = ctx if ctx else _context.context() if Tout is None: Tout = _dtypes.float32 Tout = _execute.make_type(Tout, "Tout") _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64) _inputs_flat = [input] _attrs = ("T", _attr_T, "Tout", Tout) _result = _execute.execute(b"Real", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Real", _inputs_flat, _attrs, _result, name) _result, = _result return _result def real_div(x, y, name=None): r"""Returns x / y element-wise for real types. If `x` and `y` are reals, this will return the floating-point division. *NOTE*: `Div` supports broadcasting. More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`. y: A `Tensor`. Must have the same type as `x`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "RealDiv", x=x, y=y, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "RealDiv", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "RealDiv", name, _ctx._post_execution_callbacks, x, y) return _result except _core._FallbackException: return real_div_eager_fallback( x, y, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def real_div_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function real_div """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"RealDiv", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "RealDiv", _inputs_flat, _attrs, _result, name) _result, = _result return _result def reciprocal_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function reciprocal """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Reciprocal", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Reciprocal", _inputs_flat, _attrs, _result, name) _result, = _result return _result def reciprocal_grad(y, dy, name=None): r"""Computes the gradient for the inverse of `x` wrt its input. 
Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy` is the corresponding input gradient. Args: y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. dy: A `Tensor`. Must have the same type as `y`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `y`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "ReciprocalGrad", y=y, dy=dy, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "ReciprocalGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "ReciprocalGrad", name, _ctx._post_execution_callbacks, y, dy) return _result except _core._FallbackException: return reciprocal_grad_eager_fallback( y, dy, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def reciprocal_grad_eager_fallback(y, dy, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function reciprocal_grad """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx) (y, dy) = _inputs_T _inputs_flat = [y, dy] _attrs = ("T", _attr_T) _result = _execute.execute(b"ReciprocalGrad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "ReciprocalGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result _requantization_range_outputs = ["output_min", "output_max"] _RequantizationRangeOutput = _collections.namedtuple( "RequantizationRange", _requantization_range_outputs) def requantization_range(input, input_min, input_max, name=None): r"""Given a quantized tensor described by (input, input_min, input_max), outputs a range that covers the actual values present in that tensor. This op is typically used to produce the requested_output_min and requested_output_max for Requantize. Args: input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. input_min: A `Tensor` of type `float32`. The float value that the minimum quantized input value represents. input_max: A `Tensor` of type `float32`. The float value that the maximum quantized input value represents. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (output_min, output_max). output_min: A `Tensor` of type `float32`. output_max: A `Tensor` of type `float32`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "RequantizationRange", input=input, input_min=input_min, input_max=input_max, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("Tinput", _op.get_attr("Tinput")) _execute.record_gradient( "RequantizationRange", _inputs_flat, _attrs, _result, name) _result = _RequantizationRangeOutput._make(_result) return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "RequantizationRange", name, _ctx._post_execution_callbacks, input, input_min, input_max) _result = _RequantizationRangeOutput._make(_result) return _result except _core._FallbackException: return requantization_range_eager_fallback( input, input_min, input_max, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def requantization_range_eager_fallback(input, input_min, input_max, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function requantization_range """ _ctx = ctx if ctx else _context.context() _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx) input_min = _ops.convert_to_tensor(input_min, _dtypes.float32) input_max = _ops.convert_to_tensor(input_max, _dtypes.float32) _inputs_flat = [input, input_min, input_max] _attrs = ("Tinput", _attr_Tinput) _result = _execute.execute(b"RequantizationRange", 2, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "RequantizationRange", _inputs_flat, _attrs, _result, name) _result = _RequantizationRangeOutput._make(_result) return _result _requantize_outputs = ["output", "output_min", "output_max"] _RequantizeOutput = _collections.namedtuple( "Requantize", _requantize_outputs) def requantize(input, input_min, input_max, requested_output_min, requested_output_max, out_type, name=None): r"""Convert the quantized 'input' tensor into a lower-precision 'output', using the output range specified with 'requested_output_min' and 'requested_output_max'. [input_min, input_max] are scalar floats that specify the range for the float interpretation of the 'input' data. For example, if input_min is -1.0f and input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. Args: input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`. input_min: A `Tensor` of type `float32`. The float value that the minimum quantized input value represents. input_max: A `Tensor` of type `float32`. The float value that the maximum quantized input value represents. requested_output_min: A `Tensor` of type `float32`. The float value that the minimum quantized output value represents. requested_output_max: A `Tensor` of type `float32`. The float value that the maximum quantized output value represents. 
out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. The type of the output. Should be a lower bit depth than Tinput. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (output, output_min, output_max). output: A `Tensor` of type `out_type`. output_min: A `Tensor` of type `float32`. output_max: A `Tensor` of type `float32`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: out_type = _execute.make_type(out_type, "out_type") _, _, _op = _op_def_lib._apply_op_helper( "Requantize", input=input, input_min=input_min, input_max=input_max, requested_output_min=requested_output_min, requested_output_max=requested_output_max, out_type=out_type, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("Tinput", _op.get_attr("Tinput"), "out_type", _op.get_attr("out_type")) _execute.record_gradient( "Requantize", _inputs_flat, _attrs, _result, name) _result = _RequantizeOutput._make(_result) return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Requantize", name, _ctx._post_execution_callbacks, input, input_min, input_max, requested_output_min, requested_output_max, "out_type", out_type) _result = _RequantizeOutput._make(_result) return _result except _core._FallbackException: return requantize_eager_fallback( input, input_min, input_max, requested_output_min, requested_output_max, out_type=out_type, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def requantize_eager_fallback(input, input_min, input_max, requested_output_min, requested_output_max, out_type, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function requantize """ _ctx = ctx if ctx else _context.context() out_type = _execute.make_type(out_type, "out_type") _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx) input_min = _ops.convert_to_tensor(input_min, _dtypes.float32) input_max = _ops.convert_to_tensor(input_max, _dtypes.float32) requested_output_min = _ops.convert_to_tensor(requested_output_min, _dtypes.float32) requested_output_max = _ops.convert_to_tensor(requested_output_max, _dtypes.float32) _inputs_flat = [input, input_min, input_max, requested_output_min, requested_output_max] _attrs = ("Tinput", _attr_Tinput, "out_type", out_type) _result = _execute.execute(b"Requantize", 3, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Requantize", _inputs_flat, _attrs, _result, name) _result = _RequantizeOutput._make(_result) return _result def rint_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function rint """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Rint", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Rint", _inputs_flat, _attrs, _result, name) _result, = _result return _result def round(x, name=None): r"""Rounds the values of a tensor to the nearest integer, element-wise. Rounds half to even. Also known as bankers rounding. If you want to round according to the current system rounding mode use std::cint. Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Round", x=x, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Round", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Round", name, _ctx._post_execution_callbacks, x) return _result except _core._FallbackException: return round_eager_fallback( x, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def round_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function round """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Round", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Round", _inputs_flat, _attrs, _result, name) _result, = _result return _result def rsqrt_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function rsqrt """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Rsqrt", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Rsqrt", _inputs_flat, _attrs, _result, name) _result, = _result return _result def rsqrt_grad(y, dy, name=None): r"""Computes the gradient for the rsqrt of `x` wrt its input. Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy` is the corresponding input gradient. Args: y: A `Tensor`. 
Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. dy: A `Tensor`. Must have the same type as `y`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `y`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "RsqrtGrad", y=y, dy=dy, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "RsqrtGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "RsqrtGrad", name, _ctx._post_execution_callbacks, y, dy) return _result except _core._FallbackException: return rsqrt_grad_eager_fallback( y, dy, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def rsqrt_grad_eager_fallback(y, dy, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function rsqrt_grad """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx) (y, dy) = _inputs_T _inputs_flat = [y, dy] _attrs = ("T", _attr_T) _result = _execute.execute(b"RsqrtGrad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "RsqrtGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result def segment_max_eager_fallback(data, segment_ids, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function segment_max """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx) _inputs_flat = [data, segment_ids] _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) _result = _execute.execute(b"SegmentMax", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SegmentMax", _inputs_flat, _attrs, _result, name) _result, = _result return _result def segment_mean_eager_fallback(data, segment_ids, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function segment_mean """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx) _inputs_flat = [data, segment_ids] _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) _result = _execute.execute(b"SegmentMean", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SegmentMean", _inputs_flat, _attrs, _result, name) _result, = _result return _result def segment_min_eager_fallback(data, segment_ids, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function segment_min """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx) _inputs_flat = [data, segment_ids] _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) _result = _execute.execute(b"SegmentMin", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SegmentMin", _inputs_flat, _attrs, _result, name) _result, = _result return _result def segment_prod_eager_fallback(data, segment_ids, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function segment_prod """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx) _inputs_flat = [data, segment_ids] _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) _result = _execute.execute(b"SegmentProd", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SegmentProd", _inputs_flat, _attrs, _result, name) _result, = _result return _result def segment_sum_eager_fallback(data, segment_ids, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function segment_sum """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx) _inputs_flat = [data, segment_ids] _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) _result = _execute.execute(b"SegmentSum", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SegmentSum", _inputs_flat, _attrs, _result, name) _result, = _result return _result def select(condition, x, y, name=None): r"""Selects elements from `x` or `y`, depending on `condition`. The `x`, and `y` tensors must all have the same shape, and the output will also have that shape. The `condition` tensor must be a scalar if `x` and `y` are scalars. If `x` and `y` are vectors or higher rank, then `condition` must be either a scalar, a vector with size matching the first dimension of `x`, or must have the same shape as `x`. The `condition` tensor acts as a mask that chooses, based on the value at each element, whether the corresponding element / row in the output should be taken from `x` (if true) or `y` (if false). If `condition` is a vector and `x` and `y` are higher rank matrices, then it chooses which row (outer dimension) to copy from `x` and `y`. 
If `condition` has the same shape as `x` and `y`, then it chooses which element to copy from `x` and `y`. For example: ```python # 'condition' tensor is [[True, False] # [False, True]] # 't' is [[1, 2], # [3, 4]] # 'e' is [[5, 6], # [7, 8]] select(condition, t, e) # => [[1, 6], [7, 4]] # 'condition' tensor is [True, False] # 't' is [[1, 2], # [3, 4]] # 'e' is [[5, 6], # [7, 8]] select(condition, t, e) ==> [[1, 2], [7, 8]] ``` Args: condition: A `Tensor` of type `bool`. x: A `Tensor` which may have the same shape as `condition`. If `condition` is rank 1, `x` may have higher rank, but its first dimension must match the size of `condition`. y: A `Tensor` with the same type and shape as `x`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `t`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Select", condition=condition, t=x, e=y, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Select", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Select", name, _ctx._post_execution_callbacks, condition, x, y) return _result except _core._FallbackException: return select_eager_fallback( condition, x, y, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def select_eager_fallback(condition, x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function select """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T condition = _ops.convert_to_tensor(condition, _dtypes.bool) _inputs_flat = [condition, x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Select", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Select", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sigmoid(x, name=None): r"""Computes sigmoid of `x` element-wise. Specifically, `y = 1 / (1 + exp(-x))`. Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Sigmoid", x=x, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Sigmoid", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Sigmoid", name, _ctx._post_execution_callbacks, x) return _result except _core._FallbackException: return sigmoid_eager_fallback( x, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sigmoid_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function sigmoid """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Sigmoid", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Sigmoid", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sigmoid_grad(y, dy, name=None): r"""Computes the gradient of the sigmoid of `x` wrt its input. Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and `dy` is the corresponding input gradient. Args: y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. dy: A `Tensor`. Must have the same type as `y`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `y`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "SigmoidGrad", y=y, dy=dy, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "SigmoidGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SigmoidGrad", name, _ctx._post_execution_callbacks, y, dy) return _result except _core._FallbackException: return sigmoid_grad_eager_fallback( y, dy, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sigmoid_grad_eager_fallback(y, dy, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function sigmoid_grad """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx) (y, dy) = _inputs_T _inputs_flat = [y, dy] _attrs = ("T", _attr_T) _result = _execute.execute(b"SigmoidGrad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SigmoidGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sign(x, name=None): r"""Returns an element-wise indication of the sign of a number. `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Sign", x=x, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Sign", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Sign", name, _ctx._post_execution_callbacks, x) return _result except _core._FallbackException: return sign_eager_fallback( x, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sign_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function sign """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Sign", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Sign", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sin_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function sin """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Sin", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Sin", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sinh_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function sinh """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Sinh", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Sinh", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sparse_mat_mul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None): r"""Multiply matrix "a" by matrix "b". The inputs must be two-dimensional matrices and the inner dimension of "a" must match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not `SparseTensor`s. This op is optimized for the case where at least one of "a" or "b" is sparse, in the sense that they have a large proportion of zero values. The breakeven for using this versus a dense matrix multiply on one platform was 30% zero values in the sparse matrix. 
The gradient computation of this operation will only take advantage of sparsity in the input gradient when that gradient comes from a Relu. Args: a: A `Tensor`. Must be one of the following types: `float32`, `bfloat16`. b: A `Tensor`. Must be one of the following types: `float32`, `bfloat16`. transpose_a: An optional `bool`. Defaults to `False`. transpose_b: An optional `bool`. Defaults to `False`. a_is_sparse: An optional `bool`. Defaults to `False`. b_is_sparse: An optional `bool`. Defaults to `False`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if transpose_a is None: transpose_a = False transpose_a = _execute.make_bool(transpose_a, "transpose_a") if transpose_b is None: transpose_b = False transpose_b = _execute.make_bool(transpose_b, "transpose_b") if a_is_sparse is None: a_is_sparse = False a_is_sparse = _execute.make_bool(a_is_sparse, "a_is_sparse") if b_is_sparse is None: b_is_sparse = False b_is_sparse = _execute.make_bool(b_is_sparse, "b_is_sparse") _, _, _op = _op_def_lib._apply_op_helper( "SparseMatMul", a=a, b=b, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("transpose_a", _op.get_attr("transpose_a"), "transpose_b", _op.get_attr("transpose_b"), "a_is_sparse", _op.get_attr("a_is_sparse"), "b_is_sparse", _op.get_attr("b_is_sparse"), "Ta", _op.get_attr("Ta"), "Tb", _op.get_attr("Tb")) _execute.record_gradient( "SparseMatMul", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SparseMatMul", name, _ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a, "transpose_b", transpose_b, "a_is_sparse", a_is_sparse, "b_is_sparse", b_is_sparse) return _result except 
_core._FallbackException: return sparse_mat_mul_eager_fallback( a, b, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sparse_mat_mul_eager_fallback(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function sparse_mat_mul """ _ctx = ctx if ctx else _context.context() if transpose_a is None: transpose_a = False transpose_a = _execute.make_bool(transpose_a, "transpose_a") if transpose_b is None: transpose_b = False transpose_b = _execute.make_bool(transpose_b, "transpose_b") if a_is_sparse is None: a_is_sparse = False a_is_sparse = _execute.make_bool(a_is_sparse, "a_is_sparse") if b_is_sparse is None: b_is_sparse = False b_is_sparse = _execute.make_bool(b_is_sparse, "b_is_sparse") _attr_Ta, (a,) = _execute.args_to_matching_eager([a], _ctx, _dtypes.float32) _attr_Tb, (b,) = _execute.args_to_matching_eager([b], _ctx, _dtypes.float32) _inputs_flat = [a, b] _attrs = ("transpose_a", transpose_a, "transpose_b", transpose_b, "a_is_sparse", a_is_sparse, "b_is_sparse", b_is_sparse, "Ta", _attr_Ta, "Tb", _attr_Tb) _result = _execute.execute(b"SparseMatMul", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SparseMatMul", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sparse_segment_mean(data, indices, segment_ids, name=None): r"""Computes the mean along sparse segments of a tensor. Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of segments. Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`. 
Args: data: A `Tensor`. Must be one of the following types: `float32`, `float64`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A `Tensor` of type `int32`. A 1-D tensor. Values should be sorted and can be repeated. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `data`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "SparseSegmentMean", data=data, indices=indices, segment_ids=segment_ids, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "SparseSegmentMean", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SparseSegmentMean", name, _ctx._post_execution_callbacks, data, indices, segment_ids) return _result except _core._FallbackException: return sparse_segment_mean_eager_fallback( data, indices, segment_ids, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sparse_segment_mean_eager_fallback(data, indices, segment_ids, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function sparse_segment_mean """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32) segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32) _inputs_flat = [data, indices, segment_ids] _attrs = ("T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"SparseSegmentMean", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SparseSegmentMean", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sparse_segment_mean_grad(grad, indices, segment_ids, output_dim0, name=None): r"""Computes gradients for SparseSegmentMean. Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0. Args: grad: A `Tensor`. Must be one of the following types: `float32`, `float64`. gradient propagated to the SparseSegmentMean op. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. indices passed to the corresponding SparseSegmentMean op. segment_ids: A `Tensor` of type `int32`. segment_ids passed to the corresponding SparseSegmentMean op. output_dim0: A `Tensor` of type `int32`. dimension 0 of "data" passed to SparseSegmentMean op. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `grad`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "SparseSegmentMeanGrad", grad=grad, indices=indices, segment_ids=segment_ids, output_dim0=output_dim0, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "SparseSegmentMeanGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SparseSegmentMeanGrad", name, _ctx._post_execution_callbacks, grad, indices, segment_ids, output_dim0) return _result except _core._FallbackException: return sparse_segment_mean_grad_eager_fallback( grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sparse_segment_mean_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function sparse_segment_mean_grad """ _ctx = ctx if ctx else _context.context() _attr_T, (grad,) = _execute.args_to_matching_eager([grad], _ctx) _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32) segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32) output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32) _inputs_flat = [grad, indices, segment_ids, output_dim0] _attrs = ("T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"SparseSegmentMeanGrad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SparseSegmentMeanGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sparse_segment_mean_with_num_segments(data, indices, segment_ids, num_segments, name=None): r"""Computes the mean along sparse segments of a tensor. Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is missing, the `output` tensor at that position will be zeroed. Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of segments. Args: data: A `Tensor`. Must be one of the following types: `float32`, `float64`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A `Tensor` of type `int32`. A 1-D tensor. Values should be sorted and can be repeated. num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. Should equal the number of distinct segment IDs. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `data`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "SparseSegmentMeanWithNumSegments", data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"), "Tnumsegments", _op.get_attr("Tnumsegments")) _execute.record_gradient( "SparseSegmentMeanWithNumSegments", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SparseSegmentMeanWithNumSegments", name, _ctx._post_execution_callbacks, data, indices, segment_ids, num_segments) return _result except _core._FallbackException: return sparse_segment_mean_with_num_segments_eager_fallback( data, indices, segment_ids, num_segments, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sparse_segment_mean_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function sparse_segment_mean_with_num_segments """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32) _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32) segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32) _inputs_flat = [data, indices, segment_ids, num_segments] _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments", _attr_Tnumsegments) _result = _execute.execute(b"SparseSegmentMeanWithNumSegments", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SparseSegmentMeanWithNumSegments", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sparse_segment_sqrt_n(data, indices, segment_ids, name=None): r"""Computes the sum along sparse segments of a tensor divided by the sqrt of N. N is the size of the segment being reduced. Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of segments. Args: data: A `Tensor`. Must be one of the following types: `float32`, `float64`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A `Tensor` of type `int32`. A 1-D tensor. Values should be sorted and can be repeated. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `data`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "SparseSegmentSqrtN", data=data, indices=indices, segment_ids=segment_ids, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "SparseSegmentSqrtN", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SparseSegmentSqrtN", name, _ctx._post_execution_callbacks, data, indices, segment_ids) return _result except _core._FallbackException: return sparse_segment_sqrt_n_eager_fallback( data, indices, segment_ids, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sparse_segment_sqrt_n_eager_fallback(data, indices, segment_ids, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function sparse_segment_sqrt_n """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32) segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32) _inputs_flat = [data, indices, segment_ids] _attrs = ("T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"SparseSegmentSqrtN", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SparseSegmentSqrtN", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sparse_segment_sqrt_n_grad(grad, indices, segment_ids, output_dim0, name=None): r"""Computes gradients for SparseSegmentSqrtN. Returns tensor "output" with same shape as grad, except for dimension 0 whose value is output_dim0. 
Args: grad: A `Tensor`. Must be one of the following types: `float32`, `float64`. gradient propagated to the SparseSegmentSqrtN op. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. indices passed to the corresponding SparseSegmentSqrtN op. segment_ids: A `Tensor` of type `int32`. segment_ids passed to the corresponding SparseSegmentSqrtN op. output_dim0: A `Tensor` of type `int32`. dimension 0 of "data" passed to SparseSegmentSqrtN op. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `grad`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "SparseSegmentSqrtNGrad", grad=grad, indices=indices, segment_ids=segment_ids, output_dim0=output_dim0, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "SparseSegmentSqrtNGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SparseSegmentSqrtNGrad", name, _ctx._post_execution_callbacks, grad, indices, segment_ids, output_dim0) return _result except _core._FallbackException: return sparse_segment_sqrt_n_grad_eager_fallback( grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sparse_segment_sqrt_n_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function sparse_segment_sqrt_n_grad """ _ctx = ctx if ctx else _context.context() _attr_T, (grad,) = _execute.args_to_matching_eager([grad], _ctx) _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32) segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32) output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32) _inputs_flat = [grad, indices, segment_ids, output_dim0] _attrs = ("T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"SparseSegmentSqrtNGrad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SparseSegmentSqrtNGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sparse_segment_sqrt_n_with_num_segments(data, indices, segment_ids, num_segments, name=None): r"""Computes the sum along sparse segments of a tensor divided by the sqrt of N. N is the size of the segment being reduced. Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is missing, the `output` tensor at that position will be zeroed. Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of segments. Args: data: A `Tensor`. Must be one of the following types: `float32`, `float64`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A `Tensor` of type `int32`. A 1-D tensor. Values should be sorted and can be repeated. num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. Should equal the number of distinct segment IDs. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `data`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "SparseSegmentSqrtNWithNumSegments", data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"), "Tnumsegments", _op.get_attr("Tnumsegments")) _execute.record_gradient( "SparseSegmentSqrtNWithNumSegments", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SparseSegmentSqrtNWithNumSegments", name, _ctx._post_execution_callbacks, data, indices, segment_ids, num_segments) return _result except _core._FallbackException: return sparse_segment_sqrt_n_with_num_segments_eager_fallback( data, indices, segment_ids, num_segments, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sparse_segment_sqrt_n_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function sparse_segment_sqrt_n_with_num_segments """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32) _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32) segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32) _inputs_flat = [data, indices, segment_ids, num_segments] _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments", _attr_Tnumsegments) _result = _execute.execute(b"SparseSegmentSqrtNWithNumSegments", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SparseSegmentSqrtNWithNumSegments", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sparse_segment_sum(data, indices, segment_ids, name=None): r"""Computes the sum along sparse segments of a tensor. Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of segments. Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first dimension, selecting a subset of dimension 0, specified by `indices`. For example: ```python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) # Select two rows, one segment. tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) # => [[0 0 0 0]] # Select two rows, two segment. tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) # => [[ 1 2 3 4] # [-1 -2 -3 -4]] # Select all rows, two segments. tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) # => [[0 0 0 0] # [5 6 7 8]] # Which is equivalent to: tf.segment_sum(c, tf.constant([0, 0, 1])) ``` Args: data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A 1-D tensor. 
Has same rank as `segment_ids`. segment_ids: A `Tensor` of type `int32`. A 1-D tensor. Values should be sorted and can be repeated. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `data`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "SparseSegmentSum", data=data, indices=indices, segment_ids=segment_ids, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "SparseSegmentSum", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SparseSegmentSum", name, _ctx._post_execution_callbacks, data, indices, segment_ids) return _result except _core._FallbackException: return sparse_segment_sum_eager_fallback( data, indices, segment_ids, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sparse_segment_sum_eager_fallback(data, indices, segment_ids, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function sparse_segment_sum """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32) segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32) _inputs_flat = [data, indices, segment_ids] _attrs = ("T", _attr_T, "Tidx", _attr_Tidx) _result = _execute.execute(b"SparseSegmentSum", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SparseSegmentSum", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sparse_segment_sum_with_num_segments(data, indices, segment_ids, num_segments, name=None): r"""Computes the sum along sparse segments of a tensor. Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is missing, the `output` tensor at that position will be zeroed. Read @{$math_ops#Segmentation$the section on segmentation} for an explanation of segments. For example: ```python c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) tf.sparse_segment_sum_with_num_segments( c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) # => [[0 0 0 0] # [0 0 0 0] # [0 0 0 0]] tf.sparse_segment_sum_with_num_segments(c, tf.constant([0, 1]), tf.constant([0, 2]), num_segments=4) # => [[ 1 2 3 4] # [ 0 0 0 0] # [-1 -2 -3 -4] # [ 0 0 0 0]] ``` Args: data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A 1-D tensor. Has same rank as `segment_ids`. segment_ids: A `Tensor` of type `int32`. A 1-D tensor. Values should be sorted and can be repeated. num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`. Should equal the number of distinct segment IDs. name: A name for the operation (optional). Returns: A `Tensor`. 
Has the same type as `data`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "SparseSegmentSumWithNumSegments", data=data, indices=indices, segment_ids=segment_ids, num_segments=num_segments, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"), "Tnumsegments", _op.get_attr("Tnumsegments")) _execute.record_gradient( "SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SparseSegmentSumWithNumSegments", name, _ctx._post_execution_callbacks, data, indices, segment_ids, num_segments) return _result except _core._FallbackException: return sparse_segment_sum_with_num_segments_eager_fallback( data, indices, segment_ids, num_segments, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sparse_segment_sum_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function sparse_segment_sum_with_num_segments """ _ctx = ctx if ctx else _context.context() _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx) _attr_Tidx, (indices,) = _execute.args_to_matching_eager([indices], _ctx, _dtypes.int32) _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32) segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32) _inputs_flat = [data, indices, segment_ids, num_segments] _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments", _attr_Tnumsegments) _result = _execute.execute(b"SparseSegmentSumWithNumSegments", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sqrt(x, name=None): r"""Computes square root of x element-wise. I.e., \\(y = \sqrt{x} = x^{1/2}\\). Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Sqrt", x=x, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Sqrt", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Sqrt", name, _ctx._post_execution_callbacks, x) return _result except _core._FallbackException: return sqrt_eager_fallback( x, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sqrt_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function sqrt """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Sqrt", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Sqrt", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sqrt_grad(y, dy, name=None): r"""Computes the gradient for the sqrt of `x` wrt its input. Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy` is the corresponding input gradient. Args: y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`. dy: A `Tensor`. Must have the same type as `y`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `y`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "SqrtGrad", y=y, dy=dy, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "SqrtGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "SqrtGrad", name, _ctx._post_execution_callbacks, y, dy) return _result except _core._FallbackException: return sqrt_grad_eager_fallback( y, dy, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sqrt_grad_eager_fallback(y, dy, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function sqrt_grad """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx) (y, dy) = _inputs_T _inputs_flat = [y, dy] _attrs = ("T", _attr_T) _result = _execute.execute(b"SqrtGrad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SqrtGrad", _inputs_flat, _attrs, _result, name) _result, = _result return _result def square(x, name=None): r"""Computes square of x element-wise. I.e., \\(y = x * x = x^2\\). Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Square", x=x, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Square", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Square", name, _ctx._post_execution_callbacks, x) return _result except _core._FallbackException: return square_eager_fallback( x, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def square_eager_fallback(x, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function square """ _ctx = ctx if ctx else _context.context() _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"Square", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Square", _inputs_flat, _attrs, _result, name) _result, = _result return _result def squared_difference_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function squared_difference """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"SquaredDifference", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "SquaredDifference", _inputs_flat, _attrs, _result, name) _result, = _result return _result def sub(x, y, name=None): r"""Returns x - y element-wise. *NOTE*: `Subtract` supports broadcasting. 
More about broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`. y: A `Tensor`. Must have the same type as `x`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: _, _, _op = _op_def_lib._apply_op_helper( "Sub", x=x, y=y, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("T", _op.get_attr("T")) _execute.record_gradient( "Sub", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Sub", name, _ctx._post_execution_callbacks, x, y) return _result except _core._FallbackException: return sub_eager_fallback( x, y, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def sub_eager_fallback(x, y, name=None, ctx=None): r"""This is the slowpath function for Eager mode. This is for function sub """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T) _result = _execute.execute(b"Sub", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Sub", _inputs_flat, _attrs, _result, name) _result, = _result return _result def _sum(input, axis, keep_dims=False, name=None): r"""Computes the sum of elements across dimensions of a tensor. Reduces `input` along the dimensions given in `axis`. Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in `axis`. 
If `keep_dims` is true, the reduced dimensions are retained with length 1. Args: input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. The tensor to reduce. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The dimensions to reduce. Must be in the range `[-rank(input), rank(input))`. keep_dims: An optional `bool`. Defaults to `False`. If true, retain reduced dimensions with length 1. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context if _ctx is None or not _ctx._eager_context.is_eager: if keep_dims is None: keep_dims = False keep_dims = _execute.make_bool(keep_dims, "keep_dims") _, _, _op = _op_def_lib._apply_op_helper( "Sum", input=input, reduction_indices=axis, keep_dims=keep_dims, name=name) _result = _op.outputs[:] _inputs_flat = _op.inputs _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx")) _execute.record_gradient( "Sum", _inputs_flat, _attrs, _result, name) _result, = _result return _result else: try: _result = _pywrap_tensorflow.TFE_Py_FastPathExecute( _ctx._context_handle, _ctx._eager_context.device_name, "Sum", name, _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims) return _result except _core._FallbackException: return _sum_eager_fallback( input, axis, keep_dims=keep_dims, name=name, ctx=_ctx) except _core._NotOkStatusException as e: if name is not None: message = e.message + " name: " + name else: message = e.message _six.raise_from(_core._status_to_exception(e.code, message), None) def _sum_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None): r"""This is the slowpath function for Eager mode. 
This is for function _sum
  """
  # NOTE(review): machine-generated eager-fallback code (def is above this
  # chunk) — reproduced verbatim; do not hand-edit the dispatch logic.
  _ctx = ctx if ctx else _context.context()
  if keep_dims is None:
    keep_dims = False
  keep_dims = _execute.make_bool(keep_dims, "keep_dims")
  # Infer the op's dtype attrs from the eager inputs; axis defaults to int32.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
  _inputs_flat = [input, axis]
  _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"Sum", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Sum", _inputs_flat, _attrs, _result, name)
  # Op emits exactly one output tensor; unpack it.
  _result, = _result
  return _result


def tan_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tan
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Tan", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Tan", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def tanh(x, name=None):
  r"""Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a Tanh node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Tanh", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "Tanh", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name, "Tanh", name,
          _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path refused the inputs; use the Python slow path.
      return tanh_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def tanh_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tanh
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Tanh", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Tanh", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def tanh_grad(y, dy, name=None):
  r"""Computes the gradient for the tanh of `x` wrt its input.

  Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
  is the corresponding input gradient.

  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a TanhGrad node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "TanhGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "TanhGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name, "TanhGrad",
          name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      return tanh_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def tanh_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function tanh_grad
  """
  _ctx = ctx if ctx else _context.context()
  # Both inputs must share one dtype; match them jointly.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
  (y, dy) = _inputs_T
  _inputs_flat = [y, dy]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TanhGrad", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "TanhGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def truncate_div(x, y, name=None):
  r"""Returns x / y element-wise for integer types.

  Truncation designates that negative numbers will round fractional quantities
  toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different
  than Python semantics. See `FloorDiv` for a division function that matches
  Python Semantics.

  *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a TruncateDiv node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "TruncateDiv", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "TruncateDiv", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name, "TruncateDiv",
          name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      return truncate_div_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def truncate_div_eager_fallback(x, y, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function truncate_div
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TruncateDiv", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TruncateDiv", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def truncate_mod(x, y, name=None):
  r"""Returns element-wise remainder of division. This emulates C semantics in that

  the result here is consistent with a truncating divide. E.g.
  `truncate(x / y) * y + truncate_mod(x, y) = x`.

  *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a TruncateMod node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "TruncateMod", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
        "TruncateMod", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result
  else:
    try:
      # Eager mode: try the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name, "TruncateMod",
          name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      return truncate_mod_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)


def truncate_mod_eager_fallback(x, y, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function truncate_mod
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"TruncateMod", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "TruncateMod", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def unsorted_segment_max_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unsorted_segment_max
  """
  _ctx = ctx if ctx else _context.context()
  # Three independent dtype attrs: data (T), segment_ids (Tindices),
  # num_segments (Tnumsegments, defaulting to int32).
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
  _inputs_flat = [data, segment_ids, num_segments]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
            _attr_Tnumsegments)
  _result = _execute.execute(b"UnsortedSegmentMax", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UnsortedSegmentMax", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def unsorted_segment_min_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unsorted_segment_min
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
  _inputs_flat = [data, segment_ids, num_segments]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
            _attr_Tnumsegments)
  _result = _execute.execute(b"UnsortedSegmentMin", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UnsortedSegmentMin", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def unsorted_segment_prod_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unsorted_segment_prod
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
  _inputs_flat = [data, segment_ids, num_segments]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
            _attr_Tnumsegments)
  _result = _execute.execute(b"UnsortedSegmentProd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UnsortedSegmentProd", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def unsorted_segment_sum_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function unsorted_segment_sum
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], _ctx, _dtypes.int32)
  _inputs_flat = [data, segment_ids, num_segments]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "Tnumsegments",
            _attr_Tnumsegments)
  _result = _execute.execute(b"UnsortedSegmentSum", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "UnsortedSegmentSum", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result


def zeta_eager_fallback(x, q, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
This is for function zeta """ _ctx = ctx if ctx else _context.context() _attr_T, _inputs_T = _execute.args_to_matching_eager([x, q], _ctx) (x, q) = _inputs_T _inputs_flat = [x, q] _attrs = ("T", _attr_T) _result = _execute.execute(b"Zeta", 1, inputs=_inputs_flat, attrs=_attrs, ctx=_ctx, name=name) _execute.record_gradient( "Zeta", _inputs_flat, _attrs, _result, name) _result, = _result return _result # op { # name: "Abs" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "AccumulateNV2" # input_arg { # name: "inputs" # type_attr: "T" # number_attr: "N" # } # output_arg { # name: "sum" # type_attr: "T" # } # attr { # name: "N" # type: "int" # has_minimum: true # minimum: 1 # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "shape" # type: "shape" # } # is_aggregate: true # is_commutative: true # } # op { # name: "Acos" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Acosh" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: 
DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Add" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_UINT8 # type: DT_INT8 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # type: DT_STRING # } # } # } # } # op { # name: "AddN" # input_arg { # name: "inputs" # type_attr: "T" # number_attr: "N" # } # output_arg { # name: "sum" # type_attr: "T" # } # attr { # name: "N" # type: "int" # has_minimum: true # minimum: 1 # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # type: DT_VARIANT # } # } # } # is_aggregate: true # is_commutative: true # } # op { # name: "AddV2" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_UINT8 # type: DT_INT8 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # is_aggregate: true # is_commutative: true # } # op { # name: "All" # input_arg { # name: "input" # type: DT_BOOL # } # input_arg { # name: "reduction_indices" # type_attr: "Tidx" # } # output_arg { # name: "output" # type: DT_BOOL # } # attr { # name: "keep_dims" # type: "bool" # default_value { # b: false # } # } # attr { # name: "Tidx" # type: "type" # 
default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Angle" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "Tout" # } # attr { # name: "T" # type: "type" # default_value { # type: DT_COMPLEX64 # } # allowed_values { # list { # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # attr { # name: "Tout" # type: "type" # default_value { # type: DT_FLOAT # } # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Any" # input_arg { # name: "input" # type: DT_BOOL # } # input_arg { # name: "reduction_indices" # type_attr: "Tidx" # } # output_arg { # name: "output" # type: DT_BOOL # } # attr { # name: "keep_dims" # type: "bool" # default_value { # b: false # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "ApproximateEqual" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type: DT_BOOL # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "tolerance" # type: "float" # default_value { # f: 1e-05 # } # } # is_commutative: true # } # op { # name: "ArgMax" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "dimension" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "output_type" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: 
DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "output_type" # type: "type" # default_value { # type: DT_INT64 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "ArgMin" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "dimension" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "output_type" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "output_type" # type: "type" # default_value { # type: DT_INT64 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Asin" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Asinh" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # 
type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Atan" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Atan2" # input_arg { # name: "y" # type_attr: "T" # } # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Atanh" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "BatchMatMul" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # attr { # name: "adj_x" # type: "bool" # default_value { # b: false # } # } # attr { # name: "adj_y" # type: "bool" # default_value { # b: false # } # } # } # op { # name: "BesselI0e" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "BesselI1e" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # 
type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Betainc" # input_arg { # name: "a" # type_attr: "T" # } # input_arg { # name: "b" # type_attr: "T" # } # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Bincount" # input_arg { # name: "arr" # type: DT_INT32 # } # input_arg { # name: "size" # type: DT_INT32 # } # input_arg { # name: "weights" # type_attr: "T" # } # output_arg { # name: "bins" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Bucketize" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type: DT_INT32 # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "boundaries" # type: "list(float)" # } # } # op { # name: "Cast" # input_arg { # name: "x" # type_attr: "SrcT" # } # output_arg { # name: "y" # type_attr: "DstT" # } # attr { # name: "SrcT" # type: "type" # } # attr { # name: "DstT" # type: "type" # } # } # op { # name: "Ceil" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "ClipByValue" # input_arg { # name: "t" # type_attr: "T" # } # input_arg { # name: "clip_value_min" # type_attr: "T" # } # input_arg { # name: "clip_value_max" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # 
list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # } # op { # name: "CompareAndBitpack" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "threshold" # type_attr: "T" # } # output_arg { # name: "output" # type: DT_UINT8 # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BOOL # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT8 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Complex" # input_arg { # name: "real" # type_attr: "T" # } # input_arg { # name: "imag" # type_attr: "T" # } # output_arg { # name: "out" # type_attr: "Tout" # } # attr { # name: "T" # type: "type" # default_value { # type: DT_FLOAT # } # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "Tout" # type: "type" # default_value { # type: DT_COMPLEX64 # } # allowed_values { # list { # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "ComplexAbs" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "Tout" # } # attr { # name: "T" # type: "type" # default_value { # type: DT_COMPLEX64 # } # allowed_values { # list { # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # attr { # name: "Tout" # type: "type" # default_value { # type: DT_FLOAT # } # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Conj" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # default_value { # type: DT_COMPLEX64 # } # allowed_values { # list { # type: DT_COMPLEX64 # type: DT_COMPLEX128 # type: DT_VARIANT # } # } # } # } # op { # 
name: "Cos" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Cosh" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Cross" # input_arg { # name: "a" # type_attr: "T" # } # input_arg { # name: "b" # type_attr: "T" # } # output_arg { # name: "product" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # } # op { # name: "Cumprod" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "axis" # type_attr: "Tidx" # } # output_arg { # name: "out" # type_attr: "T" # } # attr { # name: "exclusive" # type: "bool" # default_value { # b: false # } # } # attr { # name: "reverse" # type: "bool" # default_value { # b: false # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Cumsum" # input_arg { # name: "x" # 
type_attr: "T" # } # input_arg { # name: "axis" # type_attr: "Tidx" # } # output_arg { # name: "out" # type_attr: "T" # } # attr { # name: "exclusive" # type: "bool" # default_value { # b: false # } # } # attr { # name: "reverse" # type: "bool" # default_value { # b: false # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Digamma" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Div" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_UINT8 # type: DT_INT8 # type: DT_UINT16 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Equal" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type: DT_BOOL # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_UINT8 # type: DT_INT8 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_QUINT8 # type: DT_QINT8 # type: 
DT_QINT32 # type: DT_STRING # type: DT_BOOL # type: DT_COMPLEX128 # } # } # } # is_commutative: true # } # op { # name: "Erf" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Erfc" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Exp" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Expm1" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Floor" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "FloorDiv" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_UINT8 # type: DT_INT8 # type: DT_UINT16 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op 
{ # name: "FloorMod" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Greater" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type: DT_BOOL # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # } # op { # name: "GreaterEqual" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type: DT_BOOL # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # } # op { # name: "HistogramFixedWidth" # input_arg { # name: "values" # type_attr: "T" # } # input_arg { # name: "value_range" # type_attr: "T" # } # input_arg { # name: "nbins" # type: DT_INT32 # } # output_arg { # name: "out" # type_attr: "dtype" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "dtype" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Igamma" # input_arg { # name: "a" # type_attr: "T" # } # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: 
"T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "IgammaGradA" # input_arg { # name: "a" # type_attr: "T" # } # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Igammac" # input_arg { # name: "a" # type_attr: "T" # } # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Imag" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "Tout" # } # attr { # name: "T" # type: "type" # default_value { # type: DT_COMPLEX64 # } # allowed_values { # list { # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # attr { # name: "Tout" # type: "type" # default_value { # type: DT_FLOAT # } # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Inv" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "InvGrad" # input_arg { # name: "y" # type_attr: "T" # } # input_arg { # name: "dy" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "IsFinite" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type: DT_BOOL # } # attr { # name: "T" # type: "type" # allowed_values { # 
list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "IsInf" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type: DT_BOOL # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "IsNan" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type: DT_BOOL # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Less" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type: DT_BOOL # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # } # op { # name: "LessEqual" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type: DT_BOOL # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # } # op { # name: "Lgamma" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "LinSpace" # input_arg { # name: "start" # type_attr: "T" # } # input_arg { # name: "stop" # type_attr: "T" # } # input_arg { # name: "num" # type_attr: "Tidx" # } # 
output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Log" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Log1p" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "LogicalAnd" # input_arg { # name: "x" # type: DT_BOOL # } # input_arg { # name: "y" # type: DT_BOOL # } # output_arg { # name: "z" # type: DT_BOOL # } # is_commutative: true # } # op { # name: "LogicalNot" # input_arg { # name: "x" # type: DT_BOOL # } # output_arg { # name: "y" # type: DT_BOOL # } # } # op { # name: "LogicalOr" # input_arg { # name: "x" # type: DT_BOOL # } # input_arg { # name: "y" # type: DT_BOOL # } # output_arg { # name: "z" # type: DT_BOOL # } # is_commutative: true # } # op { # name: "MatMul" # input_arg { # name: "a" # type_attr: "T" # } # input_arg { # name: "b" # type_attr: "T" # } # output_arg { # name: "product" # type_attr: "T" # } # attr { # name: "transpose_a" # type: "bool" # default_value { # b: false # } # } # attr { # name: "transpose_b" # type: "bool" # default_value { # b: false # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_COMPLEX64 # type: 
DT_COMPLEX128 # } # } # } # } # op { # name: "Max" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "reduction_indices" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "keep_dims" # type: "bool" # default_value { # b: false # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Maximum" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # } # } # } # is_commutative: true # } # op { # name: "Mean" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "reduction_indices" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "keep_dims" # type: "bool" # default_value { # b: false # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: 
DT_INT64 # } # } # } # } # op { # name: "Min" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "reduction_indices" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "keep_dims" # type: "bool" # default_value { # b: false # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Minimum" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # } # } # } # is_commutative: true # } # op { # name: "Mod" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # type: DT_HALF # type: DT_HALF # type: DT_BFLOAT16 # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Mul" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_UINT8 # type: DT_INT8 # type: DT_UINT16 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # 
type: DT_COMPLEX128 # } # } # } # is_commutative: true # } # op { # name: "Neg" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "NotEqual" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type: DT_BOOL # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_UINT8 # type: DT_INT8 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_QUINT8 # type: DT_QINT8 # type: DT_QINT32 # type: DT_STRING # type: DT_BOOL # type: DT_COMPLEX128 # } # } # } # is_commutative: true # } # op { # name: "Polygamma" # input_arg { # name: "a" # type_attr: "T" # } # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Pow" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_FLOAT # type: DT_HALF # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Prod" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "reduction_indices" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "keep_dims" # type: "bool" # default_value { # b: false # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: 
DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "QuantizeDownAndShrinkRange" # input_arg { # name: "input" # type_attr: "Tinput" # } # input_arg { # name: "input_min" # type: DT_FLOAT # } # input_arg { # name: "input_max" # type: DT_FLOAT # } # output_arg { # name: "output" # type_attr: "out_type" # } # output_arg { # name: "output_min" # type: DT_FLOAT # } # output_arg { # name: "output_max" # type: DT_FLOAT # } # attr { # name: "Tinput" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # name: "out_type" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # } # op { # name: "QuantizedAdd" # input_arg { # name: "x" # type_attr: "T1" # } # input_arg { # name: "y" # type_attr: "T2" # } # input_arg { # name: "min_x" # type: DT_FLOAT # } # input_arg { # name: "max_x" # type: DT_FLOAT # } # input_arg { # name: "min_y" # type: DT_FLOAT # } # input_arg { # name: "max_y" # type: DT_FLOAT # } # output_arg { # name: "z" # type_attr: "Toutput" # } # output_arg { # name: "min_z" # type: DT_FLOAT # } # output_arg { # name: "max_z" # type: DT_FLOAT # } # attr { # name: "T1" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # name: "T2" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # 
} # attr { # name: "Toutput" # type: "type" # default_value { # type: DT_QINT32 # } # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # is_commutative: true # } # op { # name: "QuantizedMatMul" # input_arg { # name: "a" # type_attr: "T1" # } # input_arg { # name: "b" # type_attr: "T2" # } # input_arg { # name: "min_a" # type: DT_FLOAT # } # input_arg { # name: "max_a" # type: DT_FLOAT # } # input_arg { # name: "min_b" # type: DT_FLOAT # } # input_arg { # name: "max_b" # type: DT_FLOAT # } # output_arg { # name: "out" # type_attr: "Toutput" # } # output_arg { # name: "min_out" # type: DT_FLOAT # } # output_arg { # name: "max_out" # type: DT_FLOAT # } # attr { # name: "T1" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # name: "T2" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # name: "Toutput" # type: "type" # default_value { # type: DT_QINT32 # } # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # name: "transpose_a" # type: "bool" # default_value { # b: false # } # } # attr { # name: "transpose_b" # type: "bool" # default_value { # b: false # } # } # attr { # name: "Tactivation" # type: "type" # default_value { # type: DT_QUINT8 # } # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # } # op { # name: "QuantizedMul" # input_arg { # name: "x" # type_attr: "T1" # } # input_arg { # name: "y" # type_attr: "T2" # } # input_arg { # name: "min_x" # type: DT_FLOAT # } # input_arg { # name: "max_x" # type: DT_FLOAT # } # input_arg { # name: "min_y" # type: DT_FLOAT # } # input_arg { # name: "max_y" # type: DT_FLOAT # } 
# output_arg { # name: "z" # type_attr: "Toutput" # } # output_arg { # name: "min_z" # type: DT_FLOAT # } # output_arg { # name: "max_z" # type: DT_FLOAT # } # attr { # name: "T1" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # name: "T2" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # name: "Toutput" # type: "type" # default_value { # type: DT_QINT32 # } # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # is_commutative: true # } # op { # name: "Range" # input_arg { # name: "start" # type_attr: "Tidx" # } # input_arg { # name: "limit" # type_attr: "Tidx" # } # input_arg { # name: "delta" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "Tidx" # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Real" # input_arg { # name: "input" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "Tout" # } # attr { # name: "T" # type: "type" # default_value { # type: DT_COMPLEX64 # } # allowed_values { # list { # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # attr { # name: "Tout" # type: "type" # default_value { # type: DT_FLOAT # } # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "RealDiv" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_UINT8 # type: DT_INT8 # type: DT_UINT16 # type: 
DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Reciprocal" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "ReciprocalGrad" # input_arg { # name: "y" # type_attr: "T" # } # input_arg { # name: "dy" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "RequantizationRange" # input_arg { # name: "input" # type_attr: "Tinput" # } # input_arg { # name: "input_min" # type: DT_FLOAT # } # input_arg { # name: "input_max" # type: DT_FLOAT # } # output_arg { # name: "output_min" # type: DT_FLOAT # } # output_arg { # name: "output_max" # type: DT_FLOAT # } # attr { # name: "Tinput" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # } # op { # name: "Requantize" # input_arg { # name: "input" # type_attr: "Tinput" # } # input_arg { # name: "input_min" # type: DT_FLOAT # } # input_arg { # name: "input_max" # type: DT_FLOAT # } # input_arg { # name: "requested_output_min" # type: DT_FLOAT # } # input_arg { # name: "requested_output_max" # type: DT_FLOAT # } # output_arg { # name: "output" # type_attr: "out_type" # } # output_arg { # name: "output_min" # type: DT_FLOAT # } # output_arg { # name: "output_max" # type: DT_FLOAT # } # attr { # name: "Tinput" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # attr { # 
name: "out_type" # type: "type" # allowed_values { # list { # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_QINT16 # type: DT_QUINT16 # } # } # } # } # op { # name: "Rint" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "Round" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Rsqrt" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "RsqrtGrad" # input_arg { # name: "y" # type_attr: "T" # } # input_arg { # name: "dy" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "SegmentMax" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "segment_ids" # type_attr: "Tindices" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tindices" # type: "type" # allowed_values { # 
list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "SegmentMean" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "segment_ids" # type_attr: "Tindices" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "SegmentMin" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "segment_ids" # type_attr: "Tindices" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "SegmentProd" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "segment_ids" # type_attr: "Tindices" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tindices" # type: "type" # allowed_values { # 
list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "SegmentSum" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "segment_ids" # type_attr: "Tindices" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Select" # input_arg { # name: "condition" # type: DT_BOOL # } # input_arg { # name: "t" # type_attr: "T" # } # input_arg { # name: "e" # type_attr: "T" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # } # } # op { # name: "Sigmoid" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "SigmoidGrad" # input_arg { # name: "y" # type_attr: "T" # } # input_arg { # name: "dy" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Sign" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: 
DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Sin" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Sinh" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "SparseMatMul" # input_arg { # name: "a" # type_attr: "Ta" # } # input_arg { # name: "b" # type_attr: "Tb" # } # output_arg { # name: "product" # type: DT_FLOAT # } # attr { # name: "transpose_a" # type: "bool" # default_value { # b: false # } # } # attr { # name: "transpose_b" # type: "bool" # default_value { # b: false # } # } # attr { # name: "a_is_sparse" # type: "bool" # default_value { # b: false # } # } # attr { # name: "b_is_sparse" # type: "bool" # default_value { # b: false # } # } # attr { # name: "Ta" # type: "type" # default_value { # type: DT_FLOAT # } # allowed_values { # list { # type: DT_FLOAT # type: DT_BFLOAT16 # } # } # } # attr { # name: "Tb" # type: "type" # default_value { # type: DT_FLOAT # } # allowed_values { # list { # type: DT_FLOAT # type: DT_BFLOAT16 # } # } # } # } # op { # name: "SparseSegmentMean" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tidx" # } # input_arg { # name: "segment_ids" # type: DT_INT32 # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } 
# } # } # } # op { # name: "SparseSegmentMeanGrad" # input_arg { # name: "grad" # type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tidx" # } # input_arg { # name: "segment_ids" # type: DT_INT32 # } # input_arg { # name: "output_dim0" # type: DT_INT32 # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "SparseSegmentMeanWithNumSegments" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tidx" # } # input_arg { # name: "segment_ids" # type: DT_INT32 # } # input_arg { # name: "num_segments" # type_attr: "Tnumsegments" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "Tnumsegments" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "SparseSegmentSqrtN" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tidx" # } # input_arg { # name: "segment_ids" # type: DT_INT32 # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "SparseSegmentSqrtNGrad" # input_arg { # name: "grad" # type_attr: "T" # } # input_arg { # name: "indices" # 
type_attr: "Tidx" # } # input_arg { # name: "segment_ids" # type: DT_INT32 # } # input_arg { # name: "output_dim0" # type: DT_INT32 # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "SparseSegmentSqrtNWithNumSegments" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tidx" # } # input_arg { # name: "segment_ids" # type: DT_INT32 # } # input_arg { # name: "num_segments" # type_attr: "Tnumsegments" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "Tnumsegments" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "SparseSegmentSum" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tidx" # } # input_arg { # name: "segment_ids" # type: DT_INT32 # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "SparseSegmentSumWithNumSegments" # input_arg { # name: "data" # 
type_attr: "T" # } # input_arg { # name: "indices" # type_attr: "Tidx" # } # input_arg { # name: "segment_ids" # type: DT_INT32 # } # input_arg { # name: "num_segments" # type_attr: "Tnumsegments" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "Tnumsegments" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Sqrt" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "SqrtGrad" # input_arg { # name: "y" # type_attr: "T" # } # input_arg { # name: "dy" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Square" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "SquaredDifference" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: 
"T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # is_commutative: true # } # op { # name: "Sub" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_UINT8 # type: DT_INT8 # type: DT_UINT16 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Sum" # input_arg { # name: "input" # type_attr: "T" # } # input_arg { # name: "reduction_indices" # type_attr: "Tidx" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "keep_dims" # type: "bool" # default_value { # b: false # } # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tidx" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Tan" # input_arg { # name: "x" # type_attr: "T" # } # output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "Tanh" # input_arg { # name: "x" # type_attr: "T" # } # 
output_arg { # name: "y" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "TanhGrad" # input_arg { # name: "y" # type_attr: "T" # } # input_arg { # name: "dy" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "TruncateDiv" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # type: DT_UINT8 # type: DT_INT8 # type: DT_UINT16 # type: DT_INT16 # type: DT_INT32 # type: DT_INT64 # type: DT_COMPLEX64 # type: DT_COMPLEX128 # } # } # } # } # op { # name: "TruncateMod" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "y" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_HALF # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } # op { # name: "UnsortedSegmentMax" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "segment_ids" # type_attr: "Tindices" # } # input_arg { # name: "num_segments" # type_attr: "Tnumsegments" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: 
"Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "Tnumsegments" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "UnsortedSegmentMin" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "segment_ids" # type_attr: "Tindices" # } # input_arg { # name: "num_segments" # type_attr: "Tnumsegments" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_INT64 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "Tnumsegments" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "UnsortedSegmentProd" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "segment_ids" # type_attr: "Tindices" # } # input_arg { # name: "num_segments" # type_attr: "Tnumsegments" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "Tnumsegments" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: 
DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "UnsortedSegmentSum" # input_arg { # name: "data" # type_attr: "T" # } # input_arg { # name: "segment_ids" # type_attr: "Tindices" # } # input_arg { # name: "num_segments" # type_attr: "Tnumsegments" # } # output_arg { # name: "output" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # type: DT_INT32 # type: DT_UINT8 # type: DT_INT16 # type: DT_INT8 # type: DT_COMPLEX64 # type: DT_INT64 # type: DT_QINT8 # type: DT_QUINT8 # type: DT_QINT32 # type: DT_BFLOAT16 # type: DT_UINT16 # type: DT_COMPLEX128 # type: DT_HALF # type: DT_UINT32 # type: DT_UINT64 # } # } # } # attr { # name: "Tindices" # type: "type" # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # attr { # name: "Tnumsegments" # type: "type" # default_value { # type: DT_INT32 # } # allowed_values { # list { # type: DT_INT32 # type: DT_INT64 # } # } # } # } # op { # name: "Zeta" # input_arg { # name: "x" # type_attr: "T" # } # input_arg { # name: "q" # type_attr: "T" # } # output_arg { # name: "z" # type_attr: "T" # } # attr { # name: "T" # type: "type" # allowed_values { # list { # type: DT_FLOAT # type: DT_DOUBLE # } # } # } # } _op_def_lib = _InitOpDefLibrary(b"\n,\n\003Abs\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\no\n\rAccumulateNV2\022\016\n\006inputs\"\001T*\001N\032\010\n\003sum\"\001T\"\014\n\001N\022\003int(\0010\001\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\200\001\001\220\001\001\n/\n\004Acos\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Acosh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\003Add\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\005\003\t\010\022\007\nW\n\004AddN\022\016\n\006inputs\"\001T*\001N\032\010\n\003sum\"\001T\"\014\n\001N\022\003int(\0010\001\"!\n\001T\022\004type:\026\n\0242\022\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\025\200\001\001\220\001\001\nA\n\005AddV2\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\032\n\001T\022\004type:\017\n\r2\013\016\023\001\002\004\006\005\003\t\010\022\200\001\001\220\001\001\nh\n\003All\022\t\n\005input\030\n\022\031\n\021reduction_indices\"\004Tidx\032\n\n\006output\030\n\"\025\n\tkeep_dims\022\004bool\032\002(\000\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nT\n\005Angle\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\nh\n\003Any\022\t\n\005input\030\n\022\031\n\021reduction_indices\"\004Tidx\032\n\n\006output\030\n\"\025\n\tkeep_dims\022\004bool\032\002(\000\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\ni\n\020ApproximateEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\031\n\ttolerance\022\005float\032\005%\254\305\'7\220\001\001\n\233\001\n\006ArgMax\022\n\n\005input\"\001T\022\021\n\tdimension\"\004Tidx\032\025\n\006output\"\013output_type\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\037\n\013output_type\022\004type\032\0020\t:\006\n\0042\002\003\t\n\233\001\n\006ArgMin\022\n\n\005input\"\001T\022\021\n\tdimension\"\004Tidx\032\025\n\006output\"\013output_type\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\037\n\013output_type\022\004type\032\0020\t:\006\n\0042\002\003\t\n/\n\004Asin\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Asinh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n/\n\004Atan\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n4\n\005Atan2\022\006\n\001y\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n.\n\005Atanh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nh\n\013BatchMatMul\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\013\n\006output\"\001T\"\026\n\001T\022\004type:\013\n\t2\007\016\023\001\002\003\010\022\"\021\n\005adj_x\022\004bool\032\002(\000\"\021\n\005adj_y\022\004bool\032\002(\000\n0\n\tBesselI0e\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n0\n\tBesselI1e\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n<\n\007Betainc\022\006\n\001a\"\001T\022\006\n\001b\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nK\n\010Bincount\022\007\n\003arr\030\003\022\010\n\004size\030\003\022\014\n\007weights\"\001T\032\t\n\004bins\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\00
3\t\001\002\nS\n\tBucketize\022\n\n\005input\"\001T\032\n\n\006output\030\003\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\"\031\n\nboundaries\022\013list(float)\n8\n\004Cast\022\t\n\001x\"\004SrcT\032\t\n\001y\"\004DstT\"\014\n\004SrcT\022\004type\"\014\n\004DstT\022\004type\n+\n\004Ceil\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\nn\n\013ClipByValue\022\006\n\001t\"\001T\022\023\n\016clip_value_min\"\001T\022\023\n\016clip_value_max\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\nT\n\021CompareAndBitpack\022\n\n\005input\"\001T\022\016\n\tthreshold\"\001T\032\n\n\006output\030\004\"\027\n\001T\022\004type:\014\n\n2\010\n\023\001\002\006\005\003\t\n]\n\007Complex\022\t\n\004real\"\001T\022\t\n\004imag\"\001T\032\013\n\003out\"\004Tout\"\025\n\001T\022\004type\032\0020\001:\006\n\0042\002\001\002\"\030\n\004Tout\022\004type\032\0020\010:\006\n\0042\002\010\022\nP\n\nComplexAbs\022\006\n\001x\"\001T\032\t\n\001y\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n7\n\004Conj\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\026\n\001T\022\004type\032\0020\010:\007\n\0052\003\010\022\025\n,\n\003Cos\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n-\n\004Cosh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nB\n\005Cross\022\006\n\001a\"\001T\022\006\n\001b\"\001T\032\014\n\007product\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\221\001\n\007Cumprod\022\006\n\001x\"\001T\022\014\n\004axis\"\004Tidx\032\010\n\003out\"\001T\"\025\n\texclusive\022\004bool\032\002(\000\"\023\n\007reverse\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\220\001\n\006Cumsum\022\006\n\001x\"\001T\022\014\n\004axis\"\004Tidx\032\010\n\003out\"\001T\"\025\n\texclusive\022\004bool\032\002(\000\"\023\n\007reverse\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n.\n\007Digamma\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n:\n\003Div\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\nB\n\005Equal\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\037\n\001T\022\004type:\024\n\0222\020\016\023\001\002\004\006\005\003\t\010\014\013\r\007\n\022\220\001\001\n*\n\003Erf\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\004Erfc\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n,\n\003Exp\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\005Expm1\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n,\n\005Floor\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n?\n\010FloorDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n9\n\010FloorMod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\003\t\016\023\001\002\n=\n\007Greater\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\01
4\001\002\003\004\005\006\t\016\021\023\026\027\nB\n\014GreaterEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n}\n\023HistogramFixedWidth\022\013\n\006values\"\001T\022\020\n\013value_range\"\001T\022\t\n\005nbins\030\003\032\014\n\003out\"\005dtype\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\"\031\n\005dtype\022\004type\032\0020\003:\006\n\0042\002\003\t\n3\n\006Igamma\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n8\n\013IgammaGradA\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n4\n\007Igammac\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nS\n\004Imag\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n.\n\003Inv\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n9\n\007InvGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\010IsFinite\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\005IsInf\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\005IsNan\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n:\n\004Less\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n?\n\tLessEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\0
16\021\023\026\027\n-\n\006Lgamma\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\ni\n\010LinSpace\022\n\n\005start\"\001T\022\t\n\004stop\"\001T\022\013\n\003num\"\004Tidx\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\016\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n,\n\003Log\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\005Log1p\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n$\n\nLogicalAnd\022\005\n\001x\030\n\022\005\n\001y\030\n\032\005\n\001z\030\n\220\001\001\n\032\n\nLogicalNot\022\005\n\001x\030\n\032\005\n\001y\030\n\n#\n\tLogicalOr\022\005\n\001x\030\n\022\005\n\001y\030\n\032\005\n\001z\030\n\220\001\001\np\n\006MatMul\022\006\n\001a\"\001T\022\006\n\001b\"\001T\032\014\n\007product\"\001T\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\026\n\001T\022\004type:\013\n\t2\007\016\023\001\002\003\010\022\n\214\001\n\003Max\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n;\n\007Maximum\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\220\001\001\n\215\001\n\004Mean\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\214\001\n\003Min\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n;\n\007Minimum\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\220\001\001\n5\n\003Mod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\026\n\001T\022\004type:\013\n\t2\007\003\t\023\023\016\001\002\n=\n\003Mul\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\220\001\001\n.\n\003Neg\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\nE\n\010NotEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\037\n\001T\022\004type:\024\n\0222\020\016\023\001\002\004\006\005\003\t\010\014\013\r\007\n\022\220\001\001\n6\n\tPolygamma\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n6\n\003Pow\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\001\023\002\003\t\010\022\n\215\001\n\004Prod\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\267\001\n\032QuantizeDownAndShrinkRange\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\022\n\006output\"\010out_type\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n\301\001\n\014QuantizedAdd\022\007\n\001x\"\002T1\022\007\n\001y\"\002T2\022\t\n\005min_x\030\001\022\t\n\005max_x\030\001\022\t\n\005min_y\030\001\022\t\n\005max_y\030\001\032\014\n\001z\"\007Toutput\032\t\n\005min_z\030\001\032\t\n\005max_z\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\220\001\001\n\235\002\n\017QuantizedMatMul\022\007\n\001a\"\002T1\022\007\n\001b\"\002T2\022\t\n\005min_a\030\001\022\t\n\005max_a\030\001\022\t\n\005min_b\030\001\022\t\n\005max_b\030\001\032\016\n\003out\"\007Toutput\032\013\n\007min_out\030\001\032\013\n\007max_out\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\"\n\013Tactivation\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\n\301\001\n\014QuantizedMul\022\007\n\001x\"\002T1\022\007\n\001y\"\002T2\022\t\n\005min_x\030\001\022\t\n\005max_x\030\001\022\t\n\005min_y\030\001\022\t\n\005max_y\030\001\032\014\n\001z\"\007Toutput\032\t\n\005min_z\030\001\032\t\n\005max_z\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020
\r:\t\n\0072\005\013\014\r\017\020\220\001\001\na\n\005Range\022\r\n\005start\"\004Tidx\022\r\n\005limit\"\004Tidx\022\r\n\005delta\"\004Tidx\032\016\n\006output\"\004Tidx\"\033\n\004Tidx\022\004type\032\0020\003:\t\n\0072\005\016\001\002\003\t\nS\n\004Real\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n>\n\007RealDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n5\n\nReciprocal\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n@\n\016ReciprocalGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n\177\n\023RequantizationRange\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\n\333\001\n\nRequantize\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\022\030\n\024requested_output_min\030\001\022\030\n\024requested_output_max\030\001\032\022\n\006output\"\010out_type\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n+\n\004Rint\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n0\n\005Round\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Rsqrt\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n;\n\tRsqrtGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\
022\004type:\n\n\0102\006\016\023\001\002\010\022\nt\n\nSegmentMax\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nz\n\013SegmentMean\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nt\n\nSegmentMin\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nz\n\013SegmentProd\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\ny\n\nSegmentSum\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n?\n\006Select\022\r\n\tcondition\030\n\022\006\n\001t\"\001T\022\006\n\001e\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n0\n\007Sigmoid\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n=\n\013SigmoidGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n/\n\004Sign\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n,\n\003Sin\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n-\n\004Sinh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n\301\001\n\014SparseMatMul\022\007\n\001a\"\002Ta\022\007\n\001b\"\002Tb\032\013\n\007product\030\001\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\027\n\013a_is_sparse\022\004bool\032\002(\000\"\027\n\013b_is_sparse\022\004bool\032\002(\000\"\026\n\002Ta\022\004type\032\0020\001:\006\n\0042\002\001\016\"\026\n\002Tb\022\004type\032\0020\001:\006\n\0042\002\001\016\nz\n\021SparseSegmentMean\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\217\001\n\025SparseSegmentMeanGrad\022\t\n\004grad\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\017\n\013output_dim0\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\311\001\n 
SparseSegmentMeanWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n{\n\022SparseSegmentSqrtN\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\220\001\n\026SparseSegmentSqrtNGrad\022\t\n\004grad\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\017\n\013output_dim0\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\312\001\n!SparseSegmentSqrtNWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\203\001\n\020SparseSegmentSum\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\322\001\n\037SparseSegmentSumWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" 
\n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n-\n\004Sqrt\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\010SqrtGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n1\n\006Square\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\nG\n\021SquaredDifference\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\220\001\001\n:\n\003Sub\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n\214\001\n\003Sum\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n.\n\003Tan\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n-\n\004Tanh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\010TanhGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nB\n\013TruncateDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n<\n\013TruncateMod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\003\t\016\023\001\002\n\274\001\n\022UnsortedSegmentMax\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\
033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\274\001\n\022UnsortedSegmentMin\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\302\001\n\023UnsortedSegmentProd\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\301\001\n\022UnsortedSegmentSum\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n1\n\004Zeta\022\006\n\001x\"\001T\022\006\n\001q\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002")
[ 37811, 37906, 7917, 11799, 1088, 309, 22854, 37535, 39628, 13, 198, 198, 1212, 2393, 318, 337, 16219, 8881, 24700, 1137, 11617, 0, 2141, 407, 4370, 13, 198, 20556, 327, 4880, 2723, 2393, 25, 10688, 62, 2840, 13, 535, 198, 37811, 198, ...
2.094776
167,985
# This file is part of Indico. # Copyright (C) 2002 - 2021 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. from uuid import uuid4 from sqlalchemy.dialects.postgresql import ARRAY, UUID from sqlalchemy.ext.declarative import declared_attr from werkzeug.urls import url_parse from indico.core.db import db from indico.core.db.sqlalchemy import PyIntEnum from indico.modules.oauth import logger from indico.util.i18n import _ from indico.util.struct.enum import IndicoEnum SCOPES = {'read:user': _("User information (read only)"), 'read:legacy_api': _('Legacy API (read only)'), 'write:legacy_api': _('Legacy API (write only)'), 'registrants': _('Event registrants')} def reset_client_secret(self): self.client_secret = str(uuid4()) logger.info("Client secret for %s has been reset.", self) def validate_redirect_uri(self, redirect_uri): """Called by flask-oauthlib to validate the redirect_uri. Uses a logic similar to the one at GitHub, i.e. protocol and host/port must match exactly and if there is a path in the whitelisted URL, the path of the redirect_uri must start with that path. """ uri_data = url_parse(redirect_uri) for valid_uri_data in map(url_parse, self.redirect_uris): if (uri_data.scheme == valid_uri_data.scheme and uri_data.netloc == valid_uri_data.netloc and uri_data.path.startswith(valid_uri_data.path)): return True return False
[ 2, 770, 2393, 318, 636, 286, 1423, 3713, 13, 198, 2, 15069, 357, 34, 8, 6244, 532, 33448, 327, 28778, 198, 2, 198, 2, 1423, 3713, 318, 1479, 3788, 26, 345, 460, 17678, 4163, 340, 290, 14, 273, 198, 2, 13096, 340, 739, 262, 2846,...
2.570533
638
import paddle.fluid as fluid def loss(x, y, clip_value=10.0): """Calculate the sigmoid cross entropy with logits for input(x). Args: x: Variable with shape with shape [batch, dim] y: Input label Returns: loss: cross entropy logits: prediction """ logits = fluid.layers.fc( input=x, size=1, bias_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(0.))) loss = fluid.layers.sigmoid_cross_entropy_with_logits(x=logits, label=y) loss = fluid.layers.reduce_mean( fluid.layers.clip( loss, min=-clip_value, max=clip_value)) return loss, logits def ffn(input, d_inner_hid, d_hid, name=None): """Position-wise Feed-Forward Network """ hidden = fluid.layers.fc(input=input, size=d_inner_hid, num_flatten_dims=2, param_attr=fluid.ParamAttr(name=name + '_fc.w_0'), bias_attr=fluid.ParamAttr( name=name + '_fc.b_0', initializer=fluid.initializer.Constant(0.)), act="relu") out = fluid.layers.fc(input=hidden, size=d_hid, num_flatten_dims=2, param_attr=fluid.ParamAttr(name=name + '_fc.w_1'), bias_attr=fluid.ParamAttr( name=name + '_fc.b_1', initializer=fluid.initializer.Constant(0.))) return out def dot_product_attention(query, key, value, d_key, q_mask=None, k_mask=None, dropout_rate=None, mask_cache=None): """Dot product layer. Args: query: a tensor with shape [batch, Q_time, Q_dimension] key: a tensor with shape [batch, time, K_dimension] value: a tensor with shape [batch, time, V_dimension] q_lengths: a tensor with shape [batch] k_lengths: a tensor with shape [batch] Returns: a tensor with shape [batch, query_time, value_dimension] Raises: AssertionError: if Q_dimension not equal to K_dimension when attention type is dot. 
""" logits = fluid.layers.matmul( x=query, y=key, transpose_y=True, alpha=d_key**(-0.5)) if (q_mask is not None) and (k_mask is not None): if mask_cache is not None and q_mask.name in mask_cache and k_mask.name in mask_cache[ q_mask.name]: mask, another_mask = mask_cache[q_mask.name][k_mask.name] else: mask = fluid.layers.matmul(x=q_mask, y=k_mask, transpose_y=True) another_mask = fluid.layers.scale( mask, scale=float(2**32 - 1), bias=float(-1), bias_after_scale=False) if mask_cache is not None: if q_mask.name not in mask_cache: mask_cache[q_mask.name] = dict() mask_cache[q_mask.name][k_mask.name] = [mask, another_mask] logits = mask * logits + another_mask attention = fluid.layers.softmax(logits) if dropout_rate: attention = fluid.layers.dropout( input=attention, dropout_prob=dropout_rate, is_test=False, seed=2) atten_out = fluid.layers.matmul(x=attention, y=value) return atten_out def block(name, query, key, value, d_key, q_mask=None, k_mask=None, is_layer_norm=True, dropout_rate=None, mask_cache=None): """ """ att_out = dot_product_attention( query, key, value, d_key, q_mask, k_mask, dropout_rate, mask_cache=mask_cache) y = query + att_out if is_layer_norm: y = fluid.layers.layer_norm( input=y, begin_norm_axis=len(y.shape) - 1, param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(1.), name=name + '_layer_norm.w_0'), bias_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(0.), name=name + '_layer_norm.b_0')) z = ffn(y, d_key, d_key, name) w = y + z if is_layer_norm: w = fluid.layers.layer_norm( input=w, begin_norm_axis=len(w.shape) - 1, param_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(1.), name=name + '_layer_norm.w_1'), bias_attr=fluid.ParamAttr( initializer=fluid.initializer.Constant(0.), name=name + '_layer_norm.b_1')) return w
[ 11748, 39517, 13, 35522, 312, 355, 11711, 628, 198, 4299, 2994, 7, 87, 11, 331, 11, 10651, 62, 8367, 28, 940, 13, 15, 2599, 198, 220, 220, 220, 37227, 9771, 3129, 378, 262, 264, 17225, 1868, 3272, 40709, 351, 2604, 896, 329, 5128, ...
1.801966
2,747
from salts_lib.pyjsparser.pyjsparserdata import * REGEXP_SPECIAL_SINGLE = {'\\', '^', '$', '*', '+', '?', '.'} NOT_PATTERN_CHARS = {'^', '$', '\\', '.', '*', '+', '?', '(', ')', '[', ']', '|'} # what about '{', '}', ??? CHAR_CLASS_ESCAPE = {'d', 'D', 's', 'S', 'w', 'W'} CONTROL_ESCAPE_CHARS = {'f', 'n', 'r', 't', 'v'} CONTROL_LETTERS = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'} a = JsRegExpParser('a(?=x)', '') print(a.parsePattern())
[ 6738, 37056, 62, 8019, 13, 9078, 73, 2777, 28198, 13, 9078, 73, 2777, 28198, 7890, 1330, 1635, 198, 198, 31553, 49864, 62, 48451, 12576, 62, 50, 2751, 2538, 796, 1391, 6, 6852, 3256, 705, 61, 3256, 705, 3, 3256, 705, 9, 3256, 705, ...
1.74505
404
import torch.nn as nn from .basic import *
[ 11748, 28034, 13, 20471, 355, 299, 77, 198, 6738, 764, 35487, 1330, 1635, 198 ]
3.071429
14
# pylint: disable=W0201, E1101 """ handle request for markdown pages """ import logging import os import importlib from tornado.web import RequestHandler, HTTPError from tornado.escape import url_escape from ..utils.converter_mixin import ConverterMixin from .access_control import UserMixin from ..utils.nav import nav LOGGER = logging.getLogger(__name__) EMPTY_TOC = '<div class="toc">\n<ul></ul>\n</div>\n'
[ 2, 279, 2645, 600, 25, 15560, 28, 54, 15, 1264, 11, 412, 1157, 486, 198, 37811, 5412, 2581, 329, 1317, 2902, 5468, 37227, 198, 11748, 18931, 198, 11748, 28686, 198, 11748, 1330, 8019, 198, 6738, 33718, 13, 12384, 1330, 19390, 25060, 1...
3.128788
132
# @Title: (Binary Search Tree to Greater Sum Tree) # @Author: KivenC # @Date: 2019-05-15 19:52:08 # @Runtime: 48 ms # @Memory: 13 MB # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None
[ 198, 2, 2488, 19160, 25, 220, 357, 33, 3219, 11140, 12200, 284, 18169, 5060, 12200, 8, 198, 2, 2488, 13838, 25, 509, 1469, 34, 198, 2, 2488, 10430, 25, 13130, 12, 2713, 12, 1314, 678, 25, 4309, 25, 2919, 198, 2, 2488, 41006, 25, ...
2.292308
130
import os import option import utility import grapeMenu import grapeGit as git import grapeConfig
[ 11748, 28686, 198, 11748, 3038, 198, 11748, 10361, 198, 11748, 30777, 23381, 198, 11748, 30777, 38, 270, 355, 17606, 198, 11748, 30777, 16934, 628 ]
4.125
24
# -*- coding: utf-8 -*- # Copyright (c) 2016, German Neuroinformatics Node (G-Node) # Achilleas Koutsou <achilleas.k@gmail.com> # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted under the terms of the BSD License. See # LICENSE file in the root of the Project. """ Tests for neo.io.nixio """ import os from datetime import datetime try: import unittest2 as unittest except ImportError: import unittest try: from unittest import mock except ImportError: import mock import string import itertools from six import string_types import numpy as np import quantities as pq from neo.core import (Block, Segment, ChannelIndex, AnalogSignal, IrregularlySampledSignal, Unit, SpikeTrain, Event, Epoch) from neo.test.iotest.common_io_test import BaseTestIO try: import nixio HAVE_NIX = True except ImportError: HAVE_NIX = False from neo.io.nixio import NixIO from neo.io.nixio import nixtypes
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 15069, 357, 66, 8, 1584, 11, 2679, 13782, 259, 18982, 873, 19081, 357, 38, 12, 19667, 8, 198, 2, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220...
2.737533
381
# -*- coding: utf-8 -*- """ Created on Wed Mar 28 11:25:43 2019 @author: Taufik Sutanto taufik@tau-data.id https://tau-data.id ~~Perjanjian Penggunaan Materi & Codes (PPMC) - License:~~ * Modul Python dan gambar-gambar (images) yang digunakan adalah milik dari berbagai sumber sebagaimana yang telah dicantumkan dalam masing-masing license modul, caption atau watermark. * Materi & Codes diluar point (1) (i.e. code ini & semua slide ".ipynb)) yang digunakan di tau-data dapat digunakan untuk keperluan akademis dan kegiatan non-komersil lainnya. * Untuk keperluan diluar point (2), maka dibutuhkan izin tertulis dari Taufik Edy Sutanto (selanjutnya disebut sebagai pengarang). * Materi & Codes tidak boleh dipublikasikan tanpa izin dari pengarang. * Materi & codes diberikan "as-is", tanpa warranty. Pengarang tidak bertanggung jawab atas penggunaannya diluar kegiatan resmi yang dilaksanakan pengarang. * Dengan menggunakan materi dan codes ini berarti pengguna telah menyetujui PPMC ini. """ import re, numpy as np import itertools, nltk from collections import Counter from nltk.corpus import wordnet as wn from nltk.stem import PorterStemmer;ps = PorterStemmer() from itertools import chain import warnings; warnings.simplefilter('ignore') corpus = 'data/corpus_sederhana.txt' WORDS = Counter(words(open(corpus).read())) def P(word): "Probability of `word`." N=sum(WORDS.values()) return WORDS[word] / N def correction(word): "Most probable spelling correction for word." return max(candidates(word), key=P) def candidates(word): "Generate possible spelling corrections for word." return (known([word]) or known(edits1(word)) or known(edits2(word)) or [word]) def known(words): "The subset of `words` that appear in the dictionary of WORDS." return set(w for w in words if w in WORDS) def edits1(word): "All edits that are one edit away from `word`." 
letters = 'abcdefghijklmnopqrstuvwxyz' splits = [(word[:i], word[i:]) for i in range(len(word) + 1)] deletes = [L + R[1:] for L, R in splits if R] transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1] replaces = [L + c + R[1:] for L, R in splits if R for c in letters] inserts = [L + c + R for L, R in splits for c in letters] return set(deletes + transposes + replaces + inserts) def edits2(word): "All edits that are two edits away from `word`." return (e2 for e1 in edits1(word) for e2 in edits1(e1)) def lDistance(firstString, secondString): "Function to find the Levenshtein distance between two words/sentences - gotten from http://rosettacode.org/wiki/Levenshtein_distance#Python" if len(firstString) > len(secondString): firstString, secondString = secondString, firstString distances = range(len(firstString) + 1) for index2, char2 in enumerate(secondString): newDistances = [index2 + 1] for index1, char1 in enumerate(firstString): if char1 == char2: newDistances.append(distances[index1]) else: newDistances.append(1 + min((distances[index1], distances[index1+1], newDistances[-1]))) distances = newDistances return distances[-1]
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 201, 198, 37811, 201, 198, 41972, 319, 3300, 1526, 2579, 1367, 25, 1495, 25, 3559, 13130, 201, 198, 31, 9800, 25, 36849, 69, 1134, 45220, 14723, 201, 198, 83, 559, 69, 1134...
2.445341
1,363
from __future__ import annotations from copy import deepcopy from dataclasses import dataclass, field from typing import List, Iterator, TypeVar, Union, Any, Generic import pandas as pd from pandas.core.indexing import _LocIndexer from reamber.base.Map import Map from reamber.base.Property import stack_props NoteListT = TypeVar('NoteListT') HitListT = TypeVar('HitListT') HoldListT = TypeVar('HoldListT') BpmListT = TypeVar('BpmListT') MapT = TypeVar('MapT')
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 4866, 1330, 2769, 30073, 198, 6738, 4818, 330, 28958, 1330, 4818, 330, 31172, 11, 2214, 198, 6738, 19720, 1330, 7343, 11, 40806, 1352, 11, 5994, 19852, 11, 4479, 11, 4377, 11, 42044,...
3.127517
149
""" ========= filtering.py ========= This module provides more granular filtering for captures. You can customize your own filters too. """ from __future__ import annotations import re from abc import ABC, ABCMeta, abstractmethod from dataclasses import dataclass from json import JSONEncoder from pathlib import PosixPath from typing import ( Any, Dict, Iterable, Mapping, NewType, Optional, Protocol, Type, TypedDict, Union, ) import h5py import numpy as np from h5py import File as Fast5File from ..hdf5 import ( HasFast5, HDF5_Group, HDF5_GroupSerialableDataclass, HDF5_GroupSerializable, HDF5_GroupSerializing, IsAttr, ) from ..logger import Logger, getLogger from ..signals import Capture from .core import NumpyArrayLike, PathLikeOrString, ReadId, stripped_by_keys from .plugin import Plugin CaptureOrTimeSeries = Union[Capture, NumpyArrayLike] # Unique identifier for a collection of filters (e.g. "ProfJeffsAwesomeFilters") FilterSetId = NewType("FilterSetId", str) # Unique identifier for an individual filter (e.g. "min_frac") FilterName = NewType("FilterName", str) __all__ = [ "does_pass_filters", "get_filters", "FilterName", "FilterSetId", "FilterConfig", "Filter", "Filters", "DEFAULT_FILTER_PLUGINS", "FilterSet", "FilterConfigs", "FilterPlugin", "PATH", ] # Mapping of a FilterName to filter configurations. FilterConfigs = NewType("FilterConfigs", Dict[FilterName, FilterConfig]) # TODO: Filter Plugin should check that name is unique. https://github.com/uwmisl/poretitioner/issues/91 RANGE_FILTER_DEFAULT_MINIMUM: float = -np.inf RANGE_FILTER_DEFAULT_MAXIMUM: float = np.inf """ How to Create Your Own Custom Filter: Need more advanced filtering than what we provide out of the box? No problem. Create your own custom filter by inheriting from the FilterPlugin class. For this example, let's do something complex. Say you only want to examine captures that have more than 5 samples with a hyperbolic tangent greater than some threshold. 
That means our custom filter's `apply` function should return True if and only if the signal has more than 5 samples greater than the threshold, after taking the hyperbolic tangent in `extract`. """ def apply_feature_filters(capture: CaptureOrTimeSeries, filters: List[FilterPlugin]) -> bool: """ Check whether an array of current values (i.e. a single nanopore capture) passes a set of filters. Filters can be based on summary statistics (e.g., mean) and/or a range of allowed values. Notes on filter behavior: If the filters list is empty, there are no filters and the capture passes. Parameters ---------- capture : CaptureOrTimeSeries | NumpyArrayLike Capture containing time series of nanopore current values for a single capture, or the signal itself. filters : List[FilterPlugin] List of FilterPlugin instances. Write your own filter by subclassing FilterPlugin. Returns ------- boolean True if capture passes all filters; False otherwise. """ if filters is None: filters = [] # TODO: Parallelize? https://github.com/uwmisl/poretitioner/issues/67 filtered = [filter_out(capture) for filter_out in filters] print(filtered) # Did this signal pass all filters? all_passed = all(filtered) return all_passed def check_capture_ejection_by_read(f5, read_id): """Checks whether the current capture was in the pore until the voltage was reversed. Parameters ---------- f5 : h5py.File object (open for reading or more) Capture fast5 file read_id : TODO Returns ------- boolean True if the end of the capture coincides with the end of a voltage window. """ try: ejected = f5.get(f"/read_{read_id}/Signal").attrs["ejected"] except AttributeError: raise ValueError(f"path /read_{read_id} does not exist in the fast5 file.") return ejected def check_capture_ejection(end_capture, voltage_ends, tol_obs=20): """Checks whether the current capture was in the pore until the voltage was reversed. 
Essentially checks whether a value (end_capture) is close enough (within a margin of tol_obs) to any value in voltage_ends. Parameters ---------- end_capture : numeric The end time of the capture. voltage_ends : list of numeric List of times when the standard voltage ends. tol_obs : int, optional Tolerance for defining when the end of the capture = voltage end, by default 20 Returns ------- boolean True if the end of the capture coincides with the end of a voltage window. """ for voltage_end in voltage_ends: if np.abs(end_capture - voltage_end) < tol_obs: return True return False __DEFAULT_FILTER_PLUGINS = [ MeanFilter, StandardDeviationFilter, MedianFilter, MinimumFilter, MaximumFilter, LengthFilter, ] DEFAULT_FILTER_PLUGINS = { filter_plugin_class.name(): filter_plugin_class for filter_plugin_class in __DEFAULT_FILTER_PLUGINS } import json # class Filters(HDF5_GroupSerialableDataclass): # filters: Filters = Dict[FilterName, Filter] def get_filters(filter_configs: Optional[FilterConfigs] = None) -> Filters: """Creates Filters from a list of filter configurations. Parameters ---------- filter_configs : Optional[FilterConfigs] A mapping of filter names to their configurations, None by default (i.e. no filtering). Returns ------- Filters A set of callable/applyable filters. """ filter_configs = filter_configs if filter_configs is not None else FilterConfigs({}) my_filters = { name: filter_from_config(name, filter_config) for name, filter_config in filter_configs.items() } return my_filters def does_pass_filters(capture: CaptureOrTimeSeries, filters: Iterable[Filter]) -> bool: """ Check whether an array of values (e.g. a single nanopore capture) passes a set of filters. Filters can be based on summary statistics (e.g., mean) and/or a range of allowed values. Parameters ---------- capture : CaptureOrTimeSeries | NumpyArrayLike Capture containing time series of nanopore current values for a single capture, or the signal itself. 
filters : Iterable[Filter] The set of filters to apply. Write your own filter by subclassing FilterPlugin. Returns ------- boolean True if capture passes all filters; False otherwise. """ all_passed = True for some_filter in filters: if not some_filter(capture): return False return all_passed class HDF5_FilterSet(FilterSet, HDF5_GroupSerialableDataclass): ############################ # # HDF5_GroupSerializable # ############################ # @classmethod # def from_group( # cls, group: HDF5_Group, log: Optional[Logger] = None # ) -> HDF5_GroupSerializable: # raise NotImplementedError( # f"from_group not implemented for {cls.__name__}. Make sure you write a method that returns a serialzied version of this object." # ) def filter_from_config(name: str, config: FilterConfig, log: Logger = getLogger()) -> Filter: """Creates a Filter from a config spefication. If no "filename" is present in the FilterConfig, it's assumed to be one of the default filtesr Parameters ---------- name : str The unique name of a filter. config : FilterConfig Filter configuration to build the plugin. log : Logger, optional Logger to use for information/warnings/debug, by default getLogger() Returns ------- Filter A filter that can be applied to some data. Raises ------ AttributeError A filter plugin could not be built from the configuration description. If this error is raised, be sure to check 1) A plugin class with the name in the configuration is defined at the filepath described in the configuration 2) The plugin class inherits from the `FilterPlugin` abstract base class. """ filepath = config.get("filepath", None) # TODO: For non-default FilterPlugins, load/unpickle the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91 plugin = None if name in DEFAULT_FILTER_PLUGINS: plugin = DEFAULT_FILTER_PLUGINS[name]() else: # TODO: For non-default FilterPlugins, load the class from the filepath. 
https://github.com/uwmisl/poretitioner/issues/91 plugin = plugin_from_file(name, filepath) pass # Make sure any plugin attributes defined in the config are moved over to the plugin instance. try: # Here, we take care of setting whatever attributes the plugin config defines on the new plugin instance. for key, value in config.items(): object.__setattr__(plugin, key, value) except AttributeError as e: log.warning( """ Uh oh, couldn't find plugin '{name}'. Are you sure: 1) A plugin class with the name '{name}' is defined in the file {filepath}? 2) That plugin class inherits from `FilterPlugin`? """ ) raise e my_filter = Filter(config, plugin) return my_filter def plugin_from_file(name: str, filepath: PathLikeOrString): """[summary] Parameters ---------- name : str [description] filepath : PathLikeOrString [description] Returns ------- [type] [description] Raises ------ NotImplementedError [description] """ # TODO: For non-default FilterPlugins, load/unpickle the class from the filepath. https://github.com/uwmisl/poretitioner/issues/91 raise NotImplementedError( "Plugin from file has not been implemented! This method should take in a filepath and filter name, and return a runnable FilterPlugin!" )
[ 37811, 198, 2559, 28, 198, 10379, 20212, 13, 9078, 198, 2559, 28, 198, 198, 1212, 8265, 3769, 517, 19468, 934, 25431, 329, 23007, 13, 198, 1639, 460, 24184, 534, 898, 16628, 1165, 13, 198, 198, 37811, 198, 6738, 11593, 37443, 834, 133...
2.881549
3,512
# The show_mammal_info function accepts an object # as an argument, and calls its show_species # and make_sound methods. # Call the main function. main()
[ 198, 2, 383, 905, 62, 76, 6475, 282, 62, 10951, 2163, 18178, 281, 2134, 198, 2, 355, 281, 4578, 11, 290, 3848, 663, 905, 62, 35448, 198, 2, 290, 787, 62, 23661, 5050, 13, 198, 198, 2, 4889, 262, 1388, 2163, 13, 198, 12417, 3419,...
3.391304
46
""" 4 Um grande cliente seu sofreu um ataque hacker: o servidor foi sequestrado por um software malicioso, que criptografou todos os discos e pede a digitao de uma senha para a liberao da mquina. E claro que os criminosos exigem um pagamento para informar a senha. Ao analisar o cdigo do programa deles, porm, voc descobre que a senha composta da palavra LIBERDADE seguida do fatorial dos minutos que a mquina estiver marcando no momento da digitao da senha (se a mquina estiver marcando 5 minutos, a senha ser LIBERDADE120). Crie um programa que receba do usurio os minutos atuais e exiba na tela a senha necessria para desbloqueio. ATENO: seu programa no pode utilizar funes prontas para o clculo do fatorial. Ele deve obrigatoriamente utilizar loop. """ print('\nPrograma para gerar de desbloqueio do servidor do ataque Hacker!!!\n') print('Descobrimos que a senha a palavra LIBERDADE + o calculo de fatorial dos minutos no seu computador.\n') minuto = input('Digite os minutos que aparecem neste computador: ') minuto = int(minuto) fatorial = 1 for i in range (minuto, 0, -1): fatorial *= i print(f'\nA senha que voc precisa digitar LIBERDADE{fatorial} para desbloquear o servidor.\nAteno!!!: voc tem 60 segundos validos at que a senha mude novamente!!!\n')
[ 37811, 198, 19, 220, 21039, 4490, 68, 5456, 68, 384, 84, 523, 19503, 84, 23781, 379, 18251, 23385, 25, 267, 1113, 312, 273, 11511, 72, 46314, 81, 4533, 16964, 23781, 3788, 6428, 291, 4267, 78, 11, 8358, 269, 1968, 519, 32188, 280, 2...
2.75162
463
# -*- coding: iso-8859-1 -*- # Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs # Copyright (C) 2012-2014 Bastian Kleineidam from .util import fetchUrl, getPageContent, getQueryParams def queryNamer(paramName, usePageUrl=False): """Get name from URL query part.""" return _namer def regexNamer(regex, usePageUrl=False): """Get name from regular expression.""" return _namer def bounceStarter(url, nextSearch): """Get start URL by "bouncing" back and forth one time.""" return _starter def indirectStarter(url, latestSearch): """Get start URL by indirection.""" return _starter
[ 2, 532, 9, 12, 19617, 25, 47279, 12, 3459, 3270, 12, 16, 532, 9, 12, 198, 2, 15069, 357, 34, 8, 5472, 12, 14315, 833, 4103, 15300, 328, 9038, 290, 11232, 27593, 198, 2, 15069, 357, 34, 8, 2321, 12, 4967, 17520, 666, 15983, 500, ...
3.024038
208
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.data_decoders.tf_example_decoder.""" import os import numpy as np import six import tensorflow.compat.v1 as tf from object_detection.core import standard_fields as fields from object_detection.data_decoders import tf_example_decoder from object_detection.protos import input_reader_pb2 from object_detection.utils import dataset_util from object_detection.utils import test_case if __name__ == '__main__': tf.test.main()
[ 2, 15069, 2177, 383, 309, 22854, 37535, 46665, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, ...
3.748366
306
# -*- coding: utf-8 -*- """ Created on Tue Apr 21 08:09:31 2020 @author: Shivadhar SIngh """ def remove_substring_everywhere(string, substring): ''' Remove all occurrences of substring from string, and return the resulting string. Both arguments must be strings. ''' p = string.find(substring) if p == -1: return string i = p newstr = string[0:i] lsub = len(substring) # length of the substring while p < len(string) and string.find(substring) != -1: p = string.find(substring) if p==-1: return newstr+string[i+lsub:] newstr += string[p + lsub : p] return newstr
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 41972, 319, 30030, 2758, 2310, 8487, 25, 2931, 25, 3132, 12131, 198, 198, 31, 9800, 25, 43305, 324, 9869, 311, 818, 456, 198, 37811, 198, 198, 4299, 4781, ...
2.366906
278
""" .. _GenerateSpectrum-at-api: **GenerateSpectrum_AT** --- Generates synthetic test spectra. ------------------------------------------------------------- This module defines the GenerateSpectrum_AT class. """ from admit.AT import AT import admit.util.bdp_types as bt from admit.bdp.CubeSpectrum_BDP import CubeSpectrum_BDP import admit.util.filter.Filter1D as Filter1D import admit.util.Table as Table import admit.util.utils as utils from admit.util import APlot import admit.util.Image as Image from admit.util import SpectralLineSearch from admit.Summary import SummaryEntry import os import numpy as np from copy import deepcopy # @todo this could go as a very generic routine in utils # def getspec(file, xcol=0, ycol=1): """ read a spectrum/table from column 1,2 returns: (freq,spec) """ lines = open(file).readlines() x = [] y = [] mincol = max(xcol,ycol) + 1 for line in lines: if line[0] == '#': continue w = line.split() if len(w) < mincol: continue x.append(float(w[xcol])) y.append(float(w[ycol])) return (np.array(x),np.array(y))
[ 37811, 11485, 4808, 8645, 378, 49738, 6582, 12, 265, 12, 15042, 25, 628, 220, 220, 12429, 8645, 378, 49738, 6582, 62, 1404, 1174, 11420, 2980, 689, 18512, 1332, 5444, 430, 13, 198, 220, 220, 20368, 1783, 32501, 628, 220, 220, 770, 826...
2.417154
513
#!/usr/bin/env python # -*- mode: python; encoding: utf-8 -*- """Tests for Interrogate.""" import socket from grr.client import vfs from grr.lib import action_mocks from grr.lib import aff4 from grr.lib import artifact_test from grr.lib import client_index from grr.lib import config_lib from grr.lib import flags from grr.lib import flow from grr.lib import rdfvalue from grr.lib import search from grr.lib import test_lib def main(argv): # Run the full test suite test_lib.GrrTestProgram(argv=argv) if __name__ == "__main__": flags.StartMain(main)
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 4235, 25, 21015, 26, 21004, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 37811, 51, 3558, 329, 4225, 3828, 378, 526, 15931, 198, 198, 11748, 17802, 198, 198, 6738, ...
2.80597
201
import sys from PySide2.QtWidgets import QGraphicsView, QGraphicsScene, QApplication from PySide2.QtCore import * from PySide2.QtGui import * if __name__ == "__main__": app = QApplication(sys.argv) appView = GraphicsView() appView.scene().addSimpleText('liff.engineer@gmail.com') appView.scene().addRect(-200, -150, 400, 300) appView.show() sys.exit(app.exec_())
[ 11748, 25064, 198, 6738, 9485, 24819, 17, 13, 48, 83, 54, 312, 11407, 1330, 1195, 18172, 7680, 11, 1195, 18172, 36542, 11, 1195, 23416, 198, 6738, 9485, 24819, 17, 13, 48, 83, 14055, 1330, 1635, 198, 6738, 9485, 24819, 17, 13, 48, 8...
2.582781
151
from armstrong.dev.tests.utils import ArmstrongTestCase import random
[ 6738, 3211, 11576, 13, 7959, 13, 41989, 13, 26791, 1330, 21166, 14402, 20448, 198, 11748, 4738, 628, 628 ]
4.055556
18
import unittest from logics.classes.propositional import Inference, Formula from logics.classes.propositional.proof_theories import NaturalDeductionStep, NaturalDeductionRule from logics.utils.parsers import classical_parser from logics.instances.propositional.natural_deduction import classical_natural_deduction_system as nd_system if __name__ == '__main__': unittest.main()
[ 11748, 555, 715, 395, 198, 198, 6738, 2604, 873, 13, 37724, 13, 1676, 1930, 1859, 1330, 554, 4288, 11, 19639, 198, 6738, 2604, 873, 13, 37724, 13, 1676, 1930, 1859, 13, 13288, 62, 1169, 1749, 1330, 12068, 35, 276, 8110, 8600, 11, 12...
3.347826
115
#!/usr/bin/python # # Copyright (c) 2012 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # """ Responsible for generating the decoder based on parsed table representations. """ import dgen_opt import dgen_output import dgen_actuals # This file generates the class decoder Decoder as defined by the # decoder tables. The code is specifically written to minimize the # number of decoder classes needed to parse valid ARM # instructions. Many rows in the table use the same decoder class. In # addition, we optimize tables by merging, so long as the same decoder # class is built. # # The following files are generated: # # decoder.h # decoder.cc # # decoder.h declares the generated decoder parser class while # decoder.cc contains the implementation of that decoder class. # # For testing purposes (see dgen_test_output.py) different rules are # applied. Note: It may be worth reading dgen_test_output.py preamble # to get a better understanding of decoder actions, and why we need # the "action_filter" methods. """The current command line arguments to use""" _cl_args = {} NEWLINE_STR=""" """ COMMENTED_NEWLINE_STR=""" //""" # Defines the header for decoder.h H_HEADER="""%(FILE_HEADER)s #ifndef %(IFDEF_NAME)s #define %(IFDEF_NAME)s #include "native_client/src/trusted/validator_arm/decode.h" #include "%(FILENAME_BASE)s_actuals.h" namespace nacl_arm_dec { """ DECODER_DECLARE_HEADER=""" // Defines a decoder class selector for instructions. class %(decoder_name)s : DecoderState { public: explicit %(decoder_name)s(); // Parses the given instruction, returning the decoder to use. virtual const ClassDecoder& decode(const Instruction) const; // Returns the class decoder to use to process the fictitious instruction // that is inserted before the first instruction in the code block by // the validator. 
const ClassDecoder &fictitious_decoder() const { return %(fictitious_decoder)s_instance_; } private: """ DECODER_DECLARE_METHOD_COMMENTS=""" // The following list of methods correspond to each decoder table, // and implements the pattern matching of the corresponding bit // patterns. After matching the corresponding bit patterns, they // either call other methods in this list (corresponding to another // decoder table), or they return the instance field that implements // the class decoder that should be used to decode the particular // instruction. """ DECODER_DECLARE_METHOD=""" inline const ClassDecoder& decode_%(table_name)s( const Instruction inst) const; """ DECODER_DECLARE_FIELD_COMMENTS=""" // The following fields define the set of class decoders // that can be returned by the API function "decode". They // are created once as instance fields, and then returned // by the table methods above. This speeds up the code since // the class decoders need to only be built once (and reused // for each call to "decode").""" DECODER_DECLARE_FIELD=""" const %(decoder)s %(decoder)s_instance_;""" DECODER_DECLARE_FOOTER=""" }; """ H_FOOTER=""" } // namespace nacl_arm_dec #endif // %(IFDEF_NAME)s """ def generate_h(decoder, decoder_name, filename, out, cl_args): """Entry point to the decoder for .h file. Args: decoder: The decoder defined by the list of Table objects to process. decoder_name: The name of the decoder state to build. filename: The (localized) name for the .h file. named_decoders: If true, generate a decoder state with named instances. out: a COutput object to write to. cl_args: A dictionary of additional command line arguments. """ global _cl_args assert filename.endswith('.h') _cl_args = cl_args # Before starting, remove all testing information from the parsed tables. 
decoder = decoder.action_filter(['actual']) values = { 'FILE_HEADER': dgen_output.HEADER_BOILERPLATE, 'IFDEF_NAME': dgen_output.ifdef_name(filename), 'FILENAME_BASE': filename[:-len('.h')], 'decoder_name': decoder_name, } out.write(H_HEADER % values) values['fictitious_decoder'] = ( decoder.get_value('FictitiousFirst').actual()) out.write(DECODER_DECLARE_HEADER % values) out.write(DECODER_DECLARE_METHOD_COMMENTS) for table in decoder.tables(): values['table_name'] = table.name out.write(DECODER_DECLARE_METHOD % values) out.write(DECODER_DECLARE_FIELD_COMMENTS) for action in decoder.action_filter(['actual']).decoders(): values['decoder'] = action.actual() out.write(DECODER_DECLARE_FIELD % values) out.write(DECODER_DECLARE_FOOTER % values) out.write(H_FOOTER % values) # Defines the header for DECODER.h CC_HEADER="""%(FILE_HEADER)s #include "%(header_filename)s" namespace nacl_arm_dec { """ CONSTRUCTOR_HEADER=""" %(decoder_name)s::%(decoder_name)s() : DecoderState()""" CONSTRUCTOR_FIELD_INIT=""" , %(decoder)s_instance_()""" CONSTRUCTOR_FOOTER=""" {} """ METHOD_HEADER=""" // Implementation of table: %(table_name)s. // Specified by: %(citation)s const ClassDecoder& %(decoder_name)s::decode_%(table_name)s( const Instruction inst) const {""" METHOD_HEADER_TRACE=""" fprintf(stderr, "decode %(table_name)s\\n"); """ METHOD_DISPATCH_BEGIN=""" if (%s""" METHOD_DISPATCH_CONTINUE=""" && %s""" METHOD_DISPATCH_END=") {""" METHOD_DISPATCH_TRACE=""" fprintf(stderr, "count = %s\\n");""" METHOD_DISPATCH_CLASS_DECODER=""" return %(decoder)s_instance_;""" METHOD_DISPATCH_SUBMETHOD=""" return decode_%(subtable_name)s(inst);""" METHOD_DISPATCH_CLOSE=""" } """ METHOD_FOOTER=""" // Catch any attempt to fall though ... 
return %(not_implemented)s_instance_; } """ DECODER_METHOD_HEADER=""" const ClassDecoder& %(decoder_name)s::decode(const Instruction inst) const {""" DECODER_METHOD_TRACE=""" fprintf(stderr, "Parsing %%08x\\n", inst.Bits());""" DECODER_METHOD_FOOTER=""" return decode_%(entry_table_name)s(inst); } """ CC_FOOTER=""" } // namespace nacl_arm_dec """ def generate_cc(decoder, decoder_name, filename, out, cl_args): """Implementation of the decoder in .cc file Args: decoder: The decoder defined by the list of Table objects to process. decoder_name: The name of the decoder state to build. filename: The (localized) name for the .h file. named_decoders: If true, generate a decoder state with named instances. out: a COutput object to write to. cl_args: A dictionary of additional command line arguments. """ global _cl_args assert filename.endswith('.cc') _cl_args = cl_args # Before starting, remove all testing information from the parsed # tables. decoder = decoder.action_filter(['actual']) values = { 'FILE_HEADER': dgen_output.HEADER_BOILERPLATE, 'header_filename': filename[:-2] + 'h', 'decoder_name': decoder_name, 'entry_table_name': decoder.primary.name, } out.write(CC_HEADER % values) _generate_constructors(decoder, values, out) _generate_methods(decoder, values, out) out.write(DECODER_METHOD_HEADER % values) if _cl_args.get('trace') == 'True': out.write(DECODER_METHOD_TRACE % values) out.write(DECODER_METHOD_FOOTER % values) out.write(CC_FOOTER % values)
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 198, 2, 15069, 357, 66, 8, 2321, 383, 12547, 20985, 46665, 13, 1439, 2489, 10395, 13, 198, 2, 5765, 286, 428, 2723, 2438, 318, 21825, 416, 257, 347, 10305, 12, 7635, 5964, 326, 460, 30...
2.715996
2,757
from __future__ import absolute_import from __future__ import unicode_literals from compose import utils
[ 6738, 11593, 37443, 834, 1330, 4112, 62, 11748, 198, 6738, 11593, 37443, 834, 1330, 28000, 1098, 62, 17201, 874, 198, 198, 6738, 36664, 1330, 3384, 4487, 628, 628, 628 ]
3.827586
29
# Copyright 2012 Anton Beloglazov # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mocktest import * from pyqcy import * import datetime import neat.db_utils as db_utils import logging logging.disable(logging.CRITICAL) def test_insert_host_overload(self): db = db_utils.init_db('sqlite:///:memory:') hosts = {} hosts['host1'] = db.update_host('host1', 1, 1, 1) hosts['host2'] = db.update_host('host2', 1, 1, 1) db.insert_host_overload('host2', False) db.insert_host_overload('host1', True) db.insert_host_overload('host1', False) db.insert_host_overload('host2', True) result = db.host_overload.select().execute().fetchall() host1 = [x[3] for x in sorted(filter( lambda x: x[1] == hosts['host1'], result), key=lambda x: x[0])] self.assertEqual(host1, [1, 0]) host2 = [x[3] for x in sorted(filter( lambda x: x[1] == hosts['host2'], result), key=lambda x: x[0])] self.assertEqual(host2, [0, 1])
[ 2, 15069, 2321, 9261, 3944, 28678, 1031, 709, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, ...
2.459752
646
import os from bids_validator import BIDSValidator
[ 11748, 28686, 198, 6738, 27837, 62, 12102, 1352, 1330, 347, 14255, 47139, 1352, 628 ]
3.714286
14
from math import cos, sin, degrees, radians, pi from time import time from euclid import Vector2, Point2 from numpy import array as np_array from numpy.linalg import solve as np_solve __author__ = 'tom' def rotate_point(point, angle, origin=None): """ Rotate a Point2 around another Point2 :param euclid.Point2 point: The point to rotate :param float angle: Angle in radians, clockwise rotation :param euclid.Point2 origin: Origin of the rotation, defaults to (0,0) if not specified :return: A new :class:`euclid.Point2` containing the rotated input point """ if origin is None: origin = Point2(0, 0) s = sin(-angle) c = cos(-angle) return Point2(c * (point.x - origin.x) - s * (point.y - origin.y) + origin.x, s * (point.x - origin.x) + c * (point.y - origin.y) + origin.y) def rotate_vector(vector, angle, origin=None): """ Rotate a :class:`euclid.Vector2` around a :class:`euclid.Point2` :param euclid.Vector2 vector: The vector to rotate :param float angle: Angle in radians, clockwise rotation :param euclid.Point2 origin: Origin of the rotation, defaults to (0,0) if not specified :return: A new :class:`euclid.Point2` containing the rotated input point """ if origin is None: origin = Point2(0, 0) s = sin(-angle) c = cos(-angle) return Vector2(c * (vector.x - origin.x) - s * (vector.y - origin.y) + origin.x, s * (vector.x - origin.x) + c * (vector.y - origin.y) + origin.y) def smallest_difference(a, b, max_value=2 * pi): """ Given two floats, a and b, and a maximum possible value for both a and b, calculate the smallest delta from a to b. For example, if a=1.0, b=2.5 and max_value=2.6, this should return -1.1, as subtracting 1.1 from a would result in -0.1, which will then be transformed to 2.5 after taking its modulus with 2.6. If max_value was 10, it would return +1.5, as this is the lower magnitude delta needed to go from 1.0 to 2.5. 
This function is used when calculating the shortest delta between two pose orientations, for this reason the max_value defaults to 2*pi for use when working in radians. If either a or b are less than zero or greater than the maximum value they will be treated as a % max_value or b % max_value respectively for the purposes of this calculation. :param float a: First value (see above) :param b: Second value (see above) :param max_value: Modulus, defaults to 2*pi if not specified :return: A value d such that (a + d) % max_value == b, and abs(d) is minimal (as there would be an infinite number of possible d that satisfy this relationship). """ mod_a = a % max_value mod_b = b % max_value if abs(mod_a - mod_b) <= max_value / 2: return mod_b - mod_a elif mod_a >= mod_b: return mod_b + (max_value - mod_a) else: return -(mod_a + (max_value - mod_b)) def get_regular_triangular_chassis(wheel_distance, wheel_radius, max_rotations_per_second): """ Build a HoloChassis object with three wheels, each identical in size and maximum speed. Each wheel is positioned at the corner of a regular triangle, and with direction perpendicular to the normal vector at that corner. :param wheel_distance: Distance in millimetres between the contact points of each pair of wheels (i.e. 
the length of each edge of the regular triangle) :param wheel_radius: Wheel radius in millimetres :param max_rotations_per_second: Maximum wheel speed in revolutions per second :return: An appropriately configured HoloChassis """ point = Point2(0, cos(radians(30)) * wheel_distance / 2.0) vector = Vector2(-2 * pi * wheel_radius, 0) # Pink wheel_a = HoloChassis.OmniWheel( position=point, vector=vector, max_speed=max_rotations_per_second) # Yellow wheel_b = HoloChassis.OmniWheel( position=rotate_point(point, pi * 2 / 3), vector=rotate_vector(vector, pi * 2 / 3), max_speed=max_rotations_per_second) # Green wheel_c = HoloChassis.OmniWheel( position=rotate_point(point, pi * 4 / 3), vector=rotate_vector(vector, pi * 4 / 3), max_speed=max_rotations_per_second) return HoloChassis(wheels=[wheel_a, wheel_b, wheel_c])
[ 6738, 10688, 1330, 8615, 11, 7813, 11, 7370, 11, 2511, 1547, 11, 31028, 198, 6738, 640, 1330, 640, 198, 198, 6738, 304, 36616, 312, 1330, 20650, 17, 11, 6252, 17, 198, 6738, 299, 32152, 1330, 7177, 355, 45941, 62, 18747, 198, 6738, ...
2.596187
1,731
# coding: utf-8 """ Copyright 2016 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ref: https://github.com/swagger-api/swagger-codegen """ from pprint import pformat from six import iteritems import re import json from ..utils import sanitize_for_serialization def to_json(self): """ Returns the model as raw JSON """ return json.dumps(sanitize_for_serialization(self.to_dict())) def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
[ 2, 19617, 25, 3384, 69, 12, 23, 198, 198, 37811, 198, 15269, 1584, 10880, 36352, 10442, 628, 220, 220, 220, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 220, 220, 220, 345, 743, 407, 779,...
2.710145
552
COLOR_BLUE = '\033[0;34m' COLOR_GREEN = '\033[0;32m' COLOR_CYAN = '\033[0;36m' COLOR_RED = '\033[0;31m' COLOR_PURPLE = '\033[0;35m' COLOR_BROWN = '\033[0;33m' COLOR_YELLOW = '\033[1;33m' COLOR_GRAY = '\033[1;30m' COLOR_RESET = '\033[0m' FG_COLORS = [ # COLOR_BLUE, COLOR_GREEN, # COLOR_CYAN, # COLOR_RED, # COLOR_PURPLE, # COLOR_BROWN, # COLOR_YELLOW, ]
[ 46786, 62, 9148, 8924, 796, 705, 59, 44427, 58, 15, 26, 2682, 76, 6, 198, 46786, 62, 43016, 796, 705, 59, 44427, 58, 15, 26, 2624, 76, 6, 198, 46786, 62, 34, 56, 1565, 796, 705, 59, 44427, 58, 15, 26, 2623, 76, 6, 198, 46786, ...
1.735426
223
''' 8-5. Cities: Write a function called describe_city() that accepts the name of a city and its country. The function should print a simple sentence, such as Reykjavik is in Iceland. Give the parameter for the country a default value. Call your function for three different cities, at least one of which is not in the default country. ''' describe_city('Garabr','country') ''' rborg Akureyri '''
[ 7061, 6, 198, 23, 12, 20, 13, 20830, 25, 19430, 257, 2163, 1444, 6901, 62, 19205, 3419, 326, 18178, 262, 1438, 286, 257, 1748, 290, 663, 1499, 13, 220, 198, 464, 2163, 815, 3601, 257, 2829, 6827, 11, 884, 355, 24448, 42421, 615, 1...
3.575221
113
from nexus_constructor.geometry import OFFGeometryNoNexus from nexus_constructor.geometry.geometry_loader import load_geometry_from_file_object from nexus_constructor.off_renderer import repeat_shape_over_positions from PySide2.QtGui import QVector3D from io import StringIO
[ 6738, 45770, 62, 41571, 273, 13, 469, 15748, 1330, 18562, 10082, 15748, 2949, 45, 1069, 385, 198, 6738, 45770, 62, 41571, 273, 13, 469, 15748, 13, 469, 15748, 62, 29356, 1330, 3440, 62, 469, 15748, 62, 6738, 62, 7753, 62, 15252, 198, ...
3.25
88
# -*- coding: utf-8 -*- ################################################################################ ## Form generated from reading UI file 'aboutdialog.ui' ## ## Created by: Qt User Interface Compiler version 6.1.1 ## ## WARNING! All changes made in this file will be lost when recompiling UI file! ################################################################################ from PySide6.QtCore import * # type: ignore from PySide6.QtGui import * # type: ignore from PySide6.QtWidgets import * # type: ignore
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 29113, 29113, 14468, 198, 2235, 5178, 7560, 422, 3555, 12454, 2393, 705, 10755, 38969, 519, 13, 9019, 6, 198, 2235, 198, 2235, 15622, 416, 25, 33734, 11787, 26491, ...
4.046154
130
import asyncio from contextlib import suppress from unittest import mock import pytest from aiohttp.base_protocol import BaseProtocol
[ 11748, 30351, 952, 198, 6738, 4732, 8019, 1330, 18175, 198, 6738, 555, 715, 395, 1330, 15290, 198, 198, 11748, 12972, 9288, 198, 198, 6738, 257, 952, 4023, 13, 8692, 62, 11235, 4668, 1330, 7308, 19703, 4668, 628, 628, 628, 628, 628, 6...
3.465116
43
import PySimpleGUI as sg import os import time import pyautogui tela = TelaPython() tela.Iniciar()
[ 11748, 9485, 26437, 40156, 355, 264, 70, 198, 11748, 28686, 198, 11748, 640, 198, 11748, 12972, 2306, 519, 9019, 628, 198, 198, 83, 10304, 796, 309, 10304, 37906, 3419, 198, 83, 10304, 13, 818, 291, 12571, 3419 ]
2.72973
37
# Helper code to plot binary losses. # # Eli Bendersky (http://eli.thegreenplace.net) # This code is in the public domain from __future__ import print_function import matplotlib.pyplot as plt import numpy as np if __name__ == '__main__': fig, ax = plt.subplots() fig.set_tight_layout(True) xs = np.linspace(-2, 2, 500) # plot L0/1 loss ax.plot(xs, np.where(xs < 0, np.ones_like(xs), np.zeros_like(xs)), color='r', linewidth=2.0, label='$L_{01}$') # plot square loss ax.plot(xs, (xs - 1) ** 2, linestyle='-.', label='$L_2$') # plot hinge loss ax.plot(xs, np.maximum(np.zeros_like(xs), 1 - xs), color='g', linewidth=2.0, label='$L_h$') ax.grid(True) plt.ylim((-1, 4)) ax.legend() fig.savefig('loss.png', dpi=80) plt.show()
[ 2, 5053, 525, 2438, 284, 7110, 13934, 9089, 13, 198, 2, 198, 2, 25204, 347, 7338, 2584, 357, 4023, 1378, 43733, 13, 1169, 14809, 5372, 13, 3262, 8, 198, 2, 770, 2438, 318, 287, 262, 1171, 7386, 198, 6738, 11593, 37443, 834, 1330, ...
2.183288
371
#!/usr/bin/env python3 ################################ # Development tool # Auto-compiles style.less to style.css # # Requires lessc and less clean css to be installed: # npm install -g less # npm install -g less-plugin-clean-css ################################ import os, time from os import path from math import floor from _helper import * # Main application # Run application if __name__ == "__main__": try: app = Main() except KeyboardInterrupt: print("Exiting")
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 29113, 198, 2, 7712, 2891, 198, 2, 11160, 12, 5589, 2915, 3918, 13, 1203, 284, 3918, 13, 25471, 198, 2, 198, 2, 26848, 1342, 66, 290, 1342, 3424, 269, 824, 284, 307, 6589, 25, ...
3.370629
143
# -- encoding: UTF-8 -- import json import uuid from admin_export_action import report from admin_export_action.admin import export_selected_objects from admin_export_action.config import default_config, get_config from django.contrib.admin.sites import AdminSite from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType from django.test import TestCase, RequestFactory from django.urls import reverse from django.utils.http import urlencode from news.models import Attachment, Category, News, NewsTag, Video from news.admin import NewsAdmin
[ 2, 1377, 21004, 25, 41002, 12, 23, 1377, 198, 11748, 33918, 198, 11748, 334, 27112, 198, 198, 6738, 13169, 62, 39344, 62, 2673, 1330, 989, 198, 6738, 13169, 62, 39344, 62, 2673, 13, 28482, 1330, 10784, 62, 34213, 62, 48205, 198, 6738,...
3.689441
161
from pytest_djangoapp import configure_djangoapp_plugin pytest_plugins = configure_djangoapp_plugin( extend_INSTALLED_APPS=[ 'django.contrib.sessions', 'django.contrib.messages', ], extend_MIDDLEWARE=[ 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ] )
[ 6738, 12972, 9288, 62, 28241, 14208, 1324, 1330, 17425, 62, 28241, 14208, 1324, 62, 33803, 198, 198, 9078, 9288, 62, 37390, 796, 17425, 62, 28241, 14208, 1324, 62, 33803, 7, 198, 220, 220, 220, 9117, 62, 38604, 7036, 1961, 62, 2969, 3...
2.383117
154
import argparse import numpy as np from sklearn.model_selection import StratifiedKFold import sklearn import cv2 import datetime import mxnet as mx from mxnet import ndarray as nd import pandas as pd from numpy import linalg as line import logging logging.basicConfig( format="%(asctime)s %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p", level=logging.INFO ) if __name__ == "__main__": parser = argparse.ArgumentParser(description="Face Verification for RFW") parser.add_argument( "--data_dir", type=str, default="RFW/test/aligned_data", help="dataset root" ) parser.add_argument( "--pair_file", type=str, default="./AttributePairs/eye_narrow_pairs_6000_selected.csv", help="pair file to test", ) parser.add_argument( "--model_dir", type=str, default="/model/", help="pre-trained model directory" ) parser.add_argument("--batch_size", type=int, default="32", help="batch_size") args = parser.parse_args() validation = FaceVerification( batch_size=args.batch_size, model=None, data_dir=args.data_dir ) validation.load_model(model_dir=args.model_dir) _, _, _shape = validation.load_images(args.pair_file) tpr, fpr, acc, std = validation.verify() logging.info( "Testing Accuracy {} for {} in shape {}".format(acc, args.pair_file, _shape[0]) )
[ 11748, 1822, 29572, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 1341, 35720, 13, 19849, 62, 49283, 1330, 29186, 1431, 42, 37, 727, 198, 11748, 1341, 35720, 198, 11748, 269, 85, 17, 198, 11748, 4818, 8079, 198, 11748, 285, 87, 3262, ...
2.510949
548
#!/usr/bin/python -t # this script was written to use /etc/nixos/nixpkgs/pkgs/development/python-modules/generic/wrap.sh # which already automates python executable wrapping by extending the PATH/pythonPath # from http://docs.python.org/library/subprocess.html # Warning Invoking the system shell with shell=True can be a security hazard if combined with untrusted input. See the warning under Frequently Used Arguments for details. from subprocess import Popen, PIPE, STDOUT cmd = 'PYTHON_EXECUTABLE_PATH -t THE_CUSTOM_PATH/share/virt-manager/THE_CUSTOM_PROGRAM.py' p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True) output = p.stdout.read() print output
[ 2, 48443, 14629, 14, 8800, 14, 29412, 532, 83, 198, 2, 428, 4226, 373, 3194, 284, 779, 1220, 14784, 14, 77, 844, 418, 14, 77, 844, 35339, 82, 14, 35339, 82, 14, 31267, 14, 29412, 12, 18170, 14, 41357, 14, 37150, 13, 1477, 198, 2...
3.057522
226
import base64 import datetime from abc import ABC, abstractmethod from .conditions import AnyValue from .errors import FieldError, FormError __all__ = [ 'Field', 'StringField', 'IntegerField', 'FloatField', 'BooleanField', 'DateTimeField', 'DateField', 'TimeField', 'ListField','SetField', 'EnumField', 'BytesField' ]
[ 11748, 2779, 2414, 198, 11748, 4818, 8079, 198, 6738, 450, 66, 1330, 9738, 11, 12531, 24396, 198, 198, 6738, 764, 17561, 1756, 1330, 4377, 11395, 198, 6738, 764, 48277, 1330, 7663, 12331, 11, 5178, 12331, 198, 198, 834, 439, 834, 796, ...
3.157407
108
from __future__ import annotations from contextlib import contextmanager from contextvars import ContextVar from typing import Optional, Tuple from magicgui.widgets import FunctionGui from pydantic import BaseModel # layer source context management _LAYER_SOURCE: ContextVar[dict] = ContextVar('_LAYER_SOURCE', default={}) def current_source(): """Get the current layer :class:`Source` (inferred from context). The main place this function is used is in :meth:`Layer.__init__`. """ return Source(**_LAYER_SOURCE.get())
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 4732, 8019, 1330, 4732, 37153, 198, 6738, 4732, 85, 945, 1330, 30532, 19852, 198, 6738, 19720, 1330, 32233, 11, 309, 29291, 198, 198, 6738, 5536, 48317, 13, 28029, 11407, 1330, 15553, ...
3.364198
162
import pytest import src.constants as cnst from src.directions import BaseDirection
[ 11748, 12972, 9288, 198, 198, 11748, 12351, 13, 9979, 1187, 355, 269, 77, 301, 198, 6738, 12351, 13, 12942, 507, 1330, 7308, 35, 4154, 628, 628, 198 ]
3.296296
27
import numpy as np import cv2 #define a canvas of size 300x300 px, with 3 channels (R,G,B) and data type as 8 bit unsigned integer canvas = np.zeros((300,300,3), dtype ="uint8") #define color #draw a circle #arguments are canvas/image, midpoint, radius, color, thickness(optional) #display in cv2 window green = (0,255,0) cv2.circle(canvas,(100,100), 10, green) cv2.imshow("Single circle", canvas) cv2.waitKey(0) # draw concentric white circles # calculate the center point of canvas # generate circles using for loop # clearning the canvas canvas = np.zeros((300,300,3), dtype ="uint8") white = (255,255,255) (centerX, centerY) = (canvas.shape[1]//2, canvas.shape[0]//2) for r in range(0,175,25): cv2.circle(canvas, (centerX,centerY), r, white) cv2.imshow("concentric circles", canvas) cv2.waitKey(0) # generate random radius, center point, color # draw circles in for loop canvas = np.zeros((300,300,3), dtype ="uint8") for i in range(0, 25): radius = np.random.randint(5, high = 200) color = np.random.randint(0, high = 256, size = (3,)).tolist() pt = np.random.randint(0, high = 300, size = (2,)) cv2.circle(canvas, tuple(pt), radius, color, -1) cv2.imshow("Canvas", canvas) cv2.waitKey(0)
[ 11748, 299, 32152, 355, 45941, 198, 11748, 269, 85, 17, 198, 198, 2, 13086, 257, 21978, 286, 2546, 5867, 87, 6200, 279, 87, 11, 351, 513, 9619, 357, 49, 11, 38, 11, 33, 8, 290, 1366, 2099, 355, 807, 1643, 22165, 18253, 198, 5171, ...
2.601279
469
# -*- coding: utf-8 -*- """ Main Script """ import logging import argh import sarge import tmuxp DEV_LOGGER = logging.getLogger(__name__) def get_current_session(server=None): ''' Seems to be no easy way to grab current attached session in tmuxp so this provides a simple alternative. ''' server = tmuxp.Server() if server is None else server session_name = sarge.get_stdout('tmux display-message -p "#S"').strip() session = server.findWhere({"session_name": session_name}) return session
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 37811, 198, 13383, 12327, 198, 37811, 198, 11748, 18931, 198, 198, 11748, 610, 456, 198, 11748, 264, 1376, 198, 11748, 256, 76, 2821, 79, 198, 198, 39345, 62, 25294, 303...
2.818182
187
"""Additional Django management commands added by nautobot_capacity_metrics plugin."""
[ 37811, 17699, 37770, 4542, 9729, 2087, 416, 299, 2306, 672, 313, 62, 42404, 62, 4164, 10466, 13877, 526, 15931, 198 ]
4.35
20
from .to_2darray import to_2darray
[ 6738, 764, 1462, 62, 17, 67, 18747, 1330, 284, 62, 17, 67, 18747, 198 ]
2.5
14
import io from PIL import Image as PILImage from sqlalchemy import Column, ForeignKey, LargeBinary, Index, Integer, String from resources.models.ModelBase import Base
[ 11748, 33245, 198, 6738, 350, 4146, 1330, 7412, 355, 350, 4146, 5159, 198, 198, 6738, 44161, 282, 26599, 1330, 29201, 11, 8708, 9218, 11, 13601, 33, 3219, 11, 12901, 11, 34142, 11, 10903, 198, 6738, 4133, 13, 27530, 13, 17633, 14881, ...
3.840909
44
import os import sys import argparse import json import random import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from arch2vec.models.pretraining_nasbench101 import configs from arch2vec.utils import load_json, preprocessing, one_hot_darts from arch2vec.preprocessing.gen_isomorphism_graphs import process from arch2vec.models.model import Model from torch.distributions import MultivariateNormal from arch2vec.darts.cnn.train_search import Train def select_action(state, policy): """ MVN based action selection. :param state: 1 x dim :param policy: policy network :return: selected action: 1 x dim """ mean = policy(state.view(1, state.shape[0])) mvn = MultivariateNormal(mean, torch.eye(state.shape[0]).cuda()) action = mvn.sample() policy.saved_log_probs.append(torch.mean(mvn.log_prob(action))) return action def finish_episode(policy, optimizer): R = 0 policy_loss = [] returns = [] for r in policy.rewards: R = r + args.gamma * R returns.append(R) returns = torch.Tensor(policy.rewards) val, indices = torch.sort(returns) print("sorted validation reward:", val) returns = returns - args.objective for log_prob, R in zip(policy.saved_log_probs, returns): policy_loss.append(-log_prob * R) optimizer.zero_grad() policy_loss = torch.mean(torch.stack(policy_loss, dim=0)) print("average reward: {}, policy loss: {}".format(sum(policy.rewards)/len(policy.rewards), policy_loss.item())) policy_loss.backward() optimizer.step() del policy.rewards[:] del policy.saved_log_probs[:] policy.hx = None policy.cx = None def query(counter, seed, genotype, epochs): trainer = Train() rewards, rewards_test = trainer.main(counter, seed, genotype, epochs=epochs, train_portion=args.train_portion, save=args.logging_path) val_sum = 0 for epoch, val_acc in rewards: val_sum += val_acc val_avg = val_sum / len(rewards) return val_avg / 100. , rewards_test[-1][-1] / 100. 
def reinforce_search(env): """ implementation of arch2vec-RL on DARTS Search Space """ policy = Policy_LSTM(args.dim, 128).cuda() optimizer = optim.Adam(policy.parameters(), lr=1e-2) counter = 0 MAX_BUDGET = args.max_budgets state, genotype = env.get_init_state() CURR_BEST_VALID = 0 CURR_BEST_TEST = 0 CURR_BEST_GENOTYPE = None test_trace = [] valid_trace = [] genotype_trace = [] counter_trace = [] while counter < MAX_BUDGET: for c in range(args.bs): state = state.cuda() action = select_action(state, policy) state, genotype = env.step(action) reward, reward_test = query(counter=counter, seed=args.seed, genotype=genotype, epochs=args.inner_epochs) policy.rewards.append(reward) counter += 1 print('counter: {}, validation reward: {}, test reward: {}, genotype: {}'.format(counter, reward, reward_test, genotype)) if reward > CURR_BEST_VALID: CURR_BEST_VALID = reward CURR_BEST_TEST = reward_test CURR_BEST_GENOTYPE = genotype valid_trace.append(float(CURR_BEST_VALID)) test_trace.append(float(CURR_BEST_TEST)) genotype_trace.append(CURR_BEST_GENOTYPE) counter_trace.append(counter) if counter >= MAX_BUDGET: break finish_episode(policy, optimizer) res = dict() res['validation_acc'] = valid_trace res['test_acc'] = test_trace res['genotype'] = genotype_trace res['counter'] = counter_trace save_path = os.path.join(args.output_path, 'dim{}'.format(args.dim)) if not os.path.exists(save_path): os.mkdir(save_path) print('save to {}'.format(save_path)) fh = open(os.path.join(save_path, 'run_{}_arch2vec_model_darts.json'.format(args.seed)), 'w') json.dump(res, fh) fh.close() if __name__ == '__main__': parser = argparse.ArgumentParser(description="arch2vec-REINFORCE") parser.add_argument("--gamma", type=float, default=0.8, help="discount factor (default 0.99)") parser.add_argument("--seed", type=int, default=3, help="random seed") parser.add_argument('--cfg', type=int, default=4, help='configuration (default: 4)') parser.add_argument('--bs', type=int, default=16, help='batch size') 
parser.add_argument('--objective', type=float, default=0.95, help='rl baseline') parser.add_argument('--max_budgets', type=int, default=100, help='number of queries') parser.add_argument('--inner_epochs', type=int, default=50, help='inner loop epochs') parser.add_argument('--train_portion', type=float, default=0.9, help='train/validation split portion') parser.add_argument('--output_path', type=str, default='rl', help='rl/bo (default: rl)') parser.add_argument('--logging_path', type=str, default='', help='search logging path') parser.add_argument('--saved_arch2vec', action="store_true", default=False) parser.add_argument('--input_dim', type=int, default=11) parser.add_argument('--hidden_dim', type=int, default=128) parser.add_argument('--dim', type=int, default=16, help='feature dimension (default: 16)') parser.add_argument('--hops', type=int, default=5) parser.add_argument('--mlps', type=int, default=2) parser.add_argument('--dropout', type=float, default=0.3) args = parser.parse_args() cfg = configs[args.cfg] env = Env('REINFORCE', args.seed, cfg, data_path='data/data_darts_counter600000.json', save=args.saved_arch2vec) torch.manual_seed(args.seed) reinforce_search(env)
[ 11748, 28686, 198, 11748, 25064, 198, 198, 11748, 1822, 29572, 198, 11748, 33918, 198, 11748, 4738, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 11748, 28034, 13, ...
2.472868
2,322
"""A setuptools based setup module. See: https://packaging.python.org/en/latest/distributing.html https://github.com/pypa/sampleproject """ # Always prefer setuptools over distutils from setuptools import setup, find_packages # To use a consistent encoding from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() # Arguments marked as "Required" below must be included for upload to PyPI. # Fields marked as "Optional" may be commented out. setup( name='r_map', # Required version='0.9.0', # Required description='A data structure for working with register map information', # Required long_description=long_description, # Optional long_description_content_type='text/markdown', # Optional (see note above) url='https://github.com/mentaal/r_map', # Optional # This should be your name or the name of the organization which owns the # project. author='Gregory Kuhn', # Optional # This should be a valid email address corresponding to the author listed # above. author_email='gregorykuhn@gmail.com', # Optional # Classifiers help users find your project by categorizing it. # # For a list of valid classifiers, see https://pypi.org/classifiers/ classifiers=[ # Optional # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 4 - Beta', # Indicate who your project is intended for 'Intended Audience :: Developers', 'Topic :: Software Development :: Build Tools', # Pick your license as you wish 'License :: OSI Approved :: MIT License', # Specify the Python versions you support here. In particular, ensure # that you indicate whether you support Python 2, Python 3 or both. 
'Programming Language :: Python :: 3.6', ], keywords='register bitfield registermap', # Optional packages=['r_map'], python_requires='>=3.6', project_urls={ # Optional 'Bug Reports': 'https://github.com/mentaal/r_map/issues', 'Source': 'https://github.com/mentaal/r_map', }, )
[ 37811, 32, 900, 37623, 10141, 1912, 9058, 8265, 13, 198, 6214, 25, 198, 5450, 1378, 8002, 3039, 13, 29412, 13, 2398, 14, 268, 14, 42861, 14, 17080, 2455, 278, 13, 6494, 198, 5450, 1378, 12567, 13, 785, 14, 79, 4464, 64, 14, 39873, ...
2.940582
791
if __name__ == "__main__": #print(Hey().get_name()) teen = Teenager("Joseph Njeri", 924, "www.fowr.gd") print(teen.website)
[ 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1303, 4798, 7, 10814, 22446, 1136, 62, 3672, 28955, 628, 220, 220, 220, 6036, 796, 21432, 3536, 7203, 29458, 399, 73, 33442, 1600, 860, 1731, 11, 366, ...
2.245902
61
import enum from typing import Dict, List from odmantic.field import Field from odmantic.model import Model
[ 11748, 33829, 198, 6738, 19720, 1330, 360, 713, 11, 7343, 198, 198, 6738, 16298, 76, 5109, 13, 3245, 1330, 7663, 198, 6738, 16298, 76, 5109, 13, 19849, 1330, 9104, 628, 198 ]
3.580645
31
"""Test AdaNet estimator single graph implementation. Copyright 2018 The AdaNet Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import time from absl import logging from absl.testing import parameterized from adanet import replay from adanet import tf_compat from adanet.core import testing_utils as tu from adanet.core.estimator import Estimator from adanet.core.evaluator import Evaluator from adanet.core.report_materializer import ReportMaterializer from adanet.distributed.placement import RoundRobinStrategy from adanet.ensemble import AllStrategy from adanet.ensemble import ComplexityRegularizedEnsembler from adanet.ensemble import GrowStrategy from adanet.ensemble import MixtureWeightType from adanet.ensemble import SoloStrategy from adanet.subnetwork import Builder from adanet.subnetwork import Generator from adanet.subnetwork import MaterializedReport from adanet.subnetwork import Report from adanet.subnetwork import SimpleGenerator from adanet.subnetwork import Subnetwork from adanet.subnetwork import TrainOpSpec import numpy as np import tensorflow as tf # pylint: disable=g-direct-tensorflow-import from tensorflow.python.eager import context from tensorflow.python.framework import test_util from tensorflow.python.tools import saved_model_utils # pylint: enable=g-direct-tensorflow-import from 
tensorflow_estimator.python.estimator.canned.head import _binary_logistic_head_with_sigmoid_cross_entropy_loss as binary_class_head_v1 from tensorflow_estimator.python.estimator.export import export from tensorflow_estimator.python.estimator.head import binary_class_head from tensorflow_estimator.python.estimator.head import multi_head as multi_head_lib from tensorflow_estimator.python.estimator.head import regression_head logging.set_verbosity(logging.INFO) XOR_FEATURES = [[1., 0.], [0., 0], [0., 1.], [1., 1.]] XOR_LABELS = [[1.], [0.], [1.], [0.]] class _AlwaysLastEvaluator(_FakeEvaluator): def evaluate(self, sess, ensemble_metrics): """Always makes the last loss the smallest.""" del sess # Unused. losses = [np.inf] * len(ensemble_metrics) losses[-1] = 0. return losses class _AlwaysSecondToLastEvaluator(_FakeEvaluator): def evaluate(self, sess, ensemble_metrics): """Always makes the second to last loss the smallest.""" del sess # Unused. losses = [np.inf] * len(ensemble_metrics) losses[-2] = 0. return losses class _EarlyStoppingHook(tf_compat.SessionRunHook): """Hook that immediately requests training to stop.""" class EstimatorTest(tu.AdanetTestCase): def test_binary_head_asserts_are_disabled(self): """Tests b/140267630.""" subnetwork_generator = SimpleGenerator([ _DNNBuilder("dnn"), _NanLossBuilder(), ]) estimator = Estimator( head=binary_class_head_v1(), subnetwork_generator=subnetwork_generator, max_iteration_steps=10, model_dir=self.test_subdirectory) eval_input_fn = tu.dummy_input_fn([[1., 0.]], [[1.]]) estimator.evaluate(input_fn=eval_input_fn, steps=1) class KerasCNNBuilder(Builder): """Builds a CNN subnetwork for AdaNet.""" def __init__(self, learning_rate, seed=42): """Initializes a `SimpleCNNBuilder`. Args: learning_rate: The float learning rate to use. seed: The random seed. Returns: An instance of `SimpleCNNBuilder`. 
""" self._learning_rate = learning_rate self._seed = seed def build_subnetwork(self, features, logits_dimension, training, iteration_step, summary, previous_ensemble=None): """See `adanet.subnetwork.Builder`.""" seed = self._seed if previous_ensemble: seed += len(previous_ensemble.weighted_subnetworks) images = list(features.values())[0] images = tf.reshape(images, [-1, 2, 2, 1]) kernel_initializer = tf_compat.v1.keras.initializers.he_normal(seed=seed) x = tf.keras.layers.Conv2D( filters=3, kernel_size=1, padding="same", activation="relu", kernel_initializer=kernel_initializer)( images) x = tf.keras.layers.MaxPool2D(pool_size=2, strides=1)(x) x = tf.keras.layers.Flatten()(x) x = tf.keras.layers.Dense( units=3, activation="relu", kernel_initializer=kernel_initializer)( x) logits = tf_compat.v1.layers.Dense( units=1, activation=None, kernel_initializer=kernel_initializer)( x) complexity = tf.constant(1) return Subnetwork( last_layer=x, logits=logits, complexity=complexity, persisted_tensors={}) def _check_eventfile_for_keyword(keyword, dir_): """Checks event files for the keyword.""" tf_compat.v1.summary.FileWriterCache.clear() if not tf.io.gfile.exists(dir_): raise ValueError("Directory '{}' not found.".format(dir_)) # Get last `Event` written. 
filenames = os.path.join(dir_, "events*") event_paths = tf.io.gfile.glob(filenames) if not event_paths: raise ValueError("Path '{}' not found.".format(filenames)) for last_event in tf_compat.v1.train.summary_iterator(event_paths[-1]): if last_event.summary is not None: for value in last_event.summary.value: if keyword == value.tag: if value.HasField("simple_value"): return value.simple_value if value.HasField("image"): return (value.image.height, value.image.width, value.image.colorspace) if value.HasField("tensor"): return value.tensor.string_val raise ValueError("Keyword '{}' not found in path '{}'.".format( keyword, filenames)) def _mean_keras_metric(value): """Returns the mean of given value as a Keras metric.""" mean = tf.keras.metrics.Mean() mean.update_state(value) return mean def _dummy_feature_dict_input_fn(features, labels): """Returns an input_fn that returns feature and labels `Tensors`.""" return _input_fn if __name__ == "__main__": tf.test.main()
[ 37811, 14402, 47395, 7934, 3959, 1352, 2060, 4823, 7822, 13, 198, 198, 15269, 2864, 383, 47395, 7934, 46665, 13, 1439, 6923, 33876, 13, 198, 198, 26656, 15385, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341...
2.641715
2,565
from seeker.models import Building, Classroom, Time import json import os os.chdir('../data') fileList = os.listdir() #loops through each json file for jsonfile in fileList: #opens the jsonfile and loads the data f = open(jsonfile, 'r') data = f.read() jsondata = json.loads(data) #create the building building = Building(BuildingName=os.path.splitext(jsonfile)[0]) building.save() for day in jsondata: for room in jsondata[day].keys(): #creates each classroom, adding one only if one doesn't exist classroom = Classroom.objects.get_or_create(building = Building.objects.get(BuildingName = os.path.splitext(jsonfile)[0]), ClassroomName = os.path.splitext(jsonfile)[0] + ' - ' + room) for time in jsondata[day][room]: #creates each time time = Time(building=Building.objects.get(BuildingName = os.path.splitext(jsonfile)[0]), classroom=Classroom.objects.get(ClassroomName = os.path.splitext(jsonfile)[0] + ' - ' + room), DayofWeek=day, TimeValue=time) time.save() #IMPORTANT!!!!!!! # This program must be run inside a python manage.py shell for it to work, in the future a fix may be found, # but for the time being, follow these steps: # 1. open powershell and navigate to the folder that contains this file # 2. type in "python manage.py shell" # 3. copy and paste the code into the shell and press enter # 4. wait time is around 5 minutes
[ 6738, 45993, 13, 27530, 1330, 11819, 11, 5016, 3823, 11, 3862, 198, 11748, 33918, 198, 11748, 28686, 198, 198, 418, 13, 354, 15908, 10786, 40720, 7890, 11537, 198, 7753, 8053, 796, 28686, 13, 4868, 15908, 3419, 198, 198, 2, 5439, 2840, ...
2.768797
532
# AUTO GENERATED FILE - DO NOT EDIT from dash.development.base_component import Component, _explicitize_args
[ 2, 47044, 46, 24700, 1137, 11617, 45811, 532, 8410, 5626, 48483, 198, 198, 6738, 14470, 13, 31267, 13, 8692, 62, 42895, 1330, 35100, 11, 4808, 20676, 3628, 1096, 62, 22046, 628 ]
3.580645
31
"""Utility function for process to raw data """ from .util import ( cvt_pcm2wav, cvt_float2fixed, cvt_char2num, plot_frequency_response, plot_pole_zero_analysis, ) from .fi import fi __all__ = [ "fi", "cvt_pcm2wav", "cvt_float2fixed", "cvt_char2num", "plot_frequency_response", "plot_pole_zero_analysis", ]
[ 37811, 18274, 879, 2163, 329, 1429, 284, 8246, 1366, 198, 37811, 198, 6738, 764, 22602, 1330, 357, 198, 220, 220, 220, 269, 36540, 62, 79, 11215, 17, 45137, 11, 198, 220, 220, 220, 269, 36540, 62, 22468, 17, 34021, 11, 198, 220, 220...
2.220126
159
import pytest from django.core.files import File from django.urls import reverse from freezegun import freeze_time from infra.apps.catalog.tests.helpers.open_catalog import open_catalog pytestmark = pytest.mark.django_db
[ 11748, 12972, 9288, 198, 6738, 42625, 14208, 13, 7295, 13, 16624, 1330, 9220, 198, 6738, 42625, 14208, 13, 6371, 82, 1330, 9575, 198, 6738, 1479, 89, 1533, 403, 1330, 16611, 62, 2435, 198, 198, 6738, 1167, 430, 13, 18211, 13, 9246, 11...
3.026667
75
import subprocess, os ue4_win = r"C:\Program Files\Epic Games\UE_4.16" ue4_linux = "/home/qiuwch/workspace/UE416" ue4_mac = '/Users/Shared/Epic Games/UE_4.16' win_uprojects = [ r'C:\qiuwch\workspace\uprojects\UE4RealisticRendering\RealisticRendering.uproject', r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene1\ArchinteriorsVol2Scene1.uproject', r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene2\ArchinteriorsVol2Scene2.uproject', r'C:\qiuwch\workspace\uprojects\UE4ArchinteriorsVol2Scene3\ArchinteriorsVol2Scene3.uproject', r'C:\qiuwch\workspace\uprojects\UE4UrbanCity\UrbanCity.uproject', r'D:\workspace\uprojects\Matinee\Matinee.uproject', r'D:\workspace\uprojects\PhotorealisticCharacter\PhotorealisticCharacter2.uproject', ] linux_uprojects = [ os.path.expanduser('~/workspace/uprojects/UE4RealisticRendering/RealisticRendering.uproject'), os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'), os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'), os.path.expanduser('~/workspace/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'), os.path.expanduser("~/workspace/uprojects/UE4UrbanCity/UrbanCity.uproject"), ] mac_uprojects = [ os.path.expanduser('~/workspace/UnrealEngine/Templates/FP_FirstPerson/FP_FirstPerson.uproject'), os.path.expanduser('~/uprojects/RealisticRendering/RealisticRendering.uproject'), os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene1/ArchinteriorsVol2Scene1.uproject'), os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene2/ArchinteriorsVol2Scene2.uproject'), os.path.expanduser('~/uprojects/UE4ArchinteriorsVol2Scene3/ArchinteriorsVol2Scene3.uproject'), os.path.expanduser('~/uprojects/UE4UrbanCity/UrbanCity.uproject'), ] uprojects = [] for uproject_path in win_uprojects: uproject_name = os.path.basename(uproject_path).split('.')[0] uprojects.append( dict( uproject_path = uproject_path, ue4_path = ue4_win, 
log_file = 'log/win_%s.log' % uproject_name ), ) for uproject_path in linux_uprojects: uproject_name = os.path.basename(uproject_path).split('.')[0] uprojects.append( dict( uproject_path = uproject_path, ue4_path = ue4_linux, log_file = 'log/linux_%s.log' % uproject_name ), ) for uproject_path in mac_uprojects: uproject_name = os.path.basename(uproject_path).split('.')[0] uprojects.append( dict( uproject_path = uproject_path, ue4_path = ue4_mac, log_file = 'log/mac_%s.log' % uproject_name ), ) if __name__ == '__main__': for uproject in uprojects: uproject_path = uproject['uproject_path'] if not os.path.isfile(uproject_path): print("Can not find uproject file %s, skip this project" % uproject_path) continue cmd = [ 'python', 'build.py', '--UE4', uproject['ue4_path'], # '--output', uproject['output_folder'], uproject['uproject_path'] ] print(cmd) subprocess.call(cmd, stdout = open(uproject['log_file'], 'w')) with open(uproject['log_file']) as f: lines = f.readlines() print(''.join(lines[-10:])) # Print the last few lines
[ 11748, 850, 14681, 11, 28686, 198, 198, 518, 19, 62, 5404, 796, 374, 1, 34, 7479, 15167, 13283, 59, 13807, 291, 5776, 59, 8924, 62, 19, 13, 1433, 1, 198, 518, 19, 62, 23289, 796, 12813, 11195, 14, 80, 16115, 86, 354, 14, 5225, 1...
2.122722
1,646
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from ovos_utils.gui import can_use_gui from adapt.intent import IntentBuilder from mycroft.skills.common_query_skill import CommonQuerySkill, CQSMatchLevel from mycroft.skills.core import intent_handler from neon_solver_ddg_plugin import DDGSolver def CQS_action(self, phrase, data): """ If selected show gui """ self.display_ddg(data["answer"], data["image"]) # duck duck go api def create_skill(): return DuckDuckGoSkill()
[ 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, 7330, 257, 4866, 286, 262, 13789, 379, 198, 2,...
3.377104
297
""" Django settings for openstack_lease_it project. Generated by 'django-admin startproject' using Django 1.8.7. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os import ast import logging from openstack_lease_it.config import GLOBAL_CONFIG, load_config BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Load configuration load_config() # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = GLOBAL_CONFIG['DJANGO_SECRET_KEY'] # SECURITY WARNING: don't run with debug turned on in production! DEBUG = ast.literal_eval(GLOBAL_CONFIG['DJANGO_DEBUG']) # ALLOWED_HOSTS secure django app access ALLOWED_HOSTS = [] # A email as format must match this regular expression # If you not understand, please EMAIL_REGEXP = r"^[A-Za-z0-9\.\+_-]+@[A-Za-z0-9\.-]+\.[A-Za-z]*$" # Application definition INSTALLED_APPS = ( 'openstack_auth', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'openstack_lease_it', 'lease_it', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'openstack_lease_it.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 
'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'openstack_lease_it.wsgi.application' # Database # https://docs.djangoproject.com/en/1.8/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en' TIME_ZONE = 'Europe/Paris' USE_I18N = True USE_L10N = True USE_TZ = True DEFAULT_CHARSET = 'utf-8' # We use memcached as cache backend SESSION_ENGINE = 'django.contrib.sessions.backends.cache' CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', 'LOCATION': '{MEMCACHED_HOST}:{MEMCACHED_PORT}'.format(**GLOBAL_CONFIG), } } SESSION_COOKIE_SECURE = False SESSION_TIMEOUT = 1800 # A token can be near the end of validity when a page starts loading, and # invalid during the rendering which can cause errors when a page load. # TOKEN_TIMEOUT_MARGIN defines a time in seconds we retrieve from token # validity to avoid this issue. You can adjust this time depending on the # performance of the infrastructure. 
TOKEN_TIMEOUT_MARGIN = 100 # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATIC_URL = '/static/' LOGIN_URL = 'login' LOGOUT_URL = 'logout' LOGIN_REDIRECT_URL = '/' SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer' if GLOBAL_CONFIG['BACKEND_PLUGIN'] == 'Openstack': # UserId on django-openstack_auth need specific User model AUTH_USER_MODEL = 'openstack_auth.User' # Define keystone URL for authentification OPENSTACK_KEYSTONE_URL = GLOBAL_CONFIG['OS_AUTH_URL'] # We use keystone v3 API OPENSTACK_API_VERSIONS = { "identity": GLOBAL_CONFIG['OS_IDENTITY_API_VERSION'], } # We use multidomain OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True # We load Openstack_auth backend AUTHENTICATION_BACKENDS = ( 'openstack_auth.backend.KeystoneBackend', 'django.contrib.auth.backends.ModelBackend', ) else: AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', ) # Configure logging LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'simple': { 'format': '%(levelname)s %(asctime)s: %(message)s' }, }, 'handlers': { 'django': { 'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'], 'class': 'logging.FileHandler', 'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'django.log'), 'formatter': 'simple' }, 'main': { 'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'], 'class': 'logging.FileHandler', 'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'main.log'), 'formatter': 'simple' }, 'notification': { 'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'], 'class': 'logging.FileHandler', 'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'notification.log'), 'formatter': 'simple' }, 'instances': { 'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'], 'class': 'logging.FileHandler', 'filename': os.path.join(GLOBAL_CONFIG['DJANGO_LOGDIR'], 'instances.log'), 'formatter': 'simple' }, }, 'loggers': { 'django': { 'handlers': ['django'], 'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'], 'propagate': True, }, 
'main': { 'handlers': ['main'], 'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'], 'propagate': True, }, 'notification': { 'handlers': ['notification'], 'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'], 'propagate': True, }, 'instances': { 'handlers': ['instances'], 'level': GLOBAL_CONFIG['DJANGO_LOGLEVEL'], 'propagate': True, }, }, } LOGGER = logging.getLogger('main') LOGGER_NOTIFICATION = logging.getLogger('notification') LOGGER_INSTANCES = logging.getLogger('instances')
[ 37811, 198, 35, 73, 14208, 6460, 329, 1280, 25558, 62, 1274, 62, 270, 1628, 13, 198, 198, 8645, 515, 416, 705, 28241, 14208, 12, 28482, 923, 16302, 6, 1262, 37770, 352, 13, 23, 13, 22, 13, 198, 198, 1890, 517, 1321, 319, 428, 2393...
2.242148
2,961
# coding=utf-8 # Copyright 2021 RigL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Functions for pruning FLAX masked models.""" import collections from typing import Any, Callable, Mapping, Optional, Union import flax import jax.numpy as jnp from rigl.experimental.jax.pruning import masked def weight_magnitude(weights): """Creates weight magnitude-based saliencies, given a weight matrix.""" return jnp.absolute(weights) def prune( model, pruning_rate, saliency_fn = weight_magnitude, mask = None, compare_fn = jnp.greater): """Returns a mask for a model where the params in each layer are pruned using a saliency function. Args: model: The model to create a pruning mask for. pruning_rate: The fraction of lowest magnitude saliency weights that are pruned. If a float, the same rate is used for all layers, otherwise if it is a mapping, it must contain a rate for all masked layers in the model. saliency_fn: A function that returns a float number used to rank the importance of individual weights in the layer. mask: If the model has an existing mask, the mask will be applied before pruning the model. compare_fn: A pairwise operator to compare saliency with threshold, and return True if the saliency indicates the value should not be masked. Returns: A pruned mask for the given model. 
""" if not mask: mask = masked.simple_mask(model, jnp.ones, masked.WEIGHT_PARAM_NAMES) if not isinstance(pruning_rate, collections.Mapping): pruning_rate_dict = {} for param_name, _ in masked.iterate_mask(mask): # Get the layer name from the parameter's full name/path. layer_name = param_name.split('/')[-2] pruning_rate_dict[layer_name] = pruning_rate pruning_rate = pruning_rate_dict for param_path, param_mask in masked.iterate_mask(mask): split_param_path = param_path.split('/') layer_name = split_param_path[-2] param_name = split_param_path[-1] # If we don't have a pruning rate for the given layer, don't mask it. if layer_name in pruning_rate and mask[layer_name][param_name] is not None: param_value = model.params[layer_name][ masked.MaskedModule.UNMASKED][param_name] # Here any existing mask is first applied to weight matrix. # Note: need to check explicitly is not None for np array. if param_mask is not None: saliencies = saliency_fn(param_mask * param_value) else: saliencies = saliency_fn(param_value) # TODO: Use partition here (partial sort) instead of sort, # since it's O(N), not O(N log N), however JAX doesn't support it. sorted_param = jnp.sort(jnp.abs(saliencies.flatten())) # Figure out the weight magnitude threshold. threshold_index = jnp.round(pruning_rate[layer_name] * sorted_param.size).astype(jnp.int32) threshold = sorted_param[threshold_index] mask[layer_name][param_name] = jnp.array( compare_fn(saliencies, threshold), dtype=jnp.int32) return mask
[ 2, 19617, 28, 40477, 12, 23, 198, 2, 15069, 33448, 24666, 43, 46665, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846,...
2.935118
1,233
# AUTO GENERATED FILE - DO NOT EDIT from dash.development.base_component import Component, _explicitize_args
[ 2, 47044, 46, 24700, 1137, 11617, 45811, 532, 8410, 5626, 48483, 198, 198, 6738, 14470, 13, 31267, 13, 8692, 62, 42895, 1330, 35100, 11, 4808, 20676, 3628, 1096, 62, 22046, 628 ]
3.580645
31
# -*- coding:utf-8 -*- # Author: hankcs # Date: 2019-12-28 23:15 from hanlp.components.taggers.transformers.transformer_tagger_tf import TransformerTaggerTF from tests import cdroot cdroot() tagger = TransformerTaggerTF() save_dir = 'data/model/pos/ctb9_electra_small_zh_epoch_20' tagger.fit('data/pos/ctb9/train.tsv', 'data/pos/ctb9/test.tsv', save_dir, transformer='hfl/chinese-electra-small-discriminator', max_seq_length=130, warmup_steps_ratio=0.1, epochs=20, learning_rate=5e-5) tagger.load(save_dir) print(tagger(['', '', '', '', '', ''])) tagger.evaluate('data/pos/ctb9/test.tsv', save_dir=save_dir) print(f'Model saved in {save_dir}')
[ 2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9, 12, 198, 2, 6434, 25, 289, 962, 6359, 198, 2, 7536, 25, 13130, 12, 1065, 12, 2078, 2242, 25, 1314, 198, 6738, 289, 272, 34431, 13, 5589, 3906, 13, 12985, 5355, 13, 35636, 364, 1...
2.098837
344
from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from app.api import landing, login, attendance_confirmation from sql_app.database import orm_connection app = FastAPI(title="Sergio's wedding backend API", description="REST API which serves login, attendance confirmation and other features", version="1.0",) origins = [ "*" # "http://190.96.140.12:5500", # "68.251.63.208" ] app.add_middleware( CORSMiddleware, allow_origins=origins, allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) app.include_router(landing.router) app.include_router(login.router) app.include_router(attendance_confirmation.router)
[ 6738, 3049, 15042, 1330, 12549, 17614, 198, 6738, 3049, 15042, 13, 27171, 1574, 13, 66, 669, 1330, 23929, 12310, 2509, 1574, 198, 6738, 598, 13, 15042, 1330, 9581, 11, 17594, 11, 14858, 62, 10414, 36241, 198, 6738, 44161, 62, 1324, 13, ...
2.710425
259
from unittest import TestCase from pandora.client import APIClient from pandora.errors import InvalidAuthToken, ParameterMissing from pandora.models.pandora import Station, AdItem, PlaylistItem from pandora.py2compat import Mock, patch from pydora.utils import iterate_forever
[ 6738, 555, 715, 395, 1330, 6208, 20448, 198, 198, 6738, 19798, 5799, 13, 16366, 1330, 3486, 2149, 75, 1153, 198, 6738, 19798, 5799, 13, 48277, 1330, 17665, 30515, 30642, 11, 25139, 2357, 43730, 198, 6738, 19798, 5799, 13, 27530, 13, 79,...
3.531646
79