diff --git a/llava_next/include/fakemysql.h b/llava_next/include/fakemysql.h new file mode 100644 index 0000000000000000000000000000000000000000..19dc40ee715ef49b05e7a8c335c5bf97dfaa1d0e --- /dev/null +++ b/llava_next/include/fakemysql.h @@ -0,0 +1,335 @@ +/* + * fakemysql.h -- + * + * Fake definitions of the MySQL API sufficient to build tdbc::mysql + * without having an MySQL installation on the build system. This file + * comprises only data type, constant and function definitions. + * + * The programmers of this file believe that it contains material not + * subject to copyright under the doctrines of scenes a faire and + * of merger of idea and expression. Accordingly, this file is in the + * public domain. + * + *----------------------------------------------------------------------------- + */ + +#ifndef FAKEMYSQL_H_INCLUDED +#define FAKEMYSQL_H_INCLUDED + +#include + +#ifndef MODULE_SCOPE +#define MODULE_SCOPE extern +#endif + +MODULE_SCOPE Tcl_LoadHandle MysqlInitStubs(Tcl_Interp*); + +#ifdef _WIN32 +#define STDCALL __stdcall +#else +#define STDCALL /* nothing */ +#endif + +enum enum_field_types { + MYSQL_TYPE_DECIMAL=0, + MYSQL_TYPE_TINY=1, + MYSQL_TYPE_SHORT=2, + MYSQL_TYPE_LONG=3, + MYSQL_TYPE_FLOAT=4, + MYSQL_TYPE_DOUBLE=5, + MYSQL_TYPE_NULL=6, + MYSQL_TYPE_TIMESTAMP=7, + MYSQL_TYPE_LONGLONG=8, + MYSQL_TYPE_INT24=9, + MYSQL_TYPE_DATE=10, + MYSQL_TYPE_TIME=11, + MYSQL_TYPE_DATETIME=12, + MYSQL_TYPE_YEAR=13, + MYSQL_TYPE_NEWDATE=14, + MYSQL_TYPE_VARCHAR=15, + MYSQL_TYPE_BIT=16, + MYSQL_TYPE_NEWDECIMAL=246, + MYSQL_TYPE_ENUM=247, + MYSQL_TYPE_SET=248, + MYSQL_TYPE_TINY_BLOB=249, + MYSQL_TYPE_MEDIUM_BLOB=250, + MYSQL_TYPE_LONG_BLOB=251, + MYSQL_TYPE_BLOB=252, + MYSQL_TYPE_VAR_STRING=253, + MYSQL_TYPE_STRING=254, + MYSQL_TYPE_GEOMETRY=255 +}; + +enum mysql_option { + MYSQL_SET_CHARSET_NAME=7, +}; + +enum mysql_status { + MYSQL_STATUS_READY=0, +}; + +#define CLIENT_COMPRESS 32 +#define CLIENT_INTERACTIVE 1024 +#define MYSQL_DATA_TRUNCATED 101 +#define 
MYSQL_ERRMSG_SIZE 512 +#define MYSQL_NO_DATA 100 +#define SCRAMBLE_LENGTH 20 +#define SQLSTATE_LENGTH 5 + +typedef struct st_list LIST; +typedef struct st_mem_root MEM_ROOT; +typedef struct st_mysql MYSQL; +typedef struct st_mysql_bind MYSQL_BIND; +typedef struct st_mysql_field MYSQL_FIELD; +typedef struct st_mysql_res MYSQL_RES; +typedef char** MYSQL_ROW; +typedef struct st_mysql_stmt MYSQL_STMT; +typedef char my_bool; +#ifndef Socket_defined +typedef int my_socket; +#define INVALID_SOCKET -1 +#endif +typedef Tcl_WideUInt my_ulonglong; +typedef struct st_net NET; +typedef struct st_used_mem USED_MEM; +typedef struct st_vio Vio; + +struct st_mem_root { + USED_MEM *free; + USED_MEM *used; + USED_MEM *pre_alloc; + size_t min_malloc; + size_t block_size; + unsigned int block_num; + unsigned int first_block_usage; + void (*error_handler)(void); +}; + +struct st_mysql_options { + unsigned int connect_timeout; + unsigned int read_timeout; + unsigned int write_timeout; + unsigned int port; + unsigned int protocol; + unsigned long client_flag; + char *host; + char *user; + char *password; + char *unix_socket; + char *db; + struct st_dynamic_array *init_commands; + char *my_cnf_file; + char *my_cnf_group; + char *charset_dir; + char *charset_name; + char *ssl_key; + char *ssl_cert; + char *ssl_ca; + char *ssl_capath; + char *ssl_cipher; + char *shared_memory_base_name; + unsigned long max_allowed_packet; + my_bool use_ssl; + my_bool compress,named_pipe; + my_bool rpl_probe; + my_bool rpl_parse; + my_bool no_master_reads; +#if !defined(CHECK_EMBEDDED_DIFFERENCES) || defined(EMBEDDED_LIBRARY) + my_bool separate_thread; +#endif + enum mysql_option methods_to_use; + char *client_ip; + my_bool secure_auth; + my_bool report_data_truncation; + int (*local_infile_init)(void **, const char *, void *); + int (*local_infile_read)(void *, char *, unsigned int); + void (*local_infile_end)(void *); + int (*local_infile_error)(void *, char *, unsigned int); + void *local_infile_userdata; 
+ void *extension; +}; + +struct st_net { +#if !defined(CHECK_EMBEDDED_DIFFERENCES) || !defined(EMBEDDED_LIBRARY) + Vio *vio; + unsigned char *buff; + unsigned char *buff_end; + unsigned char *write_pos; + unsigned char *read_pos; + my_socket fd; + unsigned long remain_in_buf; + unsigned long length; + unsigned long buf_length; + unsigned long where_b; + unsigned long max_packet; + unsigned long max_packet_size; + unsigned int pkt_nr; + unsigned int compress_pkt_nr; + unsigned int write_timeout; + unsigned int read_timeout; + unsigned int retry_count; + int fcntl; + unsigned int *return_status; + unsigned char reading_or_writing; + char save_char; + my_bool unused0; + my_bool unused; + my_bool compress; + my_bool unused1; +#endif + unsigned char *query_cache_query; + unsigned int last_errno; + unsigned char error; + my_bool unused2; + my_bool return_errno; + char last_error[MYSQL_ERRMSG_SIZE]; + char sqlstate[SQLSTATE_LENGTH+1]; + void *extension; +#if defined(MYSQL_SERVER) && !defined(EMBEDDED_LIBRARY) + my_bool skip_big_packet; +#endif +}; + +/* + * st_mysql differs between 5.0 and 5.1, but the 5.0 version is a + * strict subset, we don't use any of the 5.1 fields, and we don't + * ever allocate the structure ourselves. 
+ */ + +struct st_mysql { + NET net; + unsigned char *connector_fd; + char *host; + char *user; + char *passwd; + char *unix_socket; + char *server_version; + char *host_info; + char *info; + char *db; + struct charset_info_st *charset; + MYSQL_FIELD *fields; + MEM_ROOT field_alloc; + my_ulonglong affected_rows; + my_ulonglong insert_id; + my_ulonglong extra_info; + unsigned long thread_id; + unsigned long packet_length; + unsigned int port; + unsigned long client_flag; + unsigned long server_capabilities; + unsigned int protocol_version; + unsigned int field_count; + unsigned int server_status; + unsigned int server_language; + unsigned int warning_count; + struct st_mysql_options options; + enum mysql_status status; + my_bool free_me; + my_bool reconnect; + char scramble[SCRAMBLE_LENGTH+1]; + my_bool rpl_pivot; + struct st_mysql *master; + struct st_mysql *next_slave; + struct st_mysql* last_used_slave; + struct st_mysql* last_used_con; + LIST *stmts; + const struct st_mysql_methods *methods; + void *thd; + my_bool *unbuffered_fetch_owner; + char *info_buffer; +}; + +/* + * There are different version of the MYSQL_BIND structure before and after + * MySQL 5.1. We go after the fields of the structure using accessor functions + * so that the code in this file is compatible with both versions. 
+ */ + +struct st_mysql_bind_51 { /* Post-5.1 */ + unsigned long* length; + my_bool* is_null; + void* buffer; + my_bool* error; + unsigned char* row_ptr; + void (*store_param_func)(NET* net, MYSQL_BIND* param); + void (*fetch_result)(MYSQL_BIND*, MYSQL_FIELD*, unsigned char**); + void (*skip_result)(MYSQL_BIND*, MYSQL_FIELD*, unsigned char**); + unsigned long buffer_length; + unsigned long offset; + unsigned long length_value; + unsigned int param_number; + unsigned int pack_length; + enum enum_field_types buffer_type; + my_bool error_value; + my_bool is_unsigned; + my_bool long_data_used; + my_bool is_null_value; + void* extension; +}; + +struct st_mysql_bind_50 { /* Pre-5.1 */ + unsigned long* length; + my_bool* is_null; + void* buffer; + my_bool* error; + enum enum_field_types buffer_type; + unsigned long buffer_length; + unsigned char* row_ptr; + unsigned long offset; + unsigned long length_value; + unsigned int param_number; + unsigned int pack_length; + my_bool error_value; + my_bool is_unsigned; + my_bool long_data_used; + my_bool is_null_value; + void (*store_param_func)(NET* net, MYSQL_BIND* param); + void (*fetch_result)(MYSQL_BIND*, MYSQL_FIELD*, unsigned char**); + void (*skip_result)(MYSQL_BIND*, MYSQL_FIELD*, unsigned char**); +}; + +/* + * There are also different versions of the MYSQL_FIELD structure; fortunately, + * the 5.1 version is a strict extension of the 5.0 version. 
+ */ + +struct st_mysql_field { + char* name; + char *org_name; + char* table; + char* org_table; + char* db; + char* catalog; + char* def; + unsigned long length; + unsigned long max_length; + unsigned int name_length; + unsigned int org_name_length; + unsigned int table_length; + unsigned int org_table_length; + unsigned int db_length; + unsigned int catalog_length; + unsigned int def_length; + unsigned int flags; + unsigned int decimals; + unsigned int charsetnr; + enum enum_field_types type; +}; +struct st_mysql_field_50 { + struct st_mysql_field field; +}; +struct st_mysql_field_51 { + struct st_mysql_field field; + void* extension; +}; +#define NOT_NULL_FLAG 1 + +#define IS_NUM(t) ((t) <= MYSQL_TYPE_INT24 || (t) == MYSQL_TYPE_YEAR || (t) == MYSQL_TYPE_NEWDECIMAL) + +#define mysql_library_init mysql_server_init +#define mysql_library_end mysql_server_end + +#include "mysqlStubs.h" + +#endif /* not FAKEMYSQL_H_INCLUDED */ diff --git a/llava_next/include/itclInt.h b/llava_next/include/itclInt.h new file mode 100644 index 0000000000000000000000000000000000000000..b9e87daae89c889b14f8558e924ed1cb4c806e7b --- /dev/null +++ b/llava_next/include/itclInt.h @@ -0,0 +1,854 @@ +/* + * itclInt.h -- + * + * This file contains internal definitions for the C-implemented part of a + * Itcl + * + * Copyright (c) 2007 by Arnulf P. Wiedemann + * + * See the file "license.terms" for information on usage and redistribution of + * this file, and for a DISCLAIMER OF ALL WARRANTIES. + */ + +#ifdef HAVE_UNISTD_H +#include +#endif +#ifdef HAVE_STDINT_H +#include +#endif +#include + +/* + * Used to tag functions that are only to be visible within the module being + * built and not outside it (where this is supported by the linker). 
+ */ + +#ifndef MODULE_SCOPE +# ifdef __cplusplus +# define MODULE_SCOPE extern "C" +# else +# define MODULE_SCOPE extern +# endif +#endif + +#include +#include +#include +#include "itcl.h" +#include "itclMigrate2TclCore.h" +#include "itclTclIntStubsFcn.h" + +/* + * Utility macros: STRINGIFY takes an argument and wraps it in "" (double + * quotation marks). + */ + +#ifndef STRINGIFY +# define STRINGIFY(x) STRINGIFY1(x) +# define STRINGIFY1(x) #x +#endif + +/* + * MSVC 8.0 started to mark many standard C library functions depreciated + * including the *printf family and others. Tell it to shut up. + * (_MSC_VER is 1200 for VC6, 1300 or 1310 for vc7.net, 1400 for 8.0) + */ +#if defined(_MSC_VER) +# pragma warning(disable:4244) +# if _MSC_VER >= 1400 +# pragma warning(disable:4267) +# pragma warning(disable:4996) +# endif +#endif + +#ifndef TCL_INDEX_NONE +# define TCL_INDEX_NONE (-1) +#endif + +#ifndef JOIN +# define JOIN(a,b) JOIN1(a,b) +# define JOIN1(a,b) a##b +#endif + +#ifndef TCL_UNUSED +# if defined(__cplusplus) +# define TCL_UNUSED(T) T +# elif defined(__GNUC__) && (__GNUC__ > 2) +# define TCL_UNUSED(T) T JOIN(dummy, __LINE__) __attribute__((unused)) +# else +# define TCL_UNUSED(T) T JOIN(dummy, __LINE__) +# endif +#endif + +#if TCL_MAJOR_VERSION == 8 +# define ITCL_Z_MODIFIER "" +#else +# define ITCL_Z_MODIFIER TCL_Z_MODIFIER +#endif + +/* + * Since the Tcl/Tk distribution doesn't perform any asserts, + * dynamic loading can fail to find the __assert function. + * As a workaround, we'll include our own. + */ + +#undef assert +#if defined(NDEBUG) && !defined(DEBUG) +#define assert(EX) ((void)0) +#else /* !NDEBUG || DEBUG */ +#define assert(EX) (void)((EX) || (Itcl_Assert(STRINGIFY(EX), __FILE__, __LINE__), 0)) +#endif + +#define ITCL_INTERP_DATA "itcl_data" +#define ITCL_TK_VERSION "8.6" + +/* + * Convenience macros for iterating through hash tables. 
FOREACH_HASH_DECLS + * sets up the declarations needed for the main macro, FOREACH_HASH, which + * does the actual iteration. FOREACH_HASH_VALUE is a restricted version that + * only iterates over values. + */ + +#define FOREACH_HASH_DECLS \ + Tcl_HashEntry *hPtr;Tcl_HashSearch search +#define FOREACH_HASH(key,val,tablePtr) \ + for(hPtr=Tcl_FirstHashEntry((tablePtr),&search); hPtr!=NULL ? \ + (*(void **)&(key)=Tcl_GetHashKey((tablePtr),hPtr),\ + *(void **)&(val)=Tcl_GetHashValue(hPtr),1):0; hPtr=Tcl_NextHashEntry(&search)) +#define FOREACH_HASH_VALUE(val,tablePtr) \ + for(hPtr=Tcl_FirstHashEntry((tablePtr),&search); hPtr!=NULL ? \ + (*(void **)&(val)=Tcl_GetHashValue(hPtr),1):0;hPtr=Tcl_NextHashEntry(&search)) + +/* + * What sort of size of things we like to allocate. + */ + +#define ALLOC_CHUNK 8 + +#define ITCL_INT_NAMESPACE ITCL_NAMESPACE"::internal" +#define ITCL_INTDICTS_NAMESPACE ITCL_INT_NAMESPACE"::dicts" +#define ITCL_VARIABLES_NAMESPACE ITCL_INT_NAMESPACE"::variables" +#define ITCL_COMMANDS_NAMESPACE ITCL_INT_NAMESPACE"::commands" + +typedef struct ItclFoundation { + Itcl_Stack methodCallStack; + Tcl_Command dispatchCommand; +} ItclFoundation; + +typedef struct ItclArgList { + struct ItclArgList *nextPtr; /* pointer to next argument */ + Tcl_Obj *namePtr; /* name of the argument */ + Tcl_Obj *defaultValuePtr; /* default value or NULL if none */ +} ItclArgList; + +/* + * Common info for managing all known objects. + * Each interpreter has one of these data structures stored as + * clientData in the "itcl" namespace. It is also accessible + * as associated data via the key ITCL_INTERP_DATA. 
+ */ +struct ItclClass; +struct ItclObject; +struct ItclMemberFunc; +struct EnsembleInfo; +struct ItclDelegatedOption; +struct ItclDelegatedFunction; + +typedef struct ItclObjectInfo { + Tcl_Interp *interp; /* interpreter that manages this info */ + Tcl_HashTable objects; /* list of all known objects key is + * ioPtr */ + Tcl_HashTable objectCmds; /* list of known objects using accessCmd */ + Tcl_HashTable unused5; /* list of known objects using namePtr */ + Tcl_HashTable classes; /* list of all known classes, + * key is iclsPtr */ + Tcl_HashTable nameClasses; /* maps from fullNamePtr to iclsPtr */ + Tcl_HashTable namespaceClasses; /* maps from nsPtr to iclsPtr */ + Tcl_HashTable procMethods; /* maps from procPtr to mFunc */ + Tcl_HashTable instances; /* maps from instanceNumber to ioPtr */ + Tcl_HashTable unused8; /* maps from ioPtr to instanceNumber */ + Tcl_HashTable frameContext; /* maps frame to context stack */ + Tcl_HashTable classTypes; /* maps from class type i.e. "widget" + * to define value i.e. 
ITCL_WIDGET */ + int protection; /* protection level currently in effect */ + int useOldResolvers; /* whether to use the "old" style + * resolvers or the CallFrame resolvers */ + Itcl_Stack clsStack; /* stack of class definitions currently + * being parsed */ + Itcl_Stack unused; /* Removed */ + Itcl_Stack unused6; /* obsolete field */ + struct ItclObject *currIoPtr; /* object currently being constructed + * set only during calling of constructors + * otherwise NULL */ + Tcl_ObjectMetadataType *class_meta_type; + /* type for getting the Itcl class info + * from a TclOO Tcl_Object */ + const Tcl_ObjectMetadataType *object_meta_type; + /* type for getting the Itcl object info + * from a TclOO Tcl_Object */ + Tcl_Object clazzObjectPtr; /* the root object of Itcl */ + Tcl_Class clazzClassPtr; /* the root class of Itcl */ + struct EnsembleInfo *ensembleInfo; + struct ItclClass *currContextIclsPtr; + /* context class for delegated option + * handling */ + int currClassFlags; /* flags for the class just in creation */ + int buildingWidget; /* set if in construction of a widget */ + Tcl_Size unparsedObjc; /* number options not parsed by + ItclExtendedConfigure/-Cget function */ + Tcl_Obj **unparsedObjv; /* options not parsed by + ItclExtendedConfigure/-Cget function */ + int functionFlags; /* used for creating of ItclMemberCode */ + int unused7; + struct ItclDelegatedOption *currIdoPtr; + /* the current delegated option info */ + int inOptionHandling; /* used to indicate for type/widget ... 
+ * that there is an option processing + * and methods are allowed to be called */ + /* these are the Tcl_Obj Ptrs for the clazz unknown procedure */ + /* need to store them to be able to free them at the end */ + int itclWidgetInitted; /* set to 1 if itclWidget.tcl has already + * been called + */ + int itclHullCmdsInitted; /* set to 1 if itclHullCmds.tcl has already + * been called + */ + Tcl_Obj *unused2; + Tcl_Obj *unused3; + Tcl_Obj *unused4; + Tcl_Obj *infoVarsPtr; + Tcl_Obj *unused9; + Tcl_Obj *infoVars4Ptr; + Tcl_Obj *typeDestructorArgumentPtr; + struct ItclObject *lastIoPtr; /* last object constructed */ + Tcl_Command infoCmd; +} ItclObjectInfo; + +typedef struct EnsembleInfo { + Tcl_HashTable ensembles; /* list of all known ensembles */ + Tcl_HashTable subEnsembles; /* list of all known subensembles */ + Tcl_Size numEnsembles; + Tcl_Namespace *ensembleNsPtr; +} EnsembleInfo; +/* + * Representation for each [incr Tcl] class. + */ +#define ITCL_CLASS 0x1 +#define ITCL_TYPE 0x2 +#define ITCL_WIDGET 0x4 +#define ITCL_WIDGETADAPTOR 0x8 +#define ITCL_ECLASS 0x10 +#define ITCL_NWIDGET 0x20 +#define ITCL_WIDGET_FRAME 0x40 +#define ITCL_WIDGET_LABEL_FRAME 0x80 +#define ITCL_WIDGET_TOPLEVEL 0x100 +#define ITCL_WIDGET_TTK_FRAME 0x200 +#define ITCL_WIDGET_TTK_LABEL_FRAME 0x400 +#define ITCL_WIDGET_TTK_TOPLEVEL 0x800 +#define ITCL_CLASS_IS_DELETED 0x1000 +#define ITCL_CLASS_IS_DESTROYED 0x2000 +#define ITCL_CLASS_NS_IS_DESTROYED 0x4000 +#define ITCL_CLASS_IS_RENAMED 0x8000 /* unused */ +#define ITCL_CLASS_IS_FREED 0x10000 +#define ITCL_CLASS_DERIVED_RELEASED 0x20000 +#define ITCL_CLASS_NS_TEARDOWN 0x40000 +#define ITCL_CLASS_NO_VARNS_DELETE 0x80000 +#define ITCL_CLASS_SHOULD_VARNS_DELETE 0x100000 +#define ITCL_CLASS_DESTRUCTOR_CALLED 0x400000 + + +typedef struct ItclClass { + Tcl_Obj *namePtr; /* class name */ + Tcl_Obj *fullNamePtr; /* fully qualified class name */ + Tcl_Interp *interp; /* interpreter that manages this info */ + Tcl_Namespace *nsPtr; /* namespace 
representing class scope */ + Tcl_Command accessCmd; /* access command for creating instances */ + Tcl_Command thisCmd; /* needed for deletion of class */ + + struct ItclObjectInfo *infoPtr; + /* info about all known objects + * and other stuff like stacks */ + Itcl_List bases; /* list of base classes */ + Itcl_List derived; /* list of all derived classes */ + Tcl_HashTable heritage; /* table of all base classes. Look up + * by pointer to class definition. This + * provides fast lookup for inheritance + * tests. */ + Tcl_Obj *initCode; /* initialization code for new objs */ + Tcl_HashTable variables; /* definitions for all data members + in this class. Look up simple string + names and get back ItclVariable* ptrs */ + Tcl_HashTable options; /* definitions for all option members + in this class. Look up simple string + names and get back ItclOption* ptrs */ + Tcl_HashTable components; /* definitions for all component members + in this class. Look up simple string + names and get back ItclComponent* ptrs */ + Tcl_HashTable functions; /* definitions for all member functions + in this class. Look up simple string + names and get back ItclMemberFunc* ptrs */ + Tcl_HashTable delegatedOptions; /* definitions for all delegated options + in this class. Look up simple string + names and get back + ItclDelegatedOption * ptrs */ + Tcl_HashTable delegatedFunctions; /* definitions for all delegated methods + or procs in this class. Look up simple + string names and get back + ItclDelegatedFunction * ptrs */ + Tcl_HashTable methodVariables; /* definitions for all methodvariable members + in this class. Look up simple string + names and get back + ItclMethodVariable* ptrs */ + Tcl_Size numInstanceVars; /* number of instance vars in variables + table */ + Tcl_HashTable classCommons; /* used for storing variable namespace + * string for Tcl_Resolve */ + Tcl_HashTable resolveVars; /* all possible names for variables in + * this class (e.g., x, foo::x, etc.) 
*/ + Tcl_HashTable resolveCmds; /* all possible names for functions in + * this class (e.g., x, foo::x, etc.) */ + Tcl_HashTable contextCache; /* cache for function contexts */ + struct ItclMemberFunc *unused2; + /* the class constructor or NULL */ + struct ItclMemberFunc *unused3; + /* the class destructor or NULL */ + struct ItclMemberFunc *unused1; + Tcl_Resolve *resolvePtr; + Tcl_Obj *widgetClassPtr; /* class name for widget if class is a + * ::itcl::widget */ + Tcl_Obj *hullTypePtr; /* hulltype name for widget if class is a + * ::itcl::widget */ + Tcl_Object oPtr; /* TclOO class object */ + Tcl_Class clsPtr; /* TclOO class */ + Tcl_Size numCommons; /* number of commons in this class */ + Tcl_Size numVariables; /* number of variables in this class */ + Tcl_Size numOptions; /* number of options in this class */ + Tcl_Size unique; /* unique number for #auto generation */ + int flags; /* maintains class status */ + Tcl_Size callRefCount; /* prevent deleting of class if refcount>1 */ + Tcl_Obj *typeConstructorPtr; /* initialization for types */ + int destructorHasBeenCalled; /* prevent multiple invocations of destrcutor */ + Tcl_Size refCount; +} ItclClass; + +typedef struct ItclHierIter { + ItclClass *current; /* current position in hierarchy */ + Itcl_Stack stack; /* stack used for traversal */ +} ItclHierIter; + +#define ITCL_OBJECT_IS_DELETED 0x01 +#define ITCL_OBJECT_IS_DESTRUCTED 0x02 +#define ITCL_OBJECT_IS_DESTROYED 0x04 +#define ITCL_OBJECT_IS_RENAMED 0x08 +#define ITCL_OBJECT_CLASS_DESTRUCTED 0x10 +#define ITCL_TCLOO_OBJECT_IS_DELETED 0x20 +#define ITCL_OBJECT_DESTRUCT_ERROR 0x40 +#define ITCL_OBJECT_SHOULD_VARNS_DELETE 0x80 +#define ITCL_OBJECT_ROOT_METHOD 0x8000 + +/* + * Representation for each [incr Tcl] object. 
+ */ +typedef struct ItclObject { + ItclClass *iclsPtr; /* most-specific class */ + Tcl_Command accessCmd; /* object access command */ + + Tcl_HashTable *constructed; /* temp storage used during construction */ + Tcl_HashTable *destructed; /* temp storage used during destruction */ + Tcl_HashTable objectVariables; + /* used for storing Tcl_Var entries for + * variable resolving, key is ivPtr of + * variable, value is varPtr */ + Tcl_HashTable objectOptions; /* definitions for all option members + in this object. Look up option namePtr + names and get back ItclOption* ptrs */ + Tcl_HashTable objectComponents; /* definitions for all component members + in this object. Look up component namePtr + names and get back ItclComponent* ptrs */ + Tcl_HashTable objectMethodVariables; + /* definitions for all methodvariable members + in this object. Look up methodvariable + namePtr names and get back + ItclMethodVariable* ptrs */ + Tcl_HashTable objectDelegatedOptions; + /* definitions for all delegated option + members in this object. Look up option + namePtr names and get back + ItclOption* ptrs */ + Tcl_HashTable objectDelegatedFunctions; + /* definitions for all delegated function + members in this object. 
Look up function + namePtr names and get back + ItclMemberFunc * ptrs */ + Tcl_HashTable contextCache; /* cache for function contexts */ + Tcl_Obj *namePtr; + Tcl_Obj *origNamePtr; /* the original name before any rename */ + Tcl_Obj *createNamePtr; /* the temp name before any rename + * mostly used for widgetadaptor + * because that hijackes the name + * often when installing the hull */ + Tcl_Interp *interp; + ItclObjectInfo *infoPtr; + Tcl_Obj *varNsNamePtr; + Tcl_Object oPtr; /* the TclOO object */ + Tcl_Resolve *resolvePtr; + int flags; + Tcl_Size callRefCount; /* prevent deleting of object if refcount > 1 */ + Tcl_Obj *hullWindowNamePtr; /* the window path name for the hull + * (before renaming in installhull) */ + int destructorHasBeenCalled; /* is set when the destructor is called + * to avoid callin destructor twice */ + int noComponentTrace; /* don't call component traces if + * setting components in DelegationInstall */ + int hadConstructorError; /* needed for multiple calls of CallItclObjectCmd */ +} ItclObject; + +#define ITCL_IGNORE_ERRS 0x002 /* useful for construction/destruction */ + +typedef struct ItclResolveInfo { + int flags; + ItclClass *iclsPtr; + ItclObject *ioPtr; +} ItclResolveInfo; + +#define ITCL_RESOLVE_CLASS 0x01 +#define ITCL_RESOLVE_OBJECT 0x02 + +/* + * Implementation for any code body in an [incr Tcl] class. 
+ */ +typedef struct ItclMemberCode { + int flags; /* flags describing implementation */ + Tcl_Size argcount; /* number of args in arglist */ + Tcl_Size maxargcount; /* max number of args in arglist */ + Tcl_Obj *usagePtr; /* usage string for error messages */ + Tcl_Obj *argumentPtr; /* the function arguments */ + Tcl_Obj *bodyPtr; /* the function body */ + ItclArgList *argListPtr; /* the parsed arguments */ + union { + Tcl_CmdProc *argCmd; /* (argc,argv) C implementation */ + Tcl_ObjCmdProc *objCmd; /* (objc,objv) C implementation */ + } cfunc; + void *clientData; /* client data for C implementations */ +} ItclMemberCode; + +/* + * Flag bits for ItclMemberCode: + */ +#define ITCL_IMPLEMENT_NONE 0x001 /* no implementation */ +#define ITCL_IMPLEMENT_TCL 0x002 /* Tcl implementation */ +#define ITCL_IMPLEMENT_ARGCMD 0x004 /* (argc,argv) C implementation */ +#define ITCL_IMPLEMENT_OBJCMD 0x008 /* (objc,objv) C implementation */ +#define ITCL_IMPLEMENT_C 0x00c /* either kind of C implementation */ + +#define Itcl_IsMemberCodeImplemented(mcode) \ + (((mcode)->flags & ITCL_IMPLEMENT_NONE) == 0) + +/* + * Flag bits for ItclMember: functions and variables + */ +#define ITCL_COMMON 0x010 /* non-zero => is a "proc" or common + * variable */ + +/* + * Flag bits for ItclMember: functions + */ +#define ITCL_CONSTRUCTOR 0x020 /* non-zero => is a constructor */ +#define ITCL_DESTRUCTOR 0x040 /* non-zero => is a destructor */ +#define ITCL_ARG_SPEC 0x080 /* non-zero => has an argument spec */ +#define ITCL_BODY_SPEC 0x100 /* non-zero => has an body spec */ +#define ITCL_BUILTIN 0x400 /* non-zero => built-in method */ +#define ITCL_COMPONENT 0x800 /* non-zero => component */ +#define ITCL_TYPE_METHOD 0x1000 /* non-zero => typemethod */ +#define ITCL_METHOD 0x2000 /* non-zero => method */ + +/* + * Flag bits for ItclMember: variables + */ +#define ITCL_THIS_VAR 0x20 /* non-zero => built-in "this" variable */ +#define ITCL_OPTIONS_VAR 0x40 /* non-zero => built-in "itcl_options" + * 
variable */ +#define ITCL_TYPE_VAR 0x80 /* non-zero => built-in "type" variable */ + /* no longer used ??? */ +#define ITCL_SELF_VAR 0x100 /* non-zero => built-in "self" variable */ +#define ITCL_SELFNS_VAR 0x200 /* non-zero => built-in "selfns" + * variable */ +#define ITCL_WIN_VAR 0x400 /* non-zero => built-in "win" variable */ +#define ITCL_COMPONENT_VAR 0x800 /* non-zero => component variable */ +#define ITCL_HULL_VAR 0x1000 /* non-zero => built-in "itcl_hull" + * variable */ +#define ITCL_OPTION_READONLY 0x2000 /* non-zero => readonly */ +#define ITCL_VARIABLE 0x4000 /* non-zero => normal variable */ +#define ITCL_TYPE_VARIABLE 0x8000 /* non-zero => typevariable */ +#define ITCL_OPTION_INITTED 0x10000 /* non-zero => option has been initialized */ +#define ITCL_OPTION_COMP_VAR 0x20000 /* variable to collect option components of extendedclass */ + +/* + * Instance components. + */ +struct ItclVariable; +typedef struct ItclComponent { + Tcl_Obj *namePtr; /* member name */ + struct ItclVariable *ivPtr; /* variable for this component */ + int flags; + int haveKeptOptions; + Tcl_HashTable keptOptions; /* table of options to keep */ +} ItclComponent; + +#define ITCL_COMPONENT_INHERIT 0x01 +#define ITCL_COMPONENT_PUBLIC 0x02 + +typedef struct ItclDelegatedFunction { + Tcl_Obj *namePtr; + ItclComponent *icPtr; + Tcl_Obj *asPtr; + Tcl_Obj *usingPtr; + Tcl_HashTable exceptions; + int flags; +} ItclDelegatedFunction; + +/* + * Representation of member functions in an [incr Tcl] class. 
+ */ +typedef struct ItclMemberFunc { + Tcl_Obj* namePtr; /* member name */ + Tcl_Obj* fullNamePtr; /* member name with "class::" qualifier */ + ItclClass* iclsPtr; /* class containing this member */ + int protection; /* protection level */ + int flags; /* flags describing member (see above) */ + ItclObjectInfo *infoPtr; + ItclMemberCode *codePtr; /* code associated with member */ + Tcl_Command accessCmd; /* Tcl command installed for this function */ + Tcl_Size argcount; /* number of args in arglist */ + Tcl_Size maxargcount; /* max number of args in arglist */ + Tcl_Obj *usagePtr; /* usage string for error messages */ + Tcl_Obj *argumentPtr; /* the function arguments */ + Tcl_Obj *builtinArgumentPtr; /* the function arguments for builtin functions */ + Tcl_Obj *origArgsPtr; /* the argument string of the original definition */ + Tcl_Obj *bodyPtr; /* the function body */ + ItclArgList *argListPtr; /* the parsed arguments */ + ItclClass *declaringClassPtr; /* the class which declared the method/proc */ + void *tmPtr; /* TclOO methodPtr */ + ItclDelegatedFunction *idmPtr; + /* if the function is delegated != NULL */ +} ItclMemberFunc; + +/* + * Instance variables. 
+ */ +typedef struct ItclVariable { + Tcl_Obj *namePtr; /* member name */ + Tcl_Obj *fullNamePtr; /* member name with "class::" qualifier */ + ItclClass *iclsPtr; /* class containing this member */ + ItclObjectInfo *infoPtr; + ItclMemberCode *codePtr; /* code associated with member */ + Tcl_Obj *init; /* initial value */ + Tcl_Obj *arrayInitPtr; /* initial value if variable should be array */ + int protection; /* protection level */ + int flags; /* flags describing member (see below) */ + int initted; /* is set when first time initted, to check + * for example itcl_hull var, which can be only + * initialized once */ +} ItclVariable; + + +struct ItclOption; + +typedef struct ItclDelegatedOption { + Tcl_Obj *namePtr; + Tcl_Obj *resourceNamePtr; + Tcl_Obj *classNamePtr; + struct ItclOption *ioptPtr; /* the option name or null for "*" */ + ItclComponent *icPtr; /* the component where the delegation goes + * to */ + Tcl_Obj *asPtr; + Tcl_HashTable exceptions; /* exceptions from delegation */ +} ItclDelegatedOption; + +/* + * Instance options. + */ +typedef struct ItclOption { + /* within a class hierarchy there must be only + * one option with the same name !! */ + Tcl_Obj *namePtr; /* member name */ + Tcl_Obj *fullNamePtr; /* member name with "class::" qualifier */ + Tcl_Obj *resourceNamePtr; + Tcl_Obj *classNamePtr; + ItclClass *iclsPtr; /* class containing this member */ + int protection; /* protection level */ + int flags; /* flags describing member (see below) */ + ItclMemberCode *codePtr; /* code associated with member */ + Tcl_Obj *defaultValuePtr; /* initial value */ + Tcl_Obj *cgetMethodPtr; + Tcl_Obj *cgetMethodVarPtr; + Tcl_Obj *configureMethodPtr; + Tcl_Obj *configureMethodVarPtr; + Tcl_Obj *validateMethodPtr; + Tcl_Obj *validateMethodVarPtr; + ItclDelegatedOption *idoPtr; + /* if the option is delegated != NULL */ +} ItclOption; + +/* + * Instance methodvariables. 
+ */ +typedef struct ItclMethodVariable { + Tcl_Obj *namePtr; /* member name */ + Tcl_Obj *fullNamePtr; /* member name with "class::" qualifier */ + ItclClass *iclsPtr; /* class containing this member */ + int protection; /* protection level */ + int flags; /* flags describing member (see below) */ + Tcl_Obj *defaultValuePtr; + Tcl_Obj *callbackPtr; +} ItclMethodVariable; + +#define VAR_TYPE_VARIABLE 1 +#define VAR_TYPE_COMMON 2 + +#define CMD_TYPE_METHOD 1 +#define CMD_TYPE_PROC 2 + +typedef struct ItclClassCmdInfo { + int type; + int protection; +#if TCL_MAJOR_VERSION == 8 + int cmdNum; /* not actually used */ +#endif + Tcl_Namespace *nsPtr; + Tcl_Namespace *declaringNsPtr; +} ItclClassCmdInfo; + +/* + * Instance variable lookup entry. + */ +typedef struct ItclVarLookup { + ItclVariable* ivPtr; /* variable definition */ + int usage; /* number of uses for this record */ + int accessible; /* non-zero => accessible from class with + * this lookup record in its resolveVars */ + char *leastQualName; /* simplist name for this variable, with + * the fewest qualifiers. This string is + * taken from the resolveVars table, so + * it shouldn't be freed. */ + Tcl_Size varNum; + Tcl_Var varPtr; +} ItclVarLookup; + +/* + * Instance command lookup entry. + */ +typedef struct ItclCmdLookup { + ItclMemberFunc* imPtr; /* function definition */ +#if TCL_MAJOR_VERSION == 8 + int cmdNum; /* not actually used */ +#endif + ItclClassCmdInfo *classCmdInfoPtr; + Tcl_Command cmdPtr; +} ItclCmdLookup; + +typedef struct ItclCallContext { + int objectFlags; + Tcl_Namespace *nsPtr; + ItclObject *ioPtr; + ItclMemberFunc *imPtr; + Tcl_Size refCount; +} ItclCallContext; + +/* + * The macro below is used to modify a "char" value (e.g. by casting + * it to an unsigned character) so that it can be used safely with + * macros such as isspace. + */ + +#define UCHAR(c) ((unsigned char) (c)) +/* + * Macros used to cast between pointers and integers (e.g. 
when storing an int + * in ClientData), on 64-bit architectures they avoid gcc warning about "cast + * to/from pointer from/to integer of different size". + */ + +#if !defined(INT2PTR) +# define INT2PTR(p) ((void *)(ptrdiff_t)(p)) +#endif +#if !defined(PTR2INT) +# define PTR2INT(p) ((ptrdiff_t)(p)) +#endif + +#ifdef ITCL_DEBUG +MODULE_SCOPE int _itcl_debug_level; +MODULE_SCOPE void ItclShowArgs(int level, const char *str, size_t objc, + Tcl_Obj *const *objv); +#else +#define ItclShowArgs(a,b,c,d) do {(void)(c);(void)(d);} while(0) +#endif + +MODULE_SCOPE Tcl_ObjCmdProc ItclCallCCommand; +MODULE_SCOPE Tcl_ObjCmdProc ItclObjectUnknownCommand; +MODULE_SCOPE int ItclCheckCallProc(void *clientData, Tcl_Interp *interp, + Tcl_ObjectContext contextPtr, Tcl_CallFrame *framePtr, int *isFinished); + +MODULE_SCOPE void ItclPreserveClass(ItclClass *iclsPtr); +MODULE_SCOPE void ItclReleaseClass(void *iclsPtr); + +MODULE_SCOPE ItclFoundation *ItclGetFoundation(Tcl_Interp *interp); +MODULE_SCOPE Tcl_ObjCmdProc ItclClassCommandDispatcher; +MODULE_SCOPE Tcl_Command Itcl_CmdAliasProc(Tcl_Interp *interp, + Tcl_Namespace *nsPtr, const char *cmdName, void *clientData); +MODULE_SCOPE Tcl_Var Itcl_VarAliasProc(Tcl_Interp *interp, + Tcl_Namespace *nsPtr, const char *VarName, void *clientData); +MODULE_SCOPE int ItclIsClass(Tcl_Interp *interp, Tcl_Command cmd); +MODULE_SCOPE int ItclCheckCallMethod(void *clientData, Tcl_Interp *interp, + Tcl_ObjectContext contextPtr, Tcl_CallFrame *framePtr, int *isFinished); +MODULE_SCOPE int ItclAfterCallMethod(void *clientData, Tcl_Interp *interp, + Tcl_ObjectContext contextPtr, Tcl_Namespace *nsPtr, int result); +MODULE_SCOPE void ItclReportObjectUsage(Tcl_Interp *interp, + ItclObject *contextIoPtr, Tcl_Namespace *callerNsPtr, + Tcl_Namespace *contextNsPtr); +MODULE_SCOPE int ItclMapMethodNameProc(Tcl_Interp *interp, Tcl_Object oPtr, + Tcl_Class *startClsPtr, Tcl_Obj *methodObj); +MODULE_SCOPE int ItclCreateArgList(Tcl_Interp *interp, const char *str, + 
Tcl_Size *argcPtr, Tcl_Size *maxArgcPtr, Tcl_Obj **usagePtr, + ItclArgList **arglistPtrPtr, ItclMemberFunc *imPtr, + const char *commandName); +MODULE_SCOPE int ItclObjectCmd(void *clientData, Tcl_Interp *interp, + Tcl_Object oPtr, Tcl_Class clsPtr, size_t objc, Tcl_Obj *const *objv); +MODULE_SCOPE int ItclCreateObject (Tcl_Interp *interp, const char* name, + ItclClass *iclsPtr, size_t objc, Tcl_Obj *const objv[]); +MODULE_SCOPE void ItclDeleteObjectVariablesNamespace(Tcl_Interp *interp, + ItclObject *ioPtr); +MODULE_SCOPE void ItclDeleteClassVariablesNamespace(Tcl_Interp *interp, + ItclClass *iclsPtr); +MODULE_SCOPE int ItclInfoInit(Tcl_Interp *interp, ItclObjectInfo *infoPtr); + +MODULE_SCOPE Tcl_HashEntry *ItclResolveVarEntry( + ItclClass* iclsPtr, const char *varName); + +struct Tcl_ResolvedVarInfo; +MODULE_SCOPE int Itcl_ClassCmdResolver(Tcl_Interp *interp, const char* name, + Tcl_Namespace *nsPtr, int flags, Tcl_Command *rPtr); +MODULE_SCOPE int Itcl_ClassVarResolver(Tcl_Interp *interp, const char* name, + Tcl_Namespace *nsPtr, int flags, Tcl_Var *rPtr); +MODULE_SCOPE int Itcl_ClassCompiledVarResolver(Tcl_Interp *interp, + const char* name, Tcl_Size length, Tcl_Namespace *nsPtr, + struct Tcl_ResolvedVarInfo **rPtr); +MODULE_SCOPE int Itcl_ClassCmdResolver2(Tcl_Interp *interp, const char* name, + Tcl_Namespace *nsPtr, int flags, Tcl_Command *rPtr); +MODULE_SCOPE int Itcl_ClassVarResolver2(Tcl_Interp *interp, const char* name, + Tcl_Namespace *nsPtr, int flags, Tcl_Var *rPtr); +MODULE_SCOPE int ItclSetParserResolver(Tcl_Namespace *nsPtr); +MODULE_SCOPE void ItclProcErrorProc(Tcl_Interp *interp, Tcl_Obj *procNameObj); +MODULE_SCOPE int Itcl_CreateOption (Tcl_Interp *interp, ItclClass *iclsPtr, + ItclOption *ioptPtr); +MODULE_SCOPE int ItclCreateMethodVariable(Tcl_Interp *interp, + ItclVariable *ivPtr, Tcl_Obj* defaultPtr, Tcl_Obj* callbackPtr, + ItclMethodVariable** imvPtrPtr); +MODULE_SCOPE int DelegationInstall(Tcl_Interp *interp, ItclObject *ioPtr, + 
ItclClass *iclsPtr); +MODULE_SCOPE ItclClass *ItclNamespace2Class(Tcl_Namespace *nsPtr); +MODULE_SCOPE const char* ItclGetCommonInstanceVar(Tcl_Interp *interp, + const char *name, const char *name2, ItclObject *contextIoPtr, + ItclClass *contextIclsPtr); +MODULE_SCOPE int ItclCreateMethod(Tcl_Interp* interp, ItclClass *iclsPtr, + Tcl_Obj *namePtr, const char* arglist, const char* body, + ItclMemberFunc **imPtrPtr); +MODULE_SCOPE int Itcl_WidgetParseInit(Tcl_Interp *interp, + ItclObjectInfo *infoPtr); +MODULE_SCOPE void ItclDeleteObjectMetadata(void *clientData); +MODULE_SCOPE void ItclDeleteClassMetadata(void *clientData); +MODULE_SCOPE void ItclDeleteArgList(ItclArgList *arglistPtr); +MODULE_SCOPE int Itcl_ClassOptionCmd(void *clientData, Tcl_Interp *interp, + int objc, Tcl_Obj *const objv[]); +MODULE_SCOPE int DelegatedOptionsInstall(Tcl_Interp *interp, + ItclClass *iclsPtr); +MODULE_SCOPE int Itcl_HandleDelegateOptionCmd(Tcl_Interp *interp, + ItclObject *ioPtr, ItclClass *iclsPtr, ItclDelegatedOption **idoPtrPtr, + Tcl_Size objc, Tcl_Obj *const objv[]); +MODULE_SCOPE int Itcl_HandleDelegateMethodCmd(Tcl_Interp *interp, + ItclObject *ioPtr, ItclClass *iclsPtr, + ItclDelegatedFunction **idmPtrPtr, Tcl_Size objc, Tcl_Obj *const objv[]); +MODULE_SCOPE int DelegateFunction(Tcl_Interp *interp, ItclObject *ioPtr, + ItclClass *iclsPtr, Tcl_Obj *componentNamePtr, + ItclDelegatedFunction *idmPtr); +MODULE_SCOPE int ItclInitObjectMethodVariables(Tcl_Interp *interp, + ItclObject *ioPtr, ItclClass *iclsPtr, const char *name); +MODULE_SCOPE int InitTclOOFunctionPointers(Tcl_Interp *interp); +MODULE_SCOPE ItclOption* ItclNewOption(Tcl_Interp *interp, ItclObject *ioPtr, + ItclClass *iclsPtr, Tcl_Obj *namePtr, const char *resourceName, + const char *className, char *init, ItclMemberCode *mCodePtr); +MODULE_SCOPE int ItclParseOption(ItclObjectInfo *infoPtr, Tcl_Interp *interp, + size_t objc, Tcl_Obj *const objv[], ItclClass *iclsPtr, + ItclObject *ioPtr, ItclOption **ioptPtrPtr); 
+MODULE_SCOPE void ItclDestroyClassNamesp(void *cdata); +MODULE_SCOPE int ExpandDelegateAs(Tcl_Interp *interp, ItclObject *ioPtr, + ItclClass *iclsPtr, ItclDelegatedFunction *idmPtr, + const char *funcName, Tcl_Obj *listPtr); +MODULE_SCOPE int ItclCheckForInitializedComponents(Tcl_Interp *interp, + ItclClass *iclsPtr, ItclObject *ioPtr); +MODULE_SCOPE int ItclCreateDelegatedFunction(Tcl_Interp *interp, + ItclClass *iclsPtr, Tcl_Obj *methodNamePtr, ItclComponent *icPtr, + Tcl_Obj *targetPtr, Tcl_Obj *usingPtr, Tcl_Obj *exceptionsPtr, + ItclDelegatedFunction **idmPtrPtr); +MODULE_SCOPE void ItclDeleteDelegatedOption(char *cdata); +MODULE_SCOPE void Itcl_FinishList(); +MODULE_SCOPE void ItclDeleteDelegatedFunction(ItclDelegatedFunction *idmPtr); +MODULE_SCOPE void ItclFinishEnsemble(ItclObjectInfo *infoPtr); +MODULE_SCOPE int Itcl_EnsembleDeleteCmd(void *clientData, + Tcl_Interp *interp, int objc, Tcl_Obj *const objv[]); +MODULE_SCOPE int ItclAddClassesDictInfo(Tcl_Interp *interp, ItclClass *iclsPtr); +MODULE_SCOPE int ItclDeleteClassesDictInfo(Tcl_Interp *interp, + ItclClass *iclsPtr); +MODULE_SCOPE int ItclAddObjectsDictInfo(Tcl_Interp *interp, ItclObject *ioPtr); +MODULE_SCOPE int ItclDeleteObjectsDictInfo(Tcl_Interp *interp, + ItclObject *ioPtr); +MODULE_SCOPE int ItclAddOptionDictInfo(Tcl_Interp *interp, ItclClass *iclsPtr, + ItclOption *ioptPtr); +MODULE_SCOPE int ItclAddDelegatedOptionDictInfo(Tcl_Interp *interp, + ItclClass *iclsPtr, ItclDelegatedOption *idoPtr); +MODULE_SCOPE int ItclAddClassComponentDictInfo(Tcl_Interp *interp, + ItclClass *iclsPtr, ItclComponent *icPtr); +MODULE_SCOPE int ItclAddClassVariableDictInfo(Tcl_Interp *interp, + ItclClass *iclsPtr, ItclVariable *ivPtr); +MODULE_SCOPE int ItclAddClassFunctionDictInfo(Tcl_Interp *interp, + ItclClass *iclsPtr, ItclMemberFunc *imPtr); +MODULE_SCOPE int ItclAddClassDelegatedFunctionDictInfo(Tcl_Interp *interp, + ItclClass *iclsPtr, ItclDelegatedFunction *idmPtr); +MODULE_SCOPE int 
ItclClassCreateObject(void *clientData, Tcl_Interp *interp, + Tcl_Size objc, Tcl_Obj *const objv[]); + +MODULE_SCOPE void ItclRestoreInfoVars(void *clientData); + +MODULE_SCOPE Tcl_ObjCmdProc Itcl_BiMyProcCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_BiInstallComponentCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_BiCallInstanceCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_BiGetInstanceVarCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_BiMyTypeMethodCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_BiMyMethodCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_BiMyTypeVarCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_BiMyVarCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_BiItclHullCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_ThisCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_ExtendedClassCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_TypeClassCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_AddObjectOptionCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_AddDelegatedOptionCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_AddDelegatedFunctionCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_SetComponentCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_ClassHullTypeCmd; +MODULE_SCOPE Tcl_ObjCmdProc Itcl_ClassWidgetClassCmd; + +typedef int (ItclRootMethodProc)(ItclObject *ioPtr, Tcl_Interp *interp, + int objc, Tcl_Obj *const objv[]); + +MODULE_SCOPE const Tcl_MethodType itclRootMethodType; +MODULE_SCOPE ItclRootMethodProc ItclUnknownGuts; +MODULE_SCOPE ItclRootMethodProc ItclConstructGuts; +MODULE_SCOPE ItclRootMethodProc ItclInfoGuts; + +#include "itcl2TclOO.h" + +/* + * Include all the private API, generated from itcl.decls. + */ + +#include "itclIntDecls.h" diff --git a/llava_next/include/sqlite3.h b/llava_next/include/sqlite3.h new file mode 100644 index 0000000000000000000000000000000000000000..2618b37a7b89df1e8b214419b82f348c49b2acff --- /dev/null +++ b/llava_next/include/sqlite3.h @@ -0,0 +1,13374 @@ +/* +** 2001-09-15 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. 
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+*************************************************************************
+** This header file defines the interface that the SQLite library
+** presents to client programs.  If a C-function, structure, datatype,
+** or constant definition does not appear in this file, then it is
+** not a published API of SQLite, is subject to change without
+** notice, and should not be referenced by programs that use SQLite.
+**
+** Some of the definitions that are in this file are marked as
+** "experimental".  Experimental interfaces are normally new
+** features recently added to SQLite.  We do not anticipate changes
+** to experimental interfaces but reserve the right to make minor changes
+** if experience from use "in the wild" suggest such changes are prudent.
+**
+** The official C-language API documentation for SQLite is derived
+** from comments in this file.  This file is the authoritative source
+** on how SQLite interfaces are supposed to operate.
+**
+** The name of this file under configuration management is "sqlite.h.in".
+** The makefile makes some minor changes to this file (such as inserting
+** the version number) and changes its name to "sqlite3.h" as
+** part of the build process.
+*/
+#ifndef SQLITE3_H
+#define SQLITE3_H
+#include <stdarg.h>     /* Needed for the definition of va_list */
+
+/*
+** Make sure we can call this stuff from C++.
+*/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+** Facilitate override of interface linkage and calling conventions.
+** Be aware that these macros may not be used within this particular
+** translation of the amalgamation and its associated header file.
+**
+** The SQLITE_EXTERN and SQLITE_API macros are used to instruct the
+** compiler that the target identifier should have external linkage.
+** +** The SQLITE_CDECL macro is used to set the calling convention for +** public functions that accept a variable number of arguments. +** +** The SQLITE_APICALL macro is used to set the calling convention for +** public functions that accept a fixed number of arguments. +** +** The SQLITE_STDCALL macro is no longer used and is now deprecated. +** +** The SQLITE_CALLBACK macro is used to set the calling convention for +** function pointers. +** +** The SQLITE_SYSAPI macro is used to set the calling convention for +** functions provided by the operating system. +** +** Currently, the SQLITE_CDECL, SQLITE_APICALL, SQLITE_CALLBACK, and +** SQLITE_SYSAPI macros are used only when building for environments +** that require non-default calling conventions. +*/ +#ifndef SQLITE_EXTERN +# define SQLITE_EXTERN extern +#endif +#ifndef SQLITE_API +# define SQLITE_API +#endif +#ifndef SQLITE_CDECL +# define SQLITE_CDECL +#endif +#ifndef SQLITE_APICALL +# define SQLITE_APICALL +#endif +#ifndef SQLITE_STDCALL +# define SQLITE_STDCALL SQLITE_APICALL +#endif +#ifndef SQLITE_CALLBACK +# define SQLITE_CALLBACK +#endif +#ifndef SQLITE_SYSAPI +# define SQLITE_SYSAPI +#endif + +/* +** These no-op macros are used in front of interfaces to mark those +** interfaces as either deprecated or experimental. New applications +** should not use deprecated interfaces - they are supported for backwards +** compatibility only. Application writers should be aware that +** experimental interfaces are subject to change in point releases. +** +** These macros used to resolve to various kinds of compiler magic that +** would generate warning messages when they were used. But that +** compiler magic ended up generating such a flurry of bug reports +** that we have taken it all out and gone back to using simple +** noop macros. +*/ +#define SQLITE_DEPRECATED +#define SQLITE_EXPERIMENTAL + +/* +** Ensure these symbols were not defined by some previous header file. 
+*/ +#ifdef SQLITE_VERSION +# undef SQLITE_VERSION +#endif +#ifdef SQLITE_VERSION_NUMBER +# undef SQLITE_VERSION_NUMBER +#endif + +/* +** CAPI3REF: Compile-Time Library Version Numbers +** +** ^(The [SQLITE_VERSION] C preprocessor macro in the sqlite3.h header +** evaluates to a string literal that is the SQLite version in the +** format "X.Y.Z" where X is the major version number (always 3 for +** SQLite3) and Y is the minor version number and Z is the release number.)^ +** ^(The [SQLITE_VERSION_NUMBER] C preprocessor macro resolves to an integer +** with the value (X*1000000 + Y*1000 + Z) where X, Y, and Z are the same +** numbers used in [SQLITE_VERSION].)^ +** The SQLITE_VERSION_NUMBER for any given release of SQLite will also +** be larger than the release from which it is derived. Either Y will +** be held constant and Z will be incremented or else Y will be incremented +** and Z will be reset to zero. +** +** Since [version 3.6.18] ([dateof:3.6.18]), +** SQLite source code has been stored in the +** Fossil configuration management +** system. ^The SQLITE_SOURCE_ID macro evaluates to +** a string which identifies a particular check-in of SQLite +** within its configuration management system. ^The SQLITE_SOURCE_ID +** string contains the date and time of the check-in (UTC) and a SHA1 +** or SHA3-256 hash of the entire source tree. If the source code has +** been edited in any way since it was last checked in, then the last +** four hexadecimal digits of the hash may be modified. +** +** See also: [sqlite3_libversion()], +** [sqlite3_libversion_number()], [sqlite3_sourceid()], +** [sqlite_version()] and [sqlite_source_id()]. 
+*/ +#define SQLITE_VERSION "3.45.3" +#define SQLITE_VERSION_NUMBER 3045003 +#define SQLITE_SOURCE_ID "2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1e8355" + +/* +** CAPI3REF: Run-Time Library Version Numbers +** KEYWORDS: sqlite3_version sqlite3_sourceid +** +** These interfaces provide the same information as the [SQLITE_VERSION], +** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros +** but are associated with the library instead of the header file. ^(Cautious +** programmers might include assert() statements in their application to +** verify that values returned by these interfaces match the macros in +** the header, and thus ensure that the application is +** compiled with matching library and header files. +** +**
+** assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
+** assert( strncmp(sqlite3_sourceid(),SQLITE_SOURCE_ID,80)==0 );
+** assert( strcmp(sqlite3_libversion(),SQLITE_VERSION)==0 );
+** 
)^ +** +** ^The sqlite3_version[] string constant contains the text of [SQLITE_VERSION] +** macro. ^The sqlite3_libversion() function returns a pointer to the +** to the sqlite3_version[] string constant. The sqlite3_libversion() +** function is provided for use in DLLs since DLL users usually do not have +** direct access to string constants within the DLL. ^The +** sqlite3_libversion_number() function returns an integer equal to +** [SQLITE_VERSION_NUMBER]. ^(The sqlite3_sourceid() function returns +** a pointer to a string constant whose value is the same as the +** [SQLITE_SOURCE_ID] C preprocessor macro. Except if SQLite is built +** using an edited copy of [the amalgamation], then the last four characters +** of the hash might be different from [SQLITE_SOURCE_ID].)^ +** +** See also: [sqlite_version()] and [sqlite_source_id()]. +*/ +SQLITE_API SQLITE_EXTERN const char sqlite3_version[]; +SQLITE_API const char *sqlite3_libversion(void); +SQLITE_API const char *sqlite3_sourceid(void); +SQLITE_API int sqlite3_libversion_number(void); + +/* +** CAPI3REF: Run-Time Library Compilation Options Diagnostics +** +** ^The sqlite3_compileoption_used() function returns 0 or 1 +** indicating whether the specified option was defined at +** compile time. ^The SQLITE_ prefix may be omitted from the +** option name passed to sqlite3_compileoption_used(). +** +** ^The sqlite3_compileoption_get() function allows iterating +** over the list of options that were defined at compile time by +** returning the N-th compile time option string. ^If N is out of range, +** sqlite3_compileoption_get() returns a NULL pointer. ^The SQLITE_ +** prefix is omitted from any strings returned by +** sqlite3_compileoption_get(). +** +** ^Support for the diagnostic functions sqlite3_compileoption_used() +** and sqlite3_compileoption_get() may be omitted by specifying the +** [SQLITE_OMIT_COMPILEOPTION_DIAGS] option at compile time. 
+** +** See also: SQL functions [sqlite_compileoption_used()] and +** [sqlite_compileoption_get()] and the [compile_options pragma]. +*/ +#ifndef SQLITE_OMIT_COMPILEOPTION_DIAGS +SQLITE_API int sqlite3_compileoption_used(const char *zOptName); +SQLITE_API const char *sqlite3_compileoption_get(int N); +#else +# define sqlite3_compileoption_used(X) 0 +# define sqlite3_compileoption_get(X) ((void*)0) +#endif + +/* +** CAPI3REF: Test To See If The Library Is Threadsafe +** +** ^The sqlite3_threadsafe() function returns zero if and only if +** SQLite was compiled with mutexing code omitted due to the +** [SQLITE_THREADSAFE] compile-time option being set to 0. +** +** SQLite can be compiled with or without mutexes. When +** the [SQLITE_THREADSAFE] C preprocessor macro is 1 or 2, mutexes +** are enabled and SQLite is threadsafe. When the +** [SQLITE_THREADSAFE] macro is 0, +** the mutexes are omitted. Without the mutexes, it is not safe +** to use SQLite concurrently from more than one thread. +** +** Enabling mutexes incurs a measurable performance penalty. +** So if speed is of utmost importance, it makes sense to disable +** the mutexes. But for maximum safety, mutexes should be enabled. +** ^The default behavior is for mutexes to be enabled. +** +** This interface can be used by an application to make sure that the +** version of SQLite that it is linking against was compiled with +** the desired setting of the [SQLITE_THREADSAFE] macro. +** +** This interface only reports on the compile-time mutex setting +** of the [SQLITE_THREADSAFE] flag. If SQLite is compiled with +** SQLITE_THREADSAFE=1 or =2 then mutexes are enabled by default but +** can be fully or partially disabled using a call to [sqlite3_config()] +** with the verbs [SQLITE_CONFIG_SINGLETHREAD], [SQLITE_CONFIG_MULTITHREAD], +** or [SQLITE_CONFIG_SERIALIZED]. 
^(The return value of the +** sqlite3_threadsafe() function shows only the compile-time setting of +** thread safety, not any run-time changes to that setting made by +** sqlite3_config(). In other words, the return value from sqlite3_threadsafe() +** is unchanged by calls to sqlite3_config().)^ +** +** See the [threading mode] documentation for additional information. +*/ +SQLITE_API int sqlite3_threadsafe(void); + +/* +** CAPI3REF: Database Connection Handle +** KEYWORDS: {database connection} {database connections} +** +** Each open SQLite database is represented by a pointer to an instance of +** the opaque structure named "sqlite3". It is useful to think of an sqlite3 +** pointer as an object. The [sqlite3_open()], [sqlite3_open16()], and +** [sqlite3_open_v2()] interfaces are its constructors, and [sqlite3_close()] +** and [sqlite3_close_v2()] are its destructors. There are many other +** interfaces (such as +** [sqlite3_prepare_v2()], [sqlite3_create_function()], and +** [sqlite3_busy_timeout()] to name but three) that are methods on an +** sqlite3 object. +*/ +typedef struct sqlite3 sqlite3; + +/* +** CAPI3REF: 64-Bit Integer Types +** KEYWORDS: sqlite_int64 sqlite_uint64 +** +** Because there is no cross-platform way to specify 64-bit integer types +** SQLite includes typedefs for 64-bit signed and unsigned integers. +** +** The sqlite3_int64 and sqlite3_uint64 are the preferred type definitions. +** The sqlite_int64 and sqlite_uint64 types are supported for backwards +** compatibility only. +** +** ^The sqlite3_int64 and sqlite_int64 types can store integer values +** between -9223372036854775808 and +9223372036854775807 inclusive. ^The +** sqlite3_uint64 and sqlite_uint64 types can store integer values +** between 0 and +18446744073709551615 inclusive. 
+*/ +#ifdef SQLITE_INT64_TYPE + typedef SQLITE_INT64_TYPE sqlite_int64; +# ifdef SQLITE_UINT64_TYPE + typedef SQLITE_UINT64_TYPE sqlite_uint64; +# else + typedef unsigned SQLITE_INT64_TYPE sqlite_uint64; +# endif +#elif defined(_MSC_VER) || defined(__BORLANDC__) + typedef __int64 sqlite_int64; + typedef unsigned __int64 sqlite_uint64; +#else + typedef long long int sqlite_int64; + typedef unsigned long long int sqlite_uint64; +#endif +typedef sqlite_int64 sqlite3_int64; +typedef sqlite_uint64 sqlite3_uint64; + +/* +** If compiling for a processor that lacks floating point support, +** substitute integer for floating-point. +*/ +#ifdef SQLITE_OMIT_FLOATING_POINT +# define double sqlite3_int64 +#endif + +/* +** CAPI3REF: Closing A Database Connection +** DESTRUCTOR: sqlite3 +** +** ^The sqlite3_close() and sqlite3_close_v2() routines are destructors +** for the [sqlite3] object. +** ^Calls to sqlite3_close() and sqlite3_close_v2() return [SQLITE_OK] if +** the [sqlite3] object is successfully destroyed and all associated +** resources are deallocated. +** +** Ideally, applications should [sqlite3_finalize | finalize] all +** [prepared statements], [sqlite3_blob_close | close] all [BLOB handles], and +** [sqlite3_backup_finish | finish] all [sqlite3_backup] objects associated +** with the [sqlite3] object prior to attempting to close the object. +** ^If the database connection is associated with unfinalized prepared +** statements, BLOB handlers, and/or unfinished sqlite3_backup objects then +** sqlite3_close() will leave the database connection open and return +** [SQLITE_BUSY]. 
^If sqlite3_close_v2() is called with unfinalized prepared +** statements, unclosed BLOB handlers, and/or unfinished sqlite3_backups, +** it returns [SQLITE_OK] regardless, but instead of deallocating the database +** connection immediately, it marks the database connection as an unusable +** "zombie" and makes arrangements to automatically deallocate the database +** connection after all prepared statements are finalized, all BLOB handles +** are closed, and all backups have finished. The sqlite3_close_v2() interface +** is intended for use with host languages that are garbage collected, and +** where the order in which destructors are called is arbitrary. +** +** ^If an [sqlite3] object is destroyed while a transaction is open, +** the transaction is automatically rolled back. +** +** The C parameter to [sqlite3_close(C)] and [sqlite3_close_v2(C)] +** must be either a NULL +** pointer or an [sqlite3] object pointer obtained +** from [sqlite3_open()], [sqlite3_open16()], or +** [sqlite3_open_v2()], and not previously closed. +** ^Calling sqlite3_close() or sqlite3_close_v2() with a NULL pointer +** argument is a harmless no-op. +*/ +SQLITE_API int sqlite3_close(sqlite3*); +SQLITE_API int sqlite3_close_v2(sqlite3*); + +/* +** The type for a callback function. +** This is legacy and deprecated. It is included for historical +** compatibility and is not documented. +*/ +typedef int (*sqlite3_callback)(void*,int,char**, char**); + +/* +** CAPI3REF: One-Step Query Execution Interface +** METHOD: sqlite3 +** +** The sqlite3_exec() interface is a convenience wrapper around +** [sqlite3_prepare_v2()], [sqlite3_step()], and [sqlite3_finalize()], +** that allows an application to run multiple statements of SQL +** without having to use a lot of C code. +** +** ^The sqlite3_exec() interface runs zero or more UTF-8 encoded, +** semicolon-separate SQL statements passed into its 2nd argument, +** in the context of the [database connection] passed in as its 1st +** argument. 
^If the callback function of the 3rd argument to +** sqlite3_exec() is not NULL, then it is invoked for each result row +** coming out of the evaluated SQL statements. ^The 4th argument to +** sqlite3_exec() is relayed through to the 1st argument of each +** callback invocation. ^If the callback pointer to sqlite3_exec() +** is NULL, then no callback is ever invoked and result rows are +** ignored. +** +** ^If an error occurs while evaluating the SQL statements passed into +** sqlite3_exec(), then execution of the current statement stops and +** subsequent statements are skipped. ^If the 5th parameter to sqlite3_exec() +** is not NULL then any error message is written into memory obtained +** from [sqlite3_malloc()] and passed back through the 5th parameter. +** To avoid memory leaks, the application should invoke [sqlite3_free()] +** on error message strings returned through the 5th parameter of +** sqlite3_exec() after the error message string is no longer needed. +** ^If the 5th parameter to sqlite3_exec() is not NULL and no errors +** occur, then sqlite3_exec() sets the pointer in its 5th parameter to +** NULL before returning. +** +** ^If an sqlite3_exec() callback returns non-zero, the sqlite3_exec() +** routine returns SQLITE_ABORT without invoking the callback again and +** without running any subsequent SQL statements. +** +** ^The 2nd argument to the sqlite3_exec() callback function is the +** number of columns in the result. ^The 3rd argument to the sqlite3_exec() +** callback is an array of pointers to strings obtained as if from +** [sqlite3_column_text()], one for each column. ^If an element of a +** result row is NULL then the corresponding string pointer for the +** sqlite3_exec() callback is a NULL pointer. ^The 4th argument to the +** sqlite3_exec() callback is an array of pointers to strings where each +** entry represents the name of corresponding result column as obtained +** from [sqlite3_column_name()]. 
+** +** ^If the 2nd parameter to sqlite3_exec() is a NULL pointer, a pointer +** to an empty string, or a pointer that contains only whitespace and/or +** SQL comments, then no SQL statements are evaluated and the database +** is not changed. +** +** Restrictions: +** +**
    +**
  • The application must ensure that the 1st parameter to sqlite3_exec() +** is a valid and open [database connection]. +**
  • The application must not close the [database connection] specified by +** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running. +**
  • The application must not modify the SQL statement text passed into +** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running. +**
  • The application must not dereference the arrays or string pointers +** passed as the 3rd and 4th callback parameters after it returns. +**
+*/ +SQLITE_API int sqlite3_exec( + sqlite3*, /* An open database */ + const char *sql, /* SQL to be evaluated */ + int (*callback)(void*,int,char**,char**), /* Callback function */ + void *, /* 1st argument to callback */ + char **errmsg /* Error msg written here */ +); + +/* +** CAPI3REF: Result Codes +** KEYWORDS: {result code definitions} +** +** Many SQLite functions return an integer result code from the set shown +** here in order to indicate success or failure. +** +** New error codes may be added in future versions of SQLite. +** +** See also: [extended result code definitions] +*/ +#define SQLITE_OK 0 /* Successful result */ +/* beginning-of-error-codes */ +#define SQLITE_ERROR 1 /* Generic error */ +#define SQLITE_INTERNAL 2 /* Internal logic error in SQLite */ +#define SQLITE_PERM 3 /* Access permission denied */ +#define SQLITE_ABORT 4 /* Callback routine requested an abort */ +#define SQLITE_BUSY 5 /* The database file is locked */ +#define SQLITE_LOCKED 6 /* A table in the database is locked */ +#define SQLITE_NOMEM 7 /* A malloc() failed */ +#define SQLITE_READONLY 8 /* Attempt to write a readonly database */ +#define SQLITE_INTERRUPT 9 /* Operation terminated by sqlite3_interrupt()*/ +#define SQLITE_IOERR 10 /* Some kind of disk I/O error occurred */ +#define SQLITE_CORRUPT 11 /* The database disk image is malformed */ +#define SQLITE_NOTFOUND 12 /* Unknown opcode in sqlite3_file_control() */ +#define SQLITE_FULL 13 /* Insertion failed because database is full */ +#define SQLITE_CANTOPEN 14 /* Unable to open the database file */ +#define SQLITE_PROTOCOL 15 /* Database lock protocol error */ +#define SQLITE_EMPTY 16 /* Internal use only */ +#define SQLITE_SCHEMA 17 /* The database schema changed */ +#define SQLITE_TOOBIG 18 /* String or BLOB exceeds size limit */ +#define SQLITE_CONSTRAINT 19 /* Abort due to constraint violation */ +#define SQLITE_MISMATCH 20 /* Data type mismatch */ +#define SQLITE_MISUSE 21 /* Library used incorrectly */ +#define 
SQLITE_NOLFS 22 /* Uses OS features not supported on host */ +#define SQLITE_AUTH 23 /* Authorization denied */ +#define SQLITE_FORMAT 24 /* Not used */ +#define SQLITE_RANGE 25 /* 2nd parameter to sqlite3_bind out of range */ +#define SQLITE_NOTADB 26 /* File opened that is not a database file */ +#define SQLITE_NOTICE 27 /* Notifications from sqlite3_log() */ +#define SQLITE_WARNING 28 /* Warnings from sqlite3_log() */ +#define SQLITE_ROW 100 /* sqlite3_step() has another row ready */ +#define SQLITE_DONE 101 /* sqlite3_step() has finished executing */ +/* end-of-error-codes */ + +/* +** CAPI3REF: Extended Result Codes +** KEYWORDS: {extended result code definitions} +** +** In its default configuration, SQLite API routines return one of 30 integer +** [result codes]. However, experience has shown that many of +** these result codes are too coarse-grained. They do not provide as +** much information about problems as programmers might like. In an effort to +** address this, newer versions of SQLite (version 3.3.8 [dateof:3.3.8] +** and later) include +** support for additional result codes that provide more detailed information +** about errors. These [extended result codes] are enabled or disabled +** on a per database connection basis using the +** [sqlite3_extended_result_codes()] API. Or, the extended code for +** the most recent error can be obtained using +** [sqlite3_extended_errcode()]. 
+*/ +#define SQLITE_ERROR_MISSING_COLLSEQ (SQLITE_ERROR | (1<<8)) +#define SQLITE_ERROR_RETRY (SQLITE_ERROR | (2<<8)) +#define SQLITE_ERROR_SNAPSHOT (SQLITE_ERROR | (3<<8)) +#define SQLITE_IOERR_READ (SQLITE_IOERR | (1<<8)) +#define SQLITE_IOERR_SHORT_READ (SQLITE_IOERR | (2<<8)) +#define SQLITE_IOERR_WRITE (SQLITE_IOERR | (3<<8)) +#define SQLITE_IOERR_FSYNC (SQLITE_IOERR | (4<<8)) +#define SQLITE_IOERR_DIR_FSYNC (SQLITE_IOERR | (5<<8)) +#define SQLITE_IOERR_TRUNCATE (SQLITE_IOERR | (6<<8)) +#define SQLITE_IOERR_FSTAT (SQLITE_IOERR | (7<<8)) +#define SQLITE_IOERR_UNLOCK (SQLITE_IOERR | (8<<8)) +#define SQLITE_IOERR_RDLOCK (SQLITE_IOERR | (9<<8)) +#define SQLITE_IOERR_DELETE (SQLITE_IOERR | (10<<8)) +#define SQLITE_IOERR_BLOCKED (SQLITE_IOERR | (11<<8)) +#define SQLITE_IOERR_NOMEM (SQLITE_IOERR | (12<<8)) +#define SQLITE_IOERR_ACCESS (SQLITE_IOERR | (13<<8)) +#define SQLITE_IOERR_CHECKRESERVEDLOCK (SQLITE_IOERR | (14<<8)) +#define SQLITE_IOERR_LOCK (SQLITE_IOERR | (15<<8)) +#define SQLITE_IOERR_CLOSE (SQLITE_IOERR | (16<<8)) +#define SQLITE_IOERR_DIR_CLOSE (SQLITE_IOERR | (17<<8)) +#define SQLITE_IOERR_SHMOPEN (SQLITE_IOERR | (18<<8)) +#define SQLITE_IOERR_SHMSIZE (SQLITE_IOERR | (19<<8)) +#define SQLITE_IOERR_SHMLOCK (SQLITE_IOERR | (20<<8)) +#define SQLITE_IOERR_SHMMAP (SQLITE_IOERR | (21<<8)) +#define SQLITE_IOERR_SEEK (SQLITE_IOERR | (22<<8)) +#define SQLITE_IOERR_DELETE_NOENT (SQLITE_IOERR | (23<<8)) +#define SQLITE_IOERR_MMAP (SQLITE_IOERR | (24<<8)) +#define SQLITE_IOERR_GETTEMPPATH (SQLITE_IOERR | (25<<8)) +#define SQLITE_IOERR_CONVPATH (SQLITE_IOERR | (26<<8)) +#define SQLITE_IOERR_VNODE (SQLITE_IOERR | (27<<8)) +#define SQLITE_IOERR_AUTH (SQLITE_IOERR | (28<<8)) +#define SQLITE_IOERR_BEGIN_ATOMIC (SQLITE_IOERR | (29<<8)) +#define SQLITE_IOERR_COMMIT_ATOMIC (SQLITE_IOERR | (30<<8)) +#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8)) +#define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) +#define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) 
+#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8)) +#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) +#define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) +#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) +#define SQLITE_BUSY_SNAPSHOT (SQLITE_BUSY | (2<<8)) +#define SQLITE_BUSY_TIMEOUT (SQLITE_BUSY | (3<<8)) +#define SQLITE_CANTOPEN_NOTEMPDIR (SQLITE_CANTOPEN | (1<<8)) +#define SQLITE_CANTOPEN_ISDIR (SQLITE_CANTOPEN | (2<<8)) +#define SQLITE_CANTOPEN_FULLPATH (SQLITE_CANTOPEN | (3<<8)) +#define SQLITE_CANTOPEN_CONVPATH (SQLITE_CANTOPEN | (4<<8)) +#define SQLITE_CANTOPEN_DIRTYWAL (SQLITE_CANTOPEN | (5<<8)) /* Not Used */ +#define SQLITE_CANTOPEN_SYMLINK (SQLITE_CANTOPEN | (6<<8)) +#define SQLITE_CORRUPT_VTAB (SQLITE_CORRUPT | (1<<8)) +#define SQLITE_CORRUPT_SEQUENCE (SQLITE_CORRUPT | (2<<8)) +#define SQLITE_CORRUPT_INDEX (SQLITE_CORRUPT | (3<<8)) +#define SQLITE_READONLY_RECOVERY (SQLITE_READONLY | (1<<8)) +#define SQLITE_READONLY_CANTLOCK (SQLITE_READONLY | (2<<8)) +#define SQLITE_READONLY_ROLLBACK (SQLITE_READONLY | (3<<8)) +#define SQLITE_READONLY_DBMOVED (SQLITE_READONLY | (4<<8)) +#define SQLITE_READONLY_CANTINIT (SQLITE_READONLY | (5<<8)) +#define SQLITE_READONLY_DIRECTORY (SQLITE_READONLY | (6<<8)) +#define SQLITE_ABORT_ROLLBACK (SQLITE_ABORT | (2<<8)) +#define SQLITE_CONSTRAINT_CHECK (SQLITE_CONSTRAINT | (1<<8)) +#define SQLITE_CONSTRAINT_COMMITHOOK (SQLITE_CONSTRAINT | (2<<8)) +#define SQLITE_CONSTRAINT_FOREIGNKEY (SQLITE_CONSTRAINT | (3<<8)) +#define SQLITE_CONSTRAINT_FUNCTION (SQLITE_CONSTRAINT | (4<<8)) +#define SQLITE_CONSTRAINT_NOTNULL (SQLITE_CONSTRAINT | (5<<8)) +#define SQLITE_CONSTRAINT_PRIMARYKEY (SQLITE_CONSTRAINT | (6<<8)) +#define SQLITE_CONSTRAINT_TRIGGER (SQLITE_CONSTRAINT | (7<<8)) +#define SQLITE_CONSTRAINT_UNIQUE (SQLITE_CONSTRAINT | (8<<8)) +#define SQLITE_CONSTRAINT_VTAB (SQLITE_CONSTRAINT | (9<<8)) +#define SQLITE_CONSTRAINT_ROWID (SQLITE_CONSTRAINT |(10<<8)) +#define SQLITE_CONSTRAINT_PINNED (SQLITE_CONSTRAINT |(11<<8)) 
+#define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8)) +#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) +#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) +#define SQLITE_NOTICE_RBU (SQLITE_NOTICE | (3<<8)) +#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) +#define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) +#define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8)) +#define SQLITE_OK_SYMLINK (SQLITE_OK | (2<<8)) /* internal use only */ + +/* +** CAPI3REF: Flags For File Open Operations +** +** These bit values are intended for use in the +** 3rd parameter to the [sqlite3_open_v2()] interface and +** in the 4th parameter to the [sqlite3_vfs.xOpen] method. +** +** Only those flags marked as "Ok for sqlite3_open_v2()" may be +** used as the third argument to the [sqlite3_open_v2()] interface. +** The other flags have historically been ignored by sqlite3_open_v2(), +** though future versions of SQLite might change so that an error is +** raised if any of the disallowed bits are passed into sqlite3_open_v2(). +** Applications should not depend on the historical behavior. +** +** Note in particular that passing the SQLITE_OPEN_EXCLUSIVE flag into +** [sqlite3_open_v2()] does *not* cause the underlying database file +** to be opened using O_EXCL. Passing SQLITE_OPEN_EXCLUSIVE into +** [sqlite3_open_v2()] has historically be a no-op and might become an +** error in future versions of SQLite. 
+*/ +#define SQLITE_OPEN_READONLY 0x00000001 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_READWRITE 0x00000002 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_CREATE 0x00000004 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_DELETEONCLOSE 0x00000008 /* VFS only */ +#define SQLITE_OPEN_EXCLUSIVE 0x00000010 /* VFS only */ +#define SQLITE_OPEN_AUTOPROXY 0x00000020 /* VFS only */ +#define SQLITE_OPEN_URI 0x00000040 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_MEMORY 0x00000080 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_MAIN_DB 0x00000100 /* VFS only */ +#define SQLITE_OPEN_TEMP_DB 0x00000200 /* VFS only */ +#define SQLITE_OPEN_TRANSIENT_DB 0x00000400 /* VFS only */ +#define SQLITE_OPEN_MAIN_JOURNAL 0x00000800 /* VFS only */ +#define SQLITE_OPEN_TEMP_JOURNAL 0x00001000 /* VFS only */ +#define SQLITE_OPEN_SUBJOURNAL 0x00002000 /* VFS only */ +#define SQLITE_OPEN_SUPER_JOURNAL 0x00004000 /* VFS only */ +#define SQLITE_OPEN_NOMUTEX 0x00008000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_FULLMUTEX 0x00010000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_SHAREDCACHE 0x00020000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_PRIVATECACHE 0x00040000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_WAL 0x00080000 /* VFS only */ +#define SQLITE_OPEN_NOFOLLOW 0x01000000 /* Ok for sqlite3_open_v2() */ +#define SQLITE_OPEN_EXRESCODE 0x02000000 /* Extended result codes */ + +/* Reserved: 0x00F00000 */ +/* Legacy compatibility: */ +#define SQLITE_OPEN_MASTER_JOURNAL 0x00004000 /* VFS only */ + + +/* +** CAPI3REF: Device Characteristics +** +** The xDeviceCharacteristics method of the [sqlite3_io_methods] +** object returns an integer which is a vector of these +** bit values expressing I/O characteristics of the mass storage +** device that holds the file that the [sqlite3_io_methods] +** refers to. +** +** The SQLITE_IOCAP_ATOMIC property means that all writes of +** any size are atomic. 
The SQLITE_IOCAP_ATOMICnnn values +** mean that writes of blocks that are nnn bytes in size and +** are aligned to an address which is an integer multiple of +** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +** that when data is appended to a file, the data is appended +** first then the size of the file is extended, never the other +** way around. The SQLITE_IOCAP_SEQUENTIAL property means that +** information is written to disk in the same order as calls +** to xWrite(). The SQLITE_IOCAP_POWERSAFE_OVERWRITE property means that +** after reboot following a crash or power loss, the only bytes in a +** file that were written at the application level might have changed +** and that adjacent bytes, even bytes within the same sector are +** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN +** flag indicates that a file cannot be deleted when open. The +** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on +** read-only media and cannot be changed even by processes with +** elevated privileges. +** +** The SQLITE_IOCAP_BATCH_ATOMIC property means that the underlying +** filesystem supports doing multiple write operations atomically when those +** write operations are bracketed by [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] and +** [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]. 
+*/ +#define SQLITE_IOCAP_ATOMIC 0x00000001 +#define SQLITE_IOCAP_ATOMIC512 0x00000002 +#define SQLITE_IOCAP_ATOMIC1K 0x00000004 +#define SQLITE_IOCAP_ATOMIC2K 0x00000008 +#define SQLITE_IOCAP_ATOMIC4K 0x00000010 +#define SQLITE_IOCAP_ATOMIC8K 0x00000020 +#define SQLITE_IOCAP_ATOMIC16K 0x00000040 +#define SQLITE_IOCAP_ATOMIC32K 0x00000080 +#define SQLITE_IOCAP_ATOMIC64K 0x00000100 +#define SQLITE_IOCAP_SAFE_APPEND 0x00000200 +#define SQLITE_IOCAP_SEQUENTIAL 0x00000400 +#define SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 0x00000800 +#define SQLITE_IOCAP_POWERSAFE_OVERWRITE 0x00001000 +#define SQLITE_IOCAP_IMMUTABLE 0x00002000 +#define SQLITE_IOCAP_BATCH_ATOMIC 0x00004000 + +/* +** CAPI3REF: File Locking Levels +** +** SQLite uses one of these integer values as the second +** argument to calls it makes to the xLock() and xUnlock() methods +** of an [sqlite3_io_methods] object. These values are ordered from +** lest restrictive to most restrictive. +** +** The argument to xLock() is always SHARED or higher. The argument to +** xUnlock is either SHARED or NONE. +*/ +#define SQLITE_LOCK_NONE 0 /* xUnlock() only */ +#define SQLITE_LOCK_SHARED 1 /* xLock() or xUnlock() */ +#define SQLITE_LOCK_RESERVED 2 /* xLock() only */ +#define SQLITE_LOCK_PENDING 3 /* xLock() only */ +#define SQLITE_LOCK_EXCLUSIVE 4 /* xLock() only */ + +/* +** CAPI3REF: Synchronization Type Flags +** +** When SQLite invokes the xSync() method of an +** [sqlite3_io_methods] object it uses a combination of +** these integer values as the second argument. +** +** When the SQLITE_SYNC_DATAONLY flag is used, it means that the +** sync operation only needs to flush data to mass storage. Inode +** information need not be flushed. If the lower four bits of the flag +** equal SQLITE_SYNC_NORMAL, that means to use normal fsync() semantics. +** If the lower four bits equal SQLITE_SYNC_FULL, that means +** to use Mac OS X style fullsync instead of fsync(). 
+** +** Do not confuse the SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags +** with the [PRAGMA synchronous]=NORMAL and [PRAGMA synchronous]=FULL +** settings. The [synchronous pragma] determines when calls to the +** xSync VFS method occur and applies uniformly across all platforms. +** The SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL flags determine how +** energetic or rigorous or forceful the sync operations are and +** only make a difference on Mac OSX for the default SQLite code. +** (Third-party VFS implementations might also make the distinction +** between SQLITE_SYNC_NORMAL and SQLITE_SYNC_FULL, but among the +** operating systems natively supported by SQLite, only Mac OSX +** cares about the difference.) +*/ +#define SQLITE_SYNC_NORMAL 0x00002 +#define SQLITE_SYNC_FULL 0x00003 +#define SQLITE_SYNC_DATAONLY 0x00010 + +/* +** CAPI3REF: OS Interface Open File Handle +** +** An [sqlite3_file] object represents an open file in the +** [sqlite3_vfs | OS interface layer]. Individual OS interface +** implementations will +** want to subclass this object by appending additional fields +** for their own use. The pMethods entry is a pointer to an +** [sqlite3_io_methods] object that defines methods for performing +** I/O operations on the open file. +*/ +typedef struct sqlite3_file sqlite3_file; +struct sqlite3_file { + const struct sqlite3_io_methods *pMethods; /* Methods for an open file */ +}; + +/* +** CAPI3REF: OS Interface File Virtual Methods Object +** +** Every file opened by the [sqlite3_vfs.xOpen] method populates an +** [sqlite3_file] object (or, more commonly, a subclass of the +** [sqlite3_file] object) with a pointer to an instance of this object. +** This object defines the methods used to perform various operations +** against the open file represented by the [sqlite3_file] object. 
+** +** If the [sqlite3_vfs.xOpen] method sets the sqlite3_file.pMethods element +** to a non-NULL pointer, then the sqlite3_io_methods.xClose method +** may be invoked even if the [sqlite3_vfs.xOpen] reported that it failed. The +** only way to prevent a call to xClose following a failed [sqlite3_vfs.xOpen] +** is for the [sqlite3_vfs.xOpen] to set the sqlite3_file.pMethods element +** to NULL. +** +** The flags argument to xSync may be one of [SQLITE_SYNC_NORMAL] or +** [SQLITE_SYNC_FULL]. The first choice is the normal fsync(). +** The second choice is a Mac OS X style fullsync. The [SQLITE_SYNC_DATAONLY] +** flag may be ORed in to indicate that only the data of the file +** and not its inode needs to be synced. +** +** The integer values to xLock() and xUnlock() are one of +**
 <ul> +**
  <li> [SQLITE_LOCK_NONE], +**
  <li> [SQLITE_LOCK_SHARED], +**
  <li> [SQLITE_LOCK_RESERVED], +**
  <li> [SQLITE_LOCK_PENDING], or +**
  <li> [SQLITE_LOCK_EXCLUSIVE]. +** </ul>
+** xLock() upgrades the database file lock. In other words, xLock() moves the +** database file lock in the direction NONE toward EXCLUSIVE. The argument to +** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never +** SQLITE_LOCK_NONE. If the database file lock is already at or above the +** requested lock, then the call to xLock() is a no-op. +** xUnlock() downgrades the database file lock to either SHARED or NONE. +** If the lock is already at or below the requested lock state, then the call +** to xUnlock() is a no-op. +** The xCheckReservedLock() method checks whether any database connection, +** either in this process or in some other process, is holding a RESERVED, +** PENDING, or EXCLUSIVE lock on the file. It returns true +** if such a lock exists and false otherwise. +** +** The xFileControl() method is a generic interface that allows custom +** VFS implementations to directly control an open file using the +** [sqlite3_file_control()] interface. The second "op" argument is an +** integer opcode. The third argument is a generic pointer intended to +** point to a structure that may contain arguments or space in which to +** write return values. Potential uses for xFileControl() might be +** functions to enable blocking locks with timeouts, to change the +** locking strategy (for example to use dot-file locks), to inquire +** about the status of a lock, or to break stale locks. The SQLite +** core reserves all opcodes less than 100 for its own use. +** A [file control opcodes | list of opcodes] less than 100 is available. +** Applications that define a custom xFileControl method should use opcodes +** greater than 100 to avoid conflicts. VFS implementations should +** return [SQLITE_NOTFOUND] for file control opcodes that they do not +** recognize. +** +** The xSectorSize() method returns the sector size of the +** device that underlies the file. 
The sector size is the +** minimum write that can be performed without disturbing +** other bytes in the file. The xDeviceCharacteristics() +** method returns a bit vector describing behaviors of the +** underlying device: +** +**
 <ul> +**
  <li> [SQLITE_IOCAP_ATOMIC] +**
  <li> [SQLITE_IOCAP_ATOMIC512] +**
  <li> [SQLITE_IOCAP_ATOMIC1K] +**
  <li> [SQLITE_IOCAP_ATOMIC2K] +**
  <li> [SQLITE_IOCAP_ATOMIC4K] +**
  <li> [SQLITE_IOCAP_ATOMIC8K] +**
  <li> [SQLITE_IOCAP_ATOMIC16K] +**
  <li> [SQLITE_IOCAP_ATOMIC32K] +**
  <li> [SQLITE_IOCAP_ATOMIC64K] +**
  <li> [SQLITE_IOCAP_SAFE_APPEND] +**
  <li> [SQLITE_IOCAP_SEQUENTIAL] +**
  <li> [SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN] +**
  <li> [SQLITE_IOCAP_POWERSAFE_OVERWRITE] +**
  <li> [SQLITE_IOCAP_IMMUTABLE] +**
  <li> [SQLITE_IOCAP_BATCH_ATOMIC] +** </ul>
+** +** The SQLITE_IOCAP_ATOMIC property means that all writes of +** any size are atomic. The SQLITE_IOCAP_ATOMICnnn values +** mean that writes of blocks that are nnn bytes in size and +** are aligned to an address which is an integer multiple of +** nnn are atomic. The SQLITE_IOCAP_SAFE_APPEND value means +** that when data is appended to a file, the data is appended +** first then the size of the file is extended, never the other +** way around. The SQLITE_IOCAP_SEQUENTIAL property means that +** information is written to disk in the same order as calls +** to xWrite(). +** +** If xRead() returns SQLITE_IOERR_SHORT_READ it must also fill +** in the unread portions of the buffer with zeros. A VFS that +** fails to zero-fill short reads might seem to work. However, +** failure to zero-fill short reads will eventually lead to +** database corruption. +*/ +typedef struct sqlite3_io_methods sqlite3_io_methods; +struct sqlite3_io_methods { + int iVersion; + int (*xClose)(sqlite3_file*); + int (*xRead)(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); + int (*xWrite)(sqlite3_file*, const void*, int iAmt, sqlite3_int64 iOfst); + int (*xTruncate)(sqlite3_file*, sqlite3_int64 size); + int (*xSync)(sqlite3_file*, int flags); + int (*xFileSize)(sqlite3_file*, sqlite3_int64 *pSize); + int (*xLock)(sqlite3_file*, int); + int (*xUnlock)(sqlite3_file*, int); + int (*xCheckReservedLock)(sqlite3_file*, int *pResOut); + int (*xFileControl)(sqlite3_file*, int op, void *pArg); + int (*xSectorSize)(sqlite3_file*); + int (*xDeviceCharacteristics)(sqlite3_file*); + /* Methods above are valid for version 1 */ + int (*xShmMap)(sqlite3_file*, int iPg, int pgsz, int, void volatile**); + int (*xShmLock)(sqlite3_file*, int offset, int n, int flags); + void (*xShmBarrier)(sqlite3_file*); + int (*xShmUnmap)(sqlite3_file*, int deleteFlag); + /* Methods above are valid for version 2 */ + int (*xFetch)(sqlite3_file*, sqlite3_int64 iOfst, int iAmt, void **pp); + int (*xUnfetch)(sqlite3_file*, 
sqlite3_int64 iOfst, void *p); + /* Methods above are valid for version 3 */ + /* Additional methods may be added in future releases */ +}; + +/* +** CAPI3REF: Standard File Control Opcodes +** KEYWORDS: {file control opcodes} {file control opcode} +** +** These integer constants are opcodes for the xFileControl method +** of the [sqlite3_io_methods] object and for the [sqlite3_file_control()] +** interface. +** +**
    +**
  • [[SQLITE_FCNTL_LOCKSTATE]] +** The [SQLITE_FCNTL_LOCKSTATE] opcode is used for debugging. This +** opcode causes the xFileControl method to write the current state of +** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], +** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) +** into an integer that the pArg argument points to. +** This capability is only available if SQLite is compiled with [SQLITE_DEBUG]. +** +**
  • [[SQLITE_FCNTL_SIZE_HINT]] +** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS +** layer a hint of how large the database file will grow to be during the +** current transaction. This hint is not guaranteed to be accurate but it +** is often close. The underlying VFS might choose to preallocate database +** file space based on this hint in order to help writes to the database +** file run faster. +** +**
  • [[SQLITE_FCNTL_SIZE_LIMIT]] +** The [SQLITE_FCNTL_SIZE_LIMIT] opcode is used by in-memory VFS that +** implements [sqlite3_deserialize()] to set an upper bound on the size +** of the in-memory database. The argument is a pointer to a [sqlite3_int64]. +** If the integer pointed to is negative, then it is filled in with the +** current limit. Otherwise the limit is set to the larger of the value +** of the integer pointed to and the current database size. The integer +** pointed to is set to the new limit. +** +**
  • [[SQLITE_FCNTL_CHUNK_SIZE]] +** The [SQLITE_FCNTL_CHUNK_SIZE] opcode is used to request that the VFS +** extends and truncates the database file in chunks of a size specified +** by the user. The fourth argument to [sqlite3_file_control()] should +** point to an integer (type int) containing the new chunk-size to use +** for the nominated database. Allocating database file space in large +** chunks (say 1MB at a time), may reduce file-system fragmentation and +** improve performance on some systems. +** +**
  • [[SQLITE_FCNTL_FILE_POINTER]] +** The [SQLITE_FCNTL_FILE_POINTER] opcode is used to obtain a pointer +** to the [sqlite3_file] object associated with a particular database +** connection. See also [SQLITE_FCNTL_JOURNAL_POINTER]. +** +**
  • [[SQLITE_FCNTL_JOURNAL_POINTER]] +** The [SQLITE_FCNTL_JOURNAL_POINTER] opcode is used to obtain a pointer +** to the [sqlite3_file] object associated with the journal file (either +** the [rollback journal] or the [write-ahead log]) for a particular database +** connection. See also [SQLITE_FCNTL_FILE_POINTER]. +** +**
  • [[SQLITE_FCNTL_SYNC_OMITTED]] +** No longer in use. +** +**
  • [[SQLITE_FCNTL_SYNC]] +** The [SQLITE_FCNTL_SYNC] opcode is generated internally by SQLite and +** sent to the VFS immediately before the xSync method is invoked on a +** database file descriptor. Or, if the xSync method is not invoked +** because the user has configured SQLite with +** [PRAGMA synchronous | PRAGMA synchronous=OFF] it is invoked in place +** of the xSync method. In most cases, the pointer argument passed with +** this file-control is NULL. However, if the database file is being synced +** as part of a multi-database commit, the argument points to a nul-terminated +** string containing the transactions super-journal file name. VFSes that +** do not need this signal should silently ignore this opcode. Applications +** should not call [sqlite3_file_control()] with this opcode as doing so may +** disrupt the operation of the specialized VFSes that do require it. +** +**
  • [[SQLITE_FCNTL_COMMIT_PHASETWO]] +** The [SQLITE_FCNTL_COMMIT_PHASETWO] opcode is generated internally by SQLite +** and sent to the VFS after a transaction has been committed immediately +** but before the database is unlocked. VFSes that do not need this signal +** should silently ignore this opcode. Applications should not call +** [sqlite3_file_control()] with this opcode as doing so may disrupt the +** operation of the specialized VFSes that do require it. +** +**
  • [[SQLITE_FCNTL_WIN32_AV_RETRY]] +** ^The [SQLITE_FCNTL_WIN32_AV_RETRY] opcode is used to configure automatic +** retry counts and intervals for certain disk I/O operations for the +** windows [VFS] in order to provide robustness in the presence of +** anti-virus programs. By default, the windows VFS will retry file read, +** file write, and file delete operations up to 10 times, with a delay +** of 25 milliseconds before the first retry and with the delay increasing +** by an additional 25 milliseconds with each subsequent retry. This +** opcode allows these two values (10 retries and 25 milliseconds of delay) +** to be adjusted. The values are changed for all database connections +** within the same process. The argument is a pointer to an array of two +** integers where the first integer is the new retry count and the second +** integer is the delay. If either integer is negative, then the setting +** is not changed but instead the prior value of that setting is written +** into the array entry, allowing the current retry settings to be +** interrogated. The zDbName parameter is ignored. +** +**
  • [[SQLITE_FCNTL_PERSIST_WAL]] +** ^The [SQLITE_FCNTL_PERSIST_WAL] opcode is used to set or query the +** persistent [WAL | Write Ahead Log] setting. By default, the auxiliary +** write ahead log ([WAL file]) and shared memory +** files used for transaction control +** are automatically deleted when the latest connection to the database +** closes. Setting persistent WAL mode causes those files to persist after +** close. Persisting the files is useful when other processes that do not +** have write permission on the directory containing the database file want +** to read the database file, as the WAL and shared memory files must exist +** in order for the database to be readable. The fourth parameter to +** [sqlite3_file_control()] for this opcode should be a pointer to an integer. +** That integer is 0 to disable persistent WAL mode or 1 to enable persistent +** WAL mode. If the integer is -1, then it is overwritten with the current +** WAL persistence setting. +** +**
  • [[SQLITE_FCNTL_POWERSAFE_OVERWRITE]] +** ^The [SQLITE_FCNTL_POWERSAFE_OVERWRITE] opcode is used to set or query the +** persistent "powersafe-overwrite" or "PSOW" setting. The PSOW setting +** determines the [SQLITE_IOCAP_POWERSAFE_OVERWRITE] bit of the +** xDeviceCharacteristics methods. The fourth parameter to +** [sqlite3_file_control()] for this opcode should be a pointer to an integer. +** That integer is 0 to disable zero-damage mode or 1 to enable zero-damage +** mode. If the integer is -1, then it is overwritten with the current +** zero-damage mode setting. +** +**
  • [[SQLITE_FCNTL_OVERWRITE]] +** ^The [SQLITE_FCNTL_OVERWRITE] opcode is invoked by SQLite after opening +** a write transaction to indicate that, unless it is rolled back for some +** reason, the entire database file will be overwritten by the current +** transaction. This is used by VACUUM operations. +** +**
  • [[SQLITE_FCNTL_VFSNAME]] +** ^The [SQLITE_FCNTL_VFSNAME] opcode can be used to obtain the names of +** all [VFSes] in the VFS stack. The names are of all VFS shims and the +** final bottom-level VFS are written into memory obtained from +** [sqlite3_malloc()] and the result is stored in the char* variable +** that the fourth parameter of [sqlite3_file_control()] points to. +** The caller is responsible for freeing the memory when done. As with +** all file-control actions, there is no guarantee that this will actually +** do anything. Callers should initialize the char* variable to a NULL +** pointer in case this file-control is not implemented. This file-control +** is intended for diagnostic use only. +** +**
  • [[SQLITE_FCNTL_VFS_POINTER]] +** ^The [SQLITE_FCNTL_VFS_POINTER] opcode finds a pointer to the top-level +** [VFSes] currently in use. ^(The argument X in +** sqlite3_file_control(db,SQLITE_FCNTL_VFS_POINTER,X) must be +** of type "[sqlite3_vfs] **". This opcodes will set *X +** to a pointer to the top-level VFS.)^ +** ^When there are multiple VFS shims in the stack, this opcode finds the +** upper-most shim only. +** +**
  • [[SQLITE_FCNTL_PRAGMA]] +** ^Whenever a [PRAGMA] statement is parsed, an [SQLITE_FCNTL_PRAGMA] +** file control is sent to the open [sqlite3_file] object corresponding +** to the database file to which the pragma statement refers. ^The argument +** to the [SQLITE_FCNTL_PRAGMA] file control is an array of +** pointers to strings (char**) in which the second element of the array +** is the name of the pragma and the third element is the argument to the +** pragma or NULL if the pragma has no argument. ^The handler for an +** [SQLITE_FCNTL_PRAGMA] file control can optionally make the first element +** of the char** argument point to a string obtained from [sqlite3_mprintf()] +** or the equivalent and that string will become the result of the pragma or +** the error message if the pragma fails. ^If the +** [SQLITE_FCNTL_PRAGMA] file control returns [SQLITE_NOTFOUND], then normal +** [PRAGMA] processing continues. ^If the [SQLITE_FCNTL_PRAGMA] +** file control returns [SQLITE_OK], then the parser assumes that the +** VFS has handled the PRAGMA itself and the parser generates a no-op +** prepared statement if result string is NULL, or that returns a copy +** of the result string if the string is non-NULL. +** ^If the [SQLITE_FCNTL_PRAGMA] file control returns +** any result code other than [SQLITE_OK] or [SQLITE_NOTFOUND], that means +** that the VFS encountered an error while handling the [PRAGMA] and the +** compilation of the PRAGMA fails with an error. ^The [SQLITE_FCNTL_PRAGMA] +** file control occurs at the beginning of pragma statement analysis and so +** it is able to override built-in [PRAGMA] statements. +** +**
  • [[SQLITE_FCNTL_BUSYHANDLER]] +** ^The [SQLITE_FCNTL_BUSYHANDLER] +** file-control may be invoked by SQLite on the database file handle +** shortly after it is opened in order to provide a custom VFS with access +** to the connection's busy-handler callback. The argument is of type (void**) +** - an array of two (void *) values. The first (void *) actually points +** to a function of type (int (*)(void *)). In order to invoke the connection's +** busy-handler, this function should be invoked with the second (void *) in +** the array as the only argument. If it returns non-zero, then the operation +** should be retried. If it returns zero, the custom VFS should abandon the +** current operation. +** +**
  • [[SQLITE_FCNTL_TEMPFILENAME]] +** ^Applications can invoke the [SQLITE_FCNTL_TEMPFILENAME] file-control +** to have SQLite generate a +** temporary filename using the same algorithm that is followed to generate +** temporary filenames for TEMP tables and other internal uses. The +** argument should be a char** which will be filled with the filename +** written into memory obtained from [sqlite3_malloc()]. The caller should +** invoke [sqlite3_free()] on the result to avoid a memory leak. +** +**
  • [[SQLITE_FCNTL_MMAP_SIZE]] +** The [SQLITE_FCNTL_MMAP_SIZE] file control is used to query or set the +** maximum number of bytes that will be used for memory-mapped I/O. +** The argument is a pointer to a value of type sqlite3_int64 that +** is an advisory maximum number of bytes in the file to memory map. The +** pointer is overwritten with the old value. The limit is not changed if +** the value originally pointed to is negative, and so the current limit +** can be queried by passing in a pointer to a negative number. This +** file-control is used internally to implement [PRAGMA mmap_size]. +** +**
  • [[SQLITE_FCNTL_TRACE]] +** The [SQLITE_FCNTL_TRACE] file control provides advisory information +** to the VFS about what the higher layers of the SQLite stack are doing. +** This file control is used by some VFS activity tracing [shims]. +** The argument is a zero-terminated string. Higher layers in the +** SQLite stack may generate instances of this file control if +** the [SQLITE_USE_FCNTL_TRACE] compile-time option is enabled. +** +**
  • [[SQLITE_FCNTL_HAS_MOVED]] +** The [SQLITE_FCNTL_HAS_MOVED] file control interprets its argument as a +** pointer to an integer and it writes a boolean into that integer depending +** on whether or not the file has been renamed, moved, or deleted since it +** was first opened. +** +**
  • [[SQLITE_FCNTL_WIN32_GET_HANDLE]] +** The [SQLITE_FCNTL_WIN32_GET_HANDLE] opcode can be used to obtain the +** underlying native file handle associated with a file handle. This file +** control interprets its argument as a pointer to a native file handle and +** writes the resulting value there. +** +**
  • [[SQLITE_FCNTL_WIN32_SET_HANDLE]] +** The [SQLITE_FCNTL_WIN32_SET_HANDLE] opcode is used for debugging. This +** opcode causes the xFileControl method to swap the file handle with the one +** pointed to by the pArg argument. This capability is used during testing +** and only needs to be supported when SQLITE_TEST is defined. +** +**
  • [[SQLITE_FCNTL_WAL_BLOCK]] +** The [SQLITE_FCNTL_WAL_BLOCK] is a signal to the VFS layer that it might +** be advantageous to block on the next WAL lock if the lock is not immediately +** available. The WAL subsystem issues this signal during rare +** circumstances in order to fix a problem with priority inversion. +** Applications should not use this file-control. +** +**
  • [[SQLITE_FCNTL_ZIPVFS]] +** The [SQLITE_FCNTL_ZIPVFS] opcode is implemented by zipvfs only. All other +** VFS should return SQLITE_NOTFOUND for this opcode. +** +**
  • [[SQLITE_FCNTL_RBU]] +** The [SQLITE_FCNTL_RBU] opcode is implemented by the special VFS used by +** the RBU extension only. All other VFS should return SQLITE_NOTFOUND for +** this opcode. +** +**
  • [[SQLITE_FCNTL_BEGIN_ATOMIC_WRITE]] +** If the [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] opcode returns SQLITE_OK, then +** the file descriptor is placed in "batch write mode", which +** means all subsequent write operations will be deferred and done +** atomically at the next [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]. Systems +** that do not support batch atomic writes will return SQLITE_NOTFOUND. +** ^Following a successful SQLITE_FCNTL_BEGIN_ATOMIC_WRITE and prior to +** the closing [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE] or +** [SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE], SQLite will make +** no VFS interface calls on the same [sqlite3_file] file descriptor +** except for calls to the xWrite method and the xFileControl method +** with [SQLITE_FCNTL_SIZE_HINT]. +** +**
  • [[SQLITE_FCNTL_COMMIT_ATOMIC_WRITE]] +** The [SQLITE_FCNTL_COMMIT_ATOMIC_WRITE] opcode causes all write +** operations since the previous successful call to +** [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] to be performed atomically. +** This file control returns [SQLITE_OK] if and only if the writes were +** all performed successfully and have been committed to persistent storage. +** ^Regardless of whether or not it is successful, this file control takes +** the file descriptor out of batch write mode so that all subsequent +** write operations are independent. +** ^SQLite will never invoke SQLITE_FCNTL_COMMIT_ATOMIC_WRITE without +** a prior successful call to [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE]. +** +**
  • [[SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE]] +** The [SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE] opcode causes all write +** operations since the previous successful call to +** [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE] to be rolled back. +** ^This file control takes the file descriptor out of batch write mode +** so that all subsequent write operations are independent. +** ^SQLite will never invoke SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE without +** a prior successful call to [SQLITE_FCNTL_BEGIN_ATOMIC_WRITE]. +** +**
  • [[SQLITE_FCNTL_LOCK_TIMEOUT]] +** The [SQLITE_FCNTL_LOCK_TIMEOUT] opcode is used to configure a VFS +** to block for up to M milliseconds before failing when attempting to +** obtain a file lock using the xLock or xShmLock methods of the VFS. +** The parameter is a pointer to a 32-bit signed integer that contains +** the value that M is to be set to. Before returning, the 32-bit signed +** integer is overwritten with the previous value of M. +** +**
  • [[SQLITE_FCNTL_DATA_VERSION]] +** The [SQLITE_FCNTL_DATA_VERSION] opcode is used to detect changes to +** a database file. The argument is a pointer to a 32-bit unsigned integer. +** The "data version" for the pager is written into the pointer. The +** "data version" changes whenever any change occurs to the corresponding +** database file, either through SQL statements on the same database +** connection or through transactions committed by separate database +** connections possibly in other processes. The [sqlite3_total_changes()] +** interface can be used to find if any database on the connection has changed, +** but that interface responds to changes on TEMP as well as MAIN and does +** not provide a mechanism to detect changes to MAIN only. Also, the +** [sqlite3_total_changes()] interface responds to internal changes only and +** omits changes made by other database connections. The +** [PRAGMA data_version] command provides a mechanism to detect changes to +** a single attached database that occur due to other database connections, +** but omits changes implemented by the database connection on which it is +** called. This file control is the only mechanism to detect changes that +** happen either internally or externally and that are associated with +** a particular attached database. +** +**
  • [[SQLITE_FCNTL_CKPT_START]] +** The [SQLITE_FCNTL_CKPT_START] opcode is invoked from within a checkpoint +** in wal mode before the client starts to copy pages from the wal +** file to the database file. +** +**
  • [[SQLITE_FCNTL_CKPT_DONE]] +** The [SQLITE_FCNTL_CKPT_DONE] opcode is invoked from within a checkpoint +** in wal mode after the client has finished copying pages from the wal +** file to the database file, but before the *-shm file is updated to +** record the fact that the pages have been checkpointed. +** +**
  • [[SQLITE_FCNTL_EXTERNAL_READER]] +** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect +** whether or not there is a database client in another process with a wal-mode +** transaction open on the database. It is only available on unix. The +** (void*) argument passed with this file-control should be a pointer to a +** value of type (int). The integer value is set to 1 if the database is a wal +** mode database and there exists at least one client in another process that +** currently has an SQL transaction open on the database. It is set to 0 if +** the database is not a wal-mode db, or if there is no such connection in any +** other process. This opcode cannot be used to detect transactions opened +** by clients within the current process, only within other processes. +** +**
  • [[SQLITE_FCNTL_CKSM_FILE]] +** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the +** [checksum VFS shim] only. +** +**
  • [[SQLITE_FCNTL_RESET_CACHE]] +** If there is currently no transaction open on the database, and the +** database is not a temp db, then the [SQLITE_FCNTL_RESET_CACHE] file-control +** purges the contents of the in-memory page cache. If there is an open +** transaction, or if the db is a temp-db, this opcode is a no-op, not an error. +**
+*/ +#define SQLITE_FCNTL_LOCKSTATE 1 +#define SQLITE_FCNTL_GET_LOCKPROXYFILE 2 +#define SQLITE_FCNTL_SET_LOCKPROXYFILE 3 +#define SQLITE_FCNTL_LAST_ERRNO 4 +#define SQLITE_FCNTL_SIZE_HINT 5 +#define SQLITE_FCNTL_CHUNK_SIZE 6 +#define SQLITE_FCNTL_FILE_POINTER 7 +#define SQLITE_FCNTL_SYNC_OMITTED 8 +#define SQLITE_FCNTL_WIN32_AV_RETRY 9 +#define SQLITE_FCNTL_PERSIST_WAL 10 +#define SQLITE_FCNTL_OVERWRITE 11 +#define SQLITE_FCNTL_VFSNAME 12 +#define SQLITE_FCNTL_POWERSAFE_OVERWRITE 13 +#define SQLITE_FCNTL_PRAGMA 14 +#define SQLITE_FCNTL_BUSYHANDLER 15 +#define SQLITE_FCNTL_TEMPFILENAME 16 +#define SQLITE_FCNTL_MMAP_SIZE 18 +#define SQLITE_FCNTL_TRACE 19 +#define SQLITE_FCNTL_HAS_MOVED 20 +#define SQLITE_FCNTL_SYNC 21 +#define SQLITE_FCNTL_COMMIT_PHASETWO 22 +#define SQLITE_FCNTL_WIN32_SET_HANDLE 23 +#define SQLITE_FCNTL_WAL_BLOCK 24 +#define SQLITE_FCNTL_ZIPVFS 25 +#define SQLITE_FCNTL_RBU 26 +#define SQLITE_FCNTL_VFS_POINTER 27 +#define SQLITE_FCNTL_JOURNAL_POINTER 28 +#define SQLITE_FCNTL_WIN32_GET_HANDLE 29 +#define SQLITE_FCNTL_PDB 30 +#define SQLITE_FCNTL_BEGIN_ATOMIC_WRITE 31 +#define SQLITE_FCNTL_COMMIT_ATOMIC_WRITE 32 +#define SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33 +#define SQLITE_FCNTL_LOCK_TIMEOUT 34 +#define SQLITE_FCNTL_DATA_VERSION 35 +#define SQLITE_FCNTL_SIZE_LIMIT 36 +#define SQLITE_FCNTL_CKPT_DONE 37 +#define SQLITE_FCNTL_RESERVE_BYTES 38 +#define SQLITE_FCNTL_CKPT_START 39 +#define SQLITE_FCNTL_EXTERNAL_READER 40 +#define SQLITE_FCNTL_CKSM_FILE 41 +#define SQLITE_FCNTL_RESET_CACHE 42 + +/* deprecated names */ +#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE +#define SQLITE_SET_LOCKPROXYFILE SQLITE_FCNTL_SET_LOCKPROXYFILE +#define SQLITE_LAST_ERRNO SQLITE_FCNTL_LAST_ERRNO + + +/* +** CAPI3REF: Mutex Handle +** +** The mutex module within SQLite defines [sqlite3_mutex] to be an +** abstract type for a mutex object. The SQLite core never looks +** at the internal representation of an [sqlite3_mutex]. 
It only +** deals with pointers to the [sqlite3_mutex] object. +** +** Mutexes are created using [sqlite3_mutex_alloc()]. +*/ +typedef struct sqlite3_mutex sqlite3_mutex; + +/* +** CAPI3REF: Loadable Extension Thunk +** +** A pointer to the opaque sqlite3_api_routines structure is passed as +** the third parameter to entry points of [loadable extensions]. This +** structure must be typedefed in order to work around compiler warnings +** on some platforms. +*/ +typedef struct sqlite3_api_routines sqlite3_api_routines; + +/* +** CAPI3REF: File Name +** +** Type [sqlite3_filename] is used by SQLite to pass filenames to the +** xOpen method of a [VFS]. It may be cast to (const char*) and treated +** as a normal, nul-terminated, UTF-8 buffer containing the filename, but +** may also be passed to special APIs such as: +** +**
    +**
  • sqlite3_filename_database() +**
  • sqlite3_filename_journal() +**
  • sqlite3_filename_wal() +**
  • sqlite3_uri_parameter() +**
  • sqlite3_uri_boolean() +**
  • sqlite3_uri_int64() +**
  • sqlite3_uri_key() +**
+*/ +typedef const char *sqlite3_filename; + +/* +** CAPI3REF: OS Interface Object +** +** An instance of the sqlite3_vfs object defines the interface between +** the SQLite core and the underlying operating system. The "vfs" +** in the name of the object stands for "virtual file system". See +** the [VFS | VFS documentation] for further information. +** +** The VFS interface is sometimes extended by adding new methods onto +** the end. Each time such an extension occurs, the iVersion field +** is incremented. The iVersion value started out as 1 in +** SQLite [version 3.5.0] on [dateof:3.5.0], then increased to 2 +** with SQLite [version 3.7.0] on [dateof:3.7.0], and then increased +** to 3 with SQLite [version 3.7.6] on [dateof:3.7.6]. Additional fields +** may be appended to the sqlite3_vfs object and the iVersion value +** may increase again in future versions of SQLite. +** Note that due to an oversight, the structure +** of the sqlite3_vfs object changed in the transition from +** SQLite [version 3.5.9] to [version 3.6.0] on [dateof:3.6.0] +** and yet the iVersion field was not increased. +** +** The szOsFile field is the size of the subclassed [sqlite3_file] +** structure used by this VFS. mxPathname is the maximum length of +** a pathname in this VFS. +** +** Registered sqlite3_vfs objects are kept on a linked list formed by +** the pNext pointer. The [sqlite3_vfs_register()] +** and [sqlite3_vfs_unregister()] interfaces manage this list +** in a thread-safe way. The [sqlite3_vfs_find()] interface +** searches the list. Neither the application code nor the VFS +** implementation should use the pNext pointer. +** +** The pNext field is the only field in the sqlite3_vfs +** structure that SQLite will ever modify. SQLite will only access +** or modify this field while holding a particular static mutex. +** The application should never modify anything within the sqlite3_vfs +** object once the object has been registered. 
+** +** The zName field holds the name of the VFS module. The name must +** be unique across all VFS modules. +** +** [[sqlite3_vfs.xOpen]] +** ^SQLite guarantees that the zFilename parameter to xOpen +** is either a NULL pointer or string obtained +** from xFullPathname() with an optional suffix added. +** ^If a suffix is added to the zFilename parameter, it will +** consist of a single "-" character followed by no more than +** 11 alphanumeric and/or "-" characters. +** ^SQLite further guarantees that +** the string will be valid and unchanged until xClose() is +** called. Because of the previous sentence, +** the [sqlite3_file] can safely store a pointer to the +** filename if it needs to remember the filename for some reason. +** If the zFilename parameter to xOpen is a NULL pointer then xOpen +** must invent its own temporary name for the file. ^Whenever the +** xFilename parameter is NULL it will also be the case that the +** flags parameter will include [SQLITE_OPEN_DELETEONCLOSE]. +** +** The flags argument to xOpen() includes all bits set in +** the flags argument to [sqlite3_open_v2()]. Or if [sqlite3_open()] +** or [sqlite3_open16()] is used, then flags includes at least +** [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]. +** If xOpen() opens a file read-only then it sets *pOutFlags to +** include [SQLITE_OPEN_READONLY]. Other bits in *pOutFlags may be set. +** +** ^(SQLite will also add one of the following flags to the xOpen() +** call, depending on the object being opened: +** +**
    +**
  • [SQLITE_OPEN_MAIN_DB] +**
  • [SQLITE_OPEN_MAIN_JOURNAL] +**
  • [SQLITE_OPEN_TEMP_DB] +**
  • [SQLITE_OPEN_TEMP_JOURNAL] +**
  • [SQLITE_OPEN_TRANSIENT_DB] +**
  • [SQLITE_OPEN_SUBJOURNAL] +**
  • [SQLITE_OPEN_SUPER_JOURNAL] +**
  • [SQLITE_OPEN_WAL] +**
)^ +** +** The file I/O implementation can use the object type flags to +** change the way it deals with files. For example, an application +** that does not care about crash recovery or rollback might make +** the open of a journal file a no-op. Writes to this journal would +** also be no-ops, and any attempt to read the journal would return +** SQLITE_IOERR. Or the implementation might recognize that a database +** file will be doing page-aligned sector reads and writes in a random +** order and set up its I/O subsystem accordingly. +** +** SQLite might also add one of the following flags to the xOpen method: +** +**
    +**
  • [SQLITE_OPEN_DELETEONCLOSE] +**
  • [SQLITE_OPEN_EXCLUSIVE] +**
+** +** The [SQLITE_OPEN_DELETEONCLOSE] flag means the file should be +** deleted when it is closed. ^The [SQLITE_OPEN_DELETEONCLOSE] +** will be set for TEMP databases and their journals, transient +** databases, and subjournals. +** +** ^The [SQLITE_OPEN_EXCLUSIVE] flag is always used in conjunction +** with the [SQLITE_OPEN_CREATE] flag, which are both directly +** analogous to the O_EXCL and O_CREAT flags of the POSIX open() +** API. The SQLITE_OPEN_EXCLUSIVE flag, when paired with the +** SQLITE_OPEN_CREATE, is used to indicate that file should always +** be created, and that it is an error if it already exists. +** It is not used to indicate the file should be opened +** for exclusive access. +** +** ^At least szOsFile bytes of memory are allocated by SQLite +** to hold the [sqlite3_file] structure passed as the third +** argument to xOpen. The xOpen method does not have to +** allocate the structure; it should just fill it in. Note that +** the xOpen method must set the sqlite3_file.pMethods to either +** a valid [sqlite3_io_methods] object or to NULL. xOpen must do +** this even if the open fails. SQLite expects that the sqlite3_file.pMethods +** element will be valid after xOpen returns regardless of the success +** or failure of the xOpen call. +** +** [[sqlite3_vfs.xAccess]] +** ^The flags argument to xAccess() may be [SQLITE_ACCESS_EXISTS] +** to test for the existence of a file, or [SQLITE_ACCESS_READWRITE] to +** test whether a file is readable and writable, or [SQLITE_ACCESS_READ] +** to test whether a file is at least readable. The SQLITE_ACCESS_READ +** flag is never actually used and is not implemented in the built-in +** VFSes of SQLite. The file is named by the second argument and can be a +** directory. The xAccess method returns [SQLITE_OK] on success or some +** non-zero error code if there is an I/O error or if the name of +** the file given in the second argument is illegal. 
If SQLITE_OK +** is returned, then non-zero or zero is written into *pResOut to indicate +** whether or not the file is accessible. +** +** ^SQLite will always allocate at least mxPathname+1 bytes for the +** output buffer xFullPathname. The exact size of the output buffer +** is also passed as a parameter to both methods. If the output buffer +** is not large enough, [SQLITE_CANTOPEN] should be returned. Since this is +** handled as a fatal error by SQLite, vfs implementations should endeavor +** to prevent this by setting mxPathname to a sufficiently large value. +** +** The xRandomness(), xSleep(), xCurrentTime(), and xCurrentTimeInt64() +** interfaces are not strictly a part of the filesystem, but they are +** included in the VFS structure for completeness. +** The xRandomness() function attempts to return nBytes bytes +** of good-quality randomness into zOut. The return value is +** the actual number of bytes of randomness obtained. +** The xSleep() method causes the calling thread to sleep for at +** least the number of microseconds given. ^The xCurrentTime() +** method returns a Julian Day Number for the current date and time as +** a floating point value. +** ^The xCurrentTimeInt64() method returns, as an integer, the Julian +** Day Number multiplied by 86400000 (the number of milliseconds in +** a 24-hour day). +** ^SQLite will use the xCurrentTimeInt64() method to get the current +** date and time if that method is available (if iVersion is 2 or +** greater and the function pointer is not NULL) and will fall back +** to xCurrentTime() if xCurrentTimeInt64() is unavailable. +** +** ^The xSetSystemCall(), xGetSystemCall(), and xNextSystemCall() interfaces +** are not used by the SQLite core. These optional interfaces are provided +** by some VFSes to facilitate testing of the VFS code. 
By overriding +** system calls with functions under its control, a test program can +** simulate faults and error conditions that would otherwise be difficult +** or impossible to induce. The set of system calls that can be overridden +** varies from one VFS to another, and from one version of the same VFS to the +** next. Applications that use these interfaces must be prepared for any +** or all of these interfaces to be NULL or for their behavior to change +** from one release to the next. Applications must not attempt to access +** any of these methods if the iVersion of the VFS is less than 3. +*/ +typedef struct sqlite3_vfs sqlite3_vfs; +typedef void (*sqlite3_syscall_ptr)(void); +struct sqlite3_vfs { + int iVersion; /* Structure version number (currently 3) */ + int szOsFile; /* Size of subclassed sqlite3_file */ + int mxPathname; /* Maximum file pathname length */ + sqlite3_vfs *pNext; /* Next registered VFS */ + const char *zName; /* Name of this virtual file system */ + void *pAppData; /* Pointer to application-specific data */ + int (*xOpen)(sqlite3_vfs*, sqlite3_filename zName, sqlite3_file*, + int flags, int *pOutFlags); + int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); + int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut); + int (*xFullPathname)(sqlite3_vfs*, const char *zName, int nOut, char *zOut); + void *(*xDlOpen)(sqlite3_vfs*, const char *zFilename); + void (*xDlError)(sqlite3_vfs*, int nByte, char *zErrMsg); + void (*(*xDlSym)(sqlite3_vfs*,void*, const char *zSymbol))(void); + void (*xDlClose)(sqlite3_vfs*, void*); + int (*xRandomness)(sqlite3_vfs*, int nByte, char *zOut); + int (*xSleep)(sqlite3_vfs*, int microseconds); + int (*xCurrentTime)(sqlite3_vfs*, double*); + int (*xGetLastError)(sqlite3_vfs*, int, char *); + /* + ** The methods above are in version 1 of the sqlite_vfs object + ** definition. 
Those that follow are added in version 2 or later + */ + int (*xCurrentTimeInt64)(sqlite3_vfs*, sqlite3_int64*); + /* + ** The methods above are in versions 1 and 2 of the sqlite_vfs object. + ** Those below are for version 3 and greater. + */ + int (*xSetSystemCall)(sqlite3_vfs*, const char *zName, sqlite3_syscall_ptr); + sqlite3_syscall_ptr (*xGetSystemCall)(sqlite3_vfs*, const char *zName); + const char *(*xNextSystemCall)(sqlite3_vfs*, const char *zName); + /* + ** The methods above are in versions 1 through 3 of the sqlite_vfs object. + ** New fields may be appended in future versions. The iVersion + ** value will increment whenever this happens. + */ +}; + +/* +** CAPI3REF: Flags for the xAccess VFS method +** +** These integer constants can be used as the third parameter to +** the xAccess method of an [sqlite3_vfs] object. They determine +** what kind of permissions the xAccess method is looking for. +** With SQLITE_ACCESS_EXISTS, the xAccess method +** simply checks whether the file exists. +** With SQLITE_ACCESS_READWRITE, the xAccess method +** checks whether the named directory is both readable and writable +** (in other words, if files can be added, removed, and renamed within +** the directory). +** The SQLITE_ACCESS_READWRITE constant is currently used only by the +** [temp_store_directory pragma], though this could change in a future +** release of SQLite. +** With SQLITE_ACCESS_READ, the xAccess method +** checks whether the file is readable. The SQLITE_ACCESS_READ constant is +** currently unused, though it might be used in a future release of +** SQLite. +*/ +#define SQLITE_ACCESS_EXISTS 0 +#define SQLITE_ACCESS_READWRITE 1 /* Used by PRAGMA temp_store_directory */ +#define SQLITE_ACCESS_READ 2 /* Unused */ + +/* +** CAPI3REF: Flags for the xShmLock VFS method +** +** These integer constants define the various locking operations +** allowed by the xShmLock method of [sqlite3_io_methods]. 
The +** following are the only legal combinations of flags to the +** xShmLock method: +** +**
    +**
  • SQLITE_SHM_LOCK | SQLITE_SHM_SHARED +**
  • SQLITE_SHM_LOCK | SQLITE_SHM_EXCLUSIVE +**
  • SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED +**
  • SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE +**
+** +** When unlocking, the same SHARED or EXCLUSIVE flag must be supplied as +** was given on the corresponding lock. +** +** The xShmLock method can transition between unlocked and SHARED or +** between unlocked and EXCLUSIVE. It cannot transition between SHARED +** and EXCLUSIVE. +*/ +#define SQLITE_SHM_UNLOCK 1 +#define SQLITE_SHM_LOCK 2 +#define SQLITE_SHM_SHARED 4 +#define SQLITE_SHM_EXCLUSIVE 8 + +/* +** CAPI3REF: Maximum xShmLock index +** +** The xShmLock method on [sqlite3_io_methods] may use values +** between 0 and this upper bound as its "offset" argument. +** The SQLite core will never attempt to acquire or release a +** lock outside of this range +*/ +#define SQLITE_SHM_NLOCK 8 + + +/* +** CAPI3REF: Initialize The SQLite Library +** +** ^The sqlite3_initialize() routine initializes the +** SQLite library. ^The sqlite3_shutdown() routine +** deallocates any resources that were allocated by sqlite3_initialize(). +** These routines are designed to aid in process initialization and +** shutdown on embedded systems. Workstation applications using +** SQLite normally do not need to invoke either of these routines. +** +** A call to sqlite3_initialize() is an "effective" call if it is +** the first time sqlite3_initialize() is invoked during the lifetime of +** the process, or if it is the first time sqlite3_initialize() is invoked +** following a call to sqlite3_shutdown(). ^(Only an effective call +** of sqlite3_initialize() does any initialization. All other calls +** are harmless no-ops.)^ +** +** A call to sqlite3_shutdown() is an "effective" call if it is the first +** call to sqlite3_shutdown() since the last sqlite3_initialize(). ^(Only +** an effective call to sqlite3_shutdown() does any deinitialization. +** All other valid calls to sqlite3_shutdown() are harmless no-ops.)^ +** +** The sqlite3_initialize() interface is threadsafe, but sqlite3_shutdown() +** is not. The sqlite3_shutdown() interface must only be called from a +** single thread. 
All open [database connections] must be closed and all +** other SQLite resources must be deallocated prior to invoking +** sqlite3_shutdown(). +** +** Among other things, ^sqlite3_initialize() will invoke +** sqlite3_os_init(). Similarly, ^sqlite3_shutdown() +** will invoke sqlite3_os_end(). +** +** ^The sqlite3_initialize() routine returns [SQLITE_OK] on success. +** ^If for some reason, sqlite3_initialize() is unable to initialize +** the library (perhaps it is unable to allocate a needed resource such +** as a mutex) it returns an [error code] other than [SQLITE_OK]. +** +** ^The sqlite3_initialize() routine is called internally by many other +** SQLite interfaces so that an application usually does not need to +** invoke sqlite3_initialize() directly. For example, [sqlite3_open()] +** calls sqlite3_initialize() so the SQLite library will be automatically +** initialized when [sqlite3_open()] is called if it has not be initialized +** already. ^However, if SQLite is compiled with the [SQLITE_OMIT_AUTOINIT] +** compile-time option, then the automatic calls to sqlite3_initialize() +** are omitted and the application must call sqlite3_initialize() directly +** prior to using any other SQLite interface. For maximum portability, +** it is recommended that applications always invoke sqlite3_initialize() +** directly prior to using any other SQLite interface. Future releases +** of SQLite may require this. In other words, the behavior exhibited +** when SQLite is compiled with [SQLITE_OMIT_AUTOINIT] might become the +** default behavior in some future release of SQLite. +** +** The sqlite3_os_init() routine does operating-system specific +** initialization of the SQLite library. The sqlite3_os_end() +** routine undoes the effect of sqlite3_os_init(). 
Typical tasks +** performed by these routines include allocation or deallocation +** of static resources, initialization of global variables, +** setting up a default [sqlite3_vfs] module, or setting up +** a default configuration using [sqlite3_config()]. +** +** The application should never invoke either sqlite3_os_init() +** or sqlite3_os_end() directly. The application should only invoke +** sqlite3_initialize() and sqlite3_shutdown(). The sqlite3_os_init() +** interface is called automatically by sqlite3_initialize() and +** sqlite3_os_end() is called by sqlite3_shutdown(). Appropriate +** implementations for sqlite3_os_init() and sqlite3_os_end() +** are built into SQLite when it is compiled for Unix, Windows, or OS/2. +** When [custom builds | built for other platforms] +** (using the [SQLITE_OS_OTHER=1] compile-time +** option) the application must supply a suitable implementation for +** sqlite3_os_init() and sqlite3_os_end(). An application-supplied +** implementation of sqlite3_os_init() or sqlite3_os_end() +** must return [SQLITE_OK] on success and some other [error code] upon +** failure. +*/ +SQLITE_API int sqlite3_initialize(void); +SQLITE_API int sqlite3_shutdown(void); +SQLITE_API int sqlite3_os_init(void); +SQLITE_API int sqlite3_os_end(void); + +/* +** CAPI3REF: Configuring The SQLite Library +** +** The sqlite3_config() interface is used to make global configuration +** changes to SQLite in order to tune SQLite to the specific needs of +** the application. The default configuration is recommended for most +** applications and so this routine is usually not necessary. It is +** provided to support rare applications with unusual needs. +** +** The sqlite3_config() interface is not threadsafe. The application +** must ensure that no other SQLite interfaces are invoked by other +** threads while sqlite3_config() is running. 
+** +** The first argument to sqlite3_config() is an integer +** [configuration option] that determines +** what property of SQLite is to be configured. Subsequent arguments +** vary depending on the [configuration option] +** in the first argument. +** +** For most configuration options, the sqlite3_config() interface +** may only be invoked prior to library initialization using +** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. +** The exceptional configuration options that may be invoked at any time +** are called "anytime configuration options". +** ^If sqlite3_config() is called after [sqlite3_initialize()] and before +** [sqlite3_shutdown()] with a first argument that is not an anytime +** configuration option, then the sqlite3_config() call will return SQLITE_MISUSE. +** Note, however, that ^sqlite3_config() can be called as part of the +** implementation of an application-defined [sqlite3_os_init()]. +** +** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. +** ^If the option is unknown or SQLite is unable to set the option +** then this routine returns a non-zero [error code]. +*/ +SQLITE_API int sqlite3_config(int, ...); + +/* +** CAPI3REF: Configure database connections +** METHOD: sqlite3 +** +** The sqlite3_db_config() interface is used to make configuration +** changes to a [database connection]. The interface is similar to +** [sqlite3_config()] except that the changes apply to a single +** [database connection] (specified in the first argument). +** +** The second argument to sqlite3_db_config(D,V,...) is the +** [SQLITE_DBCONFIG_LOOKASIDE | configuration verb] - an integer code +** that indicates what aspect of the [database connection] is being configured. +** Subsequent arguments vary depending on the configuration verb. +** +** ^Calls to sqlite3_db_config() return SQLITE_OK if and only if +** the call is considered successful. 
+*/ +SQLITE_API int sqlite3_db_config(sqlite3*, int op, ...); + +/* +** CAPI3REF: Memory Allocation Routines +** +** An instance of this object defines the interface between SQLite +** and low-level memory allocation routines. +** +** This object is used in only one place in the SQLite interface. +** A pointer to an instance of this object is the argument to +** [sqlite3_config()] when the configuration option is +** [SQLITE_CONFIG_MALLOC] or [SQLITE_CONFIG_GETMALLOC]. +** By creating an instance of this object +** and passing it to [sqlite3_config]([SQLITE_CONFIG_MALLOC]) +** during configuration, an application can specify an alternative +** memory allocation subsystem for SQLite to use for all of its +** dynamic memory needs. +** +** Note that SQLite comes with several [built-in memory allocators] +** that are perfectly adequate for the overwhelming majority of applications +** and that this object is only useful to a tiny minority of applications +** with specialized memory allocation requirements. This object is +** also used during testing of SQLite in order to specify an alternative +** memory allocator that simulates memory out-of-memory conditions in +** order to verify that SQLite recovers gracefully from such +** conditions. +** +** The xMalloc, xRealloc, and xFree methods must work like the +** malloc(), realloc() and free() functions from the standard C library. +** ^SQLite guarantees that the second argument to +** xRealloc is always a value returned by a prior call to xRoundup. +** +** xSize should return the allocated size of a memory allocation +** previously obtained from xMalloc or xRealloc. The allocated size +** is always at least as big as the requested size but may be larger. +** +** The xRoundup method returns what would be the allocated size of +** a memory allocation given a particular requested size. Most memory +** allocators round up memory allocations at least to the next multiple +** of 8. 
Some allocators round up to a larger multiple or to a power of 2. +** Every memory allocation request coming in through [sqlite3_malloc()] +** or [sqlite3_realloc()] first calls xRoundup. If xRoundup returns 0, +** that causes the corresponding memory allocation to fail. +** +** The xInit method initializes the memory allocator. For example, +** it might allocate any required mutexes or initialize internal data +** structures. The xShutdown method is invoked (indirectly) by +** [sqlite3_shutdown()] and should deallocate any resources acquired +** by xInit. The pAppData pointer is used as the only parameter to +** xInit and xShutdown. +** +** SQLite holds the [SQLITE_MUTEX_STATIC_MAIN] mutex when it invokes +** the xInit method, so the xInit method need not be threadsafe. The +** xShutdown method is only called from [sqlite3_shutdown()] so it does +** not need to be threadsafe either. For all other methods, SQLite +** holds the [SQLITE_MUTEX_STATIC_MEM] mutex as long as the +** [SQLITE_CONFIG_MEMSTATUS] configuration option is turned on (which +** it is by default) and so the methods are automatically serialized. +** However, if [SQLITE_CONFIG_MEMSTATUS] is disabled, then the other +** methods must be threadsafe or else make their own arrangements for +** serialization. +** +** SQLite will never invoke xInit() more than once without an intervening +** call to xShutdown(). 
+*/ +typedef struct sqlite3_mem_methods sqlite3_mem_methods; +struct sqlite3_mem_methods { + void *(*xMalloc)(int); /* Memory allocation function */ + void (*xFree)(void*); /* Free a prior allocation */ + void *(*xRealloc)(void*,int); /* Resize an allocation */ + int (*xSize)(void*); /* Return the size of an allocation */ + int (*xRoundup)(int); /* Round up request size to allocation size */ + int (*xInit)(void*); /* Initialize the memory allocator */ + void (*xShutdown)(void*); /* Deinitialize the memory allocator */ + void *pAppData; /* Argument to xInit() and xShutdown() */ +}; + +/* +** CAPI3REF: Configuration Options +** KEYWORDS: {configuration option} +** +** These constants are the available integer configuration options that +** can be passed as the first argument to the [sqlite3_config()] interface. +** +** Most of the configuration options for sqlite3_config() +** will only work if invoked prior to [sqlite3_initialize()] or after +** [sqlite3_shutdown()]. The few exceptions to this rule are called +** "anytime configuration options". +** ^Calling [sqlite3_config()] with a first argument that is not an +** anytime configuration option in between calls to [sqlite3_initialize()] and +** [sqlite3_shutdown()] is a no-op that returns SQLITE_MISUSE. +** +** The set of anytime configuration options can change (by insertions +** and/or deletions) from one release of SQLite to the next. +** As of SQLite version 3.42.0, the complete set of anytime configuration +** options is: +**
    +**
  • SQLITE_CONFIG_LOG +**
  • SQLITE_CONFIG_PCACHE_HDRSZ +**
+** +** New configuration options may be added in future releases of SQLite. +** Existing configuration options might be discontinued. Applications +** should check the return code from [sqlite3_config()] to make sure that +** the call worked. The [sqlite3_config()] interface will return a +** non-zero [error code] if a discontinued or unsupported configuration option +** is invoked. +** +**
+** [[SQLITE_CONFIG_SINGLETHREAD]]
SQLITE_CONFIG_SINGLETHREAD
+**
There are no arguments to this option. ^This option sets the +** [threading mode] to Single-thread. In other words, it disables +** all mutexing and puts SQLite into a mode where it can only be used +** by a single thread. ^If SQLite is compiled with +** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then +** it is not possible to change the [threading mode] from its default +** value of Single-thread and so [sqlite3_config()] will return +** [SQLITE_ERROR] if called with the SQLITE_CONFIG_SINGLETHREAD +** configuration option.
+** +** [[SQLITE_CONFIG_MULTITHREAD]]
SQLITE_CONFIG_MULTITHREAD
+**
There are no arguments to this option. ^This option sets the +** [threading mode] to Multi-thread. In other words, it disables +** mutexing on [database connection] and [prepared statement] objects. +** The application is responsible for serializing access to +** [database connections] and [prepared statements]. But other mutexes +** are enabled so that SQLite will be safe to use in a multi-threaded +** environment as long as no two threads attempt to use the same +** [database connection] at the same time. ^If SQLite is compiled with +** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then +** it is not possible to set the Multi-thread [threading mode] and +** [sqlite3_config()] will return [SQLITE_ERROR] if called with the +** SQLITE_CONFIG_MULTITHREAD configuration option.
+** +** [[SQLITE_CONFIG_SERIALIZED]]
SQLITE_CONFIG_SERIALIZED
+**
There are no arguments to this option. ^This option sets the +** [threading mode] to Serialized. In other words, this option enables +** all mutexes including the recursive +** mutexes on [database connection] and [prepared statement] objects. +** In this mode (which is the default when SQLite is compiled with +** [SQLITE_THREADSAFE=1]) the SQLite library will itself serialize access +** to [database connections] and [prepared statements] so that the +** application is free to use the same [database connection] or the +** same [prepared statement] in different threads at the same time. +** ^If SQLite is compiled with +** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then +** it is not possible to set the Serialized [threading mode] and +** [sqlite3_config()] will return [SQLITE_ERROR] if called with the +** SQLITE_CONFIG_SERIALIZED configuration option.
+** +** [[SQLITE_CONFIG_MALLOC]]
SQLITE_CONFIG_MALLOC
+**
^(The SQLITE_CONFIG_MALLOC option takes a single argument which is +** a pointer to an instance of the [sqlite3_mem_methods] structure. +** The argument specifies +** alternative low-level memory allocation routines to be used in place of +** the memory allocation routines built into SQLite.)^ ^SQLite makes +** its own private copy of the content of the [sqlite3_mem_methods] structure +** before the [sqlite3_config()] call returns.
+** +** [[SQLITE_CONFIG_GETMALLOC]]
SQLITE_CONFIG_GETMALLOC
+**
^(The SQLITE_CONFIG_GETMALLOC option takes a single argument which
+** is a pointer to an instance of the [sqlite3_mem_methods] structure.
+** The [sqlite3_mem_methods]
+** structure is filled with the currently defined memory allocation routines.)^
+** This option can be used to overload the default memory allocation
+** routines with a wrapper that simulates memory allocation failure or
+** tracks memory usage, for example.
+** +** [[SQLITE_CONFIG_SMALL_MALLOC]]
SQLITE_CONFIG_SMALL_MALLOC
+**
^The SQLITE_CONFIG_SMALL_MALLOC option takes a single argument of
+** type int, interpreted as a boolean, which if true provides a hint to
+** SQLite that it should avoid large memory allocations if possible.
+** SQLite will run faster if it is free to make large memory allocations,
+** but some applications might prefer to run slower in exchange for
+** guarantees about memory fragmentation that are possible if large
+** allocations are avoided. This hint is normally off.
+**
+** +** [[SQLITE_CONFIG_MEMSTATUS]]
SQLITE_CONFIG_MEMSTATUS
+**
^The SQLITE_CONFIG_MEMSTATUS option takes a single argument of type int,
+** interpreted as a boolean, which enables or disables the collection of
+** memory allocation statistics. ^(When memory allocation statistics are
+** disabled, the following SQLite interfaces become non-operational:
+**
    +**
  • [sqlite3_hard_heap_limit64()] +**
  • [sqlite3_memory_used()] +**
  • [sqlite3_memory_highwater()] +**
  • [sqlite3_soft_heap_limit64()] +**
  • [sqlite3_status64()] +**
)^ +** ^Memory allocation statistics are enabled by default unless SQLite is +** compiled with [SQLITE_DEFAULT_MEMSTATUS]=0 in which case memory +** allocation statistics are disabled by default. +**
+** +** [[SQLITE_CONFIG_SCRATCH]]
SQLITE_CONFIG_SCRATCH
+**
The SQLITE_CONFIG_SCRATCH option is no longer used. +**
+** +** [[SQLITE_CONFIG_PAGECACHE]]
SQLITE_CONFIG_PAGECACHE
+**
^The SQLITE_CONFIG_PAGECACHE option specifies a memory pool +** that SQLite can use for the database page cache with the default page +** cache implementation. +** This configuration option is a no-op if an application-defined page +** cache implementation is loaded using the [SQLITE_CONFIG_PCACHE2]. +** ^There are three arguments to SQLITE_CONFIG_PAGECACHE: A pointer to +** 8-byte aligned memory (pMem), the size of each page cache line (sz), +** and the number of cache lines (N). +** The sz argument should be the size of the largest database page +** (a power of two between 512 and 65536) plus some extra bytes for each +** page header. ^The number of extra bytes needed by the page header +** can be determined using [SQLITE_CONFIG_PCACHE_HDRSZ]. +** ^It is harmless, apart from the wasted memory, +** for the sz parameter to be larger than necessary. The pMem +** argument must be either a NULL pointer or a pointer to an 8-byte +** aligned block of memory of at least sz*N bytes, otherwise +** subsequent behavior is undefined. +** ^When pMem is not NULL, SQLite will strive to use the memory provided +** to satisfy page cache needs, falling back to [sqlite3_malloc()] if +** a page cache line is larger than sz bytes or if all of the pMem buffer +** is exhausted. +** ^If pMem is NULL and N is non-zero, then each database connection +** does an initial bulk allocation for page cache memory +** from [sqlite3_malloc()] sufficient for N cache lines if N is positive or +** of -1024*N bytes if N is negative, . ^If additional +** page cache memory is needed beyond what is provided by the initial +** allocation, then SQLite goes to [sqlite3_malloc()] separately for each +** additional cache line.
+** +** [[SQLITE_CONFIG_HEAP]]
SQLITE_CONFIG_HEAP
+**
^The SQLITE_CONFIG_HEAP option specifies a static memory buffer +** that SQLite will use for all of its dynamic memory allocation needs +** beyond those provided for by [SQLITE_CONFIG_PAGECACHE]. +** ^The SQLITE_CONFIG_HEAP option is only available if SQLite is compiled +** with either [SQLITE_ENABLE_MEMSYS3] or [SQLITE_ENABLE_MEMSYS5] and returns +** [SQLITE_ERROR] if invoked otherwise. +** ^There are three arguments to SQLITE_CONFIG_HEAP: +** An 8-byte aligned pointer to the memory, +** the number of bytes in the memory buffer, and the minimum allocation size. +** ^If the first pointer (the memory pointer) is NULL, then SQLite reverts +** to using its default memory allocator (the system malloc() implementation), +** undoing any prior invocation of [SQLITE_CONFIG_MALLOC]. ^If the +** memory pointer is not NULL then the alternative memory +** allocator is engaged to handle all of SQLites memory allocation needs. +** The first pointer (the memory pointer) must be aligned to an 8-byte +** boundary or subsequent behavior of SQLite will be undefined. +** The minimum allocation size is capped at 2**12. Reasonable values +** for the minimum allocation size are 2**5 through 2**8.
+** +** [[SQLITE_CONFIG_MUTEX]]
SQLITE_CONFIG_MUTEX
+**
^(The SQLITE_CONFIG_MUTEX option takes a single argument which is a
+** pointer to an instance of the [sqlite3_mutex_methods] structure.
+** The argument specifies alternative low-level mutex routines to be used
+** in place of the mutex routines built into SQLite.)^ ^SQLite makes a copy of
+** the content of the [sqlite3_mutex_methods] structure before the call to
+** [sqlite3_config()] returns. ^If SQLite is compiled with
+** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then
+** the entire mutexing subsystem is omitted from the build and hence calls to
+** [sqlite3_config()] with the SQLITE_CONFIG_MUTEX configuration option will
+** return [SQLITE_ERROR].
+** +** [[SQLITE_CONFIG_GETMUTEX]]
SQLITE_CONFIG_GETMUTEX
+**
^(The SQLITE_CONFIG_GETMUTEX option takes a single argument which +** is a pointer to an instance of the [sqlite3_mutex_methods] structure. The +** [sqlite3_mutex_methods] +** structure is filled with the currently defined mutex routines.)^ +** This option can be used to overload the default mutex allocation +** routines with a wrapper used to track mutex usage for performance +** profiling or testing, for example. ^If SQLite is compiled with +** the [SQLITE_THREADSAFE | SQLITE_THREADSAFE=0] compile-time option then +** the entire mutexing subsystem is omitted from the build and hence calls to +** [sqlite3_config()] with the SQLITE_CONFIG_GETMUTEX configuration option will +** return [SQLITE_ERROR].
+** +** [[SQLITE_CONFIG_LOOKASIDE]]
SQLITE_CONFIG_LOOKASIDE
+**
^(The SQLITE_CONFIG_LOOKASIDE option takes two arguments that determine +** the default size of lookaside memory on each [database connection]. +** The first argument is the +** size of each lookaside buffer slot and the second is the number of +** slots allocated to each database connection.)^ ^(SQLITE_CONFIG_LOOKASIDE +** sets the default lookaside size. The [SQLITE_DBCONFIG_LOOKASIDE] +** option to [sqlite3_db_config()] can be used to change the lookaside +** configuration on individual connections.)^
+** +** [[SQLITE_CONFIG_PCACHE2]]
SQLITE_CONFIG_PCACHE2
+**
^(The SQLITE_CONFIG_PCACHE2 option takes a single argument which is +** a pointer to an [sqlite3_pcache_methods2] object. This object specifies +** the interface to a custom page cache implementation.)^ +** ^SQLite makes a copy of the [sqlite3_pcache_methods2] object.
+** +** [[SQLITE_CONFIG_GETPCACHE2]]
SQLITE_CONFIG_GETPCACHE2
+**
^(The SQLITE_CONFIG_GETPCACHE2 option takes a single argument which
+** is a pointer to an [sqlite3_pcache_methods2] object. SQLite copies
+** the current page cache implementation into that object.)^
+** +** [[SQLITE_CONFIG_LOG]]
SQLITE_CONFIG_LOG
+**
The SQLITE_CONFIG_LOG option is used to configure the SQLite +** global [error log]. +** (^The SQLITE_CONFIG_LOG option takes two arguments: a pointer to a +** function with a call signature of void(*)(void*,int,const char*), +** and a pointer to void. ^If the function pointer is not NULL, it is +** invoked by [sqlite3_log()] to process each logging event. ^If the +** function pointer is NULL, the [sqlite3_log()] interface becomes a no-op. +** ^The void pointer that is the second argument to SQLITE_CONFIG_LOG is +** passed through as the first parameter to the application-defined logger +** function whenever that function is invoked. ^The second parameter to +** the logger function is a copy of the first parameter to the corresponding +** [sqlite3_log()] call and is intended to be a [result code] or an +** [extended result code]. ^The third parameter passed to the logger is +** log message after formatting via [sqlite3_snprintf()]. +** The SQLite logging interface is not reentrant; the logger function +** supplied by the application must not invoke any SQLite interface. +** In a multi-threaded application, the application-defined logger +** function must be threadsafe.
+** +** [[SQLITE_CONFIG_URI]]
SQLITE_CONFIG_URI +**
^(The SQLITE_CONFIG_URI option takes a single argument of type int. +** If non-zero, then URI handling is globally enabled. If the parameter is zero, +** then URI handling is globally disabled.)^ ^If URI handling is globally +** enabled, all filenames passed to [sqlite3_open()], [sqlite3_open_v2()], +** [sqlite3_open16()] or +** specified as part of [ATTACH] commands are interpreted as URIs, regardless +** of whether or not the [SQLITE_OPEN_URI] flag is set when the database +** connection is opened. ^If it is globally disabled, filenames are +** only interpreted as URIs if the SQLITE_OPEN_URI flag is set when the +** database connection is opened. ^(By default, URI handling is globally +** disabled. The default value may be changed by compiling with the +** [SQLITE_USE_URI] symbol defined.)^ +** +** [[SQLITE_CONFIG_COVERING_INDEX_SCAN]]
SQLITE_CONFIG_COVERING_INDEX_SCAN +**
^The SQLITE_CONFIG_COVERING_INDEX_SCAN option takes a single integer +** argument which is interpreted as a boolean in order to enable or disable +** the use of covering indices for full table scans in the query optimizer. +** ^The default setting is determined +** by the [SQLITE_ALLOW_COVERING_INDEX_SCAN] compile-time option, or is "on" +** if that compile-time option is omitted. +** The ability to disable the use of covering indices for full table scans +** is because some incorrectly coded legacy applications might malfunction +** when the optimization is enabled. Providing the ability to +** disable the optimization allows the older, buggy application code to work +** without change even with newer versions of SQLite. +** +** [[SQLITE_CONFIG_PCACHE]] [[SQLITE_CONFIG_GETPCACHE]] +**
SQLITE_CONFIG_PCACHE and SQLITE_CONFIG_GETPCACHE +**
These options are obsolete and should not be used by new code. +** They are retained for backwards compatibility but are now no-ops. +**
+** +** [[SQLITE_CONFIG_SQLLOG]] +**
SQLITE_CONFIG_SQLLOG +**
This option is only available if SQLite is compiled with the
+** [SQLITE_ENABLE_SQLLOG] pre-processor macro defined. The first argument should
+** be a pointer to a function of type void(*)(void*,sqlite3*,const char*, int).
+** The second should be of type (void*). The callback is invoked by the library
+** in three separate circumstances, identified by the value passed as the
+** fourth parameter. If the fourth parameter is 0, then the database connection
+** passed as the second argument has just been opened. The third argument
+** points to a buffer containing the name of the main database file. If the
+** fourth parameter is 1, then the SQL statement that the third parameter
+** points to has just been executed. Or, if the fourth parameter is 2, then
+** the connection being passed as the second parameter is being closed. The
+** third parameter is passed NULL in this case. An example of using this
+** configuration option can be seen in the "test_sqllog.c" source file in
+** the canonical SQLite source tree.
+** +** [[SQLITE_CONFIG_MMAP_SIZE]] +**
SQLITE_CONFIG_MMAP_SIZE +**
^SQLITE_CONFIG_MMAP_SIZE takes two 64-bit integer (sqlite3_int64) values +** that are the default mmap size limit (the default setting for +** [PRAGMA mmap_size]) and the maximum allowed mmap size limit. +** ^The default setting can be overridden by each database connection using +** either the [PRAGMA mmap_size] command, or by using the +** [SQLITE_FCNTL_MMAP_SIZE] file control. ^(The maximum allowed mmap size +** will be silently truncated if necessary so that it does not exceed the +** compile-time maximum mmap size set by the +** [SQLITE_MAX_MMAP_SIZE] compile-time option.)^ +** ^If either argument to this option is negative, then that argument is +** changed to its compile-time default. +** +** [[SQLITE_CONFIG_WIN32_HEAPSIZE]] +**
SQLITE_CONFIG_WIN32_HEAPSIZE +**
^The SQLITE_CONFIG_WIN32_HEAPSIZE option is only available if SQLite is +** compiled for Windows with the [SQLITE_WIN32_MALLOC] pre-processor macro +** defined. ^SQLITE_CONFIG_WIN32_HEAPSIZE takes a 32-bit unsigned integer value +** that specifies the maximum size of the created heap. +** +** [[SQLITE_CONFIG_PCACHE_HDRSZ]] +**
SQLITE_CONFIG_PCACHE_HDRSZ +**
^The SQLITE_CONFIG_PCACHE_HDRSZ option takes a single parameter which +** is a pointer to an integer and writes into that integer the number of extra +** bytes per page required for each page in [SQLITE_CONFIG_PAGECACHE]. +** The amount of extra space required can change depending on the compiler, +** target platform, and SQLite version. +** +** [[SQLITE_CONFIG_PMASZ]] +**
SQLITE_CONFIG_PMASZ +**
^The SQLITE_CONFIG_PMASZ option takes a single parameter which +** is an unsigned integer and sets the "Minimum PMA Size" for the multithreaded +** sorter to that integer. The default minimum PMA Size is set by the +** [SQLITE_SORTER_PMASZ] compile-time option. New threads are launched +** to help with sort operations when multithreaded sorting +** is enabled (using the [PRAGMA threads] command) and the amount of content +** to be sorted exceeds the page size times the minimum of the +** [PRAGMA cache_size] setting and this value. +** +** [[SQLITE_CONFIG_STMTJRNL_SPILL]] +**
SQLITE_CONFIG_STMTJRNL_SPILL +**
^The SQLITE_CONFIG_STMTJRNL_SPILL option takes a single parameter which +** becomes the [statement journal] spill-to-disk threshold. +** [Statement journals] are held in memory until their size (in bytes) +** exceeds this threshold, at which point they are written to disk. +** Or if the threshold is -1, statement journals are always held +** exclusively in memory. +** Since many statement journals never become large, setting the spill +** threshold to a value such as 64KiB can greatly reduce the amount of +** I/O required to support statement rollback. +** The default value for this setting is controlled by the +** [SQLITE_STMTJRNL_SPILL] compile-time option. +** +** [[SQLITE_CONFIG_SORTERREF_SIZE]] +**
SQLITE_CONFIG_SORTERREF_SIZE +**
The SQLITE_CONFIG_SORTERREF_SIZE option accepts a single parameter +** of type (int) - the new value of the sorter-reference size threshold. +** Usually, when SQLite uses an external sort to order records according +** to an ORDER BY clause, all fields required by the caller are present in the +** sorted records. However, if SQLite determines based on the declared type +** of a table column that its values are likely to be very large - larger +** than the configured sorter-reference size threshold - then a reference +** is stored in each sorted record and the required column values loaded +** from the database as records are returned in sorted order. The default +** value for this option is to never use this optimization. Specifying a +** negative value for this option restores the default behavior. +** This option is only available if SQLite is compiled with the +** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option. +** +** [[SQLITE_CONFIG_MEMDB_MAXSIZE]] +**
SQLITE_CONFIG_MEMDB_MAXSIZE +**
The SQLITE_CONFIG_MEMDB_MAXSIZE option accepts a single
+** [sqlite3_int64] parameter which is the default maximum size for an in-memory
+** database created using [sqlite3_deserialize()]. This default maximum
+** size can be adjusted up or down for individual databases using the
+** [SQLITE_FCNTL_SIZE_LIMIT] [sqlite3_file_control|file-control]. If this
+** configuration setting is never used, then the default maximum is determined
+** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that
+** compile-time option is not set, then the default maximum is 1073741824.
SQLITE_CONFIG_ROWID_IN_VIEW +**
The SQLITE_CONFIG_ROWID_IN_VIEW option enables or disables the ability
+** for VIEWs to have a ROWID. The capability can only be enabled if SQLite is
+** compiled with -DSQLITE_ALLOW_ROWID_IN_VIEW, in which case the capability
+** defaults to on. This configuration option queries the current setting or
+** changes the setting to off or on. The argument is a pointer to an integer.
+** If that integer initially holds a value of 1, then the ability for VIEWs to
+** have ROWIDs is activated. If the integer initially holds zero, then the
+** ability is deactivated. Any other initial value for the integer leaves the
+** setting unchanged. After changes, if any, the integer is written with
+** a 1 or 0, if the ability for VIEWs to have ROWIDs is on or off. If SQLite
+** is compiled without -DSQLITE_ALLOW_ROWID_IN_VIEW (which is the usual and
+** recommended case) then the integer is always filled with zero, regardless
+** of its initial value.
+**
+*/ +#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ +#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ +#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ +#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ +#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ +#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ +#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ +#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ +#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ +#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ +#define SQLITE_CONFIG_PCACHE 14 /* no-op */ +#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ +#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ +#define SQLITE_CONFIG_URI 17 /* int */ +#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ +#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ +#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ +#define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ +#define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ +#define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */ +#define SQLITE_CONFIG_STMTJRNL_SPILL 26 /* int nByte */ +#define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */ +#define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */ +#define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */ +#define SQLITE_CONFIG_ROWID_IN_VIEW 30 /* int* */ + +/* +** CAPI3REF: Database Connection Configuration Options +** +** These constants are the available integer configuration options that +** can be passed as the second argument to the [sqlite3_db_config()] interface. +** +** New configuration options may be added in future releases of SQLite. 
+** Existing configuration options might be discontinued. Applications +** should check the return code from [sqlite3_db_config()] to make sure that +** the call worked. ^The [sqlite3_db_config()] interface will return a +** non-zero [error code] if a discontinued or unsupported configuration option +** is invoked. +** +**
+** [[SQLITE_DBCONFIG_LOOKASIDE]] +**
SQLITE_DBCONFIG_LOOKASIDE
+**
^This option takes three additional arguments that determine the
+** [lookaside memory allocator] configuration for the [database connection].
+** ^The first argument (the third parameter to [sqlite3_db_config()]) is a
+** pointer to a memory buffer to use for lookaside memory.
+** ^The first argument after the SQLITE_DBCONFIG_LOOKASIDE verb
+** may be NULL in which case SQLite will allocate the
+** lookaside buffer itself using [sqlite3_malloc()]. ^The second argument is the
+** size of each lookaside buffer slot. ^The third argument is the number of
+** slots. The size of the buffer in the first argument must be greater than
+** or equal to the product of the second and third arguments. The buffer
+** must be aligned to an 8-byte boundary. ^If the second argument to
+** SQLITE_DBCONFIG_LOOKASIDE is not a multiple of 8, it is internally
+** rounded down to the next smaller multiple of 8. ^(The lookaside memory
+** configuration for a database connection can only be changed when that
+** connection is not currently using lookaside memory, or in other words
+** when the "current value" returned by
+** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero.
+** Any attempt to change the lookaside memory configuration when lookaside
+** memory is in use leaves the configuration unchanged and returns
+** [SQLITE_BUSY].)^
+** +** [[SQLITE_DBCONFIG_ENABLE_FKEY]] +**
SQLITE_DBCONFIG_ENABLE_FKEY
+**
^This option is used to enable or disable the enforcement of +** [foreign key constraints]. There should be two additional arguments. +** The first argument is an integer which is 0 to disable FK enforcement, +** positive to enable FK enforcement or negative to leave FK enforcement +** unchanged. The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether FK enforcement is off or on +** following this call. The second parameter may be a NULL pointer, in +** which case the FK enforcement setting is not reported back.
+** +** [[SQLITE_DBCONFIG_ENABLE_TRIGGER]] +**
SQLITE_DBCONFIG_ENABLE_TRIGGER
+**
^This option is used to enable or disable [CREATE TRIGGER | triggers]. +** There should be two additional arguments. +** The first argument is an integer which is 0 to disable triggers, +** positive to enable triggers or negative to leave the setting unchanged. +** The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether triggers are disabled or enabled +** following this call. The second parameter may be a NULL pointer, in +** which case the trigger setting is not reported back. +** +**

Originally this option disabled all triggers. ^(However, since +** SQLite version 3.35.0, TEMP triggers are still allowed even if +** this option is off. So, in other words, this option now only disables +** triggers in the main database schema or in the schemas of ATTACH-ed +** databases.)^

+** +** [[SQLITE_DBCONFIG_ENABLE_VIEW]] +**
SQLITE_DBCONFIG_ENABLE_VIEW
+**
^This option is used to enable or disable [CREATE VIEW | views]. +** There should be two additional arguments. +** The first argument is an integer which is 0 to disable views, +** positive to enable views or negative to leave the setting unchanged. +** The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether views are disabled or enabled +** following this call. The second parameter may be a NULL pointer, in +** which case the view setting is not reported back. +** +**

Originally this option disabled all views. ^(However, since +** SQLite version 3.35.0, TEMP views are still allowed even if +** this option is off. So, in other words, this option now only disables +** views in the main database schema or in the schemas of ATTACH-ed +** databases.)^

+** +** [[SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER]] +**
SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER
+**
^This option is used to enable or disable the +** [fts3_tokenizer()] function which is part of the +** [FTS3] full-text search engine extension. +** There should be two additional arguments. +** The first argument is an integer which is 0 to disable fts3_tokenizer() or +** positive to enable fts3_tokenizer() or negative to leave the setting +** unchanged. +** The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether fts3_tokenizer is disabled or enabled +** following this call. The second parameter may be a NULL pointer, in +** which case the new setting is not reported back.
+** +** [[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION]] +**
SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION
+**
^This option is used to enable or disable the [sqlite3_load_extension()] +** interface independently of the [load_extension()] SQL function. +** The [sqlite3_enable_load_extension()] API enables or disables both the +** C-API [sqlite3_load_extension()] and the SQL function [load_extension()]. +** There should be two additional arguments. +** When the first argument to this interface is 1, then only the C-API is +** enabled and the SQL function remains disabled. If the first argument to +** this interface is 0, then both the C-API and the SQL function are disabled. +** If the first argument is -1, then no changes are made to state of either the +** C-API or the SQL function. +** The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether [sqlite3_load_extension()] interface +** is disabled or enabled following this call. The second parameter may +** be a NULL pointer, in which case the new setting is not reported back. +**
+** +** [[SQLITE_DBCONFIG_MAINDBNAME]]
SQLITE_DBCONFIG_MAINDBNAME
+**
^This option is used to change the name of the "main" database +** schema. ^The sole argument is a pointer to a constant UTF8 string +** which will become the new schema name in place of "main". ^SQLite +** does not make a copy of the new main schema name string, so the application +** must ensure that the argument passed into this DBCONFIG option is unchanged +** until after the database connection closes. +**
+** +** [[SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE]] +**
SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE
+**
Usually, when a database in wal mode is closed or detached from a +** database handle, SQLite checks if this will mean that there are now no +** connections at all to the database. If so, it performs a checkpoint +** operation before closing the connection. This option may be used to +** override this behavior. The first parameter passed to this operation +** is an integer - positive to disable checkpoints-on-close, or zero (the +** default) to enable them, and negative to leave the setting unchanged. +** The second parameter is a pointer to an integer +** into which is written 0 or 1 to indicate whether checkpoints-on-close +** have been disabled - 0 if they are not disabled, 1 if they are. +**
+** +** [[SQLITE_DBCONFIG_ENABLE_QPSG]]
SQLITE_DBCONFIG_ENABLE_QPSG
+**
^(The SQLITE_DBCONFIG_ENABLE_QPSG option activates or deactivates +** the [query planner stability guarantee] (QPSG). When the QPSG is active, +** a single SQL query statement will always use the same algorithm regardless +** of values of [bound parameters].)^ The QPSG disables some query optimizations +** that look at the values of bound parameters, which can make some queries +** slower. But the QPSG has the advantage of more predictable behavior. With +** the QPSG active, SQLite will always use the same query plan in the field as +** was used during testing in the lab. +** The first argument to this setting is an integer which is 0 to disable +** the QPSG, positive to enable QPSG, or negative to leave the setting +** unchanged. The second parameter is a pointer to an integer into which +** is written 0 or 1 to indicate whether the QPSG is disabled or enabled +** following this call. +**
+** +** [[SQLITE_DBCONFIG_TRIGGER_EQP]]
SQLITE_DBCONFIG_TRIGGER_EQP
+**
By default, the output of EXPLAIN QUERY PLAN commands does not +** include output for any operations performed by trigger programs. This +** option is used to set or clear (the default) a flag that governs this +** behavior. The first parameter passed to this operation is an integer - +** positive to enable output for trigger programs, or zero to disable it, +** or negative to leave the setting unchanged. +** The second parameter is a pointer to an integer into which is written +** 0 or 1 to indicate whether output-for-triggers has been disabled - 0 if +** it is not disabled, 1 if it is. +**
+** +** [[SQLITE_DBCONFIG_RESET_DATABASE]]
SQLITE_DBCONFIG_RESET_DATABASE
+**
Set the SQLITE_DBCONFIG_RESET_DATABASE flag and then run +** [VACUUM] in order to reset a database back to an empty database +** with no schema and no content. The following process works even for +** a badly corrupted database file: +**
    +**
  1. If the database connection is newly opened, make sure it has read the +** database schema by preparing then discarding some query against the +** database, or calling sqlite3_table_column_metadata(), ignoring any +** errors. This step is only necessary if the application desires to keep +** the database in WAL mode after the reset if it was in WAL mode before +** the reset. +**
  2. sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 1, 0); +**
  3. [sqlite3_exec](db, "[VACUUM]", 0, 0, 0); +**
  4. sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0); +**
+** Because resetting a database is destructive and irreversible, the +** process requires the use of this obscure API and multiple steps to +** help ensure that it does not happen by accident. Because this +** feature must be capable of resetting corrupt databases, and +** shutting down virtual tables may require access to that corrupt +** storage, the library must abandon any installed virtual tables +** without calling their xDestroy() methods. +** +** [[SQLITE_DBCONFIG_DEFENSIVE]]
SQLITE_DBCONFIG_DEFENSIVE
+**
The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the +** "defensive" flag for a database connection. When the defensive +** flag is enabled, language features that allow ordinary SQL to +** deliberately corrupt the database file are disabled. The disabled +** features include but are not limited to the following: +**
    +**
  • The [PRAGMA writable_schema=ON] statement. +**
  • The [PRAGMA journal_mode=OFF] statement. +**
  • The [PRAGMA schema_version=N] statement. +**
  • Writes to the [sqlite_dbpage] virtual table. +**
  • Direct writes to [shadow tables]. +**
+**
+** +** [[SQLITE_DBCONFIG_WRITABLE_SCHEMA]]
SQLITE_DBCONFIG_WRITABLE_SCHEMA
+**
The SQLITE_DBCONFIG_WRITABLE_SCHEMA option activates or deactivates the +** "writable_schema" flag. This has the same effect and is logically equivalent +** to setting [PRAGMA writable_schema=ON] or [PRAGMA writable_schema=OFF]. +** The first argument to this setting is an integer which is 0 to disable +** the writable_schema, positive to enable writable_schema, or negative to +** leave the setting unchanged. The second parameter is a pointer to an +** integer into which is written 0 or 1 to indicate whether the writable_schema +** is enabled or disabled following this call. +**
+** +** [[SQLITE_DBCONFIG_LEGACY_ALTER_TABLE]] +**
SQLITE_DBCONFIG_LEGACY_ALTER_TABLE
+**
The SQLITE_DBCONFIG_LEGACY_ALTER_TABLE option activates or deactivates +** the legacy behavior of the [ALTER TABLE RENAME] command such that it +** behaves as it did prior to [version 3.24.0] (2018-06-04). See the +** "Compatibility Notice" on the [ALTER TABLE RENAME documentation] for +** additional information. This feature can also be turned on and off +** using the [PRAGMA legacy_alter_table] statement. +**
+** +** [[SQLITE_DBCONFIG_DQS_DML]] +**
SQLITE_DBCONFIG_DQS_DML
+**
The SQLITE_DBCONFIG_DQS_DML option activates or deactivates +** the legacy [double-quoted string literal] misfeature for DML statements +** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The +** default value of this setting is determined by the [-DSQLITE_DQS] +** compile-time option. +**
+** +** [[SQLITE_DBCONFIG_DQS_DDL]] +**
SQLITE_DBCONFIG_DQS_DDL
+**
The SQLITE_DBCONFIG_DQS_DDL option activates or deactivates +** the legacy [double-quoted string literal] misfeature for DDL statements, +** such as CREATE TABLE and CREATE INDEX. The +** default value of this setting is determined by the [-DSQLITE_DQS] +** compile-time option. +**
+** +** [[SQLITE_DBCONFIG_TRUSTED_SCHEMA]] +**
SQLITE_DBCONFIG_TRUSTED_SCHEMA
+**
The SQLITE_DBCONFIG_TRUSTED_SCHEMA option tells SQLite to +** assume that database schemas are untainted by malicious content. +** When the SQLITE_DBCONFIG_TRUSTED_SCHEMA option is disabled, SQLite +** takes additional defensive steps to protect the application from harm +** including: +**
    +**
  • Prohibit the use of SQL functions inside triggers, views, +** CHECK constraints, DEFAULT clauses, expression indexes, +** partial indexes, or generated columns +** unless those functions are tagged with [SQLITE_INNOCUOUS]. +**
  • Prohibit the use of virtual tables inside of triggers or views +** unless those virtual tables are tagged with [SQLITE_VTAB_INNOCUOUS]. +**
+** This setting defaults to "on" for legacy compatibility, however +** all applications are advised to turn it off if possible. This setting +** can also be controlled using the [PRAGMA trusted_schema] statement. +**
+** +** [[SQLITE_DBCONFIG_LEGACY_FILE_FORMAT]] +**
SQLITE_DBCONFIG_LEGACY_FILE_FORMAT
+**
The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates +** the legacy file format flag. When activated, this flag causes all newly +** created database files to have a schema format version number (the 4-byte +** integer found at offset 44 into the database header) of 1. This in turn +** means that the resulting database file will be readable and writable by +** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting, +** newly created databases are generally not understandable by SQLite versions +** prior to 3.3.0 ([dateof:3.3.0]). As these words are written, there +** is now scarcely any need to generate database files that are compatible +** all the way back to version 3.0.0, and so this setting is of little +** practical use, but is provided so that SQLite can continue to claim the +** ability to generate new database files that are compatible with version +** 3.0.0. +**

Note that when the SQLITE_DBCONFIG_LEGACY_FILE_FORMAT setting is on, +** the [VACUUM] command will fail with an obscure error when attempting to +** process a table with generated columns and a descending index. This is +** not considered a bug since SQLite versions 3.3.0 and earlier do not support +** either generated columns or descending indexes. +**

+** +** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]] +**
SQLITE_DBCONFIG_STMT_SCANSTATUS
+**
The SQLITE_DBCONFIG_STMT_SCANSTATUS option is only useful in +** SQLITE_ENABLE_STMT_SCANSTATUS builds. In this case, it sets or clears +** a flag that enables collection of the sqlite3_stmt_scanstatus_v2() +** statistics. For statistics to be collected, the flag must be set on +** the database handle both when the SQL statement is prepared and when it +** is stepped. The flag is set (collection of statistics is enabled) +** by default. This option takes two arguments: an integer and a pointer to +** an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the statement scanstatus option. If the second argument +** is not NULL, then the value of the statement scanstatus setting after +** processing the first argument is written into the integer that the second +** argument points to. +**
+** +** [[SQLITE_DBCONFIG_REVERSE_SCANORDER]] +**
SQLITE_DBCONFIG_REVERSE_SCANORDER
+**
The SQLITE_DBCONFIG_REVERSE_SCANORDER option changes the default order +** in which tables and indexes are scanned so that the scans start at the end +** and work toward the beginning rather than starting at the beginning and +** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the +** same as setting [PRAGMA reverse_unordered_selects]. This option takes +** two arguments which are an integer and a pointer to an integer. The first +** argument is 1, 0, or -1 to enable, disable, or leave unchanged the +** reverse scan order flag, respectively. If the second argument is not NULL, +** then 0 or 1 is written into the integer that the second argument points to +** depending on if the reverse scan order flag is set after processing the +** first argument. +**
+** +**
+*/ +#define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ +#define SQLITE_DBCONFIG_LOOKASIDE 1001 /* void* int int */ +#define SQLITE_DBCONFIG_ENABLE_FKEY 1002 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */ +#define SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE 1006 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_QPSG 1007 /* int int* */ +#define SQLITE_DBCONFIG_TRIGGER_EQP 1008 /* int int* */ +#define SQLITE_DBCONFIG_RESET_DATABASE 1009 /* int int* */ +#define SQLITE_DBCONFIG_DEFENSIVE 1010 /* int int* */ +#define SQLITE_DBCONFIG_WRITABLE_SCHEMA 1011 /* int int* */ +#define SQLITE_DBCONFIG_LEGACY_ALTER_TABLE 1012 /* int int* */ +#define SQLITE_DBCONFIG_DQS_DML 1013 /* int int* */ +#define SQLITE_DBCONFIG_DQS_DDL 1014 /* int int* */ +#define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */ +#define SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016 /* int int* */ +#define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ +#define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ +#define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ + +/* +** CAPI3REF: Enable Or Disable Extended Result Codes +** METHOD: sqlite3 +** +** ^The sqlite3_extended_result_codes() routine enables or disables the +** [extended result codes] feature of SQLite. ^The extended result +** codes are disabled by default for historical compatibility. +*/ +SQLITE_API int sqlite3_extended_result_codes(sqlite3*, int onoff); + +/* +** CAPI3REF: Last Insert Rowid +** METHOD: sqlite3 +** +** ^Each entry in most SQLite tables (except for [WITHOUT ROWID] tables) +** has a unique 64-bit signed +** integer key called the [ROWID | "rowid"]. ^The rowid is always available +** as an undeclared column named ROWID, OID, or _ROWID_ as long as those +** names are not also used by explicitly declared columns. 
^If +** the table has a column of type [INTEGER PRIMARY KEY] then that column +** is another alias for the rowid. +** +** ^The sqlite3_last_insert_rowid(D) interface usually returns the [rowid] of +** the most recent successful [INSERT] into a rowid table or [virtual table] +** on database connection D. ^Inserts into [WITHOUT ROWID] tables are not +** recorded. ^If no successful [INSERT]s into rowid tables have ever occurred +** on the database connection D, then sqlite3_last_insert_rowid(D) returns +** zero. +** +** As well as being set automatically as rows are inserted into database +** tables, the value returned by this function may be set explicitly by +** [sqlite3_set_last_insert_rowid()] +** +** Some virtual table implementations may INSERT rows into rowid tables as +** part of committing a transaction (e.g. to flush data accumulated in memory +** to disk). In this case subsequent calls to this function return the rowid +** associated with these internal INSERT operations, which leads to +** unintuitive results. Virtual table implementations that do write to rowid +** tables in this way can avoid this problem by restoring the original +** rowid value using [sqlite3_set_last_insert_rowid()] before returning +** control to the user. +** +** ^(If an [INSERT] occurs within a trigger then this routine will +** return the [rowid] of the inserted row as long as the trigger is +** running. Once the trigger program ends, the value returned +** by this routine reverts to what it was before the trigger was fired.)^ +** +** ^An [INSERT] that fails due to a constraint violation is not a +** successful [INSERT] and does not change the value returned by this +** routine. ^Thus INSERT OR FAIL, INSERT OR IGNORE, INSERT OR ROLLBACK, +** and INSERT OR ABORT make no changes to the return value of this +** routine when their insertion fails. ^(When INSERT OR REPLACE +** encounters a constraint violation, it does not fail. 
The +** INSERT continues to completion after deleting rows that caused +** the constraint problem so INSERT OR REPLACE will always change +** the return value of this interface.)^ +** +** ^For the purposes of this routine, an [INSERT] is considered to +** be successful even if it is subsequently rolled back. +** +** This function is accessible to SQL statements via the +** [last_insert_rowid() SQL function]. +** +** If a separate thread performs a new [INSERT] on the same +** database connection while the [sqlite3_last_insert_rowid()] +** function is running and thus changes the last insert [rowid], +** then the value returned by [sqlite3_last_insert_rowid()] is +** unpredictable and might not equal either the old or the new +** last insert [rowid]. +*/ +SQLITE_API sqlite3_int64 sqlite3_last_insert_rowid(sqlite3*); + +/* +** CAPI3REF: Set the Last Insert Rowid value. +** METHOD: sqlite3 +** +** The sqlite3_set_last_insert_rowid(D, R) method allows the application to +** set the value returned by calling sqlite3_last_insert_rowid(D) to R +** without inserting a row into the database. +*/ +SQLITE_API void sqlite3_set_last_insert_rowid(sqlite3*,sqlite3_int64); + +/* +** CAPI3REF: Count The Number Of Rows Modified +** METHOD: sqlite3 +** +** ^These functions return the number of rows modified, inserted or +** deleted by the most recently completed INSERT, UPDATE or DELETE +** statement on the database connection specified by the only parameter. +** The two functions are identical except for the type of the return value +** and that if the number of rows modified by the most recent INSERT, UPDATE +** or DELETE is greater than the maximum value supported by type "int", then +** the return value of sqlite3_changes() is undefined. ^Executing any other +** type of SQL statement does not modify the value returned by these functions. 
+** +** ^Only changes made directly by the INSERT, UPDATE or DELETE statement are +** considered - auxiliary changes caused by [CREATE TRIGGER | triggers], +** [foreign key actions] or [REPLACE] constraint resolution are not counted. +** +** Changes to a view that are intercepted by +** [INSTEAD OF trigger | INSTEAD OF triggers] are not counted. ^The value +** returned by sqlite3_changes() immediately after an INSERT, UPDATE or +** DELETE statement run on a view is always zero. Only changes made to real +** tables are counted. +** +** Things are more complicated if the sqlite3_changes() function is +** executed while a trigger program is running. This may happen if the +** program uses the [changes() SQL function], or if some other callback +** function invokes sqlite3_changes() directly. Essentially: +** +**
    +**
  • ^(Before entering a trigger program the value returned by +** sqlite3_changes() function is saved. After the trigger program +** has finished, the original value is restored.)^ +** +**
  • ^(Within a trigger program each INSERT, UPDATE and DELETE +** statement sets the value returned by sqlite3_changes() +** upon completion as normal. Of course, this value will not include +** any changes performed by sub-triggers, as the sqlite3_changes() +** value will be saved and restored after each sub-trigger has run.)^ +**
+** +** ^This means that if the changes() SQL function (or similar) is used +** by the first INSERT, UPDATE or DELETE statement within a trigger, it +** returns the value as set when the calling statement began executing. +** ^If it is used by the second or subsequent such statement within a trigger +** program, the value returned reflects the number of rows modified by the +** previous INSERT, UPDATE or DELETE statement within the same trigger. +** +** If a separate thread makes changes on the same database connection +** while [sqlite3_changes()] is running then the value returned +** is unpredictable and not meaningful. +** +** See also: +**
    +**
  • the [sqlite3_total_changes()] interface +**
  • the [count_changes pragma] +**
  • the [changes() SQL function] +**
  • the [data_version pragma] +**
+*/ +SQLITE_API int sqlite3_changes(sqlite3*); +SQLITE_API sqlite3_int64 sqlite3_changes64(sqlite3*); + +/* +** CAPI3REF: Total Number Of Rows Modified +** METHOD: sqlite3 +** +** ^These functions return the total number of rows inserted, modified or +** deleted by all [INSERT], [UPDATE] or [DELETE] statements completed +** since the database connection was opened, including those executed as +** part of trigger programs. The two functions are identical except for the +** type of the return value and that if the number of rows modified by the +** connection exceeds the maximum value supported by type "int", then +** the return value of sqlite3_total_changes() is undefined. ^Executing +** any other type of SQL statement does not affect the value returned by +** sqlite3_total_changes(). +** +** ^Changes made as part of [foreign key actions] are included in the +** count, but those made as part of REPLACE constraint resolution are +** not. ^Changes to a view that are intercepted by INSTEAD OF triggers +** are not counted. +** +** The [sqlite3_total_changes(D)] interface only reports the number +** of rows that changed due to SQL statement run against database +** connection D. Any changes by other database connections are ignored. +** To detect changes against a database file from other database +** connections use the [PRAGMA data_version] command or the +** [SQLITE_FCNTL_DATA_VERSION] [file control]. +** +** If a separate thread makes changes on the same database connection +** while [sqlite3_total_changes()] is running then the value +** returned is unpredictable and not meaningful. +** +** See also: +**
    +**
  • the [sqlite3_changes()] interface +**
  • the [count_changes pragma] +**
  • the [changes() SQL function] +**
  • the [data_version pragma] +**
  • the [SQLITE_FCNTL_DATA_VERSION] [file control] +**
+*/ +SQLITE_API int sqlite3_total_changes(sqlite3*); +SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*); + +/* +** CAPI3REF: Interrupt A Long-Running Query +** METHOD: sqlite3 +** +** ^This function causes any pending database operation to abort and +** return at its earliest opportunity. This routine is typically +** called in response to a user action such as pressing "Cancel" +** or Ctrl-C where the user wants a long query operation to halt +** immediately. +** +** ^It is safe to call this routine from a thread different from the +** thread that is currently running the database operation. But it +** is not safe to call this routine with a [database connection] that +** is closed or might close before sqlite3_interrupt() returns. +** +** ^If an SQL operation is very nearly finished at the time when +** sqlite3_interrupt() is called, then it might not have an opportunity +** to be interrupted and might continue to completion. +** +** ^An SQL operation that is interrupted will return [SQLITE_INTERRUPT]. +** ^If the interrupted SQL operation is an INSERT, UPDATE, or DELETE +** that is inside an explicit transaction, then the entire transaction +** will be rolled back automatically. +** +** ^The sqlite3_interrupt(D) call is in effect until all currently running +** SQL statements on [database connection] D complete. ^Any new SQL statements +** that are started after the sqlite3_interrupt() call and before the +** running statement count reaches zero are interrupted as if they had been +** running prior to the sqlite3_interrupt() call. ^New SQL statements +** that are started after the running statement count reaches zero are +** not affected by the sqlite3_interrupt(). +** ^A call to sqlite3_interrupt(D) that occurs when there are no running +** SQL statements is a no-op and has no effect on SQL statements +** that are started after the sqlite3_interrupt() call returns. 
+** +** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether +** or not an interrupt is currently in effect for [database connection] D. +** It returns 1 if an interrupt is currently in effect, or 0 otherwise. +*/ +SQLITE_API void sqlite3_interrupt(sqlite3*); +SQLITE_API int sqlite3_is_interrupted(sqlite3*); + +/* +** CAPI3REF: Determine If An SQL Statement Is Complete +** +** These routines are useful during command-line input to determine if the +** currently entered text seems to form a complete SQL statement or +** if additional input is needed before sending the text into +** SQLite for parsing. ^These routines return 1 if the input string +** appears to be a complete SQL statement. ^A statement is judged to be +** complete if it ends with a semicolon token and is not a prefix of a +** well-formed CREATE TRIGGER statement. ^Semicolons that are embedded within +** string literals or quoted identifier names or comments are not +** independent tokens (they are part of the token in which they are +** embedded) and thus do not count as a statement terminator. ^Whitespace +** and comments that follow the final semicolon are ignored. +** +** ^These routines return 0 if the statement is incomplete. ^If a +** memory allocation fails, then SQLITE_NOMEM is returned. +** +** ^These routines do not parse the SQL statements thus +** will not detect syntactically incorrect SQL. +** +** ^(If SQLite has not been initialized using [sqlite3_initialize()] prior +** to invoking sqlite3_complete16() then sqlite3_initialize() is invoked +** automatically by sqlite3_complete16(). If that initialization fails, +** then the return value from sqlite3_complete16() will be non-zero +** regardless of whether or not the input SQL is complete.)^ +** +** The input to [sqlite3_complete()] must be a zero-terminated +** UTF-8 string. +** +** The input to [sqlite3_complete16()] must be a zero-terminated +** UTF-16 string in native byte order. 
+*/ +SQLITE_API int sqlite3_complete(const char *sql); +SQLITE_API int sqlite3_complete16(const void *sql); + +/* +** CAPI3REF: Register A Callback To Handle SQLITE_BUSY Errors +** KEYWORDS: {busy-handler callback} {busy handler} +** METHOD: sqlite3 +** +** ^The sqlite3_busy_handler(D,X,P) routine sets a callback function X +** that might be invoked with argument P whenever +** an attempt is made to access a database table associated with +** [database connection] D when another thread +** or process has the table locked. +** The sqlite3_busy_handler() interface is used to implement +** [sqlite3_busy_timeout()] and [PRAGMA busy_timeout]. +** +** ^If the busy callback is NULL, then [SQLITE_BUSY] +** is returned immediately upon encountering the lock. ^If the busy callback +** is not NULL, then the callback might be invoked with two arguments. +** +** ^The first argument to the busy handler is a copy of the void* pointer which +** is the third argument to sqlite3_busy_handler(). ^The second argument to +** the busy handler callback is the number of times that the busy handler has +** been invoked previously for the same locking event. ^If the +** busy callback returns 0, then no additional attempts are made to +** access the database and [SQLITE_BUSY] is returned +** to the application. +** ^If the callback returns non-zero, then another attempt +** is made to access the database and the cycle repeats. +** +** The presence of a busy handler does not guarantee that it will be invoked +** when there is lock contention. ^If SQLite determines that invoking the busy +** handler could result in a deadlock, it will go ahead and return [SQLITE_BUSY] +** to the application instead of invoking the +** busy handler. +** Consider a scenario where one process is holding a read lock that +** it is trying to promote to a reserved lock and +** a second process is holding a reserved lock that it is trying +** to promote to an exclusive lock. 
The first process cannot proceed +** because it is blocked by the second and the second process cannot +** proceed because it is blocked by the first. If both processes +** invoke the busy handlers, neither will make any progress. Therefore, +** SQLite returns [SQLITE_BUSY] for the first process, hoping that this +** will induce the first process to release its read lock and allow +** the second process to proceed. +** +** ^The default busy callback is NULL. +** +** ^(There can only be a single busy handler defined for each +** [database connection]. Setting a new busy handler clears any +** previously set handler.)^ ^Note that calling [sqlite3_busy_timeout()] +** or evaluating [PRAGMA busy_timeout=N] will change the +** busy handler and thus clear any previously set busy handler. +** +** The busy callback should not take any actions which modify the +** database connection that invoked the busy handler. In other words, +** the busy handler is not reentrant. Any such actions +** result in undefined behavior. +** +** A busy handler must not close the database connection +** or [prepared statement] that invoked the busy handler. +*/ +SQLITE_API int sqlite3_busy_handler(sqlite3*,int(*)(void*,int),void*); + +/* +** CAPI3REF: Set A Busy Timeout +** METHOD: sqlite3 +** +** ^This routine sets a [sqlite3_busy_handler | busy handler] that sleeps +** for a specified amount of time when a table is locked. ^The handler +** will sleep multiple times until at least "ms" milliseconds of sleeping +** have accumulated. ^After at least "ms" milliseconds of sleeping, +** the handler returns 0 which causes [sqlite3_step()] to return +** [SQLITE_BUSY]. +** +** ^Calling this routine with an argument less than or equal to zero +** turns off all busy handlers. +** +** ^(There can only be a single busy handler for a particular +** [database connection] at any given moment. 
If another busy handler +** was defined (using [sqlite3_busy_handler()]) prior to calling +** this routine, that other busy handler is cleared.)^ +** +** See also: [PRAGMA busy_timeout] +*/ +SQLITE_API int sqlite3_busy_timeout(sqlite3*, int ms); + +/* +** CAPI3REF: Convenience Routines For Running Queries +** METHOD: sqlite3 +** +** This is a legacy interface that is preserved for backwards compatibility. +** Use of this interface is not recommended. +** +** Definition: A result table is memory data structure created by the +** [sqlite3_get_table()] interface. A result table records the +** complete query results from one or more queries. +** +** The table conceptually has a number of rows and columns. But +** these numbers are not part of the result table itself. These +** numbers are obtained separately. Let N be the number of rows +** and M be the number of columns. +** +** A result table is an array of pointers to zero-terminated UTF-8 strings. +** There are (N+1)*M elements in the array. The first M pointers point +** to zero-terminated strings that contain the names of the columns. +** The remaining entries all point to query results. NULL values result +** in NULL pointers. All other values are in their UTF-8 zero-terminated +** string representation as returned by [sqlite3_column_text()]. +** +** A result table might consist of one or more memory allocations. +** It is not safe to pass a result table directly to [sqlite3_free()]. +** A result table should be deallocated using [sqlite3_free_table()]. +** +** ^(As an example of the result table format, suppose a query result +** is as follows: +** +**
+**        Name        | Age
+**        -----------------------
+**        Alice       | 43
+**        Bob         | 28
+**        Cindy       | 21
+** 
+** +** There are two columns (M==2) and three rows (N==3). Thus the +** result table has 8 entries. Suppose the result table is stored +** in an array named azResult. Then azResult holds this content: +** +**
+**        azResult[0] = "Name";
+**        azResult[1] = "Age";
+**        azResult[2] = "Alice";
+**        azResult[3] = "43";
+**        azResult[4] = "Bob";
+**        azResult[5] = "28";
+**        azResult[6] = "Cindy";
+**        azResult[7] = "21";
+** 
)^ +** +** ^The sqlite3_get_table() function evaluates one or more +** semicolon-separated SQL statements in the zero-terminated UTF-8 +** string of its 2nd parameter and returns a result table to the +** pointer given in its 3rd parameter. +** +** After the application has finished with the result from sqlite3_get_table(), +** it must pass the result table pointer to sqlite3_free_table() in order to +** release the memory that was malloced. Because of the way the +** [sqlite3_malloc()] happens within sqlite3_get_table(), the calling +** function must not try to call [sqlite3_free()] directly. Only +** [sqlite3_free_table()] is able to release the memory properly and safely. +** +** The sqlite3_get_table() interface is implemented as a wrapper around +** [sqlite3_exec()]. The sqlite3_get_table() routine does not have access +** to any internal data structures of SQLite. It uses only the public +** interface defined here. As a consequence, errors that occur in the +** wrapper layer outside of the internal [sqlite3_exec()] call are not +** reflected in subsequent calls to [sqlite3_errcode()] or +** [sqlite3_errmsg()]. +*/ +SQLITE_API int sqlite3_get_table( + sqlite3 *db, /* An open database */ + const char *zSql, /* SQL to be evaluated */ + char ***pazResult, /* Results of the query */ + int *pnRow, /* Number of result rows written here */ + int *pnColumn, /* Number of result columns written here */ + char **pzErrmsg /* Error msg written here */ +); +SQLITE_API void sqlite3_free_table(char **result); + +/* +** CAPI3REF: Formatted String Printing Functions +** +** These routines are work-alikes of the "printf()" family of functions +** from the standard C library. +** These routines understand most of the common formatting options from +** the standard library printf() +** plus some additional non-standard formats ([%q], [%Q], [%w], and [%z]). +** See the [built-in printf()] documentation for details. 
+** +** ^The sqlite3_mprintf() and sqlite3_vmprintf() routines write their +** results into memory obtained from [sqlite3_malloc64()]. +** The strings returned by these two routines should be +** released by [sqlite3_free()]. ^Both routines return a +** NULL pointer if [sqlite3_malloc64()] is unable to allocate enough +** memory to hold the resulting string. +** +** ^(The sqlite3_snprintf() routine is similar to "snprintf()" from +** the standard C library. The result is written into the +** buffer supplied as the second parameter whose size is given by +** the first parameter. Note that the order of the +** first two parameters is reversed from snprintf().)^ This is an +** historical accident that cannot be fixed without breaking +** backwards compatibility. ^(Note also that sqlite3_snprintf() +** returns a pointer to its buffer instead of the number of +** characters actually written into the buffer.)^ We admit that +** the number of characters written would be a more useful return +** value but we cannot change the implementation of sqlite3_snprintf() +** now without breaking compatibility. +** +** ^As long as the buffer size is greater than zero, sqlite3_snprintf() +** guarantees that the buffer is always zero-terminated. ^The first +** parameter "n" is the total size of the buffer, including space for +** the zero terminator. So the longest string that can be completely +** written will be n-1 characters. +** +** ^The sqlite3_vsnprintf() routine is a varargs version of sqlite3_snprintf(). +** +** See also: [built-in printf()], [printf() SQL function] +*/ +SQLITE_API char *sqlite3_mprintf(const char*,...); +SQLITE_API char *sqlite3_vmprintf(const char*, va_list); +SQLITE_API char *sqlite3_snprintf(int,char*,const char*, ...); +SQLITE_API char *sqlite3_vsnprintf(int,char*,const char*, va_list); + +/* +** CAPI3REF: Memory Allocation Subsystem +** +** The SQLite core uses these three routines for all of its own +** internal memory allocation needs. 
"Core" in the previous sentence +** does not include operating-system specific [VFS] implementation. The +** Windows VFS uses native malloc() and free() for some operations. +** +** ^The sqlite3_malloc() routine returns a pointer to a block +** of memory at least N bytes in length, where N is the parameter. +** ^If sqlite3_malloc() is unable to obtain sufficient free +** memory, it returns a NULL pointer. ^If the parameter N to +** sqlite3_malloc() is zero or negative then sqlite3_malloc() returns +** a NULL pointer. +** +** ^The sqlite3_malloc64(N) routine works just like +** sqlite3_malloc(N) except that N is an unsigned 64-bit integer instead +** of a signed 32-bit integer. +** +** ^Calling sqlite3_free() with a pointer previously returned +** by sqlite3_malloc() or sqlite3_realloc() releases that memory so +** that it might be reused. ^The sqlite3_free() routine is +** a no-op if is called with a NULL pointer. Passing a NULL pointer +** to sqlite3_free() is harmless. After being freed, memory +** should neither be read nor written. Even reading previously freed +** memory might result in a segmentation fault or other severe error. +** Memory corruption, a segmentation fault, or other severe error +** might result if sqlite3_free() is called with a non-NULL pointer that +** was not obtained from sqlite3_malloc() or sqlite3_realloc(). +** +** ^The sqlite3_realloc(X,N) interface attempts to resize a +** prior memory allocation X to be at least N bytes. +** ^If the X parameter to sqlite3_realloc(X,N) +** is a NULL pointer then its behavior is identical to calling +** sqlite3_malloc(N). +** ^If the N parameter to sqlite3_realloc(X,N) is zero or +** negative then the behavior is exactly the same as calling +** sqlite3_free(X). +** ^sqlite3_realloc(X,N) returns a pointer to a memory allocation +** of at least N bytes in size or NULL if insufficient memory is available. 
+** ^If M is the size of the prior allocation, then min(N,M) bytes
+** of the prior allocation are copied into the beginning of buffer returned
+** by sqlite3_realloc(X,N) and the prior allocation is freed.
+** ^If sqlite3_realloc(X,N) returns NULL and N is positive, then the
+** prior allocation is not freed.
+**
+** ^The sqlite3_realloc64(X,N) interface works the same as
+** sqlite3_realloc(X,N) except that N is a 64-bit unsigned integer instead
+** of a 32-bit signed integer.
+**
+** ^If X is a memory allocation previously obtained from sqlite3_malloc(),
+** sqlite3_malloc64(), sqlite3_realloc(), or sqlite3_realloc64(), then
+** sqlite3_msize(X) returns the size of that memory allocation in bytes.
+** ^The value returned by sqlite3_msize(X) might be larger than the number
+** of bytes requested when X was allocated. ^If X is a NULL pointer then
+** sqlite3_msize(X) returns zero. If X points to something that is not
+** the beginning of memory allocation, or if it points to a formerly
+** valid memory allocation that has now been freed, then the behavior
+** of sqlite3_msize(X) is undefined and possibly harmful.
+**
+** ^The memory returned by sqlite3_malloc(), sqlite3_realloc(),
+** sqlite3_malloc64(), and sqlite3_realloc64()
+** is always aligned to at least an 8 byte boundary, or to a
+** 4 byte boundary if the [SQLITE_4_BYTE_ALIGNED_MALLOC] compile-time
+** option is used.
+**
+** The pointer arguments to [sqlite3_free()] and [sqlite3_realloc()]
+** must be either NULL or else pointers obtained from a prior
+** invocation of [sqlite3_malloc()] or [sqlite3_realloc()] that have
+** not yet been released.
+**
+** The application must not read or write any part of
+** a block of memory after it has been released using
+** [sqlite3_free()] or [sqlite3_realloc()]. 
+*/ +SQLITE_API void *sqlite3_malloc(int); +SQLITE_API void *sqlite3_malloc64(sqlite3_uint64); +SQLITE_API void *sqlite3_realloc(void*, int); +SQLITE_API void *sqlite3_realloc64(void*, sqlite3_uint64); +SQLITE_API void sqlite3_free(void*); +SQLITE_API sqlite3_uint64 sqlite3_msize(void*); + +/* +** CAPI3REF: Memory Allocator Statistics +** +** SQLite provides these two interfaces for reporting on the status +** of the [sqlite3_malloc()], [sqlite3_free()], and [sqlite3_realloc()] +** routines, which form the built-in memory allocation subsystem. +** +** ^The [sqlite3_memory_used()] routine returns the number of bytes +** of memory currently outstanding (malloced but not freed). +** ^The [sqlite3_memory_highwater()] routine returns the maximum +** value of [sqlite3_memory_used()] since the high-water mark +** was last reset. ^The values returned by [sqlite3_memory_used()] and +** [sqlite3_memory_highwater()] include any overhead +** added by SQLite in its implementation of [sqlite3_malloc()], +** but not overhead added by the any underlying system library +** routines that [sqlite3_malloc()] may call. +** +** ^The memory high-water mark is reset to the current value of +** [sqlite3_memory_used()] if and only if the parameter to +** [sqlite3_memory_highwater()] is true. ^The value returned +** by [sqlite3_memory_highwater(1)] is the high-water mark +** prior to the reset. +*/ +SQLITE_API sqlite3_int64 sqlite3_memory_used(void); +SQLITE_API sqlite3_int64 sqlite3_memory_highwater(int resetFlag); + +/* +** CAPI3REF: Pseudo-Random Number Generator +** +** SQLite contains a high-quality pseudo-random number generator (PRNG) used to +** select random [ROWID | ROWIDs] when inserting new records into a table that +** already uses the largest possible [ROWID]. The PRNG is also used for +** the built-in random() and randomblob() SQL functions. This interface allows +** applications to access the same PRNG for other purposes. 
+** +** ^A call to this routine stores N bytes of randomness into buffer P. +** ^The P parameter can be a NULL pointer. +** +** ^If this routine has not been previously called or if the previous +** call had N less than one or a NULL pointer for P, then the PRNG is +** seeded using randomness obtained from the xRandomness method of +** the default [sqlite3_vfs] object. +** ^If the previous call to this routine had an N of 1 or more and a +** non-NULL P then the pseudo-randomness is generated +** internally and without recourse to the [sqlite3_vfs] xRandomness +** method. +*/ +SQLITE_API void sqlite3_randomness(int N, void *P); + +/* +** CAPI3REF: Compile-Time Authorization Callbacks +** METHOD: sqlite3 +** KEYWORDS: {authorizer callback} +** +** ^This routine registers an authorizer callback with a particular +** [database connection], supplied in the first argument. +** ^The authorizer callback is invoked as SQL statements are being compiled +** by [sqlite3_prepare()] or its variants [sqlite3_prepare_v2()], +** [sqlite3_prepare_v3()], [sqlite3_prepare16()], [sqlite3_prepare16_v2()], +** and [sqlite3_prepare16_v3()]. ^At various +** points during the compilation process, as logic is being created +** to perform various actions, the authorizer callback is invoked to +** see if those actions are allowed. ^The authorizer callback should +** return [SQLITE_OK] to allow the action, [SQLITE_IGNORE] to disallow the +** specific action but allow the SQL statement to continue to be +** compiled, or [SQLITE_DENY] to cause the entire SQL statement to be +** rejected with an error. ^If the authorizer callback returns +** any value other than [SQLITE_IGNORE], [SQLITE_OK], or [SQLITE_DENY] +** then the [sqlite3_prepare_v2()] or equivalent call that triggered +** the authorizer will fail with an error message. +** +** When the callback returns [SQLITE_OK], that means the operation +** requested is ok. 
^When the callback returns [SQLITE_DENY], the +** [sqlite3_prepare_v2()] or equivalent call that triggered the +** authorizer will fail with an error message explaining that +** access is denied. +** +** ^The first parameter to the authorizer callback is a copy of the third +** parameter to the sqlite3_set_authorizer() interface. ^The second parameter +** to the callback is an integer [SQLITE_COPY | action code] that specifies +** the particular action to be authorized. ^The third through sixth parameters +** to the callback are either NULL pointers or zero-terminated strings +** that contain additional details about the action to be authorized. +** Applications must always be prepared to encounter a NULL pointer in any +** of the third through the sixth parameters of the authorization callback. +** +** ^If the action code is [SQLITE_READ] +** and the callback returns [SQLITE_IGNORE] then the +** [prepared statement] statement is constructed to substitute +** a NULL value in place of the table column that would have +** been read if [SQLITE_OK] had been returned. The [SQLITE_IGNORE] +** return can be used to deny an untrusted user access to individual +** columns of a table. +** ^When a table is referenced by a [SELECT] but no column values are +** extracted from that table (for example in a query like +** "SELECT count(*) FROM tab") then the [SQLITE_READ] authorizer callback +** is invoked once for that table with a column name that is an empty string. +** ^If the action code is [SQLITE_DELETE] and the callback returns +** [SQLITE_IGNORE] then the [DELETE] operation proceeds but the +** [truncate optimization] is disabled and all rows are deleted individually. +** +** An authorizer is used when [sqlite3_prepare | preparing] +** SQL statements from an untrusted source, to ensure that the SQL statements +** do not try to access data they are not allowed to see, or that they do not +** try to execute malicious statements that damage the database. 
For +** example, an application may allow a user to enter arbitrary +** SQL queries for evaluation by a database. But the application does +** not want the user to be able to make arbitrary changes to the +** database. An authorizer could then be put in place while the +** user-entered SQL is being [sqlite3_prepare | prepared] that +** disallows everything except [SELECT] statements. +** +** Applications that need to process SQL from untrusted sources +** might also consider lowering resource limits using [sqlite3_limit()] +** and limiting database size using the [max_page_count] [PRAGMA] +** in addition to using an authorizer. +** +** ^(Only a single authorizer can be in place on a database connection +** at a time. Each call to sqlite3_set_authorizer overrides the +** previous call.)^ ^Disable the authorizer by installing a NULL callback. +** The authorizer is disabled by default. +** +** The authorizer callback must not do anything that will modify +** the database connection that invoked the authorizer callback. +** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their +** database connections for the meaning of "modify" in this paragraph. +** +** ^When [sqlite3_prepare_v2()] is used to prepare a statement, the +** statement might be re-prepared during [sqlite3_step()] due to a +** schema change. Hence, the application should ensure that the +** correct authorizer callback remains in place during the [sqlite3_step()]. +** +** ^Note that the authorizer callback is invoked only during +** [sqlite3_prepare()] or its variants. Authorization is not +** performed during statement evaluation in [sqlite3_step()], unless +** as stated in the previous paragraph, sqlite3_step() invokes +** sqlite3_prepare_v2() to reprepare a statement after a schema change. 
+*/ +SQLITE_API int sqlite3_set_authorizer( + sqlite3*, + int (*xAuth)(void*,int,const char*,const char*,const char*,const char*), + void *pUserData +); + +/* +** CAPI3REF: Authorizer Return Codes +** +** The [sqlite3_set_authorizer | authorizer callback function] must +** return either [SQLITE_OK] or one of these two constants in order +** to signal SQLite whether or not the action is permitted. See the +** [sqlite3_set_authorizer | authorizer documentation] for additional +** information. +** +** Note that SQLITE_IGNORE is also used as a [conflict resolution mode] +** returned from the [sqlite3_vtab_on_conflict()] interface. +*/ +#define SQLITE_DENY 1 /* Abort the SQL statement with an error */ +#define SQLITE_IGNORE 2 /* Don't allow access, but don't generate an error */ + +/* +** CAPI3REF: Authorizer Action Codes +** +** The [sqlite3_set_authorizer()] interface registers a callback function +** that is invoked to authorize certain SQL statement actions. The +** second parameter to the callback is an integer code that specifies +** what action is being authorized. These are the integer action codes that +** the authorizer callback may be passed. +** +** These action code values signify what kind of operation is to be +** authorized. The 3rd and 4th parameters to the authorization +** callback function will be parameters or NULL depending on which of these +** codes is used as the second parameter. ^(The 5th parameter to the +** authorizer callback is the name of the database ("main", "temp", +** etc.) if applicable.)^ ^The 6th parameter to the authorizer callback +** is the name of the inner-most trigger or view that is responsible for +** the access attempt or NULL if this access attempt is directly from +** top-level SQL code. 
+*/ +/******************************************* 3rd ************ 4th ***********/ +#define SQLITE_CREATE_INDEX 1 /* Index Name Table Name */ +#define SQLITE_CREATE_TABLE 2 /* Table Name NULL */ +#define SQLITE_CREATE_TEMP_INDEX 3 /* Index Name Table Name */ +#define SQLITE_CREATE_TEMP_TABLE 4 /* Table Name NULL */ +#define SQLITE_CREATE_TEMP_TRIGGER 5 /* Trigger Name Table Name */ +#define SQLITE_CREATE_TEMP_VIEW 6 /* View Name NULL */ +#define SQLITE_CREATE_TRIGGER 7 /* Trigger Name Table Name */ +#define SQLITE_CREATE_VIEW 8 /* View Name NULL */ +#define SQLITE_DELETE 9 /* Table Name NULL */ +#define SQLITE_DROP_INDEX 10 /* Index Name Table Name */ +#define SQLITE_DROP_TABLE 11 /* Table Name NULL */ +#define SQLITE_DROP_TEMP_INDEX 12 /* Index Name Table Name */ +#define SQLITE_DROP_TEMP_TABLE 13 /* Table Name NULL */ +#define SQLITE_DROP_TEMP_TRIGGER 14 /* Trigger Name Table Name */ +#define SQLITE_DROP_TEMP_VIEW 15 /* View Name NULL */ +#define SQLITE_DROP_TRIGGER 16 /* Trigger Name Table Name */ +#define SQLITE_DROP_VIEW 17 /* View Name NULL */ +#define SQLITE_INSERT 18 /* Table Name NULL */ +#define SQLITE_PRAGMA 19 /* Pragma Name 1st arg or NULL */ +#define SQLITE_READ 20 /* Table Name Column Name */ +#define SQLITE_SELECT 21 /* NULL NULL */ +#define SQLITE_TRANSACTION 22 /* Operation NULL */ +#define SQLITE_UPDATE 23 /* Table Name Column Name */ +#define SQLITE_ATTACH 24 /* Filename NULL */ +#define SQLITE_DETACH 25 /* Database Name NULL */ +#define SQLITE_ALTER_TABLE 26 /* Database Name Table Name */ +#define SQLITE_REINDEX 27 /* Index Name NULL */ +#define SQLITE_ANALYZE 28 /* Table Name NULL */ +#define SQLITE_CREATE_VTABLE 29 /* Table Name Module Name */ +#define SQLITE_DROP_VTABLE 30 /* Table Name Module Name */ +#define SQLITE_FUNCTION 31 /* NULL Function Name */ +#define SQLITE_SAVEPOINT 32 /* Operation Savepoint Name */ +#define SQLITE_COPY 0 /* No longer used */ +#define SQLITE_RECURSIVE 33 /* NULL NULL */ + +/* +** CAPI3REF: Tracing And Profiling 
Functions +** METHOD: sqlite3 +** +** These routines are deprecated. Use the [sqlite3_trace_v2()] interface +** instead of the routines described here. +** +** These routines register callback functions that can be used for +** tracing and profiling the execution of SQL statements. +** +** ^The callback function registered by sqlite3_trace() is invoked at +** various times when an SQL statement is being run by [sqlite3_step()]. +** ^The sqlite3_trace() callback is invoked with a UTF-8 rendering of the +** SQL statement text as the statement first begins executing. +** ^(Additional sqlite3_trace() callbacks might occur +** as each triggered subprogram is entered. The callbacks for triggers +** contain a UTF-8 SQL comment that identifies the trigger.)^ +** +** The [SQLITE_TRACE_SIZE_LIMIT] compile-time option can be used to limit +** the length of [bound parameter] expansion in the output of sqlite3_trace(). +** +** ^The callback function registered by sqlite3_profile() is invoked +** as each SQL statement finishes. ^The profile callback contains +** the original statement text and an estimate of wall-clock time +** of how long that statement took to run. ^The profile callback +** time is in units of nanoseconds, however the current implementation +** is only capable of millisecond resolution so the six least significant +** digits in the time are meaningless. Future versions of SQLite +** might provide greater resolution on the profiler callback. Invoking +** either [sqlite3_trace()] or [sqlite3_trace_v2()] will cancel the +** profile callback. +*/ +SQLITE_API SQLITE_DEPRECATED void *sqlite3_trace(sqlite3*, + void(*xTrace)(void*,const char*), void*); +SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*, + void(*xProfile)(void*,const char*,sqlite3_uint64), void*); + +/* +** CAPI3REF: SQL Trace Event Codes +** KEYWORDS: SQLITE_TRACE +** +** These constants identify classes of events that can be monitored +** using the [sqlite3_trace_v2()] tracing logic. 
The M argument +** to [sqlite3_trace_v2(D,M,X,P)] is an OR-ed combination of one or more of +** the following constants. ^The first argument to the trace callback +** is one of the following constants. +** +** New tracing constants may be added in future releases. +** +** ^A trace callback has four arguments: xCallback(T,C,P,X). +** ^The T argument is one of the integer type codes above. +** ^The C argument is a copy of the context pointer passed in as the +** fourth argument to [sqlite3_trace_v2()]. +** The P and X arguments are pointers whose meanings depend on T. +** +**
+** [[SQLITE_TRACE_STMT]]
SQLITE_TRACE_STMT
+**
^An SQLITE_TRACE_STMT callback is invoked when a prepared statement +** first begins running and possibly at other times during the +** execution of the prepared statement, such as at the start of each +** trigger subprogram. ^The P argument is a pointer to the +** [prepared statement]. ^The X argument is a pointer to a string which +** is the unexpanded SQL text of the prepared statement or an SQL comment +** that indicates the invocation of a trigger. ^The callback can compute +** the same text that would have been returned by the legacy [sqlite3_trace()] +** interface by using the X argument when X begins with "--" and invoking +** [sqlite3_expanded_sql(P)] otherwise. +** +** [[SQLITE_TRACE_PROFILE]]
SQLITE_TRACE_PROFILE
+**
^An SQLITE_TRACE_PROFILE callback provides approximately the same +** information as is provided by the [sqlite3_profile()] callback. +** ^The P argument is a pointer to the [prepared statement] and the +** X argument points to a 64-bit integer which is approximately +** the number of nanoseconds that the prepared statement took to run. +** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes. +** +** [[SQLITE_TRACE_ROW]]
SQLITE_TRACE_ROW
+**
^An SQLITE_TRACE_ROW callback is invoked whenever a prepared +** statement generates a single row of result. +** ^The P argument is a pointer to the [prepared statement] and the +** X argument is unused. +** +** [[SQLITE_TRACE_CLOSE]]
SQLITE_TRACE_CLOSE
+**
^An SQLITE_TRACE_CLOSE callback is invoked when a database +** connection closes. +** ^The P argument is a pointer to the [database connection] object +** and the X argument is unused. +**
+*/ +#define SQLITE_TRACE_STMT 0x01 +#define SQLITE_TRACE_PROFILE 0x02 +#define SQLITE_TRACE_ROW 0x04 +#define SQLITE_TRACE_CLOSE 0x08 + +/* +** CAPI3REF: SQL Trace Hook +** METHOD: sqlite3 +** +** ^The sqlite3_trace_v2(D,M,X,P) interface registers a trace callback +** function X against [database connection] D, using property mask M +** and context pointer P. ^If the X callback is +** NULL or if the M mask is zero, then tracing is disabled. The +** M argument should be the bitwise OR-ed combination of +** zero or more [SQLITE_TRACE] constants. +** +** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P) +** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or +** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each +** database connection may have at most one trace callback. +** +** ^The X callback is invoked whenever any of the events identified by +** mask M occur. ^The integer return value from the callback is currently +** ignored, though this may change in future releases. Callback +** implementations should return zero to ensure future compatibility. +** +** ^A trace callback is invoked with four arguments: callback(T,C,P,X). +** ^The T argument is one of the [SQLITE_TRACE] +** constants to indicate why the callback was invoked. +** ^The C argument is a copy of the context pointer. +** The P and X arguments are pointers whose meanings depend on T. +** +** The sqlite3_trace_v2() interface is intended to replace the legacy +** interfaces [sqlite3_trace()] and [sqlite3_profile()], both of which +** are deprecated. 
+*/ +SQLITE_API int sqlite3_trace_v2( + sqlite3*, + unsigned uMask, + int(*xCallback)(unsigned,void*,void*,void*), + void *pCtx +); + +/* +** CAPI3REF: Query Progress Callbacks +** METHOD: sqlite3 +** +** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback +** function X to be invoked periodically during long running calls to +** [sqlite3_step()] and [sqlite3_prepare()] and similar for +** database connection D. An example use for this +** interface is to keep a GUI updated during a large query. +** +** ^The parameter P is passed through as the only parameter to the +** callback function X. ^The parameter N is the approximate number of +** [virtual machine instructions] that are evaluated between successive +** invocations of the callback X. ^If N is less than one then the progress +** handler is disabled. +** +** ^Only a single progress handler may be defined at one time per +** [database connection]; setting a new progress handler cancels the +** old one. ^Setting parameter X to NULL disables the progress handler. +** ^The progress handler is also disabled by setting N to a value less +** than 1. +** +** ^If the progress callback returns non-zero, the operation is +** interrupted. This feature can be used to implement a +** "Cancel" button on a GUI progress dialog box. +** +** The progress handler callback must not do anything that will modify +** the database connection that invoked the progress handler. +** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their +** database connections for the meaning of "modify" in this paragraph. +** +** The progress handler callback would originally only be invoked from the +** bytecode engine. It still might be invoked during [sqlite3_prepare()] +** and similar because those routines might force a reparse of the schema +** which involves running the bytecode engine. 
However, beginning with +** SQLite version 3.41.0, the progress handler callback might also be +** invoked directly from [sqlite3_prepare()] while analyzing and generating +** code for complex queries. +*/ +SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); + +/* +** CAPI3REF: Opening A New Database Connection +** CONSTRUCTOR: sqlite3 +** +** ^These routines open an SQLite database file as specified by the +** filename argument. ^The filename argument is interpreted as UTF-8 for +** sqlite3_open() and sqlite3_open_v2() and as UTF-16 in the native byte +** order for sqlite3_open16(). ^(A [database connection] handle is usually +** returned in *ppDb, even if an error occurs. The only exception is that +** if SQLite is unable to allocate memory to hold the [sqlite3] object, +** a NULL will be written into *ppDb instead of a pointer to the [sqlite3] +** object.)^ ^(If the database is opened (and/or created) successfully, then +** [SQLITE_OK] is returned. Otherwise an [error code] is returned.)^ ^The +** [sqlite3_errmsg()] or [sqlite3_errmsg16()] routines can be used to obtain +** an English language description of the error following a failure of any +** of the sqlite3_open() routines. +** +** ^The default encoding will be UTF-8 for databases created using +** sqlite3_open() or sqlite3_open_v2(). ^The default encoding for databases +** created using sqlite3_open16() will be UTF-16 in the native byte order. +** +** Whether or not an error occurs when it is opened, resources +** associated with the [database connection] handle should be released by +** passing it to [sqlite3_close()] when it is no longer required. +** +** The sqlite3_open_v2() interface works like sqlite3_open() +** except that it accepts two additional parameters for additional control +** over the new database connection. ^(The flags parameter to +** sqlite3_open_v2() must include, at a minimum, one of the following +** three flag combinations:)^ +** +**
+** ^(
[SQLITE_OPEN_READONLY]
+**
The database is opened in read-only mode. If the database does +** not already exist, an error is returned.
)^ +** +** ^(
[SQLITE_OPEN_READWRITE]
+**
The database is opened for reading and writing if possible, or +** reading only if the file is write protected by the operating +** system. In either case the database must already exist, otherwise +** an error is returned. For historical reasons, if opening in +** read-write mode fails due to OS-level permissions, an attempt is +** made to open it in read-only mode. [sqlite3_db_readonly()] can be +** used to determine whether the database is actually +** read-write.
)^ +** +** ^(
[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
+**
The database is opened for reading and writing, and is created if +** it does not already exist. This is the behavior that is always used for +** sqlite3_open() and sqlite3_open16().
)^ +**
+** +** In addition to the required flags, the following optional flags are +** also supported: +** +**
+** ^(
[SQLITE_OPEN_URI]
+**
The filename can be interpreted as a URI if this flag is set.
)^ +** +** ^(
[SQLITE_OPEN_MEMORY]
+**
The database will be opened as an in-memory database. The database +** is named by the "filename" argument for the purposes of cache-sharing, +** if shared cache mode is enabled, but the "filename" is otherwise ignored. +**
)^ +** +** ^(
[SQLITE_OPEN_NOMUTEX]
+**
The new database connection will use the "multi-thread" +** [threading mode].)^ This means that separate threads are allowed +** to use SQLite at the same time, as long as each thread is using +** a different [database connection]. +** +** ^(
[SQLITE_OPEN_FULLMUTEX]
+**
The new database connection will use the "serialized" +** [threading mode].)^ This means the multiple threads can safely +** attempt to use the same database connection at the same time. +** (Mutexes will block any actual concurrency, but in this mode +** there is no harm in trying.) +** +** ^(
[SQLITE_OPEN_SHAREDCACHE]
+**
The database is opened [shared cache] enabled, overriding +** the default shared cache setting provided by +** [sqlite3_enable_shared_cache()].)^ +** The [use of shared cache mode is discouraged] and hence shared cache +** capabilities may be omitted from many builds of SQLite. In such cases, +** this option is a no-op. +** +** ^(
[SQLITE_OPEN_PRIVATECACHE]
+**
The database is opened [shared cache] disabled, overriding +** the default shared cache setting provided by +** [sqlite3_enable_shared_cache()].)^ +** +** [[OPEN_EXRESCODE]] ^(
[SQLITE_OPEN_EXRESCODE]
+**
The database connection comes up in "extended result code mode".
+** In other words, the database behaves as if
+** [sqlite3_extended_result_codes(db,1)] were called on the database
+** connection as soon as the connection is created. In addition to setting
+** the extended result code mode, this flag also causes [sqlite3_open_v2()]
+** to return an extended result code.
+** +** [[OPEN_NOFOLLOW]] ^(
[SQLITE_OPEN_NOFOLLOW]
+**
The database filename is not allowed to contain a symbolic link
+**
)^ +** +** If the 3rd parameter to sqlite3_open_v2() is not one of the +** required combinations shown above optionally combined with other +** [SQLITE_OPEN_READONLY | SQLITE_OPEN_* bits] +** then the behavior is undefined. Historic versions of SQLite +** have silently ignored surplus bits in the flags parameter to +** sqlite3_open_v2(), however that behavior might not be carried through +** into future versions of SQLite and so applications should not rely +** upon it. Note in particular that the SQLITE_OPEN_EXCLUSIVE flag is a no-op +** for sqlite3_open_v2(). The SQLITE_OPEN_EXCLUSIVE does *not* cause +** the open to fail if the database already exists. The SQLITE_OPEN_EXCLUSIVE +** flag is intended for use by the [sqlite3_vfs|VFS interface] only, and not +** by sqlite3_open_v2(). +** +** ^The fourth parameter to sqlite3_open_v2() is the name of the +** [sqlite3_vfs] object that defines the operating system interface that +** the new database connection should use. ^If the fourth parameter is +** a NULL pointer then the default [sqlite3_vfs] object is used. +** +** ^If the filename is ":memory:", then a private, temporary in-memory database +** is created for the connection. ^This in-memory database will vanish when +** the database connection is closed. Future versions of SQLite might +** make use of additional special filenames that begin with the ":" character. +** It is recommended that when a database filename actually does begin with +** a ":" character you should prefix the filename with a pathname such as +** "./" to avoid ambiguity. +** +** ^If the filename is an empty string, then a private, temporary +** on-disk database will be created. ^This private database will be +** automatically deleted as soon as the database connection is closed. +** +** [[URI filenames in sqlite3_open()]]

URI Filenames

+** +** ^If [URI filename] interpretation is enabled, and the filename argument +** begins with "file:", then the filename is interpreted as a URI. ^URI +** filename interpretation is enabled if the [SQLITE_OPEN_URI] flag is +** set in the third argument to sqlite3_open_v2(), or if it has +** been enabled globally using the [SQLITE_CONFIG_URI] option with the +** [sqlite3_config()] method or by the [SQLITE_USE_URI] compile-time option. +** URI filename interpretation is turned off +** by default, but future releases of SQLite might enable URI filename +** interpretation by default. See "[URI filenames]" for additional +** information. +** +** URI filenames are parsed according to RFC 3986. ^If the URI contains an +** authority, then it must be either an empty string or the string +** "localhost". ^If the authority is not an empty string or "localhost", an +** error is returned to the caller. ^The fragment component of a URI, if +** present, is ignored. +** +** ^SQLite uses the path component of the URI as the name of the disk file +** which contains the database. ^If the path begins with a '/' character, +** then it is interpreted as an absolute path. ^If the path does not begin +** with a '/' (meaning that the authority section is omitted from the URI) +** then the path is interpreted as a relative path. +** ^(On windows, the first component of an absolute path +** is a drive specification (e.g. "C:").)^ +** +** [[core URI query parameters]] +** The query component of a URI may contain parameters that are interpreted +** either by SQLite itself, or by a [VFS | custom VFS implementation]. +** SQLite and its built-in [VFSes] interpret the +** following query parameters: +** +**
    +**
  • vfs: ^The "vfs" parameter may be used to specify the name of +** a VFS object that provides the operating system interface that should +** be used to access the database file on disk. ^If this option is set to +** an empty string the default VFS object is used. ^Specifying an unknown +** VFS is an error. ^If sqlite3_open_v2() is used and the vfs option is +** present, then the VFS specified by the option takes precedence over +** the value passed as the fourth parameter to sqlite3_open_v2(). +** +**
  • mode: ^(The mode parameter may be set to either "ro", "rw", +** "rwc", or "memory". Attempting to set it to any other value is +** an error)^. +** ^If "ro" is specified, then the database is opened for read-only +** access, just as if the [SQLITE_OPEN_READONLY] flag had been set in the +** third argument to sqlite3_open_v2(). ^If the mode option is set to +** "rw", then the database is opened for read-write (but not create) +** access, as if SQLITE_OPEN_READWRITE (but not SQLITE_OPEN_CREATE) had +** been set. ^Value "rwc" is equivalent to setting both +** SQLITE_OPEN_READWRITE and SQLITE_OPEN_CREATE. ^If the mode option is +** set to "memory" then a pure [in-memory database] that never reads +** or writes from disk is used. ^It is an error to specify a value for +** the mode parameter that is less restrictive than that specified by +** the flags passed in the third parameter to sqlite3_open_v2(). +** +**
  • cache: ^The cache parameter may be set to either "shared" or +** "private". ^Setting it to "shared" is equivalent to setting the +** SQLITE_OPEN_SHAREDCACHE bit in the flags argument passed to +** sqlite3_open_v2(). ^Setting the cache parameter to "private" is +** equivalent to setting the SQLITE_OPEN_PRIVATECACHE bit. +** ^If sqlite3_open_v2() is used and the "cache" parameter is present in +** a URI filename, its value overrides any behavior requested by setting +** SQLITE_OPEN_PRIVATECACHE or SQLITE_OPEN_SHAREDCACHE flag. +** +**
  • psow: ^The psow parameter indicates whether or not the +** [powersafe overwrite] property does or does not apply to the +** storage media on which the database file resides. +** +**
  • nolock: ^The nolock parameter is a boolean query parameter +** which if set disables file locking in rollback journal modes. This +** is useful for accessing a database on a filesystem that does not +** support locking. Caution: Database corruption might result if two +** or more processes write to the same database and any one of those +** processes uses nolock=1. +** +**
  • immutable: ^The immutable parameter is a boolean query +** parameter that indicates that the database file is stored on +** read-only media. ^When immutable is set, SQLite assumes that the +** database file cannot be changed, even by a process with higher +** privilege, and so the database is opened read-only and all locking +** and change detection is disabled. Caution: Setting the immutable +** property on a database file that does in fact change can result +** in incorrect query results and/or [SQLITE_CORRUPT] errors. +** See also: [SQLITE_IOCAP_IMMUTABLE]. +** +**
+** +** ^Specifying an unknown parameter in the query component of a URI is not an +** error. Future versions of SQLite might understand additional query +** parameters. See "[query parameters with special meaning to SQLite]" for +** additional information. +** +** [[URI filename examples]]

URI filename examples

+** +** +**
URI filenames Results +**
file:data.db +** Open the file "data.db" in the current directory. +**
file:/home/fred/data.db
+** file:///home/fred/data.db
+** file://localhost/home/fred/data.db
+** Open the database file "/home/fred/data.db". +**
file://darkstar/home/fred/data.db +** An error. "darkstar" is not a recognized authority. +**
+** file:///C:/Documents%20and%20Settings/fred/Desktop/data.db +** Windows only: Open the file "data.db" on fred's desktop on drive +** C:. Note that the %20 escaping in this example is not strictly +** necessary - space characters can be used literally +** in URI filenames. +**
file:data.db?mode=ro&cache=private +** Open file "data.db" in the current directory for read-only access. +** Regardless of whether or not shared-cache mode is enabled by +** default, use a private cache. +**
file:/home/fred/data.db?vfs=unix-dotfile +** Open file "/home/fred/data.db". Use the special VFS "unix-dotfile" +** that uses dot-files in place of posix advisory locking. +**
file:data.db?mode=readonly +** An error. "readonly" is not a valid option for the "mode" parameter. +** Use "ro" instead: "file:data.db?mode=ro". +**
+** +** ^URI hexadecimal escape sequences (%HH) are supported within the path and +** query components of a URI. A hexadecimal escape sequence consists of a +** percent sign - "%" - followed by exactly two hexadecimal digits +** specifying an octet value. ^Before the path or query components of a +** URI filename are interpreted, they are encoded using UTF-8 and all +** hexadecimal escape sequences replaced by a single byte containing the +** corresponding octet. If this process generates an invalid UTF-8 encoding, +** the results are undefined. +** +** Note to Windows users: The encoding used for the filename argument +** of sqlite3_open() and sqlite3_open_v2() must be UTF-8, not whatever +** codepage is currently defined. Filenames containing international +** characters must be converted to UTF-8 prior to passing them into +** sqlite3_open() or sqlite3_open_v2(). +** +** Note to Windows Runtime users: The temporary directory must be set +** prior to calling sqlite3_open() or sqlite3_open_v2(). Otherwise, various +** features that require the use of temporary files may fail. +** +** See also: [sqlite3_temp_directory] +*/ +SQLITE_API int sqlite3_open( + const char *filename, /* Database filename (UTF-8) */ + sqlite3 **ppDb /* OUT: SQLite db handle */ +); +SQLITE_API int sqlite3_open16( + const void *filename, /* Database filename (UTF-16) */ + sqlite3 **ppDb /* OUT: SQLite db handle */ +); +SQLITE_API int sqlite3_open_v2( + const char *filename, /* Database filename (UTF-8) */ + sqlite3 **ppDb, /* OUT: SQLite db handle */ + int flags, /* Flags */ + const char *zVfs /* Name of VFS module to use */ +); + +/* +** CAPI3REF: Obtain Values For URI Parameters +** +** These are utility routines, useful to [VFS|custom VFS implementations], +** that check if a database file was a URI that contained a specific query +** parameter, and if so obtains the value of that query parameter. 
+** +** The first parameter to these interfaces (hereafter referred to +** as F) must be one of: +**
    +**
  • A database filename pointer created by the SQLite core and +** passed into the xOpen() method of a VFS implementation, or +**
  • A filename obtained from [sqlite3_db_filename()], or +**
  • A new filename constructed using [sqlite3_create_filename()]. +**
+** If the F parameter is not one of the above, then the behavior is +** undefined and probably undesirable. Older versions of SQLite were +** more tolerant of invalid F parameters than newer versions. +** +** If F is a suitable filename (as described in the previous paragraph) +** and if P is the name of the query parameter, then +** sqlite3_uri_parameter(F,P) returns the value of the P +** parameter if it exists or a NULL pointer if P does not appear as a +** query parameter on F. If P is a query parameter of F and it +** has no explicit value, then sqlite3_uri_parameter(F,P) returns +** a pointer to an empty string. +** +** The sqlite3_uri_boolean(F,P,B) routine assumes that P is a boolean +** parameter and returns true (1) or false (0) according to the value +** of P. The sqlite3_uri_boolean(F,P,B) routine returns true (1) if the +** value of query parameter P is one of "yes", "true", or "on" in any +** case or if the value begins with a non-zero number. The +** sqlite3_uri_boolean(F,P,B) routine returns false (0) if the value of +** query parameter P is one of "no", "false", or "off" in any case or +** if the value begins with a numeric zero. If P is not a query +** parameter on F or if the value of P does not match any of the +** above, then sqlite3_uri_boolean(F,P,B) returns (B!=0). +** +** The sqlite3_uri_int64(F,P,D) routine converts the value of P into a +** 64-bit signed integer and returns that integer, or D if P does not +** exist. If the value of P is something other than an integer, then +** zero is returned. +** +** The sqlite3_uri_key(F,N) routine returns a pointer to the name (not +** the value) of the N-th query parameter for filename F, or a NULL +** pointer if N is less than zero or greater than the number of query +** parameters minus 1. The N value is zero-based so N should be 0 to obtain +** the name of the first query parameter, 1 for the second parameter, and +** so forth. 
+** +** If F is a NULL pointer, then sqlite3_uri_parameter(F,P) returns NULL and +** sqlite3_uri_boolean(F,P,B) returns B. If F is not a NULL pointer and +** is not a database file pathname pointer that the SQLite core passed +** into the xOpen VFS method, then the behavior of this routine is undefined +** and probably undesirable. +** +** Beginning with SQLite [version 3.31.0] ([dateof:3.31.0]) the input F +** parameter can also be the name of a rollback journal file or WAL file +** in addition to the main database file. Prior to version 3.31.0, these +** routines would only work if F was the name of the main database file. +** When the F parameter is the name of the rollback journal or WAL file, +** it has access to all the same query parameters as were found on the +** main database file. +** +** See the [URI filename] documentation for additional information. +*/ +SQLITE_API const char *sqlite3_uri_parameter(sqlite3_filename z, const char *zParam); +SQLITE_API int sqlite3_uri_boolean(sqlite3_filename z, const char *zParam, int bDefault); +SQLITE_API sqlite3_int64 sqlite3_uri_int64(sqlite3_filename, const char*, sqlite3_int64); +SQLITE_API const char *sqlite3_uri_key(sqlite3_filename z, int N); + +/* +** CAPI3REF: Translate filenames +** +** These routines are available to [VFS|custom VFS implementations] for +** translating filenames between the main database file, the journal file, +** and the WAL file. +** +** If F is the name of an sqlite database file, journal file, or WAL file +** passed by the SQLite core into the VFS, then sqlite3_filename_database(F) +** returns the name of the corresponding database file. +** +** If F is the name of an sqlite database file, journal file, or WAL file +** passed by the SQLite core into the VFS, or if F is a database filename +** obtained from [sqlite3_db_filename()], then sqlite3_filename_journal(F) +** returns the name of the corresponding rollback journal file. 
+** +** If F is the name of an sqlite database file, journal file, or WAL file +** that was passed by the SQLite core into the VFS, or if F is a database +** filename obtained from [sqlite3_db_filename()], then +** sqlite3_filename_wal(F) returns the name of the corresponding +** WAL file. +** +** In all of the above, if F is not the name of a database, journal or WAL +** filename passed into the VFS from the SQLite core and F is not the +** return value from [sqlite3_db_filename()], then the result is +** undefined and is likely a memory access violation. +*/ +SQLITE_API const char *sqlite3_filename_database(sqlite3_filename); +SQLITE_API const char *sqlite3_filename_journal(sqlite3_filename); +SQLITE_API const char *sqlite3_filename_wal(sqlite3_filename); + +/* +** CAPI3REF: Database File Corresponding To A Journal +** +** ^If X is the name of a rollback or WAL-mode journal file that is +** passed into the xOpen method of [sqlite3_vfs], then +** sqlite3_database_file_object(X) returns a pointer to the [sqlite3_file] +** object that represents the main database file. +** +** This routine is intended for use in custom [VFS] implementations +** only. It is not a general-purpose interface. +** The argument sqlite3_file_object(X) must be a filename pointer that +** has been passed into [sqlite3_vfs].xOpen method where the +** flags parameter to xOpen contains one of the bits +** [SQLITE_OPEN_MAIN_JOURNAL] or [SQLITE_OPEN_WAL]. Any other use +** of this routine results in undefined and probably undesirable +** behavior. +*/ +SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); + +/* +** CAPI3REF: Create and Destroy VFS Filenames +** +** These interfaces are provided for use by [VFS shim] implementations and +** are not useful outside of that context. 
+** +** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of +** database filename D with corresponding journal file J and WAL file W and +** with N URI parameters key/values pairs in the array P. The result from +** sqlite3_create_filename(D,J,W,N,P) is a pointer to a database filename that +** is safe to pass to routines like: +**
    +**
  • [sqlite3_uri_parameter()], +**
  • [sqlite3_uri_boolean()], +**
  • [sqlite3_uri_int64()], +**
  • [sqlite3_uri_key()], +**
  • [sqlite3_filename_database()], +**
  • [sqlite3_filename_journal()], or +**
  • [sqlite3_filename_wal()]. +**
+** If a memory allocation error occurs, sqlite3_create_filename() might +** return a NULL pointer. The memory obtained from sqlite3_create_filename(X) +** must be released by a corresponding call to sqlite3_free_filename(Y). +** +** The P parameter in sqlite3_create_filename(D,J,W,N,P) should be an array +** of 2*N pointers to strings. Each pair of pointers in this array corresponds +** to a key and value for a query parameter. The P parameter may be a NULL +** pointer if N is zero. None of the 2*N pointers in the P array may be +** NULL pointers and key pointers should not be empty strings. +** None of the D, J, or W parameters to sqlite3_create_filename(D,J,W,N,P) may +** be NULL pointers, though they can be empty strings. +** +** The sqlite3_free_filename(Y) routine releases a memory allocation +** previously obtained from sqlite3_create_filename(). Invoking +** sqlite3_free_filename(Y) where Y is a NULL pointer is a harmless no-op. +** +** If the Y parameter to sqlite3_free_filename(Y) is anything other +** than a NULL pointer or a pointer previously acquired from +** sqlite3_create_filename(), then bad things such as heap +** corruption or segfaults may occur. The value Y should not be +** used again after sqlite3_free_filename(Y) has been called. This means +** that if the [sqlite3_vfs.xOpen()] method of a VFS has been called using Y, +** then the corresponding [sqlite3_io_methods.xClose()] method should also be +** invoked prior to calling sqlite3_free_filename(Y). 
+*/ +SQLITE_API sqlite3_filename sqlite3_create_filename( + const char *zDatabase, + const char *zJournal, + const char *zWal, + int nParam, + const char **azParam +); +SQLITE_API void sqlite3_free_filename(sqlite3_filename); + +/* +** CAPI3REF: Error Codes And Messages +** METHOD: sqlite3 +** +** ^If the most recent sqlite3_* API call associated with +** [database connection] D failed, then the sqlite3_errcode(D) interface +** returns the numeric [result code] or [extended result code] for that +** API call. +** ^The sqlite3_extended_errcode() +** interface is the same except that it always returns the +** [extended result code] even when extended result codes are +** disabled. +** +** The values returned by sqlite3_errcode() and/or +** sqlite3_extended_errcode() might change with each API call. +** Except, there are some interfaces that are guaranteed to never +** change the value of the error code. The error-code preserving +** interfaces include the following: +** +**
    +**
  • sqlite3_errcode() +**
  • sqlite3_extended_errcode() +**
  • sqlite3_errmsg() +**
  • sqlite3_errmsg16() +**
  • sqlite3_error_offset() +**
+** +** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language +** text that describes the error, as either UTF-8 or UTF-16 respectively, +** or NULL if no error message is available. +** (See how SQLite handles [invalid UTF] for exceptions to this rule.) +** ^(Memory to hold the error message string is managed internally. +** The application does not need to worry about freeing the result. +** However, the error string might be overwritten or deallocated by +** subsequent calls to other SQLite interface functions.)^ +** +** ^The sqlite3_errstr(E) interface returns the English-language text +** that describes the [result code] E, as UTF-8, or NULL if E is not an +** result code for which a text error message is available. +** ^(Memory to hold the error message string is managed internally +** and must not be freed by the application)^. +** +** ^If the most recent error references a specific token in the input +** SQL, the sqlite3_error_offset() interface returns the byte offset +** of the start of that token. ^The byte offset returned by +** sqlite3_error_offset() assumes that the input SQL is UTF8. +** ^If the most recent error does not reference a specific token in the input +** SQL, then the sqlite3_error_offset() function returns -1. +** +** When the serialized [threading mode] is in use, it might be the +** case that a second error occurs on a separate thread in between +** the time of the first error and the call to these interfaces. +** When that happens, the second error will be reported since these +** interfaces always report the most recent result. To avoid +** this, each thread can obtain exclusive use of the [database connection] D +** by invoking [sqlite3_mutex_enter]([sqlite3_db_mutex](D)) before beginning +** to use D and invoking [sqlite3_mutex_leave]([sqlite3_db_mutex](D)) after +** all calls to the interfaces listed here are completed. 
+** +** If an interface fails with SQLITE_MISUSE, that means the interface +** was invoked incorrectly by the application. In that case, the +** error code and message may or may not be set. +*/ +SQLITE_API int sqlite3_errcode(sqlite3 *db); +SQLITE_API int sqlite3_extended_errcode(sqlite3 *db); +SQLITE_API const char *sqlite3_errmsg(sqlite3*); +SQLITE_API const void *sqlite3_errmsg16(sqlite3*); +SQLITE_API const char *sqlite3_errstr(int); +SQLITE_API int sqlite3_error_offset(sqlite3 *db); + +/* +** CAPI3REF: Prepared Statement Object +** KEYWORDS: {prepared statement} {prepared statements} +** +** An instance of this object represents a single SQL statement that +** has been compiled into binary form and is ready to be evaluated. +** +** Think of each SQL statement as a separate computer program. The +** original SQL text is source code. A prepared statement object +** is the compiled object code. All SQL must be converted into a +** prepared statement before it can be run. +** +** The life-cycle of a prepared statement object usually goes like this: +** +**
    +**
  1. Create the prepared statement object using [sqlite3_prepare_v2()]. +**
  2. Bind values to [parameters] using the sqlite3_bind_*() +** interfaces. +**
  3. Run the SQL by calling [sqlite3_step()] one or more times. +**
  4. Reset the prepared statement using [sqlite3_reset()] then go back +** to step 2. Do this zero or more times. +**
  5. Destroy the object using [sqlite3_finalize()]. +**
+*/ +typedef struct sqlite3_stmt sqlite3_stmt; + +/* +** CAPI3REF: Run-time Limits +** METHOD: sqlite3 +** +** ^(This interface allows the size of various constructs to be limited +** on a connection by connection basis. The first parameter is the +** [database connection] whose limit is to be set or queried. The +** second parameter is one of the [limit categories] that define a +** class of constructs to be size limited. The third parameter is the +** new limit for that construct.)^ +** +** ^If the new limit is a negative number, the limit is unchanged. +** ^(For each limit category SQLITE_LIMIT_NAME there is a +** [limits | hard upper bound] +** set at compile-time by a C preprocessor macro called +** [limits | SQLITE_MAX_NAME]. +** (The "_LIMIT_" in the name is changed to "_MAX_".))^ +** ^Attempts to increase a limit above its hard upper bound are +** silently truncated to the hard upper bound. +** +** ^Regardless of whether or not the limit was changed, the +** [sqlite3_limit()] interface returns the prior value of the limit. +** ^Hence, to find the current value of a limit without changing it, +** simply invoke this interface with the third parameter set to -1. +** +** Run-time limits are intended for use in applications that manage +** both their own internal database and also databases that are controlled +** by untrusted external sources. An example application might be a +** web browser that has its own databases for storing history and +** separate databases controlled by JavaScript applications downloaded +** off the Internet. The internal databases can be given the +** large, default limits. Databases managed by external sources can +** be given much smaller limits designed to prevent a denial of service +** attack. Developers might also want to use the [sqlite3_set_authorizer()] +** interface to further control untrusted SQL. The size of the database +** created by an untrusted script can be contained using the +** [max_page_count] [PRAGMA]. 
+** +** New run-time limit categories may be added in future releases. +*/ +SQLITE_API int sqlite3_limit(sqlite3*, int id, int newVal); + +/* +** CAPI3REF: Run-Time Limit Categories +** KEYWORDS: {limit category} {*limit categories} +** +** These constants define various performance limits +** that can be lowered at run-time using [sqlite3_limit()]. +** The synopsis of the meanings of the various limits is shown below. +** Additional information is available at [limits | Limits in SQLite]. +** +**
+** [[SQLITE_LIMIT_LENGTH]] ^(
SQLITE_LIMIT_LENGTH
+**
The maximum size of any string or BLOB or table row, in bytes.
)^ +** +** [[SQLITE_LIMIT_SQL_LENGTH]] ^(
SQLITE_LIMIT_SQL_LENGTH
+**
The maximum length of an SQL statement, in bytes.
)^ +** +** [[SQLITE_LIMIT_COLUMN]] ^(
SQLITE_LIMIT_COLUMN
+**
The maximum number of columns in a table definition or in the +** result set of a [SELECT] or the maximum number of columns in an index +** or in an ORDER BY or GROUP BY clause.
)^ +** +** [[SQLITE_LIMIT_EXPR_DEPTH]] ^(
SQLITE_LIMIT_EXPR_DEPTH
+**
The maximum depth of the parse tree on any expression.
)^ +** +** [[SQLITE_LIMIT_COMPOUND_SELECT]] ^(
SQLITE_LIMIT_COMPOUND_SELECT
+**
The maximum number of terms in a compound SELECT statement.
)^ +** +** [[SQLITE_LIMIT_VDBE_OP]] ^(
SQLITE_LIMIT_VDBE_OP
+**
The maximum number of instructions in a virtual machine program +** used to implement an SQL statement. If [sqlite3_prepare_v2()] or +** the equivalent tries to allocate space for more than this many opcodes +** in a single prepared statement, an SQLITE_NOMEM error is returned.
)^ +** +** [[SQLITE_LIMIT_FUNCTION_ARG]] ^(
SQLITE_LIMIT_FUNCTION_ARG
+**
The maximum number of arguments on a function.
)^ +** +** [[SQLITE_LIMIT_ATTACHED]] ^(
SQLITE_LIMIT_ATTACHED
+**
The maximum number of [ATTACH | attached databases].)^
+** +** [[SQLITE_LIMIT_LIKE_PATTERN_LENGTH]] +** ^(
SQLITE_LIMIT_LIKE_PATTERN_LENGTH
+**
The maximum length of the pattern argument to the [LIKE] or +** [GLOB] operators.
)^ +** +** [[SQLITE_LIMIT_VARIABLE_NUMBER]] +** ^(
SQLITE_LIMIT_VARIABLE_NUMBER
+**
The maximum index number of any [parameter] in an SQL statement.)^ +** +** [[SQLITE_LIMIT_TRIGGER_DEPTH]] ^(
SQLITE_LIMIT_TRIGGER_DEPTH
+**
The maximum depth of recursion for triggers.
)^ +** +** [[SQLITE_LIMIT_WORKER_THREADS]] ^(
SQLITE_LIMIT_WORKER_THREADS
+**
The maximum number of auxiliary worker threads that a single +** [prepared statement] may start.
)^ +**
+*/ +#define SQLITE_LIMIT_LENGTH 0 +#define SQLITE_LIMIT_SQL_LENGTH 1 +#define SQLITE_LIMIT_COLUMN 2 +#define SQLITE_LIMIT_EXPR_DEPTH 3 +#define SQLITE_LIMIT_COMPOUND_SELECT 4 +#define SQLITE_LIMIT_VDBE_OP 5 +#define SQLITE_LIMIT_FUNCTION_ARG 6 +#define SQLITE_LIMIT_ATTACHED 7 +#define SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8 +#define SQLITE_LIMIT_VARIABLE_NUMBER 9 +#define SQLITE_LIMIT_TRIGGER_DEPTH 10 +#define SQLITE_LIMIT_WORKER_THREADS 11 + +/* +** CAPI3REF: Prepare Flags +** +** These constants define various flags that can be passed into +** "prepFlags" parameter of the [sqlite3_prepare_v3()] and +** [sqlite3_prepare16_v3()] interfaces. +** +** New flags may be added in future releases of SQLite. +** +**
+** [[SQLITE_PREPARE_PERSISTENT]] ^(
SQLITE_PREPARE_PERSISTENT
+**
The SQLITE_PREPARE_PERSISTENT flag is a hint to the query planner +** that the prepared statement will be retained for a long time and +** probably reused many times.)^ ^Without this flag, [sqlite3_prepare_v3()] +** and [sqlite3_prepare16_v3()] assume that the prepared statement will +** be used just once or at most a few times and then destroyed using +** [sqlite3_finalize()] relatively soon. The current implementation acts +** on this hint by avoiding the use of [lookaside memory] so as not to +** deplete the limited store of lookaside memory. Future versions of +** SQLite may act on this hint differently. +** +** [[SQLITE_PREPARE_NORMALIZE]]
SQLITE_PREPARE_NORMALIZE
+**
The SQLITE_PREPARE_NORMALIZE flag is a no-op. This flag used +** to be required for any prepared statement that wanted to use the +** [sqlite3_normalized_sql()] interface. However, the +** [sqlite3_normalized_sql()] interface is now available to all +** prepared statements, regardless of whether or not they use this +** flag. +** +** [[SQLITE_PREPARE_NO_VTAB]]
SQLITE_PREPARE_NO_VTAB
+**
The SQLITE_PREPARE_NO_VTAB flag causes the SQL compiler +** to return an error (error code SQLITE_ERROR) if the statement uses +** any virtual tables. +**
+*/ +#define SQLITE_PREPARE_PERSISTENT 0x01 +#define SQLITE_PREPARE_NORMALIZE 0x02 +#define SQLITE_PREPARE_NO_VTAB 0x04 + +/* +** CAPI3REF: Compiling An SQL Statement +** KEYWORDS: {SQL statement compiler} +** METHOD: sqlite3 +** CONSTRUCTOR: sqlite3_stmt +** +** To execute an SQL statement, it must first be compiled into a byte-code +** program using one of these routines. Or, in other words, these routines +** are constructors for the [prepared statement] object. +** +** The preferred routine to use is [sqlite3_prepare_v2()]. The +** [sqlite3_prepare()] interface is legacy and should be avoided. +** [sqlite3_prepare_v3()] has an extra "prepFlags" option that is used +** for special purposes. +** +** The use of the UTF-8 interfaces is preferred, as SQLite currently +** does all parsing using UTF-8. The UTF-16 interfaces are provided +** as a convenience. The UTF-16 interfaces work by converting the +** input text into UTF-8, then invoking the corresponding UTF-8 interface. +** +** The first argument, "db", is a [database connection] obtained from a +** prior successful call to [sqlite3_open()], [sqlite3_open_v2()] or +** [sqlite3_open16()]. The database connection must not have been closed. +** +** The second argument, "zSql", is the statement to be compiled, encoded +** as either UTF-8 or UTF-16. The sqlite3_prepare(), sqlite3_prepare_v2(), +** and sqlite3_prepare_v3() +** interfaces use UTF-8, and sqlite3_prepare16(), sqlite3_prepare16_v2(), +** and sqlite3_prepare16_v3() use UTF-16. +** +** ^If the nByte argument is negative, then zSql is read up to the +** first zero terminator. ^If nByte is positive, then it is the +** number of bytes read from zSql. ^If nByte is zero, then no prepared +** statement is generated. +** If the caller knows that the supplied string is nul-terminated, then +** there is a small performance advantage to passing an nByte parameter that +** is the number of bytes in the input string including +** the nul-terminator. 
+** +** ^If pzTail is not NULL then *pzTail is made to point to the first byte +** past the end of the first SQL statement in zSql. These routines only +** compile the first statement in zSql, so *pzTail is left pointing to +** what remains uncompiled. +** +** ^*ppStmt is left pointing to a compiled [prepared statement] that can be +** executed using [sqlite3_step()]. ^If there is an error, *ppStmt is set +** to NULL. ^If the input text contains no SQL (if the input is an empty +** string or a comment) then *ppStmt is set to NULL. +** The calling procedure is responsible for deleting the compiled +** SQL statement using [sqlite3_finalize()] after it has finished with it. +** ppStmt may not be NULL. +** +** ^On success, the sqlite3_prepare() family of routines return [SQLITE_OK]; +** otherwise an [error code] is returned. +** +** The sqlite3_prepare_v2(), sqlite3_prepare_v3(), sqlite3_prepare16_v2(), +** and sqlite3_prepare16_v3() interfaces are recommended for all new programs. +** The older interfaces (sqlite3_prepare() and sqlite3_prepare16()) +** are retained for backwards compatibility, but their use is discouraged. +** ^In the "vX" interfaces, the prepared statement +** that is returned (the [sqlite3_stmt] object) contains a copy of the +** original SQL text. This causes the [sqlite3_step()] interface to +** behave differently in three ways: +** +**
    +**
  1. +** ^If the database schema changes, instead of returning [SQLITE_SCHEMA] as it +** always used to do, [sqlite3_step()] will automatically recompile the SQL +** statement and try to run it again. As many as [SQLITE_MAX_SCHEMA_RETRY] +** retries will occur before sqlite3_step() gives up and returns an error. +**
  2. +** +**
  3. +** ^When an error occurs, [sqlite3_step()] will return one of the detailed +** [error codes] or [extended error codes]. ^The legacy behavior was that +** [sqlite3_step()] would only return a generic [SQLITE_ERROR] result code +** and the application would have to make a second call to [sqlite3_reset()] +** in order to find the underlying cause of the problem. With the "v2" prepare +** interfaces, the underlying reason for the error is returned immediately. +**
  4. +** +**
  5. +** ^If the specific value bound to a [parameter | host parameter] in the +** WHERE clause might influence the choice of query plan for a statement, +** then the statement will be automatically recompiled, as if there had been +** a schema change, on the first [sqlite3_step()] call following any change +** to the [sqlite3_bind_text | bindings] of that [parameter]. +** ^The specific value of a WHERE-clause [parameter] might influence the +** choice of query plan if the parameter is the left-hand side of a [LIKE] +** or [GLOB] operator or if the parameter is compared to an indexed column +** and the [SQLITE_ENABLE_STAT4] compile-time option is enabled. +**
  6. +**
+** +**

^sqlite3_prepare_v3() differs from sqlite3_prepare_v2() only in having +** the extra prepFlags parameter, which is a bit array consisting of zero or +** more of the [SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_*] flags. ^The +** sqlite3_prepare_v2() interface works exactly the same as +** sqlite3_prepare_v3() with a zero prepFlags parameter. +*/ +SQLITE_API int sqlite3_prepare( + sqlite3 *db, /* Database handle */ + const char *zSql, /* SQL statement, UTF-8 encoded */ + int nByte, /* Maximum length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: Statement handle */ + const char **pzTail /* OUT: Pointer to unused portion of zSql */ +); +SQLITE_API int sqlite3_prepare_v2( + sqlite3 *db, /* Database handle */ + const char *zSql, /* SQL statement, UTF-8 encoded */ + int nByte, /* Maximum length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: Statement handle */ + const char **pzTail /* OUT: Pointer to unused portion of zSql */ +); +SQLITE_API int sqlite3_prepare_v3( + sqlite3 *db, /* Database handle */ + const char *zSql, /* SQL statement, UTF-8 encoded */ + int nByte, /* Maximum length of zSql in bytes. */ + unsigned int prepFlags, /* Zero or more SQLITE_PREPARE_ flags */ + sqlite3_stmt **ppStmt, /* OUT: Statement handle */ + const char **pzTail /* OUT: Pointer to unused portion of zSql */ +); +SQLITE_API int sqlite3_prepare16( + sqlite3 *db, /* Database handle */ + const void *zSql, /* SQL statement, UTF-16 encoded */ + int nByte, /* Maximum length of zSql in bytes. */ + sqlite3_stmt **ppStmt, /* OUT: Statement handle */ + const void **pzTail /* OUT: Pointer to unused portion of zSql */ +); +SQLITE_API int sqlite3_prepare16_v2( + sqlite3 *db, /* Database handle */ + const void *zSql, /* SQL statement, UTF-16 encoded */ + int nByte, /* Maximum length of zSql in bytes. 
*/ + sqlite3_stmt **ppStmt, /* OUT: Statement handle */ + const void **pzTail /* OUT: Pointer to unused portion of zSql */ +); +SQLITE_API int sqlite3_prepare16_v3( + sqlite3 *db, /* Database handle */ + const void *zSql, /* SQL statement, UTF-16 encoded */ + int nByte, /* Maximum length of zSql in bytes. */ + unsigned int prepFlags, /* Zero or more SQLITE_PREPARE_ flags */ + sqlite3_stmt **ppStmt, /* OUT: Statement handle */ + const void **pzTail /* OUT: Pointer to unused portion of zSql */ +); + +/* +** CAPI3REF: Retrieving Statement SQL +** METHOD: sqlite3_stmt +** +** ^The sqlite3_sql(P) interface returns a pointer to a copy of the UTF-8 +** SQL text used to create [prepared statement] P if P was +** created by [sqlite3_prepare_v2()], [sqlite3_prepare_v3()], +** [sqlite3_prepare16_v2()], or [sqlite3_prepare16_v3()]. +** ^The sqlite3_expanded_sql(P) interface returns a pointer to a UTF-8 +** string containing the SQL text of prepared statement P with +** [bound parameters] expanded. +** ^The sqlite3_normalized_sql(P) interface returns a pointer to a UTF-8 +** string containing the normalized SQL text of prepared statement P. The +** semantics used to normalize a SQL statement are unspecified and subject +** to change. At a minimum, literal values will be replaced with suitable +** placeholders. +** +** ^(For example, if a prepared statement is created using the SQL +** text "SELECT $abc,:xyz" and if parameter $abc is bound to integer 2345 +** and parameter :xyz is unbound, then sqlite3_sql() will return +** the original string, "SELECT $abc,:xyz" but sqlite3_expanded_sql() +** will return "SELECT 2345,NULL".)^ +** +** ^The sqlite3_expanded_sql() interface returns NULL if insufficient memory +** is available to hold the result, or if the result would exceed the +** maximum string length determined by the [SQLITE_LIMIT_LENGTH]. +** +** ^The [SQLITE_TRACE_SIZE_LIMIT] compile-time option limits the size of +** bound parameter expansions. 
^The [SQLITE_OMIT_TRACE] compile-time +** option causes sqlite3_expanded_sql() to always return NULL. +** +** ^The strings returned by sqlite3_sql(P) and sqlite3_normalized_sql(P) +** are managed by SQLite and are automatically freed when the prepared +** statement is finalized. +** ^The string returned by sqlite3_expanded_sql(P), on the other hand, +** is obtained from [sqlite3_malloc()] and must be freed by the application +** by passing it to [sqlite3_free()]. +** +** ^The sqlite3_normalized_sql() interface is only available if +** the [SQLITE_ENABLE_NORMALIZE] compile-time option is defined. +*/ +SQLITE_API const char *sqlite3_sql(sqlite3_stmt *pStmt); +SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt); +#ifdef SQLITE_ENABLE_NORMALIZE +SQLITE_API const char *sqlite3_normalized_sql(sqlite3_stmt *pStmt); +#endif + +/* +** CAPI3REF: Determine If An SQL Statement Writes The Database +** METHOD: sqlite3_stmt +** +** ^The sqlite3_stmt_readonly(X) interface returns true (non-zero) if +** and only if the [prepared statement] X makes no direct changes to +** the content of the database file. +** +** Note that [application-defined SQL functions] or +** [virtual tables] might change the database indirectly as a side effect. +** ^(For example, if an application defines a function "eval()" that +** calls [sqlite3_exec()], then the following SQL statement would +** change the database file through side-effects: +** +**

+**    SELECT eval('DELETE FROM t1') FROM t2;
+** 
+** +** But because the [SELECT] statement does not change the database file +** directly, sqlite3_stmt_readonly() would still return true.)^ +** +** ^Transaction control statements such as [BEGIN], [COMMIT], [ROLLBACK], +** [SAVEPOINT], and [RELEASE] cause sqlite3_stmt_readonly() to return true, +** since the statements themselves do not actually modify the database but +** rather they control the timing of when other statements modify the +** database. ^The [ATTACH] and [DETACH] statements also cause +** sqlite3_stmt_readonly() to return true since, while those statements +** change the configuration of a database connection, they do not make +** changes to the content of the database files on disk. +** ^The sqlite3_stmt_readonly() interface returns true for [BEGIN] since +** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and +** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so +** sqlite3_stmt_readonly() returns false for those commands. +** +** ^This routine returns false if there is any possibility that the +** statement might change the database file. ^A false return does +** not guarantee that the statement will change the database file. +** ^For example, an UPDATE statement might have a WHERE clause that +** makes it a no-op, but the sqlite3_stmt_readonly() result would still +** be false. ^Similarly, a CREATE TABLE IF NOT EXISTS statement is a +** read-only no-op if the table already exists, but +** sqlite3_stmt_readonly() still returns false for such a statement. +** +** ^If prepared statement X is an [EXPLAIN] or [EXPLAIN QUERY PLAN] +** statement, then sqlite3_stmt_readonly(X) returns the same value as +** if the EXPLAIN or EXPLAIN QUERY PLAN prefix were omitted. 
+*/ +SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Query The EXPLAIN Setting For A Prepared Statement +** METHOD: sqlite3_stmt +** +** ^The sqlite3_stmt_isexplain(S) interface returns 1 if the +** prepared statement S is an EXPLAIN statement, or 2 if the +** statement S is an EXPLAIN QUERY PLAN. +** ^The sqlite3_stmt_isexplain(S) interface returns 0 if S is +** an ordinary statement or a NULL pointer. +*/ +SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement +** METHOD: sqlite3_stmt +** +** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN +** setting for [prepared statement] S. If E is zero, then S becomes +** a normal prepared statement. If E is 1, then S behaves as if +** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if +** its SQL text began with "[EXPLAIN QUERY PLAN]". +** +** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared. +** SQLite tries to avoid a reprepare, but a reprepare might be necessary +** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode. +** +** Because of the potential need to reprepare, a call to +** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be +** reprepared because it was created using [sqlite3_prepare()] instead of +** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and +** hence has no saved SQL text with which to reprepare. +** +** Changing the explain setting for a prepared statement does not change +** the original SQL text for the statement. Hence, if the SQL text originally +** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0) +** is called to convert the statement into an ordinary statement, the EXPLAIN +** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S) +** output, even though the statement now acts like a normal SQL statement. 
+** +** This routine returns SQLITE_OK if the explain mode is successfully +** changed, or an error code if the explain mode could not be changed. +** The explain mode cannot be changed while a statement is active. +** Hence, it is good practice to call [sqlite3_reset(S)] +** immediately prior to calling sqlite3_stmt_explain(S,E). +*/ +SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode); + +/* +** CAPI3REF: Determine If A Prepared Statement Has Been Reset +** METHOD: sqlite3_stmt +** +** ^The sqlite3_stmt_busy(S) interface returns true (non-zero) if the +** [prepared statement] S has been stepped at least once using +** [sqlite3_step(S)] but has neither run to completion (returned +** [SQLITE_DONE] from [sqlite3_step(S)]) nor +** been reset using [sqlite3_reset(S)]. ^The sqlite3_stmt_busy(S) +** interface returns false if S is a NULL pointer. If S is not a +** NULL pointer and is not a pointer to a valid [prepared statement] +** object, then the behavior is undefined and probably undesirable. +** +** This interface can be used in combination [sqlite3_next_stmt()] +** to locate all prepared statements associated with a database +** connection that are in need of being reset. This can be used, +** for example, in diagnostic routines to search for prepared +** statements that are holding a transaction open. +*/ +SQLITE_API int sqlite3_stmt_busy(sqlite3_stmt*); + +/* +** CAPI3REF: Dynamically Typed Value Object +** KEYWORDS: {protected sqlite3_value} {unprotected sqlite3_value} +** +** SQLite uses the sqlite3_value object to represent all values +** that can be stored in a database table. SQLite uses dynamic typing +** for the values it stores. ^Values stored in sqlite3_value objects +** can be integers, floating point values, strings, BLOBs, or NULL. +** +** An sqlite3_value object may be either "protected" or "unprotected". +** Some interfaces require a protected sqlite3_value. 
Other interfaces +** will accept either a protected or an unprotected sqlite3_value. +** Every interface that accepts sqlite3_value arguments specifies +** whether or not it requires a protected sqlite3_value. The +** [sqlite3_value_dup()] interface can be used to construct a new +** protected sqlite3_value from an unprotected sqlite3_value. +** +** The terms "protected" and "unprotected" refer to whether or not +** a mutex is held. An internal mutex is held for a protected +** sqlite3_value object but no mutex is held for an unprotected +** sqlite3_value object. If SQLite is compiled to be single-threaded +** (with [SQLITE_THREADSAFE=0] and with [sqlite3_threadsafe()] returning 0) +** or if SQLite is run in one of reduced mutex modes +** [SQLITE_CONFIG_SINGLETHREAD] or [SQLITE_CONFIG_MULTITHREAD] +** then there is no distinction between protected and unprotected +** sqlite3_value objects and they can be used interchangeably. However, +** for maximum code portability it is recommended that applications +** still make the distinction between protected and unprotected +** sqlite3_value objects even when not strictly required. +** +** ^The sqlite3_value objects that are passed as parameters into the +** implementation of [application-defined SQL functions] are protected. +** ^The sqlite3_value objects returned by [sqlite3_vtab_rhs_value()] +** are protected. +** ^The sqlite3_value object returned by +** [sqlite3_column_value()] is unprotected. +** Unprotected sqlite3_value objects may only be used as arguments +** to [sqlite3_result_value()], [sqlite3_bind_value()], and +** [sqlite3_value_dup()]. +** The [sqlite3_value_blob | sqlite3_value_type()] family of +** interfaces require protected sqlite3_value objects. +*/ +typedef struct sqlite3_value sqlite3_value; + +/* +** CAPI3REF: SQL Function Context Object +** +** The context in which an SQL function executes is stored in an +** sqlite3_context object. 
^A pointer to an sqlite3_context object
+** is always the first parameter to [application-defined SQL functions].
+** The application-defined SQL function implementation will pass this
+** pointer through into calls to [sqlite3_result_int | sqlite3_result()],
+** [sqlite3_aggregate_context()], [sqlite3_user_data()],
+** [sqlite3_context_db_handle()], [sqlite3_get_auxdata()],
+** and/or [sqlite3_set_auxdata()].
+*/
+typedef struct sqlite3_context sqlite3_context;
+
+/*
+** CAPI3REF: Binding Values To Prepared Statements
+** KEYWORDS: {host parameter} {host parameters} {host parameter name}
+** KEYWORDS: {SQL parameter} {SQL parameters} {parameter binding}
+** METHOD: sqlite3_stmt
+**
+** ^(In the SQL statement text input to [sqlite3_prepare_v2()] and its variants,
+** literals may be replaced by a [parameter] that matches one of the following
+** templates:
+**
    +**
  • ? +**
  • ?NNN +**
  • :VVV +**
  • @VVV +**
  • $VVV +**
+** +** In the templates above, NNN represents an integer literal, +** and VVV represents an alphanumeric identifier.)^ ^The values of these +** parameters (also called "host parameter names" or "SQL parameters") +** can be set using the sqlite3_bind_*() routines defined here. +** +** ^The first argument to the sqlite3_bind_*() routines is always +** a pointer to the [sqlite3_stmt] object returned from +** [sqlite3_prepare_v2()] or its variants. +** +** ^The second argument is the index of the SQL parameter to be set. +** ^The leftmost SQL parameter has an index of 1. ^When the same named +** SQL parameter is used more than once, second and subsequent +** occurrences have the same index as the first occurrence. +** ^The index for named parameters can be looked up using the +** [sqlite3_bind_parameter_index()] API if desired. ^The index +** for "?NNN" parameters is the value of NNN. +** ^The NNN value must be between 1 and the [sqlite3_limit()] +** parameter [SQLITE_LIMIT_VARIABLE_NUMBER] (default value: 32766). +** +** ^The third argument is the value to bind to the parameter. +** ^If the third parameter to sqlite3_bind_text() or sqlite3_bind_text16() +** or sqlite3_bind_blob() is a NULL pointer then the fourth parameter +** is ignored and the end result is the same as sqlite3_bind_null(). +** ^If the third parameter to sqlite3_bind_text() is not NULL, then +** it should be a pointer to well-formed UTF8 text. +** ^If the third parameter to sqlite3_bind_text16() is not NULL, then +** it should be a pointer to well-formed UTF16 text. +** ^If the third parameter to sqlite3_bind_text64() is not NULL, then +** it should be a pointer to a well-formed unicode string that is +** either UTF8 if the sixth parameter is SQLITE_UTF8, or UTF16 +** otherwise. 
+**
+** [[byte-order determination rules]] ^The byte-order of
+** UTF16 input text is determined by the byte-order mark (BOM, U+FEFF)
+** found in the first character, which is removed, or in the absence of a BOM
+** the byte order is the native byte order of the host
+** machine for sqlite3_bind_text16() or the byte order specified in
+** the 6th parameter for sqlite3_bind_text64().)^
+** ^If UTF16 input text contains invalid unicode
+** characters, then SQLite might change those invalid characters
+** into the unicode replacement character: U+FFFD.
+**
+** ^(In those routines that have a fourth argument, its value is the
+** number of bytes in the parameter.  To be clear: the value is the
+** number of bytes in the value, not the number of characters.)^
+** ^If the fourth parameter to sqlite3_bind_text() or sqlite3_bind_text16()
+** is negative, then the length of the string is
+** the number of bytes up to the first zero terminator.
+** If the fourth parameter to sqlite3_bind_blob() is negative, then
+** the behavior is undefined.
+** If a non-negative fourth parameter is provided to sqlite3_bind_text()
+** or sqlite3_bind_text16() or sqlite3_bind_text64() then
+** that parameter must be the byte offset
+** where the NUL terminator would occur assuming the string were NUL
+** terminated.  If any NUL characters occur at byte offsets less than
+** the value of the fourth parameter then the resulting string value will
+** contain embedded NULs.  The result of expressions involving strings
+** with embedded NULs is undefined.
+**
+** ^The fifth argument to the BLOB and string binding interfaces controls
+** or indicates the lifetime of the object referenced by the third parameter.
+** These three options exist:
+** ^ (1) A destructor to dispose of the BLOB or string after SQLite has finished
+** with it may be passed.
^It is called to dispose of the BLOB or string even +** if the call to the bind API fails, except the destructor is not called if +** the third parameter is a NULL pointer or the fourth parameter is negative. +** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that +** the application remains responsible for disposing of the object. ^In this +** case, the object and the provided pointer to it must remain valid until +** either the prepared statement is finalized or the same SQL parameter is +** bound to something else, whichever occurs sooner. +** ^ (3) The constant, [SQLITE_TRANSIENT], may be passed to indicate that the +** object is to be copied prior to the return from sqlite3_bind_*(). ^The +** object and pointer to it must remain valid until then. ^SQLite will then +** manage the lifetime of its private copy. +** +** ^The sixth argument to sqlite3_bind_text64() must be one of +** [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE] +** to specify the encoding of the text in the third parameter. If +** the sixth argument to sqlite3_bind_text64() is not one of the +** allowed values shown above, or if the text encoding is different +** from the encoding specified by the sixth parameter, then the behavior +** is undefined. +** +** ^The sqlite3_bind_zeroblob() routine binds a BLOB of length N that +** is filled with zeroes. ^A zeroblob uses a fixed amount of memory +** (just an integer to hold its size) while it is being processed. +** Zeroblobs are intended to serve as placeholders for BLOBs whose +** content is later written using +** [sqlite3_blob_open | incremental BLOB I/O] routines. +** ^A negative value for the zeroblob results in a zero-length BLOB. +** +** ^The sqlite3_bind_pointer(S,I,P,T,D) routine causes the I-th parameter in +** [prepared statement] S to have an SQL value of NULL, but to also be +** associated with the pointer P of type T. 
^D is either a NULL pointer or +** a pointer to a destructor function for P. ^SQLite will invoke the +** destructor D with a single argument of P when it is finished using +** P. The T parameter should be a static string, preferably a string +** literal. The sqlite3_bind_pointer() routine is part of the +** [pointer passing interface] added for SQLite 3.20.0. +** +** ^If any of the sqlite3_bind_*() routines are called with a NULL pointer +** for the [prepared statement] or with a prepared statement for which +** [sqlite3_step()] has been called more recently than [sqlite3_reset()], +** then the call will return [SQLITE_MISUSE]. If any sqlite3_bind_() +** routine is passed a [prepared statement] that has been finalized, the +** result is undefined and probably harmful. +** +** ^Bindings are not cleared by the [sqlite3_reset()] routine. +** ^Unbound parameters are interpreted as NULL. +** +** ^The sqlite3_bind_* routines return [SQLITE_OK] on success or an +** [error code] if anything goes wrong. +** ^[SQLITE_TOOBIG] might be returned if the size of a string or BLOB +** exceeds limits imposed by [sqlite3_limit]([SQLITE_LIMIT_LENGTH]) or +** [SQLITE_MAX_LENGTH]. +** ^[SQLITE_RANGE] is returned if the parameter +** index is out of range. ^[SQLITE_NOMEM] is returned if malloc() fails. +** +** See also: [sqlite3_bind_parameter_count()], +** [sqlite3_bind_parameter_name()], and [sqlite3_bind_parameter_index()]. 
+*/ +SQLITE_API int sqlite3_bind_blob(sqlite3_stmt*, int, const void*, int n, void(*)(void*)); +SQLITE_API int sqlite3_bind_blob64(sqlite3_stmt*, int, const void*, sqlite3_uint64, + void(*)(void*)); +SQLITE_API int sqlite3_bind_double(sqlite3_stmt*, int, double); +SQLITE_API int sqlite3_bind_int(sqlite3_stmt*, int, int); +SQLITE_API int sqlite3_bind_int64(sqlite3_stmt*, int, sqlite3_int64); +SQLITE_API int sqlite3_bind_null(sqlite3_stmt*, int); +SQLITE_API int sqlite3_bind_text(sqlite3_stmt*,int,const char*,int,void(*)(void*)); +SQLITE_API int sqlite3_bind_text16(sqlite3_stmt*, int, const void*, int, void(*)(void*)); +SQLITE_API int sqlite3_bind_text64(sqlite3_stmt*, int, const char*, sqlite3_uint64, + void(*)(void*), unsigned char encoding); +SQLITE_API int sqlite3_bind_value(sqlite3_stmt*, int, const sqlite3_value*); +SQLITE_API int sqlite3_bind_pointer(sqlite3_stmt*, int, void*, const char*,void(*)(void*)); +SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt*, int, int n); +SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt*, int, sqlite3_uint64); + +/* +** CAPI3REF: Number Of SQL Parameters +** METHOD: sqlite3_stmt +** +** ^This routine can be used to find the number of [SQL parameters] +** in a [prepared statement]. SQL parameters are tokens of the +** form "?", "?NNN", ":AAA", "$AAA", or "@AAA" that serve as +** placeholders for values that are [sqlite3_bind_blob | bound] +** to the parameters at a later time. +** +** ^(This routine actually returns the index of the largest (rightmost) +** parameter. For all forms except ?NNN, this will correspond to the +** number of unique parameters. If parameters of the ?NNN form are used, +** there may be gaps in the list.)^ +** +** See also: [sqlite3_bind_blob|sqlite3_bind()], +** [sqlite3_bind_parameter_name()], and +** [sqlite3_bind_parameter_index()]. 
+*/ +SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt*); + +/* +** CAPI3REF: Name Of A Host Parameter +** METHOD: sqlite3_stmt +** +** ^The sqlite3_bind_parameter_name(P,N) interface returns +** the name of the N-th [SQL parameter] in the [prepared statement] P. +** ^(SQL parameters of the form "?NNN" or ":AAA" or "@AAA" or "$AAA" +** have a name which is the string "?NNN" or ":AAA" or "@AAA" or "$AAA" +** respectively. +** In other words, the initial ":" or "$" or "@" or "?" +** is included as part of the name.)^ +** ^Parameters of the form "?" without a following integer have no name +** and are referred to as "nameless" or "anonymous parameters". +** +** ^The first host parameter has an index of 1, not 0. +** +** ^If the value N is out of range or if the N-th parameter is +** nameless, then NULL is returned. ^The returned string is +** always in UTF-8 encoding even if the named parameter was +** originally specified as UTF-16 in [sqlite3_prepare16()], +** [sqlite3_prepare16_v2()], or [sqlite3_prepare16_v3()]. +** +** See also: [sqlite3_bind_blob|sqlite3_bind()], +** [sqlite3_bind_parameter_count()], and +** [sqlite3_bind_parameter_index()]. +*/ +SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt*, int); + +/* +** CAPI3REF: Index Of A Parameter With A Given Name +** METHOD: sqlite3_stmt +** +** ^Return the index of an SQL parameter given its name. ^The +** index value returned is suitable for use as the second +** parameter to [sqlite3_bind_blob|sqlite3_bind()]. ^A zero +** is returned if no matching parameter is found. ^The parameter +** name must be given in UTF-8 even if the original statement +** was prepared from UTF-16 text using [sqlite3_prepare16_v2()] or +** [sqlite3_prepare16_v3()]. +** +** See also: [sqlite3_bind_blob|sqlite3_bind()], +** [sqlite3_bind_parameter_count()], and +** [sqlite3_bind_parameter_name()]. 
+*/ +SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt*, const char *zName); + +/* +** CAPI3REF: Reset All Bindings On A Prepared Statement +** METHOD: sqlite3_stmt +** +** ^Contrary to the intuition of many, [sqlite3_reset()] does not reset +** the [sqlite3_bind_blob | bindings] on a [prepared statement]. +** ^Use this routine to reset all host parameters to NULL. +*/ +SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*); + +/* +** CAPI3REF: Number Of Columns In A Result Set +** METHOD: sqlite3_stmt +** +** ^Return the number of columns in the result set returned by the +** [prepared statement]. ^If this routine returns 0, that means the +** [prepared statement] returns no data (for example an [UPDATE]). +** ^However, just because this routine returns a positive number does not +** mean that one or more rows of data will be returned. ^A SELECT statement +** will always have a positive sqlite3_column_count() but depending on the +** WHERE clause constraints and the table content, it might return no rows. +** +** See also: [sqlite3_data_count()] +*/ +SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Column Names In A Result Set +** METHOD: sqlite3_stmt +** +** ^These routines return the name assigned to a particular column +** in the result set of a [SELECT] statement. ^The sqlite3_column_name() +** interface returns a pointer to a zero-terminated UTF-8 string +** and sqlite3_column_name16() returns a pointer to a zero-terminated +** UTF-16 string. ^The first parameter is the [prepared statement] +** that implements the [SELECT] statement. ^The second parameter is the +** column number. ^The leftmost column is number 0. 
+** +** ^The returned string pointer is valid until either the [prepared statement] +** is destroyed by [sqlite3_finalize()] or until the statement is automatically +** reprepared by the first call to [sqlite3_step()] for a particular run +** or until the next call to +** sqlite3_column_name() or sqlite3_column_name16() on the same column. +** +** ^If sqlite3_malloc() fails during the processing of either routine +** (for example during a conversion from UTF-8 to UTF-16) then a +** NULL pointer is returned. +** +** ^The name of a result column is the value of the "AS" clause for +** that column, if there is an AS clause. If there is no AS clause +** then the name of the column is unspecified and may change from +** one release of SQLite to the next. +*/ +SQLITE_API const char *sqlite3_column_name(sqlite3_stmt*, int N); +SQLITE_API const void *sqlite3_column_name16(sqlite3_stmt*, int N); + +/* +** CAPI3REF: Source Of Data In A Query Result +** METHOD: sqlite3_stmt +** +** ^These routines provide a means to determine the database, table, and +** table column that is the origin of a particular result column in +** [SELECT] statement. +** ^The name of the database or table or column can be returned as +** either a UTF-8 or UTF-16 string. ^The _database_ routines return +** the database name, the _table_ routines return the table name, and +** the origin_ routines return the column name. +** ^The returned string is valid until the [prepared statement] is destroyed +** using [sqlite3_finalize()] or until the statement is automatically +** reprepared by the first call to [sqlite3_step()] for a particular run +** or until the same information is requested +** again in a different encoding. +** +** ^The names returned are the original un-aliased names of the +** database, table, and column. +** +** ^The first argument to these interfaces is a [prepared statement]. 
+** ^These functions return information about the Nth result column returned by +** the statement, where N is the second function argument. +** ^The left-most column is column 0 for these routines. +** +** ^If the Nth column returned by the statement is an expression or +** subquery and is not a column value, then all of these functions return +** NULL. ^These routines might also return NULL if a memory allocation error +** occurs. ^Otherwise, they return the name of the attached database, table, +** or column that query result column was extracted from. +** +** ^As with all other SQLite APIs, those whose names end with "16" return +** UTF-16 encoded strings and the other functions return UTF-8. +** +** ^These APIs are only available if the library was compiled with the +** [SQLITE_ENABLE_COLUMN_METADATA] C-preprocessor symbol. +** +** If two or more threads call one or more +** [sqlite3_column_database_name | column metadata interfaces] +** for the same [prepared statement] and result column +** at the same time then the results are undefined. +*/ +SQLITE_API const char *sqlite3_column_database_name(sqlite3_stmt*,int); +SQLITE_API const void *sqlite3_column_database_name16(sqlite3_stmt*,int); +SQLITE_API const char *sqlite3_column_table_name(sqlite3_stmt*,int); +SQLITE_API const void *sqlite3_column_table_name16(sqlite3_stmt*,int); +SQLITE_API const char *sqlite3_column_origin_name(sqlite3_stmt*,int); +SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt*,int); + +/* +** CAPI3REF: Declared Datatype Of A Query Result +** METHOD: sqlite3_stmt +** +** ^(The first parameter is a [prepared statement]. +** If this statement is a [SELECT] statement and the Nth column of the +** returned result set of that [SELECT] is a table column (not an +** expression or subquery) then the declared type of the table +** column is returned.)^ ^If the Nth column of the result set is an +** expression or subquery, then a NULL pointer is returned. 
+** ^The returned string is always UTF-8 encoded. +** +** ^(For example, given the database schema: +** +** CREATE TABLE t1(c1 VARIANT); +** +** and the following statement to be compiled: +** +** SELECT c1 + 1, c1 FROM t1; +** +** this routine would return the string "VARIANT" for the second result +** column (i==1), and a NULL pointer for the first result column (i==0).)^ +** +** ^SQLite uses dynamic run-time typing. ^So just because a column +** is declared to contain a particular type does not mean that the +** data stored in that column is of the declared type. SQLite is +** strongly typed, but the typing is dynamic not static. ^Type +** is associated with individual values, not with the containers +** used to hold those values. +*/ +SQLITE_API const char *sqlite3_column_decltype(sqlite3_stmt*,int); +SQLITE_API const void *sqlite3_column_decltype16(sqlite3_stmt*,int); + +/* +** CAPI3REF: Evaluate An SQL Statement +** METHOD: sqlite3_stmt +** +** After a [prepared statement] has been prepared using any of +** [sqlite3_prepare_v2()], [sqlite3_prepare_v3()], [sqlite3_prepare16_v2()], +** or [sqlite3_prepare16_v3()] or one of the legacy +** interfaces [sqlite3_prepare()] or [sqlite3_prepare16()], this function +** must be called one or more times to evaluate the statement. +** +** The details of the behavior of the sqlite3_step() interface depend +** on whether the statement was prepared using the newer "vX" interfaces +** [sqlite3_prepare_v3()], [sqlite3_prepare_v2()], [sqlite3_prepare16_v3()], +** [sqlite3_prepare16_v2()] or the older legacy +** interfaces [sqlite3_prepare()] and [sqlite3_prepare16()]. The use of the +** new "vX" interface is recommended for new applications but the legacy +** interface will continue to be supported. +** +** ^In the legacy interface, the return value will be either [SQLITE_BUSY], +** [SQLITE_DONE], [SQLITE_ROW], [SQLITE_ERROR], or [SQLITE_MISUSE]. 
+** ^With the "v2" interface, any of the other [result codes] or
+** [extended result codes] might be returned as well.
+**
+** ^[SQLITE_BUSY] means that the database engine was unable to acquire the
+** database locks it needs to do its job.  ^If the statement is a [COMMIT]
+** or occurs outside of an explicit transaction, then you can retry the
+** statement.  If the statement is not a [COMMIT] and occurs within an
+** explicit transaction then you should rollback the transaction before
+** continuing.
+**
+** ^[SQLITE_DONE] means that the statement has finished executing
+** successfully.  sqlite3_step() should not be called again on this virtual
+** machine without first calling [sqlite3_reset()] to reset the virtual
+** machine back to its initial state.
+**
+** ^If the SQL statement being executed returns any data, then [SQLITE_ROW]
+** is returned each time a new row of data is ready for processing by the
+** caller.  The values may be accessed using the [column access functions].
+** sqlite3_step() is called again to retrieve the next row of data.
+**
+** ^[SQLITE_ERROR] means that a run-time error (such as a constraint
+** violation) has occurred.  sqlite3_step() should not be called again on
+** the VM.  More information may be found by calling [sqlite3_errmsg()].
+** ^With the legacy interface, a more specific error code (for example,
+** [SQLITE_INTERRUPT], [SQLITE_SCHEMA], [SQLITE_CORRUPT], and so forth)
+** can be obtained by calling [sqlite3_reset()] on the
+** [prepared statement].  ^In the "v2" interface,
+** the more specific error code is returned directly by sqlite3_step().
+**
+** [SQLITE_MISUSE] means that this routine was called inappropriately.
+** Perhaps it was called on a [prepared statement] that has
+** already been [sqlite3_finalize | finalized] or on one that had
+** previously returned [SQLITE_ERROR] or [SQLITE_DONE].
Or it could
+** be the case that the same database connection is being used by two or
+** more threads at the same moment in time.
+**
+** For all versions of SQLite up to and including 3.6.23.1, a call to
+** [sqlite3_reset()] was required after sqlite3_step() returned anything
+** other than [SQLITE_ROW] before any subsequent invocation of
+** sqlite3_step().  Failure to reset the prepared statement using
+** [sqlite3_reset()] would result in an [SQLITE_MISUSE] return from
+** sqlite3_step().  But after [version 3.6.23.1] ([dateof:3.6.23.1]),
+** sqlite3_step() began
+** calling [sqlite3_reset()] automatically in this circumstance rather
+** than returning [SQLITE_MISUSE].  This is not considered a compatibility
+** break because any application that ever receives an SQLITE_MISUSE error
+** is broken by definition.  The [SQLITE_OMIT_AUTORESET] compile-time option
+** can be used to restore the legacy behavior.
+**
+** Goofy Interface Alert: In the legacy interface, the sqlite3_step()
+** API always returns a generic error code, [SQLITE_ERROR], following any
+** error other than [SQLITE_BUSY] and [SQLITE_MISUSE].  You must call
+** [sqlite3_reset()] or [sqlite3_finalize()] in order to find one of the
+** specific [error codes] that better describes the error.
+** We admit that this is a goofy design.  The problem has been fixed
+** with the "v2" interface.  If you prepare all of your SQL statements
+** using [sqlite3_prepare_v3()] or [sqlite3_prepare_v2()]
+** or [sqlite3_prepare16_v2()] or [sqlite3_prepare16_v3()] instead
+** of the legacy [sqlite3_prepare()] and [sqlite3_prepare16()] interfaces,
+** then the more specific [error codes] are returned directly
+** by sqlite3_step().  The use of the "vX" interfaces is recommended.
+*/
+SQLITE_API int sqlite3_step(sqlite3_stmt*);
+
+/*
+** CAPI3REF: Number of columns in a result set
+** METHOD: sqlite3_stmt
+**
+** ^The sqlite3_data_count(P) interface returns the number of columns in the
+** current row of the result set of [prepared statement] P.
+** ^If prepared statement P does not have results ready to return
+** (via calls to the [sqlite3_column_int | sqlite3_column()] family of
+** interfaces) then sqlite3_data_count(P) returns 0.
+** ^The sqlite3_data_count(P) routine also returns 0 if P is a NULL pointer.
+** ^The sqlite3_data_count(P) routine returns 0 if the previous call to
+** [sqlite3_step](P) returned [SQLITE_DONE].  ^The sqlite3_data_count(P)
+** will return non-zero if the previous call to [sqlite3_step](P) returned
+** [SQLITE_ROW], except in the case of the [PRAGMA incremental_vacuum]
+** where it always returns zero since each step of that multi-step
+** pragma returns 0 columns of data.
+**
+** See also: [sqlite3_column_count()]
+*/
+SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt);
+
+/*
+** CAPI3REF: Fundamental Datatypes
+** KEYWORDS: SQLITE_TEXT
+**
+** ^(Every value in SQLite has one of five fundamental datatypes:
+**
+**
    +**
  • 64-bit signed integer +**
  • 64-bit IEEE floating point number +**
  • string +**
  • BLOB +**
  • NULL +**
)^ +** +** These constants are codes for each of those types. +** +** Note that the SQLITE_TEXT constant was also used in SQLite version 2 +** for a completely different meaning. Software that links against both +** SQLite version 2 and SQLite version 3 should use SQLITE3_TEXT, not +** SQLITE_TEXT. +*/ +#define SQLITE_INTEGER 1 +#define SQLITE_FLOAT 2 +#define SQLITE_BLOB 4 +#define SQLITE_NULL 5 +#ifdef SQLITE_TEXT +# undef SQLITE_TEXT +#else +# define SQLITE_TEXT 3 +#endif +#define SQLITE3_TEXT 3 + +/* +** CAPI3REF: Result Values From A Query +** KEYWORDS: {column access functions} +** METHOD: sqlite3_stmt +** +** Summary: +**
+**
sqlite3_column_blobBLOB result +**
sqlite3_column_doubleREAL result +**
sqlite3_column_int32-bit INTEGER result +**
sqlite3_column_int6464-bit INTEGER result +**
sqlite3_column_textUTF-8 TEXT result +**
sqlite3_column_text16UTF-16 TEXT result +**
sqlite3_column_valueThe result as an +** [sqlite3_value|unprotected sqlite3_value] object. +**
    +**
sqlite3_column_bytesSize of a BLOB +** or a UTF-8 TEXT result in bytes +**
sqlite3_column_bytes16   +** →  Size of UTF-16 +** TEXT in bytes +**
sqlite3_column_typeDefault +** datatype of the result +**
+** +** Details: +** +** ^These routines return information about a single column of the current +** result row of a query. ^In every case the first argument is a pointer +** to the [prepared statement] that is being evaluated (the [sqlite3_stmt*] +** that was returned from [sqlite3_prepare_v2()] or one of its variants) +** and the second argument is the index of the column for which information +** should be returned. ^The leftmost column of the result set has the index 0. +** ^The number of columns in the result can be determined using +** [sqlite3_column_count()]. +** +** If the SQL statement does not currently point to a valid row, or if the +** column index is out of range, the result is undefined. +** These routines may only be called when the most recent call to +** [sqlite3_step()] has returned [SQLITE_ROW] and neither +** [sqlite3_reset()] nor [sqlite3_finalize()] have been called subsequently. +** If any of these routines are called after [sqlite3_reset()] or +** [sqlite3_finalize()] or after [sqlite3_step()] has returned +** something other than [SQLITE_ROW], the results are undefined. +** If [sqlite3_step()] or [sqlite3_reset()] or [sqlite3_finalize()] +** are called from a different thread while any of these routines +** are pending, then the results are undefined. +** +** The first six interfaces (_blob, _double, _int, _int64, _text, and _text16) +** each return the value of a result column in a specific data format. If +** the result column is not initially in the requested format (for example, +** if the query returns an integer but the sqlite3_column_text() interface +** is used to extract the value) then an automatic type conversion is performed. +** +** ^The sqlite3_column_type() routine returns the +** [SQLITE_INTEGER | datatype code] for the initial data type +** of the result column. ^The returned value is one of [SQLITE_INTEGER], +** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL]. 
+** The return value of sqlite3_column_type() can be used to decide which +** of the first six interface should be used to extract the column value. +** The value returned by sqlite3_column_type() is only meaningful if no +** automatic type conversions have occurred for the value in question. +** After a type conversion, the result of calling sqlite3_column_type() +** is undefined, though harmless. Future +** versions of SQLite may change the behavior of sqlite3_column_type() +** following a type conversion. +** +** If the result is a BLOB or a TEXT string, then the sqlite3_column_bytes() +** or sqlite3_column_bytes16() interfaces can be used to determine the size +** of that BLOB or string. +** +** ^If the result is a BLOB or UTF-8 string then the sqlite3_column_bytes() +** routine returns the number of bytes in that BLOB or string. +** ^If the result is a UTF-16 string, then sqlite3_column_bytes() converts +** the string to UTF-8 and then returns the number of bytes. +** ^If the result is a numeric value then sqlite3_column_bytes() uses +** [sqlite3_snprintf()] to convert that value to a UTF-8 string and returns +** the number of bytes in that string. +** ^If the result is NULL, then sqlite3_column_bytes() returns zero. +** +** ^If the result is a BLOB or UTF-16 string then the sqlite3_column_bytes16() +** routine returns the number of bytes in that BLOB or string. +** ^If the result is a UTF-8 string, then sqlite3_column_bytes16() converts +** the string to UTF-16 and then returns the number of bytes. +** ^If the result is a numeric value then sqlite3_column_bytes16() uses +** [sqlite3_snprintf()] to convert that value to a UTF-16 string and returns +** the number of bytes in that string. +** ^If the result is NULL, then sqlite3_column_bytes16() returns zero. +** +** ^The values returned by [sqlite3_column_bytes()] and +** [sqlite3_column_bytes16()] do not include the zero terminators at the end +** of the string. 
^For clarity: the values returned by +** [sqlite3_column_bytes()] and [sqlite3_column_bytes16()] are the number of +** bytes in the string, not the number of characters. +** +** ^Strings returned by sqlite3_column_text() and sqlite3_column_text16(), +** even empty strings, are always zero-terminated. ^The return +** value from sqlite3_column_blob() for a zero-length BLOB is a NULL pointer. +** +** ^Strings returned by sqlite3_column_text16() always have the endianness +** which is native to the platform, regardless of the text encoding set +** for the database. +** +** Warning: ^The object returned by [sqlite3_column_value()] is an +** [unprotected sqlite3_value] object. In a multithreaded environment, +** an unprotected sqlite3_value object may only be used safely with +** [sqlite3_bind_value()] and [sqlite3_result_value()]. +** If the [unprotected sqlite3_value] object returned by +** [sqlite3_column_value()] is used in any other way, including calls +** to routines like [sqlite3_value_int()], [sqlite3_value_text()], +** or [sqlite3_value_bytes()], the behavior is not threadsafe. +** Hence, the sqlite3_column_value() interface +** is normally only useful within the implementation of +** [application-defined SQL functions] or [virtual tables], not within +** top-level application code. +** +** These routines may attempt to convert the datatype of the result. +** ^For example, if the internal representation is FLOAT and a text result +** is requested, [sqlite3_snprintf()] is used internally to perform the +** conversion automatically. ^(The following table details the conversions +** that are applied: +** +**
+** +**
Internal
Type
Requested
Type
Conversion +** +**
+** <tr><td>  NULL    <td> INTEGER   <td> Result is 0
+** <tr><td>  NULL    <td>  FLOAT    <td> Result is 0.0
+** <tr><td>  NULL    <td>   TEXT    <td> Result is a NULL pointer
+** <tr><td>  NULL    <td>   BLOB    <td> Result is a NULL pointer
+** <tr><td> INTEGER  <td>  FLOAT    <td> Convert from integer to float
+** <tr><td> INTEGER  <td>   TEXT    <td> ASCII rendering of the integer
+** <tr><td> INTEGER  <td>   BLOB    <td> Same as INTEGER->TEXT
+** <tr><td>  FLOAT   <td> INTEGER   <td> [CAST] to INTEGER
+** <tr><td>  FLOAT   <td>   TEXT    <td> ASCII rendering of the float
+** <tr><td>  FLOAT   <td>   BLOB    <td> [CAST] to BLOB
+** <tr><td>   TEXT   <td> INTEGER   <td> [CAST] to INTEGER
+** <tr><td>   TEXT   <td>  FLOAT    <td> [CAST] to REAL
+** <tr><td>   TEXT   <td>   BLOB    <td> No change
+** <tr><td>   BLOB   <td> INTEGER   <td> [CAST] to INTEGER
+** <tr><td>   BLOB   <td>  FLOAT    <td> [CAST] to REAL
+** <tr><td>   BLOB   <td>   TEXT    <td> [CAST] to TEXT, ensure zero terminator
+**
)^ +** +** Note that when type conversions occur, pointers returned by prior +** calls to sqlite3_column_blob(), sqlite3_column_text(), and/or +** sqlite3_column_text16() may be invalidated. +** Type conversions and pointer invalidations might occur +** in the following cases: +** +**
    +**
+** <li> The initial content is a BLOB and sqlite3_column_text() or
+**      sqlite3_column_text16() is called.  A zero-terminator might
+**      need to be added to the string.</li>
+** <li> The initial content is UTF-8 text and sqlite3_column_bytes16() or
+**      sqlite3_column_text16() is called.  The content must be converted
+**      to UTF-16.</li>
+** <li> The initial content is UTF-16 text and sqlite3_column_bytes() or
+**      sqlite3_column_text() is called.  The content must be converted
+**      to UTF-8.</li>
+** +** ^Conversions between UTF-16be and UTF-16le are always done in place and do +** not invalidate a prior pointer, though of course the content of the buffer +** that the prior pointer references will have been modified. Other kinds +** of conversion are done in place when it is possible, but sometimes they +** are not possible and in those cases prior pointers are invalidated. +** +** The safest policy is to invoke these routines +** in one of the following ways: +** +**
    +**
+** <li>sqlite3_column_text() followed by sqlite3_column_bytes()</li>
+** <li>sqlite3_column_blob() followed by sqlite3_column_bytes()</li>
+** <li>sqlite3_column_text16() followed by sqlite3_column_bytes16()</li>
+** +** In other words, you should call sqlite3_column_text(), +** sqlite3_column_blob(), or sqlite3_column_text16() first to force the result +** into the desired format, then invoke sqlite3_column_bytes() or +** sqlite3_column_bytes16() to find the size of the result. Do not mix calls +** to sqlite3_column_text() or sqlite3_column_blob() with calls to +** sqlite3_column_bytes16(), and do not mix calls to sqlite3_column_text16() +** with calls to sqlite3_column_bytes(). +** +** ^The pointers returned are valid until a type conversion occurs as +** described above, or until [sqlite3_step()] or [sqlite3_reset()] or +** [sqlite3_finalize()] is called. ^The memory space used to hold strings +** and BLOBs is freed automatically. Do not pass the pointers returned +** from [sqlite3_column_blob()], [sqlite3_column_text()], etc. into +** [sqlite3_free()]. +** +** As long as the input parameters are correct, these routines will only +** fail if an out-of-memory error occurs during a format conversion. +** Only the following subset of interfaces are subject to out-of-memory +** errors: +** +**
    +**
+** <li> sqlite3_column_blob()
+** <li> sqlite3_column_text()
+** <li> sqlite3_column_text16()
+** <li> sqlite3_column_bytes()
+** <li> sqlite3_column_bytes16()
+** +** If an out-of-memory error occurs, then the return value from these +** routines is the same as if the column had contained an SQL NULL value. +** Valid SQL NULL returns can be distinguished from out-of-memory errors +** by invoking the [sqlite3_errcode()] immediately after the suspect +** return value is obtained and before any +** other SQLite interface is called on the same [database connection]. +*/ +SQLITE_API const void *sqlite3_column_blob(sqlite3_stmt*, int iCol); +SQLITE_API double sqlite3_column_double(sqlite3_stmt*, int iCol); +SQLITE_API int sqlite3_column_int(sqlite3_stmt*, int iCol); +SQLITE_API sqlite3_int64 sqlite3_column_int64(sqlite3_stmt*, int iCol); +SQLITE_API const unsigned char *sqlite3_column_text(sqlite3_stmt*, int iCol); +SQLITE_API const void *sqlite3_column_text16(sqlite3_stmt*, int iCol); +SQLITE_API sqlite3_value *sqlite3_column_value(sqlite3_stmt*, int iCol); +SQLITE_API int sqlite3_column_bytes(sqlite3_stmt*, int iCol); +SQLITE_API int sqlite3_column_bytes16(sqlite3_stmt*, int iCol); +SQLITE_API int sqlite3_column_type(sqlite3_stmt*, int iCol); + +/* +** CAPI3REF: Destroy A Prepared Statement Object +** DESTRUCTOR: sqlite3_stmt +** +** ^The sqlite3_finalize() function is called to delete a [prepared statement]. +** ^If the most recent evaluation of the statement encountered no errors +** or if the statement is never been evaluated, then sqlite3_finalize() returns +** SQLITE_OK. ^If the most recent evaluation of statement S failed, then +** sqlite3_finalize(S) returns the appropriate [error code] or +** [extended error code]. +** +** ^The sqlite3_finalize(S) routine can be called at any point during +** the life cycle of [prepared statement] S: +** before statement S is ever evaluated, after +** one or more calls to [sqlite3_reset()], or after any call +** to [sqlite3_step()] regardless of whether or not the statement has +** completed execution. +** +** ^Invoking sqlite3_finalize() on a NULL pointer is a harmless no-op. 
+** +** The application must finalize every [prepared statement] in order to avoid +** resource leaks. It is a grievous error for the application to try to use +** a prepared statement after it has been finalized. Any use of a prepared +** statement after it has been finalized can result in undefined and +** undesirable behavior such as segfaults and heap corruption. +*/ +SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Reset A Prepared Statement Object +** METHOD: sqlite3_stmt +** +** The sqlite3_reset() function is called to reset a [prepared statement] +** object back to its initial state, ready to be re-executed. +** ^Any SQL statement variables that had values bound to them using +** the [sqlite3_bind_blob | sqlite3_bind_*() API] retain their values. +** Use [sqlite3_clear_bindings()] to reset the bindings. +** +** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S +** back to the beginning of its program. +** +** ^The return code from [sqlite3_reset(S)] indicates whether or not +** the previous evaluation of prepared statement S completed successfully. +** ^If [sqlite3_step(S)] has never before been called on S or if +** [sqlite3_step(S)] has not been called since the previous call +** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return +** [SQLITE_OK]. +** +** ^If the most recent call to [sqlite3_step(S)] for the +** [prepared statement] S indicated an error, then +** [sqlite3_reset(S)] returns an appropriate [error code]. +** ^The [sqlite3_reset(S)] interface might also return an [error code] +** if there were no prior errors but the process of resetting +** the prepared statement caused a new error. 
^For example, if an +** [INSERT] statement with a [RETURNING] clause is only stepped one time, +** that one call to [sqlite3_step(S)] might return SQLITE_ROW but +** the overall statement might still fail and the [sqlite3_reset(S)] call +** might return SQLITE_BUSY if locking constraints prevent the +** database change from committing. Therefore, it is important that +** applications check the return code from [sqlite3_reset(S)] even if +** no prior call to [sqlite3_step(S)] indicated a problem. +** +** ^The [sqlite3_reset(S)] interface does not change the values +** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. +*/ +SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); + + +/* +** CAPI3REF: Create Or Redefine SQL Functions +** KEYWORDS: {function creation routines} +** METHOD: sqlite3 +** +** ^These functions (collectively known as "function creation routines") +** are used to add SQL functions or aggregates or to redefine the behavior +** of existing SQL functions or aggregates. The only differences between +** the three "sqlite3_create_function*" routines are the text encoding +** expected for the second parameter (the name of the function being +** created) and the presence or absence of a destructor callback for +** the application data pointer. Function sqlite3_create_window_function() +** is similar, but allows the user to supply the extra callback functions +** needed by [aggregate window functions]. +** +** ^The first parameter is the [database connection] to which the SQL +** function is to be added. ^If an application uses more than one database +** connection then application-defined SQL functions must be added +** to each database connection separately. +** +** ^The second parameter is the name of the SQL function to be created or +** redefined. ^The length of the name is limited to 255 bytes in a UTF-8 +** representation, exclusive of the zero-terminator. 
^Note that the name +** length limit is in UTF-8 bytes, not characters nor UTF-16 bytes. +** ^Any attempt to create a function with a longer name +** will result in [SQLITE_MISUSE] being returned. +** +** ^The third parameter (nArg) +** is the number of arguments that the SQL function or +** aggregate takes. ^If this parameter is -1, then the SQL function or +** aggregate may take any number of arguments between 0 and the limit +** set by [sqlite3_limit]([SQLITE_LIMIT_FUNCTION_ARG]). If the third +** parameter is less than -1 or greater than 127 then the behavior is +** undefined. +** +** ^The fourth parameter, eTextRep, specifies what +** [SQLITE_UTF8 | text encoding] this SQL function prefers for +** its parameters. The application should set this parameter to +** [SQLITE_UTF16LE] if the function implementation invokes +** [sqlite3_value_text16le()] on an input, or [SQLITE_UTF16BE] if the +** implementation invokes [sqlite3_value_text16be()] on an input, or +** [SQLITE_UTF16] if [sqlite3_value_text16()] is used, or [SQLITE_UTF8] +** otherwise. ^The same SQL function may be registered multiple times using +** different preferred text encodings, with different implementations for +** each encoding. +** ^When multiple implementations of the same function are available, SQLite +** will pick the one that involves the least amount of data conversion. +** +** ^The fourth parameter may optionally be ORed with [SQLITE_DETERMINISTIC] +** to signal that the function will always return the same result given +** the same inputs within a single SQL statement. Most SQL functions are +** deterministic. The built-in [random()] SQL function is an example of a +** function that is not deterministic. The SQLite query planner is able to +** perform additional optimizations on deterministic functions, so use +** of the [SQLITE_DETERMINISTIC] flag is recommended where possible. 
+** +** ^The fourth parameter may also optionally include the [SQLITE_DIRECTONLY] +** flag, which if present prevents the function from being invoked from +** within VIEWs, TRIGGERs, CHECK constraints, generated column expressions, +** index expressions, or the WHERE clause of partial indexes. +** +** For best security, the [SQLITE_DIRECTONLY] flag is recommended for +** all application-defined SQL functions that do not need to be +** used inside of triggers, view, CHECK constraints, or other elements of +** the database schema. This flags is especially recommended for SQL +** functions that have side effects or reveal internal application state. +** Without this flag, an attacker might be able to modify the schema of +** a database file to include invocations of the function with parameters +** chosen by the attacker, which the application will then execute when +** the database file is opened and read. +** +** ^(The fifth parameter is an arbitrary pointer. The implementation of the +** function can gain access to this pointer using [sqlite3_user_data()].)^ +** +** ^The sixth, seventh and eighth parameters passed to the three +** "sqlite3_create_function*" functions, xFunc, xStep and xFinal, are +** pointers to C-language functions that implement the SQL function or +** aggregate. ^A scalar SQL function requires an implementation of the xFunc +** callback only; NULL pointers must be passed as the xStep and xFinal +** parameters. ^An aggregate SQL function requires an implementation of xStep +** and xFinal and NULL pointer must be passed for xFunc. ^To delete an existing +** SQL function or aggregate, pass NULL pointers for all three function +** callbacks. +** +** ^The sixth, seventh, eighth and ninth parameters (xStep, xFinal, xValue +** and xInverse) passed to sqlite3_create_window_function are pointers to +** C-language callbacks that implement the new function. xStep and xFinal +** must both be non-NULL. 
xValue and xInverse may either both be NULL, in +** which case a regular aggregate function is created, or must both be +** non-NULL, in which case the new function may be used as either an aggregate +** or aggregate window function. More details regarding the implementation +** of aggregate window functions are +** [user-defined window functions|available here]. +** +** ^(If the final parameter to sqlite3_create_function_v2() or +** sqlite3_create_window_function() is not NULL, then it is destructor for +** the application data pointer. The destructor is invoked when the function +** is deleted, either by being overloaded or when the database connection +** closes.)^ ^The destructor is also invoked if the call to +** sqlite3_create_function_v2() fails. ^When the destructor callback is +** invoked, it is passed a single argument which is a copy of the application +** data pointer which was the fifth parameter to sqlite3_create_function_v2(). +** +** ^It is permitted to register multiple implementations of the same +** functions with the same name but with either differing numbers of +** arguments or differing preferred text encodings. ^SQLite will use +** the implementation that most closely matches the way in which the +** SQL function is used. ^A function implementation with a non-negative +** nArg parameter is a better match than a function implementation with +** a negative nArg. ^A function where the preferred text encoding +** matches the database encoding is a better +** match than a function where the encoding is different. +** ^A function where the encoding difference is between UTF16le and UTF16be +** is a closer match than a function where the encoding difference is +** between UTF8 and UTF16. +** +** ^Built-in functions may be overloaded by new application-defined functions. +** +** ^An application-defined function is permitted to call other +** SQLite interfaces. 
However, such calls must not +** close the database connection nor finalize or reset the prepared +** statement in which the function is running. +*/ +SQLITE_API int sqlite3_create_function( + sqlite3 *db, + const char *zFunctionName, + int nArg, + int eTextRep, + void *pApp, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*) +); +SQLITE_API int sqlite3_create_function16( + sqlite3 *db, + const void *zFunctionName, + int nArg, + int eTextRep, + void *pApp, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*) +); +SQLITE_API int sqlite3_create_function_v2( + sqlite3 *db, + const char *zFunctionName, + int nArg, + int eTextRep, + void *pApp, + void (*xFunc)(sqlite3_context*,int,sqlite3_value**), + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*), + void(*xDestroy)(void*) +); +SQLITE_API int sqlite3_create_window_function( + sqlite3 *db, + const char *zFunctionName, + int nArg, + int eTextRep, + void *pApp, + void (*xStep)(sqlite3_context*,int,sqlite3_value**), + void (*xFinal)(sqlite3_context*), + void (*xValue)(sqlite3_context*), + void (*xInverse)(sqlite3_context*,int,sqlite3_value**), + void(*xDestroy)(void*) +); + +/* +** CAPI3REF: Text Encodings +** +** These constant define integer codes that represent the various +** text encodings supported by SQLite. 
+*/ +#define SQLITE_UTF8 1 /* IMP: R-37514-35566 */ +#define SQLITE_UTF16LE 2 /* IMP: R-03371-37637 */ +#define SQLITE_UTF16BE 3 /* IMP: R-51971-34154 */ +#define SQLITE_UTF16 4 /* Use native byte order */ +#define SQLITE_ANY 5 /* Deprecated */ +#define SQLITE_UTF16_ALIGNED 8 /* sqlite3_create_collation only */ + +/* +** CAPI3REF: Function Flags +** +** These constants may be ORed together with the +** [SQLITE_UTF8 | preferred text encoding] as the fourth argument +** to [sqlite3_create_function()], [sqlite3_create_function16()], or +** [sqlite3_create_function_v2()]. +** +**
+** [[SQLITE_DETERMINISTIC]]
SQLITE_DETERMINISTIC
+** The SQLITE_DETERMINISTIC flag means that the new function always gives +** the same output when the input parameters are the same. +** The [abs|abs() function] is deterministic, for example, but +** [randomblob|randomblob()] is not. Functions must +** be deterministic in order to be used in certain contexts such as +** with the WHERE clause of [partial indexes] or in [generated columns]. +** SQLite might also optimize deterministic functions by factoring them +** out of inner loops. +**
+** +** [[SQLITE_DIRECTONLY]]
SQLITE_DIRECTONLY
+** The SQLITE_DIRECTONLY flag means that the function may only be invoked +** from top-level SQL, and cannot be used in VIEWs or TRIGGERs nor in +** schema structures such as [CHECK constraints], [DEFAULT clauses], +** [expression indexes], [partial indexes], or [generated columns]. +**

+** The SQLITE_DIRECTONLY flag is recommended for any +** [application-defined SQL function] +** that has side-effects or that could potentially leak sensitive information. +** This will prevent attacks in which an application is tricked +** into using a database file that has had its schema surreptitiously +** modified to invoke the application-defined function in ways that are +** harmful. +**

+** Some people say it is good practice to set SQLITE_DIRECTONLY on all +** [application-defined SQL functions], regardless of whether or not they +** are security sensitive, as doing so prevents those functions from being used +** inside of the database schema, and thus ensures that the database +** can be inspected and modified using generic tools (such as the [CLI]) +** that do not have access to the application-defined functions. +**

+** +** [[SQLITE_INNOCUOUS]]
SQLITE_INNOCUOUS
+** The SQLITE_INNOCUOUS flag means that the function is unlikely +** to cause problems even if misused. An innocuous function should have +** no side effects and should not depend on any values other than its +** input parameters. The [abs|abs() function] is an example of an +** innocuous function. +** The [load_extension() SQL function] is not innocuous because of its +** side effects. +**

SQLITE_INNOCUOUS is similar to SQLITE_DETERMINISTIC, but is not +** exactly the same. The [random|random() function] is an example of a +** function that is innocuous but not deterministic. +**

Some heightened security settings +** ([SQLITE_DBCONFIG_TRUSTED_SCHEMA] and [PRAGMA trusted_schema=OFF]) +** disable the use of SQL functions inside views and triggers and in +** schema structures such as [CHECK constraints], [DEFAULT clauses], +** [expression indexes], [partial indexes], and [generated columns] unless +** the function is tagged with SQLITE_INNOCUOUS. Most built-in functions +** are innocuous. Developers are advised to avoid using the +** SQLITE_INNOCUOUS flag for application-defined functions unless the +** function has been carefully audited and found to be free of potentially +** security-adverse side-effects and information-leaks. +**

+** +** [[SQLITE_SUBTYPE]]
SQLITE_SUBTYPE
+** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call +** [sqlite3_value_subtype()] to inspect the sub-types of its arguments. +** This flag instructs SQLite to omit some corner-case optimizations that +** might disrupt the operation of the [sqlite3_value_subtype()] function, +** causing it to return zero rather than the correct subtype(). +** SQL functions that invokes [sqlite3_value_subtype()] should have this +** property. If the SQLITE_SUBTYPE property is omitted, then the return +** value from [sqlite3_value_subtype()] might sometimes be zero even though +** a non-zero subtype was specified by the function argument expression. +** +** [[SQLITE_RESULT_SUBTYPE]]
SQLITE_RESULT_SUBTYPE
+** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call +** [sqlite3_result_subtype()] to cause a sub-type to be associated with its +** result. +** Every function that invokes [sqlite3_result_subtype()] should have this +** property. If it does not, then the call to [sqlite3_result_subtype()] +** might become a no-op if the function is used as term in an +** [expression index]. On the other hand, SQL functions that never invoke +** [sqlite3_result_subtype()] should avoid setting this property, as the +** purpose of this property is to disable certain optimizations that are +** incompatible with subtypes. +**
+**
+*/ +#define SQLITE_DETERMINISTIC 0x000000800 +#define SQLITE_DIRECTONLY 0x000080000 +#define SQLITE_SUBTYPE 0x000100000 +#define SQLITE_INNOCUOUS 0x000200000 +#define SQLITE_RESULT_SUBTYPE 0x001000000 + +/* +** CAPI3REF: Deprecated Functions +** DEPRECATED +** +** These functions are [deprecated]. In order to maintain +** backwards compatibility with older code, these functions continue +** to be supported. However, new applications should avoid +** the use of these functions. To encourage programmers to avoid +** these functions, we will not explain what they do. +*/ +#ifndef SQLITE_OMIT_DEPRECATED +SQLITE_API SQLITE_DEPRECATED int sqlite3_aggregate_count(sqlite3_context*); +SQLITE_API SQLITE_DEPRECATED int sqlite3_expired(sqlite3_stmt*); +SQLITE_API SQLITE_DEPRECATED int sqlite3_transfer_bindings(sqlite3_stmt*, sqlite3_stmt*); +SQLITE_API SQLITE_DEPRECATED int sqlite3_global_recover(void); +SQLITE_API SQLITE_DEPRECATED void sqlite3_thread_cleanup(void); +SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int64,int), + void*,sqlite3_int64); +#endif + +/* +** CAPI3REF: Obtaining SQL Values +** METHOD: sqlite3_value +** +** Summary: +**
+**
sqlite3_value_blobBLOB value +**
sqlite3_value_doubleREAL value +**
sqlite3_value_int32-bit INTEGER value +**
sqlite3_value_int6464-bit INTEGER value +**
sqlite3_value_pointerPointer value +**
sqlite3_value_textUTF-8 TEXT value +**
sqlite3_value_text16UTF-16 TEXT value in +** the native byteorder +**
sqlite3_value_text16beUTF-16be TEXT value +**
sqlite3_value_text16leUTF-16le TEXT value +**
    +**
sqlite3_value_bytesSize of a BLOB +** or a UTF-8 TEXT in bytes +**
sqlite3_value_bytes16   +** →  Size of UTF-16 +** TEXT in bytes +**
sqlite3_value_typeDefault +** datatype of the value +**
sqlite3_value_numeric_type   +** →  Best numeric datatype of the value +**
sqlite3_value_nochange   +** →  True if the column is unchanged in an UPDATE +** against a virtual table. +**
sqlite3_value_frombind   +** →  True if value originated from a [bound parameter] +**
+** +** Details: +** +** These routines extract type, size, and content information from +** [protected sqlite3_value] objects. Protected sqlite3_value objects +** are used to pass parameter information into the functions that +** implement [application-defined SQL functions] and [virtual tables]. +** +** These routines work only with [protected sqlite3_value] objects. +** Any attempt to use these routines on an [unprotected sqlite3_value] +** is not threadsafe. +** +** ^These routines work just like the corresponding [column access functions] +** except that these routines take a single [protected sqlite3_value] object +** pointer instead of a [sqlite3_stmt*] pointer and an integer column number. +** +** ^The sqlite3_value_text16() interface extracts a UTF-16 string +** in the native byte-order of the host machine. ^The +** sqlite3_value_text16be() and sqlite3_value_text16le() interfaces +** extract UTF-16 strings as big-endian and little-endian respectively. +** +** ^If [sqlite3_value] object V was initialized +** using [sqlite3_bind_pointer(S,I,P,X,D)] or [sqlite3_result_pointer(C,P,X,D)] +** and if X and Y are strings that compare equal according to strcmp(X,Y), +** then sqlite3_value_pointer(V,Y) will return the pointer P. ^Otherwise, +** sqlite3_value_pointer(V,Y) returns a NULL. The sqlite3_bind_pointer() +** routine is part of the [pointer passing interface] added for SQLite 3.20.0. +** +** ^(The sqlite3_value_type(V) interface returns the +** [SQLITE_INTEGER | datatype code] for the initial datatype of the +** [sqlite3_value] object V. The returned value is one of [SQLITE_INTEGER], +** [SQLITE_FLOAT], [SQLITE_TEXT], [SQLITE_BLOB], or [SQLITE_NULL].)^ +** Other interfaces might change the datatype for an sqlite3_value object. +** For example, if the datatype is initially SQLITE_INTEGER and +** sqlite3_value_text(V) is called to extract a text value for that +** integer, then subsequent calls to sqlite3_value_type(V) might return +** SQLITE_TEXT. 
Whether or not a persistent internal datatype conversion +** occurs is undefined and may change from one release of SQLite to the next. +** +** ^(The sqlite3_value_numeric_type() interface attempts to apply +** numeric affinity to the value. This means that an attempt is +** made to convert the value to an integer or floating point. If +** such a conversion is possible without loss of information (in other +** words, if the value is a string that looks like a number) +** then the conversion is performed. Otherwise no conversion occurs. +** The [SQLITE_INTEGER | datatype] after conversion is returned.)^ +** +** ^Within the [xUpdate] method of a [virtual table], the +** sqlite3_value_nochange(X) interface returns true if and only if +** the column corresponding to X is unchanged by the UPDATE operation +** that the xUpdate method call was invoked to implement and if +** and the prior [xColumn] method call that was invoked to extracted +** the value for that column returned without setting a result (probably +** because it queried [sqlite3_vtab_nochange()] and found that the column +** was unchanging). ^Within an [xUpdate] method, any value for which +** sqlite3_value_nochange(X) is true will in all other respects appear +** to be a NULL value. If sqlite3_value_nochange(X) is invoked anywhere other +** than within an [xUpdate] method call for an UPDATE statement, then +** the return value is arbitrary and meaningless. +** +** ^The sqlite3_value_frombind(X) interface returns non-zero if the +** value X originated from one of the [sqlite3_bind_int|sqlite3_bind()] +** interfaces. ^If X comes from an SQL literal value, or a table column, +** or an expression, then sqlite3_value_frombind(X) returns zero. 
+** +** Please pay particular attention to the fact that the pointer returned +** from [sqlite3_value_blob()], [sqlite3_value_text()], or +** [sqlite3_value_text16()] can be invalidated by a subsequent call to +** [sqlite3_value_bytes()], [sqlite3_value_bytes16()], [sqlite3_value_text()], +** or [sqlite3_value_text16()]. +** +** These routines must be called from the same thread as +** the SQL function that supplied the [sqlite3_value*] parameters. +** +** As long as the input parameter is correct, these routines can only +** fail if an out-of-memory error occurs during a format conversion. +** Only the following subset of interfaces are subject to out-of-memory +** errors: +** +**
    +**
  • sqlite3_value_blob() +**
  • sqlite3_value_text() +**
  • sqlite3_value_text16() +**
  • sqlite3_value_text16le() +**
  • sqlite3_value_text16be() +**
  • sqlite3_value_bytes() +**
  • sqlite3_value_bytes16() +**
+** +** If an out-of-memory error occurs, then the return value from these +** routines is the same as if the column had contained an SQL NULL value. +** Valid SQL NULL returns can be distinguished from out-of-memory errors +** by invoking the [sqlite3_errcode()] immediately after the suspect +** return value is obtained and before any +** other SQLite interface is called on the same [database connection]. +*/ +SQLITE_API const void *sqlite3_value_blob(sqlite3_value*); +SQLITE_API double sqlite3_value_double(sqlite3_value*); +SQLITE_API int sqlite3_value_int(sqlite3_value*); +SQLITE_API sqlite3_int64 sqlite3_value_int64(sqlite3_value*); +SQLITE_API void *sqlite3_value_pointer(sqlite3_value*, const char*); +SQLITE_API const unsigned char *sqlite3_value_text(sqlite3_value*); +SQLITE_API const void *sqlite3_value_text16(sqlite3_value*); +SQLITE_API const void *sqlite3_value_text16le(sqlite3_value*); +SQLITE_API const void *sqlite3_value_text16be(sqlite3_value*); +SQLITE_API int sqlite3_value_bytes(sqlite3_value*); +SQLITE_API int sqlite3_value_bytes16(sqlite3_value*); +SQLITE_API int sqlite3_value_type(sqlite3_value*); +SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*); +SQLITE_API int sqlite3_value_nochange(sqlite3_value*); +SQLITE_API int sqlite3_value_frombind(sqlite3_value*); + +/* +** CAPI3REF: Report the internal text encoding state of an sqlite3_value object +** METHOD: sqlite3_value +** +** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8], +** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current text encoding +** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X) +** returns something other than SQLITE_TEXT, then the return value from +** sqlite3_value_encoding(X) is meaningless. 
^Calls to
+** [sqlite3_value_text(X)], [sqlite3_value_text16(X)], [sqlite3_value_text16be(X)],
+** [sqlite3_value_text16le(X)], [sqlite3_value_bytes(X)], or
+** [sqlite3_value_bytes16(X)] might change the encoding of the value X and
+** thus change the return from subsequent calls to sqlite3_value_encoding(X).
+**
+** This routine is intended for use by applications that test and validate
+** the SQLite implementation. This routine is inquiring about the opaque
+** internal state of an [sqlite3_value] object. Ordinary applications should
+** not need to know what the internal state of an sqlite3_value object is and
+** hence should not need to use this interface.
+*/
+SQLITE_API int sqlite3_value_encoding(sqlite3_value*);
+
+/*
+** CAPI3REF: Finding The Subtype Of SQL Values
+** METHOD: sqlite3_value
+**
+** The sqlite3_value_subtype(V) function returns the subtype for
+** an [application-defined SQL function] argument V. The subtype
+** information can be used to pass a limited amount of context from
+** one SQL function to another. Use the [sqlite3_result_subtype()]
+** routine to set the subtype for the return value of an SQL function.
+**
+** Every [application-defined SQL function] that invokes this interface
+** should include the [SQLITE_SUBTYPE] property in the text
+** encoding argument when the function is [sqlite3_create_function|registered].
+** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype()
+** might return zero instead of the upstream subtype in some corner cases.
+*/
+SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
+
+/*
+** CAPI3REF: Copy And Free SQL Values
+** METHOD: sqlite3_value
+**
+** ^The sqlite3_value_dup(V) interface makes a copy of the [sqlite3_value]
+** object V and returns a pointer to that copy. ^The [sqlite3_value] returned
+** is a [protected sqlite3_value] object even if the input is not.
+** ^The sqlite3_value_dup(V) interface returns NULL if V is NULL or if a
+** memory allocation fails.
^If V is a [pointer value], then the result +** of sqlite3_value_dup(V) is a NULL value. +** +** ^The sqlite3_value_free(V) interface frees an [sqlite3_value] object +** previously obtained from [sqlite3_value_dup()]. ^If V is a NULL pointer +** then sqlite3_value_free(V) is a harmless no-op. +*/ +SQLITE_API sqlite3_value *sqlite3_value_dup(const sqlite3_value*); +SQLITE_API void sqlite3_value_free(sqlite3_value*); + +/* +** CAPI3REF: Obtain Aggregate Function Context +** METHOD: sqlite3_context +** +** Implementations of aggregate SQL functions use this +** routine to allocate memory for storing their state. +** +** ^The first time the sqlite3_aggregate_context(C,N) routine is called +** for a particular aggregate function, SQLite allocates +** N bytes of memory, zeroes out that memory, and returns a pointer +** to the new memory. ^On second and subsequent calls to +** sqlite3_aggregate_context() for the same aggregate function instance, +** the same buffer is returned. Sqlite3_aggregate_context() is normally +** called once for each invocation of the xStep callback and then one +** last time when the xFinal callback is invoked. ^(When no rows match +** an aggregate query, the xStep() callback of the aggregate function +** implementation is never called and xFinal() is called exactly once. +** In those cases, sqlite3_aggregate_context() might be called for the +** first time from within xFinal().)^ +** +** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer +** when first called if N is less than or equal to zero or if a memory +** allocation error occurs. +** +** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is +** determined by the N parameter on first successful call. 
Changing the +** value of N in any subsequent call to sqlite3_aggregate_context() within +** the same aggregate function instance will not resize the memory +** allocation.)^ Within the xFinal callback, it is customary to set +** N=0 in calls to sqlite3_aggregate_context(C,N) so that no +** pointless memory allocations occur. +** +** ^SQLite automatically frees the memory allocated by +** sqlite3_aggregate_context() when the aggregate query concludes. +** +** The first parameter must be a copy of the +** [sqlite3_context | SQL function context] that is the first parameter +** to the xStep or xFinal callback routine that implements the aggregate +** function. +** +** This routine must be called from the same thread in which +** the aggregate SQL function is running. +*/ +SQLITE_API void *sqlite3_aggregate_context(sqlite3_context*, int nBytes); + +/* +** CAPI3REF: User Data For Functions +** METHOD: sqlite3_context +** +** ^The sqlite3_user_data() interface returns a copy of +** the pointer that was the pUserData parameter (the 5th parameter) +** of the [sqlite3_create_function()] +** and [sqlite3_create_function16()] routines that originally +** registered the application defined function. +** +** This routine must be called from the same thread in which +** the application-defined function is running. +*/ +SQLITE_API void *sqlite3_user_data(sqlite3_context*); + +/* +** CAPI3REF: Database Connection For Functions +** METHOD: sqlite3_context +** +** ^The sqlite3_context_db_handle() interface returns a copy of +** the pointer to the [database connection] (the 1st parameter) +** of the [sqlite3_create_function()] +** and [sqlite3_create_function16()] routines that originally +** registered the application defined function. 
+*/ +SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); + +/* +** CAPI3REF: Function Auxiliary Data +** METHOD: sqlite3_context +** +** These functions may be used by (non-aggregate) SQL functions to +** associate auxiliary data with argument values. If the same argument +** value is passed to multiple invocations of the same SQL function during +** query execution, under some circumstances the associated auxiliary data +** might be preserved. An example of where this might be useful is in a +** regular-expression matching function. The compiled version of the regular +** expression can be stored as auxiliary data associated with the pattern string. +** Then as long as the pattern string remains the same, +** the compiled regular expression can be reused on multiple +** invocations of the same function. +** +** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data +** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument +** value to the application-defined function. ^N is zero for the left-most +** function argument. ^If there is no auxiliary data +** associated with the function argument, the sqlite3_get_auxdata(C,N) interface +** returns a NULL pointer. +** +** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the +** N-th argument of the application-defined function. ^Subsequent +** calls to sqlite3_get_auxdata(C,N) return P from the most recent +** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or +** NULL if the auxiliary data has been discarded. +** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, +** SQLite will invoke the destructor function X with parameter P exactly +** once, when the auxiliary data is discarded. +** SQLite is free to discard the auxiliary data at any time, including:
    +**
  • ^(when the corresponding function parameter changes)^, or +**
  • ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the +** SQL statement)^, or +**
  • ^(when sqlite3_set_auxdata() is invoked again on the same +** parameter)^, or +**
  • ^(during the original sqlite3_set_auxdata() call when a memory +** allocation error occurs.)^ +**
  • ^(during the original sqlite3_set_auxdata() call if the function +** is evaluated during query planning instead of during query execution, +** as sometimes happens with [SQLITE_ENABLE_STAT4].)^
+** +** Note the last two bullets in particular. The destructor X in +** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the +** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() +** should be called near the end of the function implementation and the +** function implementation should not make any use of P after +** sqlite3_set_auxdata() has been called. Furthermore, a call to +** sqlite3_get_auxdata() that occurs immediately after a corresponding call +** to sqlite3_set_auxdata() might still return NULL if an out-of-memory +** condition occurred during the sqlite3_set_auxdata() call or if the +** function is being evaluated during query planning rather than during +** query execution. +** +** ^(In practice, auxiliary data is preserved between function calls for +** function parameters that are compile-time constants, including literal +** values and [parameters] and expressions composed from the same.)^ +** +** The value of the N parameter to these interfaces should be non-negative. +** Future enhancements may make use of negative N values to define new +** kinds of function caching behavior. +** +** These routines must be called from the same thread in which +** the SQL function is running. +** +** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()]. +*/ +SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N); +SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); + +/* +** CAPI3REF: Database Connection Client Data +** METHOD: sqlite3 +** +** These functions are used to associate one or more named pointers +** with a [database connection]. +** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P +** to be attached to [database connection] D using name N. Subsequent +** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P +** or a NULL pointer if there were no prior calls to +** sqlite3_set_clientdata() with the same values of D and N. 
+** Names are compared using strcmp() and are thus case sensitive. +** +** If P and X are both non-NULL, then the destructor X is invoked with +** argument P on the first of the following occurrences: +**
    +**
  • An out-of-memory error occurs during the call to +** sqlite3_set_clientdata() which attempts to register pointer P. +**
  • A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made +** with the same D and N parameters. +**
  • The database connection closes. SQLite does not make any guarantees +** about the order in which destructors are called, only that all +** destructors will be called exactly once at some point during the +** database connection closing process. +**
+**
+** SQLite does not do anything with client data other than invoke
+** destructors on the client data at the appropriate time. The intended
+** use for client data is to provide a mechanism for wrapper libraries
+** to store additional information about an SQLite database connection.
+**
+** There is no limit (other than available memory) on the number of different
+** client data pointers (with different names) that can be attached to a
+** single database connection. However, the implementation is optimized
+** for the case of having only one or two different client data names.
+** Applications and wrapper libraries are discouraged from using more than
+** one client data name each.
+**
+** There is no way to enumerate the client data pointers
+** associated with a database connection. The N parameter can be thought
+** of as a secret key such that only code that knows the secret key is able
+** to access the associated data.
+**
+** Security Warning: These interfaces should not be exposed in scripting
+** languages or in other circumstances where it might be possible for an
+** attacker to invoke them. Any agent that can invoke these interfaces
+** can probably also take control of the process.
+**
+** Database connection client data is only available for SQLite
+** version 3.44.0 ([dateof:3.44.0]) and later.
+**
+** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()].
+*/
+SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*);
+SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*));
+
+/*
+** CAPI3REF: Constants Defining Special Destructor Behavior
+**
+** These are special values for the destructor that is passed in as the
+** final argument to routines like [sqlite3_result_blob()]. ^If the destructor
+** argument is SQLITE_STATIC, it means that the content pointer is constant
+** and will never change. It does not need to be destroyed.
^The +** SQLITE_TRANSIENT value means that the content will likely change in +** the near future and that SQLite should make its own private copy of +** the content before returning. +** +** The typedef is necessary to work around problems in certain +** C++ compilers. +*/ +typedef void (*sqlite3_destructor_type)(void*); +#define SQLITE_STATIC ((sqlite3_destructor_type)0) +#define SQLITE_TRANSIENT ((sqlite3_destructor_type)-1) + +/* +** CAPI3REF: Setting The Result Of An SQL Function +** METHOD: sqlite3_context +** +** These routines are used by the xFunc or xFinal callbacks that +** implement SQL functions and aggregates. See +** [sqlite3_create_function()] and [sqlite3_create_function16()] +** for additional information. +** +** These functions work very much like the [parameter binding] family of +** functions used to bind values to host parameters in prepared statements. +** Refer to the [SQL parameter] documentation for additional information. +** +** ^The sqlite3_result_blob() interface sets the result from +** an application-defined function to be the BLOB whose content is pointed +** to by the second parameter and which is N bytes long where N is the +** third parameter. +** +** ^The sqlite3_result_zeroblob(C,N) and sqlite3_result_zeroblob64(C,N) +** interfaces set the result of the application-defined function to be +** a BLOB containing all zero bytes and N bytes in size. +** +** ^The sqlite3_result_double() interface sets the result from +** an application-defined function to be a floating point value specified +** by its 2nd argument. +** +** ^The sqlite3_result_error() and sqlite3_result_error16() functions +** cause the implemented SQL function to throw an exception. +** ^SQLite uses the string pointed to by the +** 2nd parameter of sqlite3_result_error() or sqlite3_result_error16() +** as the text of an error message. ^SQLite interprets the error +** message string from sqlite3_result_error() as UTF-8. 
^SQLite +** interprets the string from sqlite3_result_error16() as UTF-16 using +** the same [byte-order determination rules] as [sqlite3_bind_text16()]. +** ^If the third parameter to sqlite3_result_error() +** or sqlite3_result_error16() is negative then SQLite takes as the error +** message all text up through the first zero character. +** ^If the third parameter to sqlite3_result_error() or +** sqlite3_result_error16() is non-negative then SQLite takes that many +** bytes (not characters) from the 2nd parameter as the error message. +** ^The sqlite3_result_error() and sqlite3_result_error16() +** routines make a private copy of the error message text before +** they return. Hence, the calling function can deallocate or +** modify the text after they return without harm. +** ^The sqlite3_result_error_code() function changes the error code +** returned by SQLite as a result of an error in a function. ^By default, +** the error code is SQLITE_ERROR. ^A subsequent call to sqlite3_result_error() +** or sqlite3_result_error16() resets the error code to SQLITE_ERROR. +** +** ^The sqlite3_result_error_toobig() interface causes SQLite to throw an +** error indicating that a string or BLOB is too long to represent. +** +** ^The sqlite3_result_error_nomem() interface causes SQLite to throw an +** error indicating that a memory allocation failed. +** +** ^The sqlite3_result_int() interface sets the return value +** of the application-defined function to be the 32-bit signed integer +** value given in the 2nd argument. +** ^The sqlite3_result_int64() interface sets the return value +** of the application-defined function to be the 64-bit signed integer +** value given in the 2nd argument. +** +** ^The sqlite3_result_null() interface sets the return value +** of the application-defined function to be NULL. 
+**
+** ^The sqlite3_result_text(), sqlite3_result_text16(),
+** sqlite3_result_text16le(), and sqlite3_result_text16be() interfaces
+** set the return value of the application-defined function to be
+** a text string which is represented as UTF-8, UTF-16 native byte order,
+** UTF-16 little endian, or UTF-16 big endian, respectively.
+** ^The sqlite3_result_text64() interface sets the return value of an
+** application-defined function to be a text string in an encoding
+** specified by the fifth (and last) parameter, which must be one
+** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE].
+** ^SQLite takes the text result from the application from
+** the 2nd parameter of the sqlite3_result_text* interfaces.
+** ^If the 3rd parameter to any of the sqlite3_result_text* interfaces
+** other than sqlite3_result_text64() is negative, then SQLite computes
+** the string length itself by searching the 2nd parameter for the first
+** zero character.
+** ^If the 3rd parameter to the sqlite3_result_text* interfaces
+** is non-negative, then as many bytes (not characters) of the text
+** pointed to by the 2nd parameter are taken as the application-defined
+** function result. If the 3rd parameter is non-negative, then it
+** must be the byte offset into the string where the NUL terminator would
+** appear if the string were NUL terminated. If any NUL characters occur
+** in the string at a byte offset that is less than the value of the 3rd
+** parameter, then the resulting string will contain embedded NULs and the
+** result of expressions operating on strings with embedded NULs is undefined.
+** ^If the 4th parameter to the sqlite3_result_text* interfaces
+** or sqlite3_result_blob is a non-NULL pointer, then SQLite calls that
+** function as the destructor on the text or BLOB result when it has
+** finished using that result.
+** ^If the 4th parameter to the sqlite3_result_text* interfaces or to +** sqlite3_result_blob is the special constant SQLITE_STATIC, then SQLite +** assumes that the text or BLOB result is in constant space and does not +** copy the content of the parameter nor call a destructor on the content +** when it has finished using that result. +** ^If the 4th parameter to the sqlite3_result_text* interfaces +** or sqlite3_result_blob is the special constant SQLITE_TRANSIENT +** then SQLite makes a copy of the result into space obtained +** from [sqlite3_malloc()] before it returns. +** +** ^For the sqlite3_result_text16(), sqlite3_result_text16le(), and +** sqlite3_result_text16be() routines, and for sqlite3_result_text64() +** when the encoding is not UTF8, if the input UTF16 begins with a +** byte-order mark (BOM, U+FEFF) then the BOM is removed from the +** string and the rest of the string is interpreted according to the +** byte-order specified by the BOM. ^The byte-order specified by +** the BOM at the beginning of the text overrides the byte-order +** specified by the interface procedure. ^So, for example, if +** sqlite3_result_text16le() is invoked with text that begins +** with bytes 0xfe, 0xff (a big-endian byte-order mark) then the +** first two bytes of input are skipped and the remaining input +** is interpreted as UTF16BE text. +** +** ^For UTF16 input text to the sqlite3_result_text16(), +** sqlite3_result_text16be(), sqlite3_result_text16le(), and +** sqlite3_result_text64() routines, if the text contains invalid +** UTF16 characters, the invalid characters might be converted +** into the unicode replacement character, U+FFFD. +** +** ^The sqlite3_result_value() interface sets the result of +** the application-defined function to be a copy of the +** [unprotected sqlite3_value] object specified by the 2nd parameter. 
^The
+** sqlite3_result_value() interface makes a copy of the [sqlite3_value]
+** so that the [sqlite3_value] specified in the parameter may change or
+** be deallocated after sqlite3_result_value() returns without harm.
+** ^A [protected sqlite3_value] object may always be used where an
+** [unprotected sqlite3_value] object is required, so either
+** kind of [sqlite3_value] object can be used with this interface.
+**
+** ^The sqlite3_result_pointer(C,P,T,D) interface sets the result to an
+** SQL NULL value, just like [sqlite3_result_null(C)], except that it
+** also associates the host-language pointer P of type T with that
+** NULL value such that the pointer can be retrieved within an
+** [application-defined SQL function] using [sqlite3_value_pointer()].
+** ^If the D parameter is not NULL, then it is a pointer to a destructor
+** for the P parameter. ^SQLite invokes D with P as its only argument
+** when SQLite is finished with P. The T parameter should be a static
+** string and preferably a string literal. The sqlite3_result_pointer()
+** routine is part of the [pointer passing interface] added for SQLite 3.20.0.
+**
+** If these routines are called from within a different thread
+** than the one containing the application-defined function that received
+** the [sqlite3_context] pointer, the results are undefined.
+*/ +SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int, void(*)(void*)); +SQLITE_API void sqlite3_result_blob64(sqlite3_context*,const void*, + sqlite3_uint64,void(*)(void*)); +SQLITE_API void sqlite3_result_double(sqlite3_context*, double); +SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int); +SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int); +SQLITE_API void sqlite3_result_error_toobig(sqlite3_context*); +SQLITE_API void sqlite3_result_error_nomem(sqlite3_context*); +SQLITE_API void sqlite3_result_error_code(sqlite3_context*, int); +SQLITE_API void sqlite3_result_int(sqlite3_context*, int); +SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64); +SQLITE_API void sqlite3_result_null(sqlite3_context*); +SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int, void(*)(void*)); +SQLITE_API void sqlite3_result_text64(sqlite3_context*, const char*,sqlite3_uint64, + void(*)(void*), unsigned char encoding); +SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*)); +SQLITE_API void sqlite3_result_text16le(sqlite3_context*, const void*, int,void(*)(void*)); +SQLITE_API void sqlite3_result_text16be(sqlite3_context*, const void*, int,void(*)(void*)); +SQLITE_API void sqlite3_result_value(sqlite3_context*, sqlite3_value*); +SQLITE_API void sqlite3_result_pointer(sqlite3_context*, void*,const char*,void(*)(void*)); +SQLITE_API void sqlite3_result_zeroblob(sqlite3_context*, int n); +SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n); + + +/* +** CAPI3REF: Setting The Subtype Of An SQL Function +** METHOD: sqlite3_context +** +** The sqlite3_result_subtype(C,T) function causes the subtype of +** the result from the [application-defined SQL function] with +** [sqlite3_context] C to be the value T. Only the lower 8 bits +** of the subtype T are preserved in current versions of SQLite; +** higher order bits are discarded. 
+** The number of subtype bytes preserved by SQLite might increase +** in future releases of SQLite. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_RESULT_SUBTYPE] property in its +** text encoding argument when the SQL function is +** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE] +** property is omitted from the function that invokes sqlite3_result_subtype(), +** then in some cases the sqlite3_result_subtype() might fail to set +** the result subtype. +** +** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any +** SQL function that invokes the sqlite3_result_subtype() interface +** and that does not have the SQLITE_RESULT_SUBTYPE property will raise +** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1 +** by default. +*/ +SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int); + +/* +** CAPI3REF: Define New Collating Sequences +** METHOD: sqlite3 +** +** ^These functions add, remove, or modify a [collation] associated +** with the [database connection] specified as the first argument. +** +** ^The name of the collation is a UTF-8 string +** for sqlite3_create_collation() and sqlite3_create_collation_v2() +** and a UTF-16 string in native byte order for sqlite3_create_collation16(). +** ^Collation names that compare equal according to [sqlite3_strnicmp()] are +** considered to be the same name. +** +** ^(The third argument (eTextRep) must be one of the constants: +**
    +**
  • [SQLITE_UTF8], +**
  • [SQLITE_UTF16LE], +**
  • [SQLITE_UTF16BE], +**
  • [SQLITE_UTF16], or +**
  • [SQLITE_UTF16_ALIGNED]. +**
)^ +** ^The eTextRep argument determines the encoding of strings passed +** to the collating function callback, xCompare. +** ^The [SQLITE_UTF16] and [SQLITE_UTF16_ALIGNED] values for eTextRep +** force strings to be UTF16 with native byte order. +** ^The [SQLITE_UTF16_ALIGNED] value for eTextRep forces strings to begin +** on an even byte address. +** +** ^The fourth argument, pArg, is an application data pointer that is passed +** through as the first argument to the collating function callback. +** +** ^The fifth argument, xCompare, is a pointer to the collating function. +** ^Multiple collating functions can be registered using the same name but +** with different eTextRep parameters and SQLite will use whichever +** function requires the least amount of data transformation. +** ^If the xCompare argument is NULL then the collating function is +** deleted. ^When all collating functions having the same name are deleted, +** that collation is no longer usable. +** +** ^The collating function callback is invoked with a copy of the pArg +** application data pointer and with two strings in the encoding specified +** by the eTextRep argument. The two integer parameters to the collating +** function callback are the length of the two strings, in bytes. The collating +** function must return an integer that is negative, zero, or positive +** if the first string is less than, equal to, or greater than the second, +** respectively. A collating function must always return the same answer +** given the same inputs. If two or more collating functions are registered +** to the same collation name (using different eTextRep values) then all +** must give an equivalent answer when invoked with equivalent strings. +** The collating function must obey the following properties for all +** strings A, B, and C: +** +**
    +**
  1. If A==B then B==A. +**
  2. If A==B and B==C then A==C. +**
  3. If A<B THEN B>A. +**
  4. If A<B and B<C then A<C. +**
+** +** If a collating function fails any of the above constraints and that +** collating function is registered and used, then the behavior of SQLite +** is undefined. +** +** ^The sqlite3_create_collation_v2() works like sqlite3_create_collation() +** with the addition that the xDestroy callback is invoked on pArg when +** the collating function is deleted. +** ^Collating functions are deleted when they are overridden by later +** calls to the collation creation functions or when the +** [database connection] is closed using [sqlite3_close()]. +** +** ^The xDestroy callback is not called if the +** sqlite3_create_collation_v2() function fails. Applications that invoke +** sqlite3_create_collation_v2() with a non-NULL xDestroy argument should +** check the return code and dispose of the application data pointer +** themselves rather than expecting SQLite to deal with it for them. +** This is different from every other SQLite interface. The inconsistency +** is unfortunate but cannot be changed without breaking backwards +** compatibility. +** +** See also: [sqlite3_collation_needed()] and [sqlite3_collation_needed16()]. +*/ +SQLITE_API int sqlite3_create_collation( + sqlite3*, + const char *zName, + int eTextRep, + void *pArg, + int(*xCompare)(void*,int,const void*,int,const void*) +); +SQLITE_API int sqlite3_create_collation_v2( + sqlite3*, + const char *zName, + int eTextRep, + void *pArg, + int(*xCompare)(void*,int,const void*,int,const void*), + void(*xDestroy)(void*) +); +SQLITE_API int sqlite3_create_collation16( + sqlite3*, + const void *zName, + int eTextRep, + void *pArg, + int(*xCompare)(void*,int,const void*,int,const void*) +); + +/* +** CAPI3REF: Collation Needed Callbacks +** METHOD: sqlite3 +** +** ^To avoid having to register all collation sequences before a database +** can be used, a single callback function may be registered with the +** [database connection] to be invoked whenever an undefined collation +** sequence is required. 
+** +** ^If the function is registered using the sqlite3_collation_needed() API, +** then it is passed the names of undefined collation sequences as strings +** encoded in UTF-8. ^If sqlite3_collation_needed16() is used, +** the names are passed as UTF-16 in machine native byte order. +** ^A call to either function replaces the existing collation-needed callback. +** +** ^(When the callback is invoked, the first argument passed is a copy +** of the second argument to sqlite3_collation_needed() or +** sqlite3_collation_needed16(). The second argument is the database +** connection. The third argument is one of [SQLITE_UTF8], [SQLITE_UTF16BE], +** or [SQLITE_UTF16LE], indicating the most desirable form of the collation +** sequence function required. The fourth parameter is the name of the +** required collation sequence.)^ +** +** The callback function should register the desired collation using +** [sqlite3_create_collation()], [sqlite3_create_collation16()], or +** [sqlite3_create_collation_v2()]. +*/ +SQLITE_API int sqlite3_collation_needed( + sqlite3*, + void*, + void(*)(void*,sqlite3*,int eTextRep,const char*) +); +SQLITE_API int sqlite3_collation_needed16( + sqlite3*, + void*, + void(*)(void*,sqlite3*,int eTextRep,const void*) +); + +#ifdef SQLITE_ENABLE_CEROD +/* +** Specify the activation key for a CEROD database. Unless +** activated, none of the CEROD routines will work. +*/ +SQLITE_API void sqlite3_activate_cerod( + const char *zPassPhrase /* Activation phrase */ +); +#endif + +/* +** CAPI3REF: Suspend Execution For A Short Time +** +** The sqlite3_sleep() function causes the current thread to suspend execution +** for at least a number of milliseconds specified in its parameter. +** +** If the operating system does not support sleep requests with +** millisecond time resolution, then the time will be rounded up to +** the nearest second. The number of milliseconds of sleep actually +** requested from the operating system is returned. 
+** +** ^SQLite implements this interface by calling the xSleep() +** method of the default [sqlite3_vfs] object. If the xSleep() method +** of the default VFS is not implemented correctly, or not implemented at +** all, then the behavior of sqlite3_sleep() may deviate from the description +** in the previous paragraphs. +** +** If a negative argument is passed to sqlite3_sleep() the results vary by +** VFS and operating system. Some system treat a negative argument as an +** instruction to sleep forever. Others understand it to mean do not sleep +** at all. ^In SQLite version 3.42.0 and later, a negative +** argument passed into sqlite3_sleep() is changed to zero before it is relayed +** down into the xSleep method of the VFS. +*/ +SQLITE_API int sqlite3_sleep(int); + +/* +** CAPI3REF: Name Of The Folder Holding Temporary Files +** +** ^(If this global variable is made to point to a string which is +** the name of a folder (a.k.a. directory), then all temporary files +** created by SQLite when using a built-in [sqlite3_vfs | VFS] +** will be placed in that directory.)^ ^If this variable +** is a NULL pointer, then SQLite performs a search for an appropriate +** temporary file directory. +** +** Applications are strongly discouraged from using this global variable. +** It is required to set a temporary folder on Windows Runtime (WinRT). +** But for all other platforms, it is highly recommended that applications +** neither read nor write this variable. This global variable is a relic +** that exists for backwards compatibility of legacy applications and should +** be avoided in new projects. +** +** It is not safe to read or modify this variable in more than one +** thread at a time. It is not safe to read or modify this variable +** if a [database connection] is being used at the same time in a separate +** thread. 
+** It is intended that this variable be set once +** as part of process initialization and before any SQLite interface +** routines have been called and that this variable remain unchanged +** thereafter. +** +** ^The [temp_store_directory pragma] may modify this variable and cause +** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, +** the [temp_store_directory pragma] always assumes that any string +** that this variable points to is held in memory obtained from +** [sqlite3_malloc] and the pragma may attempt to free that memory +** using [sqlite3_free]. +** Hence, if this variable is modified directly, either it should be +** made NULL or made to point to memory obtained from [sqlite3_malloc] +** or else the use of the [temp_store_directory pragma] should be avoided. +** Except when requested by the [temp_store_directory pragma], SQLite +** does not free the memory that sqlite3_temp_directory points to. If +** the application wants that memory to be freed, it must do +** so itself, taking care to only do so after all [database connection] +** objects have been destroyed. +** +** Note to Windows Runtime users: The temporary directory must be set +** prior to calling [sqlite3_open] or [sqlite3_open_v2]. Otherwise, various +** features that require the use of temporary files may fail. Here is an +** example of how to do this using C++ with the Windows Runtime: +** +**
+** <blockquote><pre>
+** LPCWSTR zPath = Windows::Storage::ApplicationData::Current->
+**       TemporaryFolder->Path->Data();
+** char zPathBuf[MAX_PATH + 1];
+** memset(zPathBuf, 0, sizeof(zPathBuf));
+** WideCharToMultiByte(CP_UTF8, 0, zPath, -1, zPathBuf, sizeof(zPathBuf),
+**       NULL, NULL);
+** sqlite3_temp_directory = sqlite3_mprintf("%s", zPathBuf);
+** </pre></blockquote>
+*/ +SQLITE_API SQLITE_EXTERN char *sqlite3_temp_directory; + +/* +** CAPI3REF: Name Of The Folder Holding Database Files +** +** ^(If this global variable is made to point to a string which is +** the name of a folder (a.k.a. directory), then all database files +** specified with a relative pathname and created or accessed by +** SQLite when using a built-in windows [sqlite3_vfs | VFS] will be assumed +** to be relative to that directory.)^ ^If this variable is a NULL +** pointer, then SQLite assumes that all database files specified +** with a relative pathname are relative to the current directory +** for the process. Only the windows VFS makes use of this global +** variable; it is ignored by the unix VFS. +** +** Changing the value of this variable while a database connection is +** open can result in a corrupt database. +** +** It is not safe to read or modify this variable in more than one +** thread at a time. It is not safe to read or modify this variable +** if a [database connection] is being used at the same time in a separate +** thread. +** It is intended that this variable be set once +** as part of process initialization and before any SQLite interface +** routines have been called and that this variable remain unchanged +** thereafter. +** +** ^The [data_store_directory pragma] may modify this variable and cause +** it to point to memory obtained from [sqlite3_malloc]. ^Furthermore, +** the [data_store_directory pragma] always assumes that any string +** that this variable points to is held in memory obtained from +** [sqlite3_malloc] and the pragma may attempt to free that memory +** using [sqlite3_free]. +** Hence, if this variable is modified directly, either it should be +** made NULL or made to point to memory obtained from [sqlite3_malloc] +** or else the use of the [data_store_directory pragma] should be avoided. 
+*/ +SQLITE_API SQLITE_EXTERN char *sqlite3_data_directory; + +/* +** CAPI3REF: Win32 Specific Interface +** +** These interfaces are available only on Windows. The +** [sqlite3_win32_set_directory] interface is used to set the value associated +** with the [sqlite3_temp_directory] or [sqlite3_data_directory] variable, to +** zValue, depending on the value of the type parameter. The zValue parameter +** should be NULL to cause the previous value to be freed via [sqlite3_free]; +** a non-NULL value will be copied into memory obtained from [sqlite3_malloc] +** prior to being used. The [sqlite3_win32_set_directory] interface returns +** [SQLITE_OK] to indicate success, [SQLITE_ERROR] if the type is unsupported, +** or [SQLITE_NOMEM] if memory could not be allocated. The value of the +** [sqlite3_data_directory] variable is intended to act as a replacement for +** the current directory on the sub-platforms of Win32 where that concept is +** not present, e.g. WinRT and UWP. The [sqlite3_win32_set_directory8] and +** [sqlite3_win32_set_directory16] interfaces behave exactly the same as the +** sqlite3_win32_set_directory interface except the string parameter must be +** UTF-8 or UTF-16, respectively. +*/ +SQLITE_API int sqlite3_win32_set_directory( + unsigned long type, /* Identifier for directory being set or reset */ + void *zValue /* New value for directory being set or reset */ +); +SQLITE_API int sqlite3_win32_set_directory8(unsigned long type, const char *zValue); +SQLITE_API int sqlite3_win32_set_directory16(unsigned long type, const void *zValue); + +/* +** CAPI3REF: Win32 Directory Types +** +** These macros are only available on Windows. They define the allowed values +** for the type argument to the [sqlite3_win32_set_directory] interface. 
+*/ +#define SQLITE_WIN32_DATA_DIRECTORY_TYPE 1 +#define SQLITE_WIN32_TEMP_DIRECTORY_TYPE 2 + +/* +** CAPI3REF: Test For Auto-Commit Mode +** KEYWORDS: {autocommit mode} +** METHOD: sqlite3 +** +** ^The sqlite3_get_autocommit() interface returns non-zero or +** zero if the given database connection is or is not in autocommit mode, +** respectively. ^Autocommit mode is on by default. +** ^Autocommit mode is disabled by a [BEGIN] statement. +** ^Autocommit mode is re-enabled by a [COMMIT] or [ROLLBACK]. +** +** If certain kinds of errors occur on a statement within a multi-statement +** transaction (errors including [SQLITE_FULL], [SQLITE_IOERR], +** [SQLITE_NOMEM], [SQLITE_BUSY], and [SQLITE_INTERRUPT]) then the +** transaction might be rolled back automatically. The only way to +** find out whether SQLite automatically rolled back the transaction after +** an error is to use this function. +** +** If another thread changes the autocommit status of the database +** connection while this routine is running, then the return value +** is undefined. +*/ +SQLITE_API int sqlite3_get_autocommit(sqlite3*); + +/* +** CAPI3REF: Find The Database Handle Of A Prepared Statement +** METHOD: sqlite3_stmt +** +** ^The sqlite3_db_handle interface returns the [database connection] handle +** to which a [prepared statement] belongs. ^The [database connection] +** returned by sqlite3_db_handle is the same [database connection] +** that was the first argument +** to the [sqlite3_prepare_v2()] call (or its variants) that was used to +** create the statement in the first place. +*/ +SQLITE_API sqlite3 *sqlite3_db_handle(sqlite3_stmt*); + +/* +** CAPI3REF: Return The Schema Name For A Database Connection +** METHOD: sqlite3 +** +** ^The sqlite3_db_name(D,N) interface returns a pointer to the schema name +** for the N-th database on database connection D, or a NULL pointer of N is +** out of range. An N value of 0 means the main database file. An N of 1 is +** the "temp" schema. 
Larger values of N correspond to various ATTACH-ed +** databases. +** +** Space to hold the string that is returned by sqlite3_db_name() is managed +** by SQLite itself. The string might be deallocated by any operation that +** changes the schema, including [ATTACH] or [DETACH] or calls to +** [sqlite3_serialize()] or [sqlite3_deserialize()], even operations that +** occur on a different thread. Applications that need to +** remember the string long-term should make their own copy. Applications that +** are accessing the same database connection simultaneously on multiple +** threads should mutex-protect calls to this API and should make their own +** private copy of the result prior to releasing the mutex. +*/ +SQLITE_API const char *sqlite3_db_name(sqlite3 *db, int N); + +/* +** CAPI3REF: Return The Filename For A Database Connection +** METHOD: sqlite3 +** +** ^The sqlite3_db_filename(D,N) interface returns a pointer to the filename +** associated with database N of connection D. +** ^If there is no attached database N on the database +** connection D, or if database N is a temporary or in-memory database, then +** this function will return either a NULL pointer or an empty string. +** +** ^The string value returned by this routine is owned and managed by +** the database connection. ^The value will be valid until the database N +** is [DETACH]-ed or until the database connection closes. +** +** ^The filename returned by this function is the output of the +** xFullPathname method of the [VFS]. ^In other words, the filename +** will be an absolute pathname, even if the filename used +** to open the database originally was a URI or relative pathname. +** +** If the filename pointer returned by this routine is not NULL, then it +** can be used as the filename input parameter to these routines: +**
+** <ul>
+** <li> [sqlite3_uri_parameter()]
+** <li> [sqlite3_uri_boolean()]
+** <li> [sqlite3_uri_int64()]
+** <li> [sqlite3_filename_database()]
+** <li> [sqlite3_filename_journal()]
+** <li> [sqlite3_filename_wal()]
+** </ul>
+*/ +SQLITE_API sqlite3_filename sqlite3_db_filename(sqlite3 *db, const char *zDbName); + +/* +** CAPI3REF: Determine if a database is read-only +** METHOD: sqlite3 +** +** ^The sqlite3_db_readonly(D,N) interface returns 1 if the database N +** of connection D is read-only, 0 if it is read/write, or -1 if N is not +** the name of a database on connection D. +*/ +SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName); + +/* +** CAPI3REF: Determine the transaction state of a database +** METHOD: sqlite3 +** +** ^The sqlite3_txn_state(D,S) interface returns the current +** [transaction state] of schema S in database connection D. ^If S is NULL, +** then the highest transaction state of any schema on database connection D +** is returned. Transaction states are (in order of lowest to highest): +**
+** <ol>
+** <li> SQLITE_TXN_NONE
+** <li> SQLITE_TXN_READ
+** <li> SQLITE_TXN_WRITE
+** </ol>
+** ^If the S argument to sqlite3_txn_state(D,S) is not the name of +** a valid schema, then -1 is returned. +*/ +SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); + +/* +** CAPI3REF: Allowed return values from sqlite3_txn_state() +** KEYWORDS: {transaction state} +** +** These constants define the current transaction state of a database file. +** ^The [sqlite3_txn_state(D,S)] interface returns one of these +** constants in order to describe the transaction state of schema S +** in [database connection] D. +** +**
+** <dl>
+** [[SQLITE_TXN_NONE]] <dt>SQLITE_TXN_NONE</dt>
+** <dd>The SQLITE_TXN_NONE state means that no transaction is currently
+** pending.</dd>
+**
+** [[SQLITE_TXN_READ]] <dt>SQLITE_TXN_READ</dt>
+** <dd>The SQLITE_TXN_READ state means that the database is currently
+** in a read transaction.  Content has been read from the database file
+** but nothing in the database file has changed.  The transaction state
+** will advance to SQLITE_TXN_WRITE if any changes occur and there are
+** no other conflicting concurrent write transactions.  The transaction
+** state will revert to SQLITE_TXN_NONE following a [ROLLBACK] or
+** [COMMIT].</dd>
+**
+** [[SQLITE_TXN_WRITE]] <dt>SQLITE_TXN_WRITE</dt>
+** <dd>The SQLITE_TXN_WRITE state means that the database is currently
+** in a write transaction.  Content has been written to the database file
+** but has not yet committed.  The transaction state will change
+** to SQLITE_TXN_NONE at the next [ROLLBACK] or [COMMIT].</dd>
+** </dl>
+*/ +#define SQLITE_TXN_NONE 0 +#define SQLITE_TXN_READ 1 +#define SQLITE_TXN_WRITE 2 + +/* +** CAPI3REF: Find the next prepared statement +** METHOD: sqlite3 +** +** ^This interface returns a pointer to the next [prepared statement] after +** pStmt associated with the [database connection] pDb. ^If pStmt is NULL +** then this interface returns a pointer to the first prepared statement +** associated with the database connection pDb. ^If no prepared statement +** satisfies the conditions of this routine, it returns NULL. +** +** The [database connection] pointer D in a call to +** [sqlite3_next_stmt(D,S)] must refer to an open database +** connection and in particular must not be a NULL pointer. +*/ +SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt); + +/* +** CAPI3REF: Commit And Rollback Notification Callbacks +** METHOD: sqlite3 +** +** ^The sqlite3_commit_hook() interface registers a callback +** function to be invoked whenever a transaction is [COMMIT | committed]. +** ^Any callback set by a previous call to sqlite3_commit_hook() +** for the same database connection is overridden. +** ^The sqlite3_rollback_hook() interface registers a callback +** function to be invoked whenever a transaction is [ROLLBACK | rolled back]. +** ^Any callback set by a previous call to sqlite3_rollback_hook() +** for the same database connection is overridden. +** ^The pArg argument is passed through to the callback. +** ^If the callback on a commit hook function returns non-zero, +** then the commit is converted into a rollback. +** +** ^The sqlite3_commit_hook(D,C,P) and sqlite3_rollback_hook(D,C,P) functions +** return the P argument from the previous call of the same function +** on the same [database connection] D, or NULL for +** the first call for each function on D. +** +** The commit and rollback hook callbacks are not reentrant. 
+** The callback implementation must not do anything that will modify +** the database connection that invoked the callback. Any actions +** to modify the database connection must be deferred until after the +** completion of the [sqlite3_step()] call that triggered the commit +** or rollback hook in the first place. +** Note that running any other SQL statements, including SELECT statements, +** or merely calling [sqlite3_prepare_v2()] and [sqlite3_step()] will modify +** the database connections for the meaning of "modify" in this paragraph. +** +** ^Registering a NULL function disables the callback. +** +** ^When the commit hook callback routine returns zero, the [COMMIT] +** operation is allowed to continue normally. ^If the commit hook +** returns non-zero, then the [COMMIT] is converted into a [ROLLBACK]. +** ^The rollback hook is invoked on a rollback that results from a commit +** hook returning non-zero, just as it would be with any other rollback. +** +** ^For the purposes of this API, a transaction is said to have been +** rolled back if an explicit "ROLLBACK" statement is executed, or +** an error or constraint causes an implicit rollback to occur. +** ^The rollback callback is not invoked if a transaction is +** automatically rolled back because the database connection is closed. +** +** See also the [sqlite3_update_hook()] interface. +*/ +SQLITE_API void *sqlite3_commit_hook(sqlite3*, int(*)(void*), void*); +SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); + +/* +** CAPI3REF: Autovacuum Compaction Amount Callback +** METHOD: sqlite3 +** +** ^The sqlite3_autovacuum_pages(D,C,P,X) interface registers a callback +** function C that is invoked prior to each autovacuum of the database +** file. 
^The callback is passed a copy of the generic data pointer (P), +** the schema-name of the attached database that is being autovacuumed, +** the size of the database file in pages, the number of free pages, +** and the number of bytes per page, respectively. The callback should +** return the number of free pages that should be removed by the +** autovacuum. ^If the callback returns zero, then no autovacuum happens. +** ^If the value returned is greater than or equal to the number of +** free pages, then a complete autovacuum happens. +** +**

<p>^If there are multiple ATTACH-ed database files that are being
+** modified as part of a transaction commit, then the autovacuum pages
+** callback is invoked separately for each file.
+**
+** <p>The callback is not reentrant.  The callback function should
+** not attempt to invoke any other SQLite interface.  If it does, bad
+** things may happen, including segmentation faults and corrupt database
+** files.  The callback function should be a simple function that
+** does some arithmetic on its input parameters and returns a result.
+**
+** ^The X parameter to sqlite3_autovacuum_pages(D,C,P,X) is an optional
+** destructor for the P parameter.  ^If X is not NULL, then X(P) is
+** invoked whenever the database connection closes or when the callback
+** is overwritten by another invocation of sqlite3_autovacuum_pages().
+**
+** <p>^There is only one autovacuum pages callback per database connection.
+** ^Each call to the sqlite3_autovacuum_pages() interface overrides all
+** previous invocations for that database connection.  ^If the callback
+** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer,
+** then the autovacuum steps callback is canceled.  The return value
+** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might
+** be some other error code if something goes wrong.  The current
+** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other
+** return codes might be added in future releases.
+**
+** <p>If no autovacuum pages callback is specified (the usual case) or
+** a NULL pointer is provided for the callback,
+** then the default behavior is to vacuum all free pages.  So, in other
+** words, the default behavior is the same as if the callback function
+** were something like this:
+**
+** <blockquote><pre>
+**     unsigned int demonstration_autovac_pages_callback(
+**       void *pClientData,
+**       const char *zSchema,
+**       unsigned int nDbPage,
+**       unsigned int nFreePage,
+**       unsigned int nBytePerPage
+**     ){
+**       return nFreePage;
+**     }
+** </pre></blockquote>
+*/ +SQLITE_API int sqlite3_autovacuum_pages( + sqlite3 *db, + unsigned int(*)(void*,const char*,unsigned int,unsigned int,unsigned int), + void*, + void(*)(void*) +); + + +/* +** CAPI3REF: Data Change Notification Callbacks +** METHOD: sqlite3 +** +** ^The sqlite3_update_hook() interface registers a callback function +** with the [database connection] identified by the first argument +** to be invoked whenever a row is updated, inserted or deleted in +** a [rowid table]. +** ^Any callback set by a previous call to this function +** for the same database connection is overridden. +** +** ^The second argument is a pointer to the function to invoke when a +** row is updated, inserted or deleted in a rowid table. +** ^The first argument to the callback is a copy of the third argument +** to sqlite3_update_hook(). +** ^The second callback argument is one of [SQLITE_INSERT], [SQLITE_DELETE], +** or [SQLITE_UPDATE], depending on the operation that caused the callback +** to be invoked. +** ^The third and fourth arguments to the callback contain pointers to the +** database and table name containing the affected row. +** ^The final callback parameter is the [rowid] of the row. +** ^In the case of an update, this is the [rowid] after the update takes place. +** +** ^(The update hook is not invoked when internal system tables are +** modified (i.e. sqlite_sequence).)^ +** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified. +** +** ^In the current implementation, the update hook +** is not invoked when conflicting rows are deleted because of an +** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook +** invoked when rows are deleted using the [truncate optimization]. +** The exceptions defined in this paragraph might change in a future +** release of SQLite. +** +** The update hook implementation must not do anything that will modify +** the database connection that invoked the update hook. 
Any actions +** to modify the database connection must be deferred until after the +** completion of the [sqlite3_step()] call that triggered the update hook. +** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their +** database connections for the meaning of "modify" in this paragraph. +** +** ^The sqlite3_update_hook(D,C,P) function +** returns the P argument from the previous call +** on the same [database connection] D, or NULL for +** the first call on D. +** +** See also the [sqlite3_commit_hook()], [sqlite3_rollback_hook()], +** and [sqlite3_preupdate_hook()] interfaces. +*/ +SQLITE_API void *sqlite3_update_hook( + sqlite3*, + void(*)(void *,int ,char const *,char const *,sqlite3_int64), + void* +); + +/* +** CAPI3REF: Enable Or Disable Shared Pager Cache +** +** ^(This routine enables or disables the sharing of the database cache +** and schema data structures between [database connection | connections] +** to the same database. Sharing is enabled if the argument is true +** and disabled if the argument is false.)^ +** +** This interface is omitted if SQLite is compiled with +** [-DSQLITE_OMIT_SHARED_CACHE]. The [-DSQLITE_OMIT_SHARED_CACHE] +** compile-time option is recommended because the +** [use of shared cache mode is discouraged]. +** +** ^Cache sharing is enabled and disabled for an entire process. +** This is a change as of SQLite [version 3.5.0] ([dateof:3.5.0]). +** In prior versions of SQLite, +** sharing was enabled or disabled for each thread separately. +** +** ^(The cache sharing mode set by this interface effects all subsequent +** calls to [sqlite3_open()], [sqlite3_open_v2()], and [sqlite3_open16()]. +** Existing database connections continue to use the sharing mode +** that was in effect at the time they were opened.)^ +** +** ^(This routine returns [SQLITE_OK] if shared cache was enabled or disabled +** successfully. An [error code] is returned otherwise.)^ +** +** ^Shared cache is disabled by default. 
It is recommended that it stay +** that way. In other words, do not use this routine. This interface +** continues to be provided for historical compatibility, but its use is +** discouraged. Any use of shared cache is discouraged. If shared cache +** must be used, it is recommended that shared cache only be enabled for +** individual database connections using the [sqlite3_open_v2()] interface +** with the [SQLITE_OPEN_SHAREDCACHE] flag. +** +** Note: This method is disabled on MacOS X 10.7 and iOS version 5.0 +** and will always return SQLITE_MISUSE. On those systems, +** shared cache mode should be enabled per-database connection via +** [sqlite3_open_v2()] with [SQLITE_OPEN_SHAREDCACHE]. +** +** This interface is threadsafe on processors where writing a +** 32-bit integer is atomic. +** +** See Also: [SQLite Shared-Cache Mode] +*/ +SQLITE_API int sqlite3_enable_shared_cache(int); + +/* +** CAPI3REF: Attempt To Free Heap Memory +** +** ^The sqlite3_release_memory() interface attempts to free N bytes +** of heap memory by deallocating non-essential memory allocations +** held by the database library. Memory used to cache database +** pages to improve performance is an example of non-essential memory. +** ^sqlite3_release_memory() returns the number of bytes actually freed, +** which might be more or less than the amount requested. +** ^The sqlite3_release_memory() routine is a no-op returning zero +** if SQLite is not compiled with [SQLITE_ENABLE_MEMORY_MANAGEMENT]. +** +** See also: [sqlite3_db_release_memory()] +*/ +SQLITE_API int sqlite3_release_memory(int); + +/* +** CAPI3REF: Free Memory Used By A Database Connection +** METHOD: sqlite3 +** +** ^The sqlite3_db_release_memory(D) interface attempts to free as much heap +** memory as possible from database connection D. Unlike the +** [sqlite3_release_memory()] interface, this interface is in effect even +** when the [SQLITE_ENABLE_MEMORY_MANAGEMENT] compile-time option is +** omitted. 
+** +** See also: [sqlite3_release_memory()] +*/ +SQLITE_API int sqlite3_db_release_memory(sqlite3*); + +/* +** CAPI3REF: Impose A Limit On Heap Size +** +** These interfaces impose limits on the amount of heap memory that will be +** by all database connections within a single process. +** +** ^The sqlite3_soft_heap_limit64() interface sets and/or queries the +** soft limit on the amount of heap memory that may be allocated by SQLite. +** ^SQLite strives to keep heap memory utilization below the soft heap +** limit by reducing the number of pages held in the page cache +** as heap memory usages approaches the limit. +** ^The soft heap limit is "soft" because even though SQLite strives to stay +** below the limit, it will exceed the limit rather than generate +** an [SQLITE_NOMEM] error. In other words, the soft heap limit +** is advisory only. +** +** ^The sqlite3_hard_heap_limit64(N) interface sets a hard upper bound of +** N bytes on the amount of memory that will be allocated. ^The +** sqlite3_hard_heap_limit64(N) interface is similar to +** sqlite3_soft_heap_limit64(N) except that memory allocations will fail +** when the hard heap limit is reached. +** +** ^The return value from both sqlite3_soft_heap_limit64() and +** sqlite3_hard_heap_limit64() is the size of +** the heap limit prior to the call, or negative in the case of an +** error. ^If the argument N is negative +** then no change is made to the heap limit. Hence, the current +** size of heap limits can be determined by invoking +** sqlite3_soft_heap_limit64(-1) or sqlite3_hard_heap_limit(-1). +** +** ^Setting the heap limits to zero disables the heap limiter mechanism. +** +** ^The soft heap limit may not be greater than the hard heap limit. +** ^If the hard heap limit is enabled and if sqlite3_soft_heap_limit(N) +** is invoked with a value of N that is greater than the hard heap limit, +** the soft heap limit is set to the value of the hard heap limit. 
+** ^The soft heap limit is automatically enabled whenever the hard heap +** limit is enabled. ^When sqlite3_hard_heap_limit64(N) is invoked and +** the soft heap limit is outside the range of 1..N, then the soft heap +** limit is set to N. ^Invoking sqlite3_soft_heap_limit64(0) when the +** hard heap limit is enabled makes the soft heap limit equal to the +** hard heap limit. +** +** The memory allocation limits can also be adjusted using +** [PRAGMA soft_heap_limit] and [PRAGMA hard_heap_limit]. +** +** ^(The heap limits are not enforced in the current implementation +** if one or more of following conditions are true: +** +**
+** <ul>
+** <li> The limit value is set to zero.
+** <li> Memory accounting is disabled using a combination of the
+** [sqlite3_config]([SQLITE_CONFIG_MEMSTATUS],...) start-time option and
+** the [SQLITE_DEFAULT_MEMSTATUS] compile-time option.
+** <li> An alternative page cache implementation is specified using
+** [sqlite3_config]([SQLITE_CONFIG_PCACHE2],...).
+** <li> The page cache allocates from its own memory pool supplied
+** by [sqlite3_config]([SQLITE_CONFIG_PAGECACHE],...) rather than
+** from the heap.
+** </ul>
)^ +** +** The circumstances under which SQLite will enforce the heap limits may +** changes in future releases of SQLite. +*/ +SQLITE_API sqlite3_int64 sqlite3_soft_heap_limit64(sqlite3_int64 N); +SQLITE_API sqlite3_int64 sqlite3_hard_heap_limit64(sqlite3_int64 N); + +/* +** CAPI3REF: Deprecated Soft Heap Limit Interface +** DEPRECATED +** +** This is a deprecated version of the [sqlite3_soft_heap_limit64()] +** interface. This routine is provided for historical compatibility +** only. All new applications should use the +** [sqlite3_soft_heap_limit64()] interface rather than this one. +*/ +SQLITE_API SQLITE_DEPRECATED void sqlite3_soft_heap_limit(int N); + + +/* +** CAPI3REF: Extract Metadata About A Column Of A Table +** METHOD: sqlite3 +** +** ^(The sqlite3_table_column_metadata(X,D,T,C,....) routine returns +** information about column C of table T in database D +** on [database connection] X.)^ ^The sqlite3_table_column_metadata() +** interface returns SQLITE_OK and fills in the non-NULL pointers in +** the final five arguments with appropriate values if the specified +** column exists. ^The sqlite3_table_column_metadata() interface returns +** SQLITE_ERROR if the specified column does not exist. +** ^If the column-name parameter to sqlite3_table_column_metadata() is a +** NULL pointer, then this routine simply checks for the existence of the +** table and returns SQLITE_OK if the table exists and SQLITE_ERROR if it +** does not. If the table name parameter T in a call to +** sqlite3_table_column_metadata(X,D,T,C,...) is NULL then the result is +** undefined behavior. +** +** ^The column is identified by the second, third and fourth parameters to +** this function. ^(The second parameter is either the name of the database +** (i.e. 
"main", "temp", or an attached database) containing the specified +** table or NULL.)^ ^If it is NULL, then all attached databases are searched +** for the table using the same algorithm used by the database engine to +** resolve unqualified table references. +** +** ^The third and fourth parameters to this function are the table and column +** name of the desired column, respectively. +** +** ^Metadata is returned by writing to the memory locations passed as the 5th +** and subsequent parameters to this function. ^Any of these arguments may be +** NULL, in which case the corresponding element of metadata is omitted. +** +** ^(
+** <blockquote>
+** <table border="1">
+** <tr><th> Parameter <th> Output<br>Type <th>  Description
+**
+** <tr><td> 5th <td> const char* <td> Data type
+** <tr><td> 6th <td> const char* <td> Name of default collation sequence
+** <tr><td> 7th <td> int <td> True if column has a NOT NULL constraint
+** <tr><td> 8th <td> int <td> True if column is part of the PRIMARY KEY
+** <tr><td> 9th <td> int <td> True if column is [AUTOINCREMENT]
+** </table>
+** </blockquote>
)^ +** +** ^The memory pointed to by the character pointers returned for the +** declaration type and collation sequence is valid until the next +** call to any SQLite API function. +** +** ^If the specified table is actually a view, an [error code] is returned. +** +** ^If the specified column is "rowid", "oid" or "_rowid_" and the table +** is not a [WITHOUT ROWID] table and an +** [INTEGER PRIMARY KEY] column has been explicitly declared, then the output +** parameters are set for the explicitly declared column. ^(If there is no +** [INTEGER PRIMARY KEY] column, then the outputs +** for the [rowid] are set as follows: +** +**
+**     data type: "INTEGER"
+**     collation sequence: "BINARY"
+**     not null: 0
+**     primary key: 1
+**     auto increment: 0
+** 
)^ +** +** ^This function causes all database schemas to be read from disk and +** parsed, if that has not already been done, and returns an error if +** any errors are encountered while loading the schema. +*/ +SQLITE_API int sqlite3_table_column_metadata( + sqlite3 *db, /* Connection handle */ + const char *zDbName, /* Database name or NULL */ + const char *zTableName, /* Table name */ + const char *zColumnName, /* Column name */ + char const **pzDataType, /* OUTPUT: Declared data type */ + char const **pzCollSeq, /* OUTPUT: Collation sequence name */ + int *pNotNull, /* OUTPUT: True if NOT NULL constraint exists */ + int *pPrimaryKey, /* OUTPUT: True if column part of PK */ + int *pAutoinc /* OUTPUT: True if column is auto-increment */ +); + +/* +** CAPI3REF: Load An Extension +** METHOD: sqlite3 +** +** ^This interface loads an SQLite extension library from the named file. +** +** ^The sqlite3_load_extension() interface attempts to load an +** [SQLite extension] library contained in the file zFile. If +** the file cannot be loaded directly, attempts are made to load +** with various operating-system specific extensions added. +** So for example, if "samplelib" cannot be loaded, then names like +** "samplelib.so" or "samplelib.dylib" or "samplelib.dll" might +** be tried also. +** +** ^The entry point is zProc. +** ^(zProc may be 0, in which case SQLite will try to come up with an +** entry point name on its own. It first tries "sqlite3_extension_init". +** If that does not work, it constructs a name "sqlite3_X_init" where the +** X consists of the lower-case equivalent of all ASCII alphabetic +** characters in the filename from the last "/" to the first following +** "." and omitting any initial "lib".)^ +** ^The sqlite3_load_extension() interface returns +** [SQLITE_OK] on success and [SQLITE_ERROR] if something goes wrong. 
+** ^If an error occurs and pzErrMsg is not 0, then the +** [sqlite3_load_extension()] interface shall attempt to +** fill *pzErrMsg with error message text stored in memory +** obtained from [sqlite3_malloc()]. The calling function +** should free this memory by calling [sqlite3_free()]. +** +** ^Extension loading must be enabled using +** [sqlite3_enable_load_extension()] or +** [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],1,NULL) +** prior to calling this API, +** otherwise an error will be returned. +** +** Security warning: It is recommended that the +** [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method be used to enable only this +** interface. The use of the [sqlite3_enable_load_extension()] interface +** should be avoided. This will keep the SQL function [load_extension()] +** disabled and prevent SQL injections from giving attackers +** access to extension loading capabilities. +** +** See also the [load_extension() SQL function]. +*/ +SQLITE_API int sqlite3_load_extension( + sqlite3 *db, /* Load the extension into this database connection */ + const char *zFile, /* Name of the shared library containing extension */ + const char *zProc, /* Entry point. Derived from zFile if 0 */ + char **pzErrMsg /* Put error message here if not 0 */ +); + +/* +** CAPI3REF: Enable Or Disable Extension Loading +** METHOD: sqlite3 +** +** ^So as not to open security holes in older applications that are +** unprepared to deal with [extension loading], and as a means of disabling +** [extension loading] while evaluating user-entered SQL, the following API +** is provided to turn the [sqlite3_load_extension()] mechanism on and off. +** +** ^Extension loading is off by default. +** ^Call the sqlite3_enable_load_extension() routine with onoff==1 +** to turn extension loading on and call it with onoff==0 to turn +** it back off again. +** +** ^This interface enables or disables both the C-API +** [sqlite3_load_extension()] and the SQL function [load_extension()]. 
+** ^(Use [sqlite3_db_config](db,[SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION],..) +** to enable or disable only the C-API.)^ +** +** Security warning: It is recommended that extension loading +** be enabled using the [SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION] method +** rather than this interface, so the [load_extension()] SQL function +** remains disabled. This will prevent SQL injections from giving attackers +** access to extension loading capabilities. +*/ +SQLITE_API int sqlite3_enable_load_extension(sqlite3 *db, int onoff); + +/* +** CAPI3REF: Automatically Load Statically Linked Extensions +** +** ^This interface causes the xEntryPoint() function to be invoked for +** each new [database connection] that is created. The idea here is that +** xEntryPoint() is the entry point for a statically linked [SQLite extension] +** that is to be automatically loaded into all new database connections. +** +** ^(Even though the function prototype shows that xEntryPoint() takes +** no arguments and returns void, SQLite invokes xEntryPoint() with three +** arguments and expects an integer result as if the signature of the +** entry point where as follows: +** +**
+**    int xEntryPoint(
+**      sqlite3 *db,
+**      const char **pzErrMsg,
+**      const struct sqlite3_api_routines *pThunk
+**    );
+** 
)^ +** +** If the xEntryPoint routine encounters an error, it should make *pzErrMsg +** point to an appropriate error message (obtained from [sqlite3_mprintf()]) +** and return an appropriate [error code]. ^SQLite ensures that *pzErrMsg +** is NULL before calling the xEntryPoint(). ^SQLite will invoke +** [sqlite3_free()] on *pzErrMsg after xEntryPoint() returns. ^If any +** xEntryPoint() returns an error, the [sqlite3_open()], [sqlite3_open16()], +** or [sqlite3_open_v2()] call that provoked the xEntryPoint() will fail. +** +** ^Calling sqlite3_auto_extension(X) with an entry point X that is already +** on the list of automatic extensions is a harmless no-op. ^No entry point +** will be called more than once for each database connection that is opened. +** +** See also: [sqlite3_reset_auto_extension()] +** and [sqlite3_cancel_auto_extension()] +*/ +SQLITE_API int sqlite3_auto_extension(void(*xEntryPoint)(void)); + +/* +** CAPI3REF: Cancel Automatic Extension Loading +** +** ^The [sqlite3_cancel_auto_extension(X)] interface unregisters the +** initialization routine X that was registered using a prior call to +** [sqlite3_auto_extension(X)]. ^The [sqlite3_cancel_auto_extension(X)] +** routine returns 1 if initialization routine X was successfully +** unregistered and it returns 0 if X was not on the list of initialization +** routines. +*/ +SQLITE_API int sqlite3_cancel_auto_extension(void(*xEntryPoint)(void)); + +/* +** CAPI3REF: Reset Automatic Extension Loading +** +** ^This interface disables all automatic extensions previously +** registered using [sqlite3_auto_extension()]. 
+*/ +SQLITE_API void sqlite3_reset_auto_extension(void); + +/* +** Structures used by the virtual table interface +*/ +typedef struct sqlite3_vtab sqlite3_vtab; +typedef struct sqlite3_index_info sqlite3_index_info; +typedef struct sqlite3_vtab_cursor sqlite3_vtab_cursor; +typedef struct sqlite3_module sqlite3_module; + +/* +** CAPI3REF: Virtual Table Object +** KEYWORDS: sqlite3_module {virtual table module} +** +** This structure, sometimes called a "virtual table module", +** defines the implementation of a [virtual table]. +** This structure consists mostly of methods for the module. +** +** ^A virtual table module is created by filling in a persistent +** instance of this structure and passing a pointer to that instance +** to [sqlite3_create_module()] or [sqlite3_create_module_v2()]. +** ^The registration remains valid until it is replaced by a different +** module or until the [database connection] closes. The content +** of this structure must not change while it is registered with +** any database connection. 
+*/ +struct sqlite3_module { + int iVersion; + int (*xCreate)(sqlite3*, void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, char**); + int (*xConnect)(sqlite3*, void *pAux, + int argc, const char *const*argv, + sqlite3_vtab **ppVTab, char**); + int (*xBestIndex)(sqlite3_vtab *pVTab, sqlite3_index_info*); + int (*xDisconnect)(sqlite3_vtab *pVTab); + int (*xDestroy)(sqlite3_vtab *pVTab); + int (*xOpen)(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor); + int (*xClose)(sqlite3_vtab_cursor*); + int (*xFilter)(sqlite3_vtab_cursor*, int idxNum, const char *idxStr, + int argc, sqlite3_value **argv); + int (*xNext)(sqlite3_vtab_cursor*); + int (*xEof)(sqlite3_vtab_cursor*); + int (*xColumn)(sqlite3_vtab_cursor*, sqlite3_context*, int); + int (*xRowid)(sqlite3_vtab_cursor*, sqlite3_int64 *pRowid); + int (*xUpdate)(sqlite3_vtab *, int, sqlite3_value **, sqlite3_int64 *); + int (*xBegin)(sqlite3_vtab *pVTab); + int (*xSync)(sqlite3_vtab *pVTab); + int (*xCommit)(sqlite3_vtab *pVTab); + int (*xRollback)(sqlite3_vtab *pVTab); + int (*xFindFunction)(sqlite3_vtab *pVtab, int nArg, const char *zName, + void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), + void **ppArg); + int (*xRename)(sqlite3_vtab *pVtab, const char *zNew); + /* The methods above are in version 1 of the sqlite_module object. Those + ** below are for version 2 and greater. */ + int (*xSavepoint)(sqlite3_vtab *pVTab, int); + int (*xRelease)(sqlite3_vtab *pVTab, int); + int (*xRollbackTo)(sqlite3_vtab *pVTab, int); + /* The methods above are in versions 1 and 2 of the sqlite_module object. + ** Those below are for version 3 and greater. */ + int (*xShadowName)(const char*); + /* The methods above are in versions 1 through 3 of the sqlite_module object. + ** Those below are for version 4 and greater. 
*/ + int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema, + const char *zTabName, int mFlags, char **pzErr); +}; + +/* +** CAPI3REF: Virtual Table Indexing Information +** KEYWORDS: sqlite3_index_info +** +** The sqlite3_index_info structure and its substructures is used as part +** of the [virtual table] interface to +** pass information into and receive the reply from the [xBestIndex] +** method of a [virtual table module]. The fields under **Inputs** are the +** inputs to xBestIndex and are read-only. xBestIndex inserts its +** results into the **Outputs** fields. +** +** ^(The aConstraint[] array records WHERE clause constraints of the form: +** +**
column OP expr
+** +** where OP is =, <, <=, >, or >=.)^ ^(The particular operator is +** stored in aConstraint[].op using one of the +** [SQLITE_INDEX_CONSTRAINT_EQ | SQLITE_INDEX_CONSTRAINT_ values].)^ +** ^(The index of the column is stored in +** aConstraint[].iColumn.)^ ^(aConstraint[].usable is TRUE if the +** expr on the right-hand side can be evaluated (and thus the constraint +** is usable) and false if it cannot.)^ +** +** ^The optimizer automatically inverts terms of the form "expr OP column" +** and makes other simplifications to the WHERE clause in an attempt to +** get as many WHERE clause terms into the form shown above as possible. +** ^The aConstraint[] array only reports WHERE clause terms that are +** relevant to the particular virtual table being queried. +** +** ^Information about the ORDER BY clause is stored in aOrderBy[]. +** ^Each term of aOrderBy records a column of the ORDER BY clause. +** +** The colUsed field indicates which columns of the virtual table may be +** required by the current scan. Virtual table columns are numbered from +** zero in the order in which they appear within the CREATE TABLE statement +** passed to sqlite3_declare_vtab(). For the first 63 columns (columns 0-62), +** the corresponding bit is set within the colUsed mask if the column may be +** required by SQLite. If the table has at least 64 columns and any column +** to the right of the first 63 is required, then bit 63 of colUsed is also +** set. In other words, column iCol may be required if the expression +** (colUsed & ((sqlite3_uint64)1 << (iCol>=63 ? 63 : iCol))) evaluates to +** non-zero. +** +** The [xBestIndex] method must fill aConstraintUsage[] with information +** about what parameters to pass to xFilter. ^If argvIndex>0 then +** the right-hand side of the corresponding aConstraint[] is evaluated +** and becomes the argvIndex-th entry in argv. 
^(If aConstraintUsage[].omit +** is true, then the constraint is assumed to be fully handled by the +** virtual table and might not be checked again by the byte code.)^ ^(The +** aConstraintUsage[].omit flag is an optimization hint. When the omit flag +** is left in its default setting of false, the constraint will always be +** checked separately in byte code. If the omit flag is changed to true, then +** the constraint may or may not be checked in byte code. In other words, +** when the omit flag is true there is no guarantee that the constraint will +** not be checked again using byte code.)^ +** +** ^The idxNum and idxStr values are recorded and passed into the +** [xFilter] method. +** ^[sqlite3_free()] is used to free idxStr if and only if +** needToFreeIdxStr is true. +** +** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in +** the correct order to satisfy the ORDER BY clause so that no separate +** sorting step is required. +** +** ^The estimatedCost value is an estimate of the cost of a particular +** strategy. A cost of N indicates that the cost of the strategy is similar +** to a linear scan of an SQLite table with N rows. A cost of log(N) +** indicates that the expense of the operation is similar to that of a +** binary search on a unique indexed field of an SQLite table with N rows. +** +** ^The estimatedRows value is an estimate of the number of rows that +** will be returned by the strategy. +** +** The xBestIndex method may optionally populate the idxFlags field with a +** mask of SQLITE_INDEX_SCAN_* flags. Currently there is only one such flag - +** SQLITE_INDEX_SCAN_UNIQUE. If the xBestIndex method sets this flag, SQLite +** assumes that the strategy may visit at most one row. 
+** +** Additionally, if xBestIndex sets the SQLITE_INDEX_SCAN_UNIQUE flag, then +** SQLite also assumes that if a call to the xUpdate() method is made as +** part of the same statement to delete or update a virtual table row and the +** implementation returns SQLITE_CONSTRAINT, then there is no need to rollback +** any database changes. In other words, if the xUpdate() returns +** SQLITE_CONSTRAINT, the database contents must be exactly as they were +** before xUpdate was called. By contrast, if SQLITE_INDEX_SCAN_UNIQUE is not +** set and xUpdate returns SQLITE_CONSTRAINT, any database changes made by +** the xUpdate method are automatically rolled back by SQLite. +** +** IMPORTANT: The estimatedRows field was added to the sqlite3_index_info +** structure for SQLite [version 3.8.2] ([dateof:3.8.2]). +** If a virtual table extension is +** used with an SQLite version earlier than 3.8.2, the results of attempting +** to read or write the estimatedRows field are undefined (but are likely +** to include crashing the application). The estimatedRows field should +** therefore only be used if [sqlite3_libversion_number()] returns a +** value greater than or equal to 3008002. Similarly, the idxFlags field +** was added for [version 3.9.0] ([dateof:3.9.0]). +** It may therefore only be used if +** sqlite3_libversion_number() returns a value greater than or equal to +** 3009000. +*/ +struct sqlite3_index_info { + /* Inputs */ + int nConstraint; /* Number of entries in aConstraint */ + struct sqlite3_index_constraint { + int iColumn; /* Column constrained. -1 for ROWID */ + unsigned char op; /* Constraint operator */ + unsigned char usable; /* True if this constraint is usable */ + int iTermOffset; /* Used internally - xBestIndex should ignore */ + } *aConstraint; /* Table of WHERE clause constraints */ + int nOrderBy; /* Number of terms in the ORDER BY clause */ + struct sqlite3_index_orderby { + int iColumn; /* Column number */ + unsigned char desc; /* True for DESC. 
False for ASC. */ + } *aOrderBy; /* The ORDER BY clause */ + /* Outputs */ + struct sqlite3_index_constraint_usage { + int argvIndex; /* if >0, constraint is part of argv to xFilter */ + unsigned char omit; /* Do not code a test for this constraint */ + } *aConstraintUsage; + int idxNum; /* Number used to identify the index */ + char *idxStr; /* String, possibly obtained from sqlite3_malloc */ + int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */ + int orderByConsumed; /* True if output is already ordered */ + double estimatedCost; /* Estimated cost of using this index */ + /* Fields below are only available in SQLite 3.8.2 and later */ + sqlite3_int64 estimatedRows; /* Estimated number of rows returned */ + /* Fields below are only available in SQLite 3.9.0 and later */ + int idxFlags; /* Mask of SQLITE_INDEX_SCAN_* flags */ + /* Fields below are only available in SQLite 3.10.0 and later */ + sqlite3_uint64 colUsed; /* Input: Mask of columns used by statement */ +}; + +/* +** CAPI3REF: Virtual Table Scan Flags +** +** Virtual table implementations are allowed to set the +** [sqlite3_index_info].idxFlags field to some combination of +** these bits. +*/ +#define SQLITE_INDEX_SCAN_UNIQUE 1 /* Scan visits at most 1 row */ + +/* +** CAPI3REF: Virtual Table Constraint Operator Codes +** +** These macros define the allowed values for the +** [sqlite3_index_info].aConstraint[].op field. Each value represents +** an operator that is part of a constraint term in the WHERE clause of +** a query that uses a [virtual table]. +** +** ^The left-hand operand of the operator is given by the corresponding +** aConstraint[].iColumn field. ^An iColumn of -1 indicates the left-hand +** operand is the rowid. +** The SQLITE_INDEX_CONSTRAINT_LIMIT and SQLITE_INDEX_CONSTRAINT_OFFSET +** operators have no left-hand operand, and so for those operators the +** corresponding aConstraint[].iColumn is meaningless and should not be +** used. 
+** +** All operator values from SQLITE_INDEX_CONSTRAINT_FUNCTION through +** value 255 are reserved to represent functions that are overloaded +** by the [xFindFunction|xFindFunction method] of the virtual table +** implementation. +** +** The right-hand operands for each constraint might be accessible using +** the [sqlite3_vtab_rhs_value()] interface. Usually the right-hand +** operand is only available if it appears as a single constant literal +** in the input SQL. If the right-hand operand is another column or an +** expression (even a constant expression) or a parameter, then the +** sqlite3_vtab_rhs_value() probably will not be able to extract it. +** ^The SQLITE_INDEX_CONSTRAINT_ISNULL and +** SQLITE_INDEX_CONSTRAINT_ISNOTNULL operators have no right-hand operand +** and hence calls to sqlite3_vtab_rhs_value() for those operators will +** always return SQLITE_NOTFOUND. +** +** The collating sequence to be used for comparison can be found using +** the [sqlite3_vtab_collation()] interface. For most real-world virtual +** tables, the collating sequence of constraints does not matter (for example +** because the constraints are numeric) and so the sqlite3_vtab_collation() +** interface is not commonly needed. 
+*/ +#define SQLITE_INDEX_CONSTRAINT_EQ 2 +#define SQLITE_INDEX_CONSTRAINT_GT 4 +#define SQLITE_INDEX_CONSTRAINT_LE 8 +#define SQLITE_INDEX_CONSTRAINT_LT 16 +#define SQLITE_INDEX_CONSTRAINT_GE 32 +#define SQLITE_INDEX_CONSTRAINT_MATCH 64 +#define SQLITE_INDEX_CONSTRAINT_LIKE 65 +#define SQLITE_INDEX_CONSTRAINT_GLOB 66 +#define SQLITE_INDEX_CONSTRAINT_REGEXP 67 +#define SQLITE_INDEX_CONSTRAINT_NE 68 +#define SQLITE_INDEX_CONSTRAINT_ISNOT 69 +#define SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70 +#define SQLITE_INDEX_CONSTRAINT_ISNULL 71 +#define SQLITE_INDEX_CONSTRAINT_IS 72 +#define SQLITE_INDEX_CONSTRAINT_LIMIT 73 +#define SQLITE_INDEX_CONSTRAINT_OFFSET 74 +#define SQLITE_INDEX_CONSTRAINT_FUNCTION 150 + +/* +** CAPI3REF: Register A Virtual Table Implementation +** METHOD: sqlite3 +** +** ^These routines are used to register a new [virtual table module] name. +** ^Module names must be registered before +** creating a new [virtual table] using the module and before using a +** preexisting [virtual table] for the module. +** +** ^The module name is registered on the [database connection] specified +** by the first parameter. ^The name of the module is given by the +** second parameter. ^The third parameter is a pointer to +** the implementation of the [virtual table module]. ^The fourth +** parameter is an arbitrary client data pointer that is passed through +** into the [xCreate] and [xConnect] methods of the virtual table module +** when a new virtual table is be being created or reinitialized. +** +** ^The sqlite3_create_module_v2() interface has a fifth parameter which +** is a pointer to a destructor for the pClientData. ^SQLite will +** invoke the destructor function (if it is not NULL) when SQLite +** no longer needs the pClientData pointer. ^The destructor will also +** be invoked if the call to sqlite3_create_module_v2() fails. +** ^The sqlite3_create_module() +** interface is equivalent to sqlite3_create_module_v2() with a NULL +** destructor. 
+** +** ^If the third parameter (the pointer to the sqlite3_module object) is +** NULL then no new module is created and any existing modules with the +** same name are dropped. +** +** See also: [sqlite3_drop_modules()] +*/ +SQLITE_API int sqlite3_create_module( + sqlite3 *db, /* SQLite connection to register module with */ + const char *zName, /* Name of the module */ + const sqlite3_module *p, /* Methods for the module */ + void *pClientData /* Client data for xCreate/xConnect */ +); +SQLITE_API int sqlite3_create_module_v2( + sqlite3 *db, /* SQLite connection to register module with */ + const char *zName, /* Name of the module */ + const sqlite3_module *p, /* Methods for the module */ + void *pClientData, /* Client data for xCreate/xConnect */ + void(*xDestroy)(void*) /* Module destructor function */ +); + +/* +** CAPI3REF: Remove Unnecessary Virtual Table Implementations +** METHOD: sqlite3 +** +** ^The sqlite3_drop_modules(D,L) interface removes all virtual +** table modules from database connection D except those named on list L. +** The L parameter must be either NULL or a pointer to an array of pointers +** to strings where the array is terminated by a single NULL pointer. +** ^If the L parameter is NULL, then all virtual table modules are removed. +** +** See also: [sqlite3_create_module()] +*/ +SQLITE_API int sqlite3_drop_modules( + sqlite3 *db, /* Remove modules from this connection */ + const char **azKeep /* Except, do not remove the ones named here */ +); + +/* +** CAPI3REF: Virtual Table Instance Object +** KEYWORDS: sqlite3_vtab +** +** Every [virtual table module] implementation uses a subclass +** of this object to describe a particular instance +** of the [virtual table]. Each subclass will +** be tailored to the specific needs of the module implementation. +** The purpose of this superclass is to define certain fields that are +** common to all module implementations. 
+** +** ^Virtual tables methods can set an error message by assigning a +** string obtained from [sqlite3_mprintf()] to zErrMsg. The method should +** take care that any prior string is freed by a call to [sqlite3_free()] +** prior to assigning a new string to zErrMsg. ^After the error message +** is delivered up to the client application, the string will be automatically +** freed by sqlite3_free() and the zErrMsg field will be zeroed. +*/ +struct sqlite3_vtab { + const sqlite3_module *pModule; /* The module for this virtual table */ + int nRef; /* Number of open cursors */ + char *zErrMsg; /* Error message from sqlite3_mprintf() */ + /* Virtual table implementations will typically add additional fields */ +}; + +/* +** CAPI3REF: Virtual Table Cursor Object +** KEYWORDS: sqlite3_vtab_cursor {virtual table cursor} +** +** Every [virtual table module] implementation uses a subclass of the +** following structure to describe cursors that point into the +** [virtual table] and are used +** to loop through the virtual table. Cursors are created using the +** [sqlite3_module.xOpen | xOpen] method of the module and are destroyed +** by the [sqlite3_module.xClose | xClose] method. Cursors are used +** by the [xFilter], [xNext], [xEof], [xColumn], and [xRowid] methods +** of the module. Each module implementation will define +** the content of a cursor structure to suit its own needs. +** +** This superclass exists in order to define fields of the cursor that +** are common to all implementations. +*/ +struct sqlite3_vtab_cursor { + sqlite3_vtab *pVtab; /* Virtual table of this cursor */ + /* Virtual table implementations will typically add additional fields */ +}; + +/* +** CAPI3REF: Declare The Schema Of A Virtual Table +** +** ^The [xCreate] and [xConnect] methods of a +** [virtual table module] call this interface +** to declare the format (the names and datatypes of the columns) of +** the virtual tables they implement. 
+*/ +SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL); + +/* +** CAPI3REF: Overload A Function For A Virtual Table +** METHOD: sqlite3 +** +** ^(Virtual tables can provide alternative implementations of functions +** using the [xFindFunction] method of the [virtual table module]. +** But global versions of those functions +** must exist in order to be overloaded.)^ +** +** ^(This API makes sure a global version of a function with a particular +** name and number of parameters exists. If no such function exists +** before this API is called, a new function is created.)^ ^The implementation +** of the new function always causes an exception to be thrown. So +** the new function is not good for anything by itself. Its only +** purpose is to be a placeholder function that can be overloaded +** by a [virtual table]. +*/ +SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); + +/* +** CAPI3REF: A Handle To An Open BLOB +** KEYWORDS: {BLOB handle} {BLOB handles} +** +** An instance of this object represents an open BLOB on which +** [sqlite3_blob_open | incremental BLOB I/O] can be performed. +** ^Objects of this type are created by [sqlite3_blob_open()] +** and destroyed by [sqlite3_blob_close()]. +** ^The [sqlite3_blob_read()] and [sqlite3_blob_write()] interfaces +** can be used to read or write small subsections of the BLOB. +** ^The [sqlite3_blob_bytes()] interface returns the size of the BLOB in bytes. +*/ +typedef struct sqlite3_blob sqlite3_blob; + +/* +** CAPI3REF: Open A BLOB For Incremental I/O +** METHOD: sqlite3 +** CONSTRUCTOR: sqlite3_blob +** +** ^(This interfaces opens a [BLOB handle | handle] to the BLOB located +** in row iRow, column zColumn, table zTable in database zDb; +** in other words, the same BLOB that would be selected by: +** +**
+**     SELECT zColumn FROM zDb.zTable WHERE [rowid] = iRow;
+** 
)^ +** +** ^(Parameter zDb is not the filename that contains the database, but +** rather the symbolic name of the database. For attached databases, this is +** the name that appears after the AS keyword in the [ATTACH] statement. +** For the main database file, the database name is "main". For TEMP +** tables, the database name is "temp".)^ +** +** ^If the flags parameter is non-zero, then the BLOB is opened for read +** and write access. ^If the flags parameter is zero, the BLOB is opened for +** read-only access. +** +** ^(On success, [SQLITE_OK] is returned and the new [BLOB handle] is stored +** in *ppBlob. Otherwise an [error code] is returned and, unless the error +** code is SQLITE_MISUSE, *ppBlob is set to NULL.)^ ^This means that, provided +** the API is not misused, it is always safe to call [sqlite3_blob_close()] +** on *ppBlob after this function returns. +** +** This function fails with SQLITE_ERROR if any of the following are true: +**
    +**
  • ^(Database zDb does not exist)^, +**
  • ^(Table zTable does not exist within database zDb)^, +**
  • ^(Table zTable is a WITHOUT ROWID table)^, +**
  • ^(Column zColumn does not exist)^, +**
  • ^(Row iRow is not present in the table)^, +**
  • ^(The specified column of row iRow contains a value that is not +** a TEXT or BLOB value)^, +**
  • ^(Column zColumn is part of an index, PRIMARY KEY or UNIQUE +** constraint and the blob is being opened for read/write access)^, +**
  • ^([foreign key constraints | Foreign key constraints] are enabled, +** column zColumn is part of a [child key] definition and the blob is +** being opened for read/write access)^. +**
+** +** ^Unless it returns SQLITE_MISUSE, this function sets the +** [database connection] error code and message accessible via +** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions. +** +** A BLOB referenced by sqlite3_blob_open() may be read using the +** [sqlite3_blob_read()] interface and modified by using +** [sqlite3_blob_write()]. The [BLOB handle] can be moved to a +** different row of the same table using the [sqlite3_blob_reopen()] +** interface. However, the column, table, or database of a [BLOB handle] +** cannot be changed after the [BLOB handle] is opened. +** +** ^(If the row that a BLOB handle points to is modified by an +** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects +** then the BLOB handle is marked as "expired". +** This is true if any column of the row is changed, even a column +** other than the one the BLOB handle is open on.)^ +** ^Calls to [sqlite3_blob_read()] and [sqlite3_blob_write()] for +** an expired BLOB handle fail with a return code of [SQLITE_ABORT]. +** ^(Changes written into a BLOB prior to the BLOB expiring are not +** rolled back by the expiration of the BLOB. Such changes will eventually +** commit if the transaction continues to completion.)^ +** +** ^Use the [sqlite3_blob_bytes()] interface to determine the size of +** the opened blob. ^The size of a blob may not be changed by this +** interface. Use the [UPDATE] SQL command to change the size of a +** blob. +** +** ^The [sqlite3_bind_zeroblob()] and [sqlite3_result_zeroblob()] interfaces +** and the built-in [zeroblob] SQL function may be used to create a +** zero-filled blob to read or write using the incremental-blob interface. +** +** To avoid a resource leak, every open [BLOB handle] should eventually +** be released by a call to [sqlite3_blob_close()]. +** +** See also: [sqlite3_blob_close()], +** [sqlite3_blob_reopen()], [sqlite3_blob_read()], +** [sqlite3_blob_bytes()], [sqlite3_blob_write()]. 
+*/ +SQLITE_API int sqlite3_blob_open( + sqlite3*, + const char *zDb, + const char *zTable, + const char *zColumn, + sqlite3_int64 iRow, + int flags, + sqlite3_blob **ppBlob +); + +/* +** CAPI3REF: Move a BLOB Handle to a New Row +** METHOD: sqlite3_blob +** +** ^This function is used to move an existing [BLOB handle] so that it points +** to a different row of the same database table. ^The new row is identified +** by the rowid value passed as the second argument. Only the row can be +** changed. ^The database, table and column on which the blob handle is open +** remain the same. Moving an existing [BLOB handle] to a new row is +** faster than closing the existing handle and opening a new one. +** +** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] - +** it must exist and there must be either a blob or text value stored in +** the nominated column.)^ ^If the new row is not present in the table, or if +** it does not contain a blob or text value, or if another error occurs, an +** SQLite error code is returned and the blob handle is considered aborted. +** ^All subsequent calls to [sqlite3_blob_read()], [sqlite3_blob_write()] or +** [sqlite3_blob_reopen()] on an aborted blob handle immediately return +** SQLITE_ABORT. ^Calling [sqlite3_blob_bytes()] on an aborted blob handle +** always returns zero. +** +** ^This function sets the database handle error code and message. +*/ +SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64); + +/* +** CAPI3REF: Close A BLOB Handle +** DESTRUCTOR: sqlite3_blob +** +** ^This function closes an open [BLOB handle]. ^(The BLOB handle is closed +** unconditionally. Even if this routine returns an error code, the +** handle is still closed.)^ +** +** ^If the blob handle being closed was opened for read-write access, and if +** the database is in auto-commit mode and there are no other open read-write +** blob handles or active write statements, the current transaction is +** committed. 
^If an error occurs while committing the transaction, an error +** code is returned and the transaction rolled back. +** +** Calling this function with an argument that is not a NULL pointer or an +** open blob handle results in undefined behavior. ^Calling this routine +** with a null pointer (such as would be returned by a failed call to +** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function +** is passed a valid open blob handle, the values returned by the +** sqlite3_errcode() and sqlite3_errmsg() functions are set before returning. +*/ +SQLITE_API int sqlite3_blob_close(sqlite3_blob *); + +/* +** CAPI3REF: Return The Size Of An Open BLOB +** METHOD: sqlite3_blob +** +** ^Returns the size in bytes of the BLOB accessible via the +** successfully opened [BLOB handle] in its only argument. ^The +** incremental blob I/O routines can only read or overwriting existing +** blob content; they cannot change the size of a blob. +** +** This routine only works on a [BLOB handle] which has been created +** by a prior successful call to [sqlite3_blob_open()] and which has not +** been closed by [sqlite3_blob_close()]. Passing any other pointer in +** to this routine results in undefined and probably undesirable behavior. +*/ +SQLITE_API int sqlite3_blob_bytes(sqlite3_blob *); + +/* +** CAPI3REF: Read Data From A BLOB Incrementally +** METHOD: sqlite3_blob +** +** ^(This function is used to read data from an open [BLOB handle] into a +** caller-supplied buffer. N bytes of data are copied into buffer Z +** from the open BLOB, starting at offset iOffset.)^ +** +** ^If offset iOffset is less than N bytes from the end of the BLOB, +** [SQLITE_ERROR] is returned and no data is read. ^If N or iOffset is +** less than zero, [SQLITE_ERROR] is returned and no data is read. +** ^The size of the blob (and hence the maximum value of N+iOffset) +** can be determined using the [sqlite3_blob_bytes()] interface. 
+** +** ^An attempt to read from an expired [BLOB handle] fails with an +** error code of [SQLITE_ABORT]. +** +** ^(On success, sqlite3_blob_read() returns SQLITE_OK. +** Otherwise, an [error code] or an [extended error code] is returned.)^ +** +** This routine only works on a [BLOB handle] which has been created +** by a prior successful call to [sqlite3_blob_open()] and which has not +** been closed by [sqlite3_blob_close()]. Passing any other pointer in +** to this routine results in undefined and probably undesirable behavior. +** +** See also: [sqlite3_blob_write()]. +*/ +SQLITE_API int sqlite3_blob_read(sqlite3_blob *, void *Z, int N, int iOffset); + +/* +** CAPI3REF: Write Data Into A BLOB Incrementally +** METHOD: sqlite3_blob +** +** ^(This function is used to write data into an open [BLOB handle] from a +** caller-supplied buffer. N bytes of data are copied from the buffer Z +** into the open BLOB, starting at offset iOffset.)^ +** +** ^(On success, sqlite3_blob_write() returns SQLITE_OK. +** Otherwise, an [error code] or an [extended error code] is returned.)^ +** ^Unless SQLITE_MISUSE is returned, this function sets the +** [database connection] error code and message accessible via +** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions. +** +** ^If the [BLOB handle] passed as the first argument was not opened for +** writing (the flags parameter to [sqlite3_blob_open()] was zero), +** this function returns [SQLITE_READONLY]. +** +** This function may only modify the contents of the BLOB; it is +** not possible to increase the size of a BLOB using this API. +** ^If offset iOffset is less than N bytes from the end of the BLOB, +** [SQLITE_ERROR] is returned and no data is written. The size of the +** BLOB (and hence the maximum value of N+iOffset) can be determined +** using the [sqlite3_blob_bytes()] interface. ^If N or iOffset are less +** than zero [SQLITE_ERROR] is returned and no data is written. 
+** +** ^An attempt to write to an expired [BLOB handle] fails with an +** error code of [SQLITE_ABORT]. ^Writes to the BLOB that occurred +** before the [BLOB handle] expired are not rolled back by the +** expiration of the handle, though of course those changes might +** have been overwritten by the statement that expired the BLOB handle +** or by other independent statements. +** +** This routine only works on a [BLOB handle] which has been created +** by a prior successful call to [sqlite3_blob_open()] and which has not +** been closed by [sqlite3_blob_close()]. Passing any other pointer in +** to this routine results in undefined and probably undesirable behavior. +** +** See also: [sqlite3_blob_read()]. +*/ +SQLITE_API int sqlite3_blob_write(sqlite3_blob *, const void *z, int n, int iOffset); + +/* +** CAPI3REF: Virtual File System Objects +** +** A virtual filesystem (VFS) is an [sqlite3_vfs] object +** that SQLite uses to interact +** with the underlying operating system. Most SQLite builds come with a +** single default VFS that is appropriate for the host computer. +** New VFSes can be registered and existing VFSes can be unregistered. +** The following interfaces are provided. +** +** ^The sqlite3_vfs_find() interface returns a pointer to a VFS given its name. +** ^Names are case sensitive. +** ^Names are zero-terminated UTF-8 strings. +** ^If there is no match, a NULL pointer is returned. +** ^If zVfsName is NULL then the default VFS is returned. +** +** ^New VFSes are registered with sqlite3_vfs_register(). +** ^Each new VFS becomes the default VFS if the makeDflt flag is set. +** ^The same VFS can be registered multiple times without injury. +** ^To make an existing VFS into the default VFS, register it again +** with the makeDflt flag set. If two different VFSes with the +** same name are registered, the behavior is undefined. If a +** VFS is registered with a name that is NULL or an empty string, +** then the behavior is undefined. 
+** +** ^Unregister a VFS with the sqlite3_vfs_unregister() interface. +** ^(If the default VFS is unregistered, another VFS is chosen as +** the default. The choice for the new VFS is arbitrary.)^ +*/ +SQLITE_API sqlite3_vfs *sqlite3_vfs_find(const char *zVfsName); +SQLITE_API int sqlite3_vfs_register(sqlite3_vfs*, int makeDflt); +SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); + +/* +** CAPI3REF: Mutexes +** +** The SQLite core uses these routines for thread +** synchronization. Though they are intended for internal +** use by SQLite, code that links against SQLite is +** permitted to use any of these routines. +** +** The SQLite source code contains multiple implementations +** of these mutex routines. An appropriate implementation +** is selected automatically at compile-time. The following +** implementations are available in the SQLite core: +** +**
+** <ul>
+** <li>   SQLITE_MUTEX_PTHREADS
+** <li>   SQLITE_MUTEX_W32
+** <li>   SQLITE_MUTEX_NOOP
+** </ul>
+** +** The SQLITE_MUTEX_NOOP implementation is a set of routines +** that does no real locking and is appropriate for use in +** a single-threaded application. The SQLITE_MUTEX_PTHREADS and +** SQLITE_MUTEX_W32 implementations are appropriate for use on Unix +** and Windows. +** +** If SQLite is compiled with the SQLITE_MUTEX_APPDEF preprocessor +** macro defined (with "-DSQLITE_MUTEX_APPDEF=1"), then no mutex +** implementation is included with the library. In this case the +** application must supply a custom mutex implementation using the +** [SQLITE_CONFIG_MUTEX] option of the sqlite3_config() function +** before calling sqlite3_initialize() or any other public sqlite3_ +** function that calls sqlite3_initialize(). +** +** ^The sqlite3_mutex_alloc() routine allocates a new +** mutex and returns a pointer to it. ^The sqlite3_mutex_alloc() +** routine returns NULL if it is unable to allocate the requested +** mutex. The argument to sqlite3_mutex_alloc() must one of these +** integer constants: +** +**
+** <ul>
+** <li>  SQLITE_MUTEX_FAST
+** <li>  SQLITE_MUTEX_RECURSIVE
+** <li>  SQLITE_MUTEX_STATIC_MAIN
+** <li>  SQLITE_MUTEX_STATIC_MEM
+** <li>  SQLITE_MUTEX_STATIC_OPEN
+** <li>  SQLITE_MUTEX_STATIC_PRNG
+** <li>  SQLITE_MUTEX_STATIC_LRU
+** <li>  SQLITE_MUTEX_STATIC_PMEM
+** <li>  SQLITE_MUTEX_STATIC_APP1
+** <li>  SQLITE_MUTEX_STATIC_APP2
+** <li>  SQLITE_MUTEX_STATIC_APP3
+** <li>  SQLITE_MUTEX_STATIC_VFS1
+** <li>  SQLITE_MUTEX_STATIC_VFS2
+** <li>  SQLITE_MUTEX_STATIC_VFS3
+** </ul>
+** +** ^The first two constants (SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) +** cause sqlite3_mutex_alloc() to create +** a new mutex. ^The new mutex is recursive when SQLITE_MUTEX_RECURSIVE +** is used but not necessarily so when SQLITE_MUTEX_FAST is used. +** The mutex implementation does not need to make a distinction +** between SQLITE_MUTEX_RECURSIVE and SQLITE_MUTEX_FAST if it does +** not want to. SQLite will only request a recursive mutex in +** cases where it really needs one. If a faster non-recursive mutex +** implementation is available on the host platform, the mutex subsystem +** might return such a mutex in response to SQLITE_MUTEX_FAST. +** +** ^The other allowed parameters to sqlite3_mutex_alloc() (anything other +** than SQLITE_MUTEX_FAST and SQLITE_MUTEX_RECURSIVE) each return +** a pointer to a static preexisting mutex. ^Nine static mutexes are +** used by the current version of SQLite. Future versions of SQLite +** may add additional static mutexes. Static mutexes are for internal +** use by SQLite only. Applications that use SQLite mutexes should +** use only the dynamic mutexes returned by SQLITE_MUTEX_FAST or +** SQLITE_MUTEX_RECURSIVE. +** +** ^Note that if one of the dynamic mutex parameters (SQLITE_MUTEX_FAST +** or SQLITE_MUTEX_RECURSIVE) is used then sqlite3_mutex_alloc() +** returns a different mutex on every call. ^For the static +** mutex types, the same mutex is returned on every call that has +** the same type number. +** +** ^The sqlite3_mutex_free() routine deallocates a previously +** allocated dynamic mutex. Attempting to deallocate a static +** mutex results in undefined behavior. +** +** ^The sqlite3_mutex_enter() and sqlite3_mutex_try() routines attempt +** to enter a mutex. ^If another thread is already within the mutex, +** sqlite3_mutex_enter() will block and sqlite3_mutex_try() will return +** SQLITE_BUSY. ^The sqlite3_mutex_try() interface returns [SQLITE_OK] +** upon successful entry. 
^(Mutexes created using +** SQLITE_MUTEX_RECURSIVE can be entered multiple times by the same thread. +** In such cases, the +** mutex must be exited an equal number of times before another thread +** can enter.)^ If the same thread tries to enter any mutex other +** than an SQLITE_MUTEX_RECURSIVE more than once, the behavior is undefined. +** +** ^(Some systems (for example, Windows 95) do not support the operation +** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() +** will always return SQLITE_BUSY. In most cases the SQLite core only uses +** sqlite3_mutex_try() as an optimization, so this is acceptable +** behavior. The exceptions are unix builds that set the +** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working +** sqlite3_mutex_try() is required.)^ +** +** ^The sqlite3_mutex_leave() routine exits a mutex that was +** previously entered by the same thread. The behavior +** is undefined if the mutex is not currently entered by the +** calling thread or is not currently allocated. +** +** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), +** sqlite3_mutex_leave(), or sqlite3_mutex_free() is a NULL pointer, +** then any of the four routines behaves as a no-op. +** +** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. +*/ +SQLITE_API sqlite3_mutex *sqlite3_mutex_alloc(int); +SQLITE_API void sqlite3_mutex_free(sqlite3_mutex*); +SQLITE_API void sqlite3_mutex_enter(sqlite3_mutex*); +SQLITE_API int sqlite3_mutex_try(sqlite3_mutex*); +SQLITE_API void sqlite3_mutex_leave(sqlite3_mutex*); + +/* +** CAPI3REF: Mutex Methods Object +** +** An instance of this structure defines the low-level routines +** used to allocate and use mutexes. 
+** +** Usually, the default mutex implementations provided by SQLite are +** sufficient, however the application has the option of substituting a custom +** implementation for specialized deployments or systems for which SQLite +** does not provide a suitable implementation. In this case, the application +** creates and populates an instance of this structure to pass +** to sqlite3_config() along with the [SQLITE_CONFIG_MUTEX] option. +** Additionally, an instance of this structure can be used as an +** output variable when querying the system for the current mutex +** implementation, using the [SQLITE_CONFIG_GETMUTEX] option. +** +** ^The xMutexInit method defined by this structure is invoked as +** part of system initialization by the sqlite3_initialize() function. +** ^The xMutexInit routine is called by SQLite exactly once for each +** effective call to [sqlite3_initialize()]. +** +** ^The xMutexEnd method defined by this structure is invoked as +** part of system shutdown by the sqlite3_shutdown() function. The +** implementation of this method is expected to release all outstanding +** resources obtained by the mutex methods implementation, especially +** those obtained by the xMutexInit method. ^The xMutexEnd() +** interface is invoked exactly once for each call to [sqlite3_shutdown()]. +** +** ^(The remaining seven methods defined by this structure (xMutexAlloc, +** xMutexFree, xMutexEnter, xMutexTry, xMutexLeave, xMutexHeld and +** xMutexNotheld) implement the following interfaces (respectively): +** +**
+** <ul>
+**   <li>  [sqlite3_mutex_alloc()] </li>
+**   <li>  [sqlite3_mutex_free()] </li>
+**   <li>  [sqlite3_mutex_enter()] </li>
+**   <li>  [sqlite3_mutex_try()] </li>
+**   <li>  [sqlite3_mutex_leave()] </li>
+**   <li>  [sqlite3_mutex_held()] </li>
+**   <li>  [sqlite3_mutex_notheld()] </li>
+** </ul>
)^ +** +** The only difference is that the public sqlite3_XXX functions enumerated +** above silently ignore any invocations that pass a NULL pointer instead +** of a valid mutex handle. The implementations of the methods defined +** by this structure are not required to handle this case. The results +** of passing a NULL pointer instead of a valid mutex handle are undefined +** (i.e. it is acceptable to provide an implementation that segfaults if +** it is passed a NULL pointer). +** +** The xMutexInit() method must be threadsafe. It must be harmless to +** invoke xMutexInit() multiple times within the same process and without +** intervening calls to xMutexEnd(). Second and subsequent calls to +** xMutexInit() must be no-ops. +** +** xMutexInit() must not use SQLite memory allocation ([sqlite3_malloc()] +** and its associates). Similarly, xMutexAlloc() must not use SQLite memory +** allocation for a static mutex. ^However xMutexAlloc() may use SQLite +** memory allocation for a fast or recursive mutex. +** +** ^SQLite will invoke the xMutexEnd() method when [sqlite3_shutdown()] is +** called, but only if the prior call to xMutexInit returned SQLITE_OK. +** If xMutexInit fails in any way, it is expected to clean up after itself +** prior to returning. +*/ +typedef struct sqlite3_mutex_methods sqlite3_mutex_methods; +struct sqlite3_mutex_methods { + int (*xMutexInit)(void); + int (*xMutexEnd)(void); + sqlite3_mutex *(*xMutexAlloc)(int); + void (*xMutexFree)(sqlite3_mutex *); + void (*xMutexEnter)(sqlite3_mutex *); + int (*xMutexTry)(sqlite3_mutex *); + void (*xMutexLeave)(sqlite3_mutex *); + int (*xMutexHeld)(sqlite3_mutex *); + int (*xMutexNotheld)(sqlite3_mutex *); +}; + +/* +** CAPI3REF: Mutex Verification Routines +** +** The sqlite3_mutex_held() and sqlite3_mutex_notheld() routines +** are intended for use inside assert() statements. 
The SQLite core +** never uses these routines except inside an assert() and applications +** are advised to follow the lead of the core. The SQLite core only +** provides implementations for these routines when it is compiled +** with the SQLITE_DEBUG flag. External mutex implementations +** are only required to provide these routines if SQLITE_DEBUG is +** defined and if NDEBUG is not defined. +** +** These routines should return true if the mutex in their argument +** is held or not held, respectively, by the calling thread. +** +** The implementation is not required to provide versions of these +** routines that actually work. If the implementation does not provide working +** versions of these routines, it should at least provide stubs that always +** return true so that one does not get spurious assertion failures. +** +** If the argument to sqlite3_mutex_held() is a NULL pointer then +** the routine should return 1. This seems counter-intuitive since +** clearly the mutex cannot be held if it does not exist. But +** the reason the mutex does not exist is because the build is not +** using mutexes. And we do not want the assert() containing the +** call to sqlite3_mutex_held() to fail, so a non-zero return is +** the appropriate thing to do. The sqlite3_mutex_notheld() +** interface should also return 1 when given a NULL pointer. +*/ +#ifndef NDEBUG +SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*); +SQLITE_API int sqlite3_mutex_notheld(sqlite3_mutex*); +#endif + +/* +** CAPI3REF: Mutex Types +** +** The [sqlite3_mutex_alloc()] interface takes a single argument +** which is one of these integer constants. +** +** The set of static mutexes may change from one SQLite release to the +** next. Applications that override the built-in mutex logic must be +** prepared to accommodate additional static mutexes. 
+*/ +#define SQLITE_MUTEX_FAST 0 +#define SQLITE_MUTEX_RECURSIVE 1 +#define SQLITE_MUTEX_STATIC_MAIN 2 +#define SQLITE_MUTEX_STATIC_MEM 3 /* sqlite3_malloc() */ +#define SQLITE_MUTEX_STATIC_MEM2 4 /* NOT USED */ +#define SQLITE_MUTEX_STATIC_OPEN 4 /* sqlite3BtreeOpen() */ +#define SQLITE_MUTEX_STATIC_PRNG 5 /* sqlite3_randomness() */ +#define SQLITE_MUTEX_STATIC_LRU 6 /* lru page list */ +#define SQLITE_MUTEX_STATIC_LRU2 7 /* NOT USED */ +#define SQLITE_MUTEX_STATIC_PMEM 7 /* sqlite3PageMalloc() */ +#define SQLITE_MUTEX_STATIC_APP1 8 /* For use by application */ +#define SQLITE_MUTEX_STATIC_APP2 9 /* For use by application */ +#define SQLITE_MUTEX_STATIC_APP3 10 /* For use by application */ +#define SQLITE_MUTEX_STATIC_VFS1 11 /* For use by built-in VFS */ +#define SQLITE_MUTEX_STATIC_VFS2 12 /* For use by extension VFS */ +#define SQLITE_MUTEX_STATIC_VFS3 13 /* For use by application VFS */ + +/* Legacy compatibility: */ +#define SQLITE_MUTEX_STATIC_MASTER 2 + + +/* +** CAPI3REF: Retrieve the mutex for a database connection +** METHOD: sqlite3 +** +** ^This interface returns a pointer the [sqlite3_mutex] object that +** serializes access to the [database connection] given in the argument +** when the [threading mode] is Serialized. +** ^If the [threading mode] is Single-thread or Multi-thread then this +** routine returns a NULL pointer. +*/ +SQLITE_API sqlite3_mutex *sqlite3_db_mutex(sqlite3*); + +/* +** CAPI3REF: Low-Level Control Of Database Files +** METHOD: sqlite3 +** KEYWORDS: {file control} +** +** ^The [sqlite3_file_control()] interface makes a direct call to the +** xFileControl method for the [sqlite3_io_methods] object associated +** with a particular database identified by the second argument. ^The +** name of the database is "main" for the main database or "temp" for the +** TEMP database, or the name that appears after the AS keyword for +** databases that are added using the [ATTACH] SQL command. 
+** ^A NULL pointer can be used in place of "main" to refer to the +** main database file. +** ^The third and fourth parameters to this routine +** are passed directly through to the second and third parameters of +** the xFileControl method. ^The return value of the xFileControl +** method becomes the return value of this routine. +** +** A few opcodes for [sqlite3_file_control()] are handled directly +** by the SQLite core and never invoke the +** sqlite3_io_methods.xFileControl method. +** ^The [SQLITE_FCNTL_FILE_POINTER] value for the op parameter causes +** a pointer to the underlying [sqlite3_file] object to be written into +** the space pointed to by the 4th parameter. The +** [SQLITE_FCNTL_JOURNAL_POINTER] works similarly except that it returns +** the [sqlite3_file] object associated with the journal file instead of +** the main database. The [SQLITE_FCNTL_VFS_POINTER] opcode returns +** a pointer to the underlying [sqlite3_vfs] object for the file. +** The [SQLITE_FCNTL_DATA_VERSION] returns the data version counter +** from the pager. +** +** ^If the second parameter (zDbName) does not match the name of any +** open database file, then SQLITE_ERROR is returned. ^This error +** code is not remembered and will not be recalled by [sqlite3_errcode()] +** or [sqlite3_errmsg()]. The underlying xFileControl method might +** also return SQLITE_ERROR. There is no way to distinguish between +** an incorrect zDbName and an SQLITE_ERROR return from the underlying +** xFileControl method. +** +** See also: [file control opcodes] +*/ +SQLITE_API int sqlite3_file_control(sqlite3*, const char *zDbName, int op, void*); + +/* +** CAPI3REF: Testing Interface +** +** ^The sqlite3_test_control() interface is used to read out internal +** state of SQLite and to inject faults into SQLite for testing +** purposes. ^The first parameter is an operation code that determines +** the number, meaning, and operation of all subsequent parameters. 
+** +** This interface is not for use by applications. It exists solely +** for verifying the correct operation of the SQLite library. Depending +** on how the SQLite library is compiled, this interface might not exist. +** +** The details of the operation codes, their meanings, the parameters +** they take, and what they do are all subject to change without notice. +** Unlike most of the SQLite API, this function is not guaranteed to +** operate consistently from one release to the next. +*/ +SQLITE_API int sqlite3_test_control(int op, ...); + +/* +** CAPI3REF: Testing Interface Operation Codes +** +** These constants are the valid operation code parameters used +** as the first argument to [sqlite3_test_control()]. +** +** These parameters and their meanings are subject to change +** without notice. These values are for testing purposes only. +** Applications should not use any of these parameters or the +** [sqlite3_test_control()] interface. +*/ +#define SQLITE_TESTCTRL_FIRST 5 +#define SQLITE_TESTCTRL_PRNG_SAVE 5 +#define SQLITE_TESTCTRL_PRNG_RESTORE 6 +#define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */ +#define SQLITE_TESTCTRL_FK_NO_ACTION 7 +#define SQLITE_TESTCTRL_BITVEC_TEST 8 +#define SQLITE_TESTCTRL_FAULT_INSTALL 9 +#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10 +#define SQLITE_TESTCTRL_PENDING_BYTE 11 +#define SQLITE_TESTCTRL_ASSERT 12 +#define SQLITE_TESTCTRL_ALWAYS 13 +#define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */ +#define SQLITE_TESTCTRL_JSON_SELFCHECK 14 +#define SQLITE_TESTCTRL_OPTIMIZATIONS 15 +#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ +#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ +#define SQLITE_TESTCTRL_INTERNAL_FUNCTIONS 17 +#define SQLITE_TESTCTRL_LOCALTIME_FAULT 18 +#define SQLITE_TESTCTRL_EXPLAIN_STMT 19 /* NOT USED */ +#define SQLITE_TESTCTRL_ONCE_RESET_THRESHOLD 19 +#define SQLITE_TESTCTRL_NEVER_CORRUPT 20 +#define SQLITE_TESTCTRL_VDBE_COVERAGE 21 +#define SQLITE_TESTCTRL_BYTEORDER 22 +#define 
SQLITE_TESTCTRL_ISINIT 23 +#define SQLITE_TESTCTRL_SORTER_MMAP 24 +#define SQLITE_TESTCTRL_IMPOSTER 25 +#define SQLITE_TESTCTRL_PARSER_COVERAGE 26 +#define SQLITE_TESTCTRL_RESULT_INTREAL 27 +#define SQLITE_TESTCTRL_PRNG_SEED 28 +#define SQLITE_TESTCTRL_EXTRA_SCHEMA_CHECKS 29 +#define SQLITE_TESTCTRL_SEEK_COUNT 30 +#define SQLITE_TESTCTRL_TRACEFLAGS 31 +#define SQLITE_TESTCTRL_TUNE 32 +#define SQLITE_TESTCTRL_LOGEST 33 +#define SQLITE_TESTCTRL_USELONGDOUBLE 34 +#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */ + +/* +** CAPI3REF: SQL Keyword Checking +** +** These routines provide access to the set of SQL language keywords +** recognized by SQLite. Applications can uses these routines to determine +** whether or not a specific identifier needs to be escaped (for example, +** by enclosing in double-quotes) so as not to confuse the parser. +** +** The sqlite3_keyword_count() interface returns the number of distinct +** keywords understood by SQLite. +** +** The sqlite3_keyword_name(N,Z,L) interface finds the N-th keyword and +** makes *Z point to that keyword expressed as UTF8 and writes the number +** of bytes in the keyword into *L. The string that *Z points to is not +** zero-terminated. The sqlite3_keyword_name(N,Z,L) routine returns +** SQLITE_OK if N is within bounds and SQLITE_ERROR if not. If either Z +** or L are NULL or invalid pointers then calls to +** sqlite3_keyword_name(N,Z,L) result in undefined behavior. +** +** The sqlite3_keyword_check(Z,L) interface checks to see whether or not +** the L-byte UTF8 identifier that Z points to is a keyword, returning non-zero +** if it is and zero if not. +** +** The parser used by SQLite is forgiving. It is often possible to use +** a keyword as an identifier as long as such use does not result in a +** parsing ambiguity. 
For example, the statement +** "CREATE TABLE BEGIN(REPLACE,PRAGMA,END);" is accepted by SQLite, and +** creates a new table named "BEGIN" with three columns named +** "REPLACE", "PRAGMA", and "END". Nevertheless, best practice is to avoid +** using keywords as identifiers. Common techniques used to avoid keyword +** name collisions include: +**
+** <ul>
+** <li> Put all identifier names inside double-quotes.  This is the official
+**      SQL way to escape identifier names.
+** <li> Put identifier names inside [...].  This is not standard SQL,
+**      but it is what SQL Server does and so lots of programmers use this
+**      technique.
+** <li> Begin every identifier with the letter "Z" as no SQL keywords start
+**      with "Z".
+** <li> Include a digit somewhere in every identifier name.
+** </ul>
+** +** Note that the number of keywords understood by SQLite can depend on +** compile-time options. For example, "VACUUM" is not a keyword if +** SQLite is compiled with the [-DSQLITE_OMIT_VACUUM] option. Also, +** new keywords may be added to future releases of SQLite. +*/ +SQLITE_API int sqlite3_keyword_count(void); +SQLITE_API int sqlite3_keyword_name(int,const char**,int*); +SQLITE_API int sqlite3_keyword_check(const char*,int); + +/* +** CAPI3REF: Dynamic String Object +** KEYWORDS: {dynamic string} +** +** An instance of the sqlite3_str object contains a dynamically-sized +** string under construction. +** +** The lifecycle of an sqlite3_str object is as follows: +**
+** <ol>
+** <li> ^The sqlite3_str object is created using [sqlite3_str_new()].
+** <li> ^Text is appended to the sqlite3_str object using various
+**      methods, such as [sqlite3_str_appendf()].
+** <li> ^The sqlite3_str object is destroyed and the string it created
+**      is returned using the [sqlite3_str_finish()] interface.
+** </ol>
+*/ +typedef struct sqlite3_str sqlite3_str; + +/* +** CAPI3REF: Create A New Dynamic String Object +** CONSTRUCTOR: sqlite3_str +** +** ^The [sqlite3_str_new(D)] interface allocates and initializes +** a new [sqlite3_str] object. To avoid memory leaks, the object returned by +** [sqlite3_str_new()] must be freed by a subsequent call to +** [sqlite3_str_finish(X)]. +** +** ^The [sqlite3_str_new(D)] interface always returns a pointer to a +** valid [sqlite3_str] object, though in the event of an out-of-memory +** error the returned object might be a special singleton that will +** silently reject new text, always return SQLITE_NOMEM from +** [sqlite3_str_errcode()], always return 0 for +** [sqlite3_str_length()], and always return NULL from +** [sqlite3_str_finish(X)]. It is always safe to use the value +** returned by [sqlite3_str_new(D)] as the sqlite3_str parameter +** to any of the other [sqlite3_str] methods. +** +** The D parameter to [sqlite3_str_new(D)] may be NULL. If the +** D parameter in [sqlite3_str_new(D)] is not NULL, then the maximum +** length of the string contained in the [sqlite3_str] object will be +** the value set for [sqlite3_limit](D,[SQLITE_LIMIT_LENGTH]) instead +** of [SQLITE_MAX_LENGTH]. +*/ +SQLITE_API sqlite3_str *sqlite3_str_new(sqlite3*); + +/* +** CAPI3REF: Finalize A Dynamic String +** DESTRUCTOR: sqlite3_str +** +** ^The [sqlite3_str_finish(X)] interface destroys the sqlite3_str object X +** and returns a pointer to a memory buffer obtained from [sqlite3_malloc64()] +** that contains the constructed string. The calling application should +** pass the returned value to [sqlite3_free()] to avoid a memory leak. +** ^The [sqlite3_str_finish(X)] interface may return a NULL pointer if any +** errors were encountered during construction of the string. ^The +** [sqlite3_str_finish(X)] interface will also return a NULL pointer if the +** string in [sqlite3_str] object X is zero bytes long. 
+*/ +SQLITE_API char *sqlite3_str_finish(sqlite3_str*); + +/* +** CAPI3REF: Add Content To A Dynamic String +** METHOD: sqlite3_str +** +** These interfaces add content to an sqlite3_str object previously obtained +** from [sqlite3_str_new()]. +** +** ^The [sqlite3_str_appendf(X,F,...)] and +** [sqlite3_str_vappendf(X,F,V)] interfaces uses the [built-in printf] +** functionality of SQLite to append formatted text onto the end of +** [sqlite3_str] object X. +** +** ^The [sqlite3_str_append(X,S,N)] method appends exactly N bytes from string S +** onto the end of the [sqlite3_str] object X. N must be non-negative. +** S must contain at least N non-zero bytes of content. To append a +** zero-terminated string in its entirety, use the [sqlite3_str_appendall()] +** method instead. +** +** ^The [sqlite3_str_appendall(X,S)] method appends the complete content of +** zero-terminated string S onto the end of [sqlite3_str] object X. +** +** ^The [sqlite3_str_appendchar(X,N,C)] method appends N copies of the +** single-byte character C onto the end of [sqlite3_str] object X. +** ^This method can be used, for example, to add whitespace indentation. +** +** ^The [sqlite3_str_reset(X)] method resets the string under construction +** inside [sqlite3_str] object X back to zero bytes in length. +** +** These methods do not return a result code. ^If an error occurs, that fact +** is recorded in the [sqlite3_str] object and can be recovered by a +** subsequent call to [sqlite3_str_errcode(X)]. 
+*/ +SQLITE_API void sqlite3_str_appendf(sqlite3_str*, const char *zFormat, ...); +SQLITE_API void sqlite3_str_vappendf(sqlite3_str*, const char *zFormat, va_list); +SQLITE_API void sqlite3_str_append(sqlite3_str*, const char *zIn, int N); +SQLITE_API void sqlite3_str_appendall(sqlite3_str*, const char *zIn); +SQLITE_API void sqlite3_str_appendchar(sqlite3_str*, int N, char C); +SQLITE_API void sqlite3_str_reset(sqlite3_str*); + +/* +** CAPI3REF: Status Of A Dynamic String +** METHOD: sqlite3_str +** +** These interfaces return the current status of an [sqlite3_str] object. +** +** ^If any prior errors have occurred while constructing the dynamic string +** in sqlite3_str X, then the [sqlite3_str_errcode(X)] method will return +** an appropriate error code. ^The [sqlite3_str_errcode(X)] method returns +** [SQLITE_NOMEM] following any out-of-memory error, or +** [SQLITE_TOOBIG] if the size of the dynamic string exceeds +** [SQLITE_MAX_LENGTH], or [SQLITE_OK] if there have been no errors. +** +** ^The [sqlite3_str_length(X)] method returns the current length, in bytes, +** of the dynamic string under construction in [sqlite3_str] object X. +** ^The length returned by [sqlite3_str_length(X)] does not include the +** zero-termination byte. +** +** ^The [sqlite3_str_value(X)] method returns a pointer to the current +** content of the dynamic string under construction in X. The value +** returned by [sqlite3_str_value(X)] is managed by the sqlite3_str object X +** and might be freed or altered by any subsequent method on the same +** [sqlite3_str] object. Applications must not used the pointer returned +** [sqlite3_str_value(X)] after any subsequent method call on the same +** object. ^Applications may change the content of the string returned +** by [sqlite3_str_value(X)] as long as they do not write into any bytes +** outside the range of 0 to [sqlite3_str_length(X)] and do not read or +** write any byte after any subsequent sqlite3_str method call. 
+*/ +SQLITE_API int sqlite3_str_errcode(sqlite3_str*); +SQLITE_API int sqlite3_str_length(sqlite3_str*); +SQLITE_API char *sqlite3_str_value(sqlite3_str*); + +/* +** CAPI3REF: SQLite Runtime Status +** +** ^These interfaces are used to retrieve runtime status information +** about the performance of SQLite, and optionally to reset various +** highwater marks. ^The first argument is an integer code for +** the specific parameter to measure. ^(Recognized integer codes +** are of the form [status parameters | SQLITE_STATUS_...].)^ +** ^The current value of the parameter is returned into *pCurrent. +** ^The highest recorded value is returned in *pHighwater. ^If the +** resetFlag is true, then the highest record value is reset after +** *pHighwater is written. ^(Some parameters do not record the highest +** value. For those parameters +** nothing is written into *pHighwater and the resetFlag is ignored.)^ +** ^(Other parameters record only the highwater mark and not the current +** value. For these latter parameters nothing is written into *pCurrent.)^ +** +** ^The sqlite3_status() and sqlite3_status64() routines return +** SQLITE_OK on success and a non-zero [error code] on failure. +** +** If either the current value or the highwater mark is too large to +** be represented by a 32-bit integer, then the values returned by +** sqlite3_status() are undefined. +** +** See also: [sqlite3_db_status()] +*/ +SQLITE_API int sqlite3_status(int op, int *pCurrent, int *pHighwater, int resetFlag); +SQLITE_API int sqlite3_status64( + int op, + sqlite3_int64 *pCurrent, + sqlite3_int64 *pHighwater, + int resetFlag +); + + +/* +** CAPI3REF: Status Parameters +** KEYWORDS: {status parameters} +** +** These integer constants designate various run-time status parameters +** that can be returned by [sqlite3_status()]. +** +**
+** [[SQLITE_STATUS_MEMORY_USED]] ^(
SQLITE_STATUS_MEMORY_USED
+**
This parameter is the current amount of memory checked out +** using [sqlite3_malloc()], either directly or indirectly. The +** figure includes calls made to [sqlite3_malloc()] by the application +** and internal memory usage by the SQLite library. Auxiliary page-cache +** memory controlled by [SQLITE_CONFIG_PAGECACHE] is not included in +** this parameter. The amount returned is the sum of the allocation +** sizes as reported by the xSize method in [sqlite3_mem_methods].
)^ +** +** [[SQLITE_STATUS_MALLOC_SIZE]] ^(
SQLITE_STATUS_MALLOC_SIZE
+**
This parameter records the largest memory allocation request +** handed to [sqlite3_malloc()] or [sqlite3_realloc()] (or their +** internal equivalents). Only the value returned in the +** *pHighwater parameter to [sqlite3_status()] is of interest. +** The value written into the *pCurrent parameter is undefined.
)^ +** +** [[SQLITE_STATUS_MALLOC_COUNT]] ^(
SQLITE_STATUS_MALLOC_COUNT
+**
This parameter records the number of separate memory allocations +** currently checked out.
)^ +** +** [[SQLITE_STATUS_PAGECACHE_USED]] ^(
SQLITE_STATUS_PAGECACHE_USED
+**
This parameter returns the number of pages used out of the +** [pagecache memory allocator] that was configured using +** [SQLITE_CONFIG_PAGECACHE]. The +** value returned is in pages, not in bytes.
)^ +** +** [[SQLITE_STATUS_PAGECACHE_OVERFLOW]] +** ^(
SQLITE_STATUS_PAGECACHE_OVERFLOW
+**
This parameter returns the number of bytes of page cache
+** allocation which could not be satisfied by the [SQLITE_CONFIG_PAGECACHE]
+** buffer and were forced to overflow to [sqlite3_malloc()]. The
+** returned value includes allocations that overflowed because they
+** were too large (they were larger than the "sz" parameter to
+** [SQLITE_CONFIG_PAGECACHE]) and allocations that overflowed because
+** no space was left in the page cache.
)^ +** +** [[SQLITE_STATUS_PAGECACHE_SIZE]] ^(
SQLITE_STATUS_PAGECACHE_SIZE
+**
This parameter records the largest memory allocation request +** handed to the [pagecache memory allocator]. Only the value returned in the +** *pHighwater parameter to [sqlite3_status()] is of interest. +** The value written into the *pCurrent parameter is undefined.
)^ +** +** [[SQLITE_STATUS_SCRATCH_USED]]
SQLITE_STATUS_SCRATCH_USED
+**
No longer used.
+** +** [[SQLITE_STATUS_SCRATCH_OVERFLOW]] ^(
SQLITE_STATUS_SCRATCH_OVERFLOW
+**
No longer used.
+** +** [[SQLITE_STATUS_SCRATCH_SIZE]]
SQLITE_STATUS_SCRATCH_SIZE
+**
No longer used.
+** +** [[SQLITE_STATUS_PARSER_STACK]] ^(
SQLITE_STATUS_PARSER_STACK
+**
The *pHighwater parameter records the deepest parser stack. +** The *pCurrent value is undefined. The *pHighwater value is only +** meaningful if SQLite is compiled with [YYTRACKMAXSTACKDEPTH].
)^ +**
+** +** New status parameters may be added from time to time. +*/ +#define SQLITE_STATUS_MEMORY_USED 0 +#define SQLITE_STATUS_PAGECACHE_USED 1 +#define SQLITE_STATUS_PAGECACHE_OVERFLOW 2 +#define SQLITE_STATUS_SCRATCH_USED 3 /* NOT USED */ +#define SQLITE_STATUS_SCRATCH_OVERFLOW 4 /* NOT USED */ +#define SQLITE_STATUS_MALLOC_SIZE 5 +#define SQLITE_STATUS_PARSER_STACK 6 +#define SQLITE_STATUS_PAGECACHE_SIZE 7 +#define SQLITE_STATUS_SCRATCH_SIZE 8 /* NOT USED */ +#define SQLITE_STATUS_MALLOC_COUNT 9 + +/* +** CAPI3REF: Database Connection Status +** METHOD: sqlite3 +** +** ^This interface is used to retrieve runtime status information +** about a single [database connection]. ^The first argument is the +** database connection object to be interrogated. ^The second argument +** is an integer constant, taken from the set of +** [SQLITE_DBSTATUS options], that +** determines the parameter to interrogate. The set of +** [SQLITE_DBSTATUS options] is likely +** to grow in future releases of SQLite. +** +** ^The current value of the requested parameter is written into *pCur +** and the highest instantaneous value is written into *pHiwtr. ^If +** the resetFlg is true, then the highest instantaneous value is +** reset back down to the current value. +** +** ^The sqlite3_db_status() routine returns SQLITE_OK on success and a +** non-zero [error code] on failure. +** +** See also: [sqlite3_status()] and [sqlite3_stmt_status()]. +*/ +SQLITE_API int sqlite3_db_status(sqlite3*, int op, int *pCur, int *pHiwtr, int resetFlg); + +/* +** CAPI3REF: Status Parameters for database connections +** KEYWORDS: {SQLITE_DBSTATUS options} +** +** These constants are the available integer "verbs" that can be passed as +** the second argument to the [sqlite3_db_status()] interface. +** +** New verbs may be added in future releases of SQLite. Existing verbs +** might be discontinued. Applications should check the return code from +** [sqlite3_db_status()] to make sure that the call worked. 
+** The [sqlite3_db_status()] interface will return a non-zero error code +** if a discontinued or unsupported verb is invoked. +** +**
+** [[SQLITE_DBSTATUS_LOOKASIDE_USED]] ^(
SQLITE_DBSTATUS_LOOKASIDE_USED
+**
This parameter returns the number of lookaside memory slots currently +** checked out.
)^ +** +** [[SQLITE_DBSTATUS_LOOKASIDE_HIT]] ^(
SQLITE_DBSTATUS_LOOKASIDE_HIT
+**
This parameter returns the number of malloc attempts that were +** satisfied using lookaside memory. Only the high-water value is meaningful; +** the current value is always zero.)^ +** +** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE]] +** ^(
SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE
+**
This parameter returns the number of malloc attempts that might have
+** been satisfied using lookaside memory but failed due to the amount of
+** memory requested being larger than the lookaside slot size.
+** Only the high-water value is meaningful;
+** the current value is always zero.)^
+**
+** [[SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL]]
+** ^(
SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL
+**
This parameter returns the number of malloc attempts that might have
+** been satisfied using lookaside memory but failed due to all lookaside
+** memory already being in use.
+** Only the high-water value is meaningful;
+** the current value is always zero.)^
+**
+** [[SQLITE_DBSTATUS_CACHE_USED]] ^(
SQLITE_DBSTATUS_CACHE_USED
+**
This parameter returns the approximate number of bytes of heap +** memory used by all pager caches associated with the database connection.)^ +** ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_USED is always 0. +** +** [[SQLITE_DBSTATUS_CACHE_USED_SHARED]] +** ^(
SQLITE_DBSTATUS_CACHE_USED_SHARED
+**
This parameter is similar to DBSTATUS_CACHE_USED, except that if a +** pager cache is shared between two or more connections the bytes of heap +** memory used by that pager cache is divided evenly between the attached +** connections.)^ In other words, if none of the pager caches associated +** with the database connection are shared, this request returns the same +** value as DBSTATUS_CACHE_USED. Or, if one or more or the pager caches are +** shared, the value returned by this call will be smaller than that returned +** by DBSTATUS_CACHE_USED. ^The highwater mark associated with +** SQLITE_DBSTATUS_CACHE_USED_SHARED is always 0. +** +** [[SQLITE_DBSTATUS_SCHEMA_USED]] ^(
SQLITE_DBSTATUS_SCHEMA_USED
+**
This parameter returns the approximate number of bytes of heap +** memory used to store the schema for all databases associated +** with the connection - main, temp, and any [ATTACH]-ed databases.)^ +** ^The full amount of memory used by the schemas is reported, even if the +** schema memory is shared with other database connections due to +** [shared cache mode] being enabled. +** ^The highwater mark associated with SQLITE_DBSTATUS_SCHEMA_USED is always 0. +** +** [[SQLITE_DBSTATUS_STMT_USED]] ^(
SQLITE_DBSTATUS_STMT_USED
+**
This parameter returns the approximate number of bytes of heap +** and lookaside memory used by all prepared statements associated with +** the database connection.)^ +** ^The highwater mark associated with SQLITE_DBSTATUS_STMT_USED is always 0. +**
+** +** [[SQLITE_DBSTATUS_CACHE_HIT]] ^(
SQLITE_DBSTATUS_CACHE_HIT
+**
This parameter returns the number of pager cache hits that have +** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_HIT +** is always 0. +**
+** +** [[SQLITE_DBSTATUS_CACHE_MISS]] ^(
SQLITE_DBSTATUS_CACHE_MISS
+**
This parameter returns the number of pager cache misses that have +** occurred.)^ ^The highwater mark associated with SQLITE_DBSTATUS_CACHE_MISS +** is always 0. +**
+** +** [[SQLITE_DBSTATUS_CACHE_WRITE]] ^(
SQLITE_DBSTATUS_CACHE_WRITE
+**
This parameter returns the number of dirty cache entries that have +** been written to disk. Specifically, the number of pages written to the +** wal file in wal mode databases, or the number of pages written to the +** database file in rollback mode databases. Any pages written as part of +** transaction rollback or database recovery operations are not included. +** If an IO or other error occurs while writing a page to disk, the effect +** on subsequent SQLITE_DBSTATUS_CACHE_WRITE requests is undefined.)^ ^The +** highwater mark associated with SQLITE_DBSTATUS_CACHE_WRITE is always 0. +**
+** +** [[SQLITE_DBSTATUS_CACHE_SPILL]] ^(
SQLITE_DBSTATUS_CACHE_SPILL
+**
This parameter returns the number of dirty cache entries that have
+** been written to disk in the middle of a transaction due to the page
+** cache overflowing. Transactions are more efficient if they are written
+** to disk all at once. When pages spill mid-transaction, that introduces
+** additional overhead. This parameter can be used to help identify
+** inefficiencies that can be resolved by increasing the cache size.
+**
+** +** [[SQLITE_DBSTATUS_DEFERRED_FKS]] ^(
SQLITE_DBSTATUS_DEFERRED_FKS
+**
This parameter returns zero for the current value if and only if +** all foreign key constraints (deferred or immediate) have been +** resolved.)^ ^The highwater mark is always 0. +**
+**
+*/ +#define SQLITE_DBSTATUS_LOOKASIDE_USED 0 +#define SQLITE_DBSTATUS_CACHE_USED 1 +#define SQLITE_DBSTATUS_SCHEMA_USED 2 +#define SQLITE_DBSTATUS_STMT_USED 3 +#define SQLITE_DBSTATUS_LOOKASIDE_HIT 4 +#define SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5 +#define SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6 +#define SQLITE_DBSTATUS_CACHE_HIT 7 +#define SQLITE_DBSTATUS_CACHE_MISS 8 +#define SQLITE_DBSTATUS_CACHE_WRITE 9 +#define SQLITE_DBSTATUS_DEFERRED_FKS 10 +#define SQLITE_DBSTATUS_CACHE_USED_SHARED 11 +#define SQLITE_DBSTATUS_CACHE_SPILL 12 +#define SQLITE_DBSTATUS_MAX 12 /* Largest defined DBSTATUS */ + + +/* +** CAPI3REF: Prepared Statement Status +** METHOD: sqlite3_stmt +** +** ^(Each prepared statement maintains various +** [SQLITE_STMTSTATUS counters] that measure the number +** of times it has performed specific operations.)^ These counters can +** be used to monitor the performance characteristics of the prepared +** statements. For example, if the number of table steps greatly exceeds +** the number of table searches or result rows, that would tend to indicate +** that the prepared statement is using a full table scan rather than +** an index. +** +** ^(This interface is used to retrieve and reset counter values from +** a [prepared statement]. The first argument is the prepared statement +** object to be interrogated. The second argument +** is an integer code for a specific [SQLITE_STMTSTATUS counter] +** to be interrogated.)^ +** ^The current value of the requested counter is returned. +** ^If the resetFlg is true, then the counter is reset to zero after this +** interface call returns. +** +** See also: [sqlite3_status()] and [sqlite3_db_status()]. 
+*/ +SQLITE_API int sqlite3_stmt_status(sqlite3_stmt*, int op,int resetFlg); + +/* +** CAPI3REF: Status Parameters for prepared statements +** KEYWORDS: {SQLITE_STMTSTATUS counter} {SQLITE_STMTSTATUS counters} +** +** These preprocessor macros define integer codes that name counter +** values associated with the [sqlite3_stmt_status()] interface. +** The meanings of the various counters are as follows: +** +**
+** [[SQLITE_STMTSTATUS_FULLSCAN_STEP]]
SQLITE_STMTSTATUS_FULLSCAN_STEP
+**
^This is the number of times that SQLite has stepped forward in +** a table as part of a full table scan. Large numbers for this counter +** may indicate opportunities for performance improvement through +** careful use of indices.
+** +** [[SQLITE_STMTSTATUS_SORT]]
SQLITE_STMTSTATUS_SORT
+**
^This is the number of sort operations that have occurred.
+** A non-zero value in this counter may indicate an opportunity to
+** improve performance through careful use of indices.
+** +** [[SQLITE_STMTSTATUS_AUTOINDEX]]
SQLITE_STMTSTATUS_AUTOINDEX
+**
^This is the number of rows inserted into transient indices that
+** were created automatically in order to help joins run faster.
+** A non-zero value in this counter may indicate an opportunity to
+** improve performance by adding permanent indices that do not
+** need to be reinitialized each time the statement is run.
+** +** [[SQLITE_STMTSTATUS_VM_STEP]]
SQLITE_STMTSTATUS_VM_STEP
+**
^This is the number of virtual machine operations executed +** by the prepared statement if that number is less than or equal +** to 2147483647. The number of virtual machine operations can be +** used as a proxy for the total work done by the prepared statement. +** If the number of virtual machine operations exceeds 2147483647 +** then the value returned by this statement status code is undefined. +** +** [[SQLITE_STMTSTATUS_REPREPARE]]
SQLITE_STMTSTATUS_REPREPARE
+**
^This is the number of times that the prepared statement has been
+** automatically regenerated due to schema changes or changes to
+** [bound parameters] that might affect the query plan.
+**
+** [[SQLITE_STMTSTATUS_RUN]]
SQLITE_STMTSTATUS_RUN
+**
^This is the number of times that the prepared statement has +** been run. A single "run" for the purposes of this counter is one +** or more calls to [sqlite3_step()] followed by a call to [sqlite3_reset()]. +** The counter is incremented on the first [sqlite3_step()] call of each +** cycle. +** +** [[SQLITE_STMTSTATUS_FILTER_MISS]] +** [[SQLITE_STMTSTATUS_FILTER HIT]] +**
SQLITE_STMTSTATUS_FILTER_HIT
+** SQLITE_STMTSTATUS_FILTER_MISS
+**
^SQLITE_STMTSTATUS_FILTER_HIT is the number of times that a join +** step was bypassed because a Bloom filter returned not-found. The +** corresponding SQLITE_STMTSTATUS_FILTER_MISS value is the number of +** times that the Bloom filter returned a find, and thus the join step +** had to be processed as normal. +** +** [[SQLITE_STMTSTATUS_MEMUSED]]
SQLITE_STMTSTATUS_MEMUSED
+**
^This is the approximate number of bytes of heap memory +** used to store the prepared statement. ^This value is not actually +** a counter, and so the resetFlg parameter to sqlite3_stmt_status() +** is ignored when the opcode is SQLITE_STMTSTATUS_MEMUSED. +**
+**
+*/ +#define SQLITE_STMTSTATUS_FULLSCAN_STEP 1 +#define SQLITE_STMTSTATUS_SORT 2 +#define SQLITE_STMTSTATUS_AUTOINDEX 3 +#define SQLITE_STMTSTATUS_VM_STEP 4 +#define SQLITE_STMTSTATUS_REPREPARE 5 +#define SQLITE_STMTSTATUS_RUN 6 +#define SQLITE_STMTSTATUS_FILTER_MISS 7 +#define SQLITE_STMTSTATUS_FILTER_HIT 8 +#define SQLITE_STMTSTATUS_MEMUSED 99 + +/* +** CAPI3REF: Custom Page Cache Object +** +** The sqlite3_pcache type is opaque. It is implemented by +** the pluggable module. The SQLite core has no knowledge of +** its size or internal structure and never deals with the +** sqlite3_pcache object except by holding and passing pointers +** to the object. +** +** See [sqlite3_pcache_methods2] for additional information. +*/ +typedef struct sqlite3_pcache sqlite3_pcache; + +/* +** CAPI3REF: Custom Page Cache Object +** +** The sqlite3_pcache_page object represents a single page in the +** page cache. The page cache will allocate instances of this +** object. Various methods of the page cache use pointers to instances +** of this object as parameters or as their return value. +** +** See [sqlite3_pcache_methods2] for additional information. +*/ +typedef struct sqlite3_pcache_page sqlite3_pcache_page; +struct sqlite3_pcache_page { + void *pBuf; /* The content of the page */ + void *pExtra; /* Extra information associated with the page */ +}; + +/* +** CAPI3REF: Application Defined Page Cache. +** KEYWORDS: {page cache} +** +** ^(The [sqlite3_config]([SQLITE_CONFIG_PCACHE2], ...) interface can +** register an alternative page cache implementation by passing in an +** instance of the sqlite3_pcache_methods2 structure.)^ +** In many applications, most of the heap memory allocated by +** SQLite is used for the page cache. 
+** By implementing a +** custom page cache using this API, an application can better control +** the amount of memory consumed by SQLite, the way in which +** that memory is allocated and released, and the policies used to +** determine exactly which parts of a database file are cached and for +** how long. +** +** The alternative page cache mechanism is an +** extreme measure that is only needed by the most demanding applications. +** The built-in page cache is recommended for most uses. +** +** ^(The contents of the sqlite3_pcache_methods2 structure are copied to an +** internal buffer by SQLite within the call to [sqlite3_config]. Hence +** the application may discard the parameter after the call to +** [sqlite3_config()] returns.)^ +** +** [[the xInit() page cache method]] +** ^(The xInit() method is called once for each effective +** call to [sqlite3_initialize()])^ +** (usually only once during the lifetime of the process). ^(The xInit() +** method is passed a copy of the sqlite3_pcache_methods2.pArg value.)^ +** The intent of the xInit() method is to set up global data structures +** required by the custom page cache implementation. +** ^(If the xInit() method is NULL, then the +** built-in default page cache is used instead of the application defined +** page cache.)^ +** +** [[the xShutdown() page cache method]] +** ^The xShutdown() method is called by [sqlite3_shutdown()]. +** It can be used to clean up +** any outstanding resources before process shutdown, if required. +** ^The xShutdown() method may be NULL. +** +** ^SQLite automatically serializes calls to the xInit method, +** so the xInit method need not be threadsafe. ^The +** xShutdown method is only called from [sqlite3_shutdown()] so it does +** not need to be threadsafe either. All other methods must be threadsafe +** in multithreaded applications. +** +** ^SQLite will never invoke xInit() more than once without an intervening +** call to xShutdown(). 
+** +** [[the xCreate() page cache methods]] +** ^SQLite invokes the xCreate() method to construct a new cache instance. +** SQLite will typically create one cache instance for each open database file, +** though this is not guaranteed. ^The +** first parameter, szPage, is the size in bytes of the pages that must +** be allocated by the cache. ^szPage will always a power of two. ^The +** second parameter szExtra is a number of bytes of extra storage +** associated with each page cache entry. ^The szExtra parameter will +** a number less than 250. SQLite will use the +** extra szExtra bytes on each page to store metadata about the underlying +** database page on disk. The value passed into szExtra depends +** on the SQLite version, the target platform, and how SQLite was compiled. +** ^The third argument to xCreate(), bPurgeable, is true if the cache being +** created will be used to cache database pages of a file stored on disk, or +** false if it is used for an in-memory database. The cache implementation +** does not have to do anything special based with the value of bPurgeable; +** it is purely advisory. ^On a cache where bPurgeable is false, SQLite will +** never invoke xUnpin() except to deliberately delete a page. +** ^In other words, calls to xUnpin() on a cache with bPurgeable set to +** false will always have the "discard" flag set to true. +** ^Hence, a cache created with bPurgeable false will +** never contain any unpinned pages. +** +** [[the xCachesize() page cache method]] +** ^(The xCachesize() method may be called at any time by SQLite to set the +** suggested maximum cache-size (number of pages stored by) the cache +** instance passed as the first argument. This is the value configured using +** the SQLite "[PRAGMA cache_size]" command.)^ As with the bPurgeable +** parameter, the implementation is not required to do anything with this +** value; it is advisory only. 
+** +** [[the xPagecount() page cache methods]] +** The xPagecount() method must return the number of pages currently +** stored in the cache, both pinned and unpinned. +** +** [[the xFetch() page cache methods]] +** The xFetch() method locates a page in the cache and returns a pointer to +** an sqlite3_pcache_page object associated with that page, or a NULL pointer. +** The pBuf element of the returned sqlite3_pcache_page object will be a +** pointer to a buffer of szPage bytes used to store the content of a +** single database page. The pExtra element of sqlite3_pcache_page will be +** a pointer to the szExtra bytes of extra storage that SQLite has requested +** for each entry in the page cache. +** +** The page to be fetched is determined by the key. ^The minimum key value +** is 1. After it has been retrieved using xFetch, the page is considered +** to be "pinned". +** +** If the requested page is already in the page cache, then the page cache +** implementation must return a pointer to the page buffer with its content +** intact. If the requested page is not already in the cache, then the +** cache implementation should use the value of the createFlag +** parameter to help it determined what action to take: +** +** +**
createFlag Behavior when page is not already in cache +**
0 Do not allocate a new page. Return NULL. +**
1 Allocate a new page if it easy and convenient to do so. +** Otherwise return NULL. +**
2 Make every effort to allocate a new page. Only return +** NULL if allocating a new page is effectively impossible. +**
+** +** ^(SQLite will normally invoke xFetch() with a createFlag of 0 or 1. SQLite +** will only use a createFlag of 2 after a prior call with a createFlag of 1 +** failed.)^ In between the xFetch() calls, SQLite may +** attempt to unpin one or more cache pages by spilling the content of +** pinned pages to disk and synching the operating system disk cache. +** +** [[the xUnpin() page cache method]] +** ^xUnpin() is called by SQLite with a pointer to a currently pinned page +** as its second argument. If the third parameter, discard, is non-zero, +** then the page must be evicted from the cache. +** ^If the discard parameter is +** zero, then the page may be discarded or retained at the discretion of +** page cache implementation. ^The page cache implementation +** may choose to evict unpinned pages at any time. +** +** The cache must not perform any reference counting. A single +** call to xUnpin() unpins the page regardless of the number of prior calls +** to xFetch(). +** +** [[the xRekey() page cache methods]] +** The xRekey() method is used to change the key value associated with the +** page passed as the second argument. If the cache +** previously contains an entry associated with newKey, it must be +** discarded. ^Any prior cache entry associated with newKey is guaranteed not +** to be pinned. +** +** When SQLite calls the xTruncate() method, the cache must discard all +** existing cache entries with page numbers (keys) greater than or equal +** to the value of the iLimit parameter passed to xTruncate(). If any +** of these pages are pinned, they are implicitly unpinned, meaning that +** they can be safely discarded. +** +** [[the xDestroy() page cache method]] +** ^The xDestroy() method is used to delete a cache allocated by xCreate(). +** All resources associated with the specified cache should be freed. 
^After +** calling the xDestroy() method, SQLite considers the [sqlite3_pcache*] +** handle invalid, and will not use it with any other sqlite3_pcache_methods2 +** functions. +** +** [[the xShrink() page cache method]] +** ^SQLite invokes the xShrink() method when it wants the page cache to +** free up as much of heap memory as possible. The page cache implementation +** is not obligated to free any memory, but well-behaved implementations should +** do their best. +*/ +typedef struct sqlite3_pcache_methods2 sqlite3_pcache_methods2; +struct sqlite3_pcache_methods2 { + int iVersion; + void *pArg; + int (*xInit)(void*); + void (*xShutdown)(void*); + sqlite3_pcache *(*xCreate)(int szPage, int szExtra, int bPurgeable); + void (*xCachesize)(sqlite3_pcache*, int nCachesize); + int (*xPagecount)(sqlite3_pcache*); + sqlite3_pcache_page *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); + void (*xUnpin)(sqlite3_pcache*, sqlite3_pcache_page*, int discard); + void (*xRekey)(sqlite3_pcache*, sqlite3_pcache_page*, + unsigned oldKey, unsigned newKey); + void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); + void (*xDestroy)(sqlite3_pcache*); + void (*xShrink)(sqlite3_pcache*); +}; + +/* +** This is the obsolete pcache_methods object that has now been replaced +** by sqlite3_pcache_methods2. This object is not used by SQLite. It is +** retained in the header file for backwards compatibility only. 
+*/ +typedef struct sqlite3_pcache_methods sqlite3_pcache_methods; +struct sqlite3_pcache_methods { + void *pArg; + int (*xInit)(void*); + void (*xShutdown)(void*); + sqlite3_pcache *(*xCreate)(int szPage, int bPurgeable); + void (*xCachesize)(sqlite3_pcache*, int nCachesize); + int (*xPagecount)(sqlite3_pcache*); + void *(*xFetch)(sqlite3_pcache*, unsigned key, int createFlag); + void (*xUnpin)(sqlite3_pcache*, void*, int discard); + void (*xRekey)(sqlite3_pcache*, void*, unsigned oldKey, unsigned newKey); + void (*xTruncate)(sqlite3_pcache*, unsigned iLimit); + void (*xDestroy)(sqlite3_pcache*); +}; + + +/* +** CAPI3REF: Online Backup Object +** +** The sqlite3_backup object records state information about an ongoing +** online backup operation. ^The sqlite3_backup object is created by +** a call to [sqlite3_backup_init()] and is destroyed by a call to +** [sqlite3_backup_finish()]. +** +** See Also: [Using the SQLite Online Backup API] +*/ +typedef struct sqlite3_backup sqlite3_backup; + +/* +** CAPI3REF: Online Backup API. +** +** The backup API copies the content of one database into another. +** It is useful either for creating backups of databases or +** for copying in-memory databases to or from persistent files. +** +** See Also: [Using the SQLite Online Backup API] +** +** ^SQLite holds a write transaction open on the destination database file +** for the duration of the backup operation. +** ^The source database is read-locked only while it is being read; +** it is not locked continuously for the entire backup operation. +** ^Thus, the backup may be performed on a live source database without +** preventing other database connections from +** reading or writing to the source database while the backup is underway. +** +** ^(To perform a backup operation: +**
    +**
  1. sqlite3_backup_init() is called once to initialize the +** backup, +**
  2. sqlite3_backup_step() is called one or more times to transfer +** the data between the two databases, and finally +**
  3. sqlite3_backup_finish() is called to release all resources +** associated with the backup operation. +**
)^ +** There should be exactly one call to sqlite3_backup_finish() for each +** successful call to sqlite3_backup_init(). +** +** [[sqlite3_backup_init()]] sqlite3_backup_init() +** +** ^The D and N arguments to sqlite3_backup_init(D,N,S,M) are the +** [database connection] associated with the destination database +** and the database name, respectively. +** ^The database name is "main" for the main database, "temp" for the +** temporary database, or the name specified after the AS keyword in +** an [ATTACH] statement for an attached database. +** ^The S and M arguments passed to +** sqlite3_backup_init(D,N,S,M) identify the [database connection] +** and database name of the source database, respectively. +** ^The source and destination [database connections] (parameters S and D) +** must be different or else sqlite3_backup_init(D,N,S,M) will fail with +** an error. +** +** ^A call to sqlite3_backup_init() will fail, returning NULL, if +** there is already a read or read-write transaction open on the +** destination database. +** +** ^If an error occurs within sqlite3_backup_init(D,N,S,M), then NULL is +** returned and an error code and error message are stored in the +** destination [database connection] D. +** ^The error code and message for the failed call to sqlite3_backup_init() +** can be retrieved using the [sqlite3_errcode()], [sqlite3_errmsg()], and/or +** [sqlite3_errmsg16()] functions. +** ^A successful call to sqlite3_backup_init() returns a pointer to an +** [sqlite3_backup] object. +** ^The [sqlite3_backup] object may be used with the sqlite3_backup_step() and +** sqlite3_backup_finish() functions to perform the specified backup +** operation. +** +** [[sqlite3_backup_step()]] sqlite3_backup_step() +** +** ^Function sqlite3_backup_step(B,N) will copy up to N pages between +** the source and destination databases specified by [sqlite3_backup] object B. +** ^If N is negative, all remaining source pages are copied. 
+** ^If sqlite3_backup_step(B,N) successfully copies N pages and there +** are still more pages to be copied, then the function returns [SQLITE_OK]. +** ^If sqlite3_backup_step(B,N) successfully finishes copying all pages +** from source to destination, then it returns [SQLITE_DONE]. +** ^If an error occurs while running sqlite3_backup_step(B,N), +** then an [error code] is returned. ^As well as [SQLITE_OK] and +** [SQLITE_DONE], a call to sqlite3_backup_step() may return [SQLITE_READONLY], +** [SQLITE_NOMEM], [SQLITE_BUSY], [SQLITE_LOCKED], or an +** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX] extended error code. +** +** ^(The sqlite3_backup_step() might return [SQLITE_READONLY] if +**
    +**
  1. the destination database was opened read-only, or +**
  2. the destination database is using write-ahead-log journaling +** and the destination and source page sizes differ, or +**
  3. the destination database is an in-memory database and the +** destination and source page sizes differ. +**
)^ +** +** ^If sqlite3_backup_step() cannot obtain a required file-system lock, then +** the [sqlite3_busy_handler | busy-handler function] +** is invoked (if one is specified). ^If the +** busy-handler returns non-zero before the lock is available, then +** [SQLITE_BUSY] is returned to the caller. ^In this case the call to +** sqlite3_backup_step() can be retried later. ^If the source +** [database connection] +** is being used to write to the source database when sqlite3_backup_step() +** is called, then [SQLITE_LOCKED] is returned immediately. ^Again, in this +** case the call to sqlite3_backup_step() can be retried later on. ^(If +** [SQLITE_IOERR_ACCESS | SQLITE_IOERR_XXX], [SQLITE_NOMEM], or +** [SQLITE_READONLY] is returned, then +** there is no point in retrying the call to sqlite3_backup_step(). These +** errors are considered fatal.)^ The application must accept +** that the backup operation has failed and pass the backup operation handle +** to the sqlite3_backup_finish() to release associated resources. +** +** ^The first call to sqlite3_backup_step() obtains an exclusive lock +** on the destination file. ^The exclusive lock is not released until either +** sqlite3_backup_finish() is called or the backup operation is complete +** and sqlite3_backup_step() returns [SQLITE_DONE]. ^Every call to +** sqlite3_backup_step() obtains a [shared lock] on the source database that +** lasts for the duration of the sqlite3_backup_step() call. +** ^Because the source database is not locked between calls to +** sqlite3_backup_step(), the source database may be modified mid-way +** through the backup process. ^If the source database is modified by an +** external process or via a database connection other than the one being +** used by the backup operation, then the backup will be automatically +** restarted by the next call to sqlite3_backup_step(). 
^If the source +** database is modified using the same database connection as is used +** by the backup operation, then the backup database is automatically +** updated at the same time. +** +** [[sqlite3_backup_finish()]] sqlite3_backup_finish() +** +** When sqlite3_backup_step() has returned [SQLITE_DONE], or when the +** application wishes to abandon the backup operation, the application +** should destroy the [sqlite3_backup] by passing it to sqlite3_backup_finish(). +** ^The sqlite3_backup_finish() interface releases all +** resources associated with the [sqlite3_backup] object. +** ^If sqlite3_backup_step() has not yet returned [SQLITE_DONE], then any +** active write-transaction on the destination database is rolled back. +** The [sqlite3_backup] object is invalid +** and may not be used following a call to sqlite3_backup_finish(). +** +** ^The value returned by sqlite3_backup_finish is [SQLITE_OK] if no +** sqlite3_backup_step() errors occurred, regardless of whether or not +** sqlite3_backup_step() completed. +** ^If an out-of-memory condition or IO error occurred during any prior +** sqlite3_backup_step() call on the same [sqlite3_backup] object, then +** sqlite3_backup_finish() returns the corresponding [error code]. +** +** ^A return of [SQLITE_BUSY] or [SQLITE_LOCKED] from sqlite3_backup_step() +** is not a permanent error and does not affect the return value of +** sqlite3_backup_finish(). +** +** [[sqlite3_backup_remaining()]] [[sqlite3_backup_pagecount()]] +** sqlite3_backup_remaining() and sqlite3_backup_pagecount() +** +** ^The sqlite3_backup_remaining() routine returns the number of pages still +** to be backed up at the conclusion of the most recent sqlite3_backup_step(). +** ^The sqlite3_backup_pagecount() routine returns the total number of pages +** in the source database at the conclusion of the most recent +** sqlite3_backup_step(). +** ^(The values returned by these functions are only updated by +** sqlite3_backup_step(). 
If the source database is modified in a way that +** changes the size of the source database or the number of pages remaining, +** those changes are not reflected in the output of sqlite3_backup_pagecount() +** and sqlite3_backup_remaining() until after the next +** sqlite3_backup_step().)^ +** +** Concurrent Usage of Database Handles +** +** ^The source [database connection] may be used by the application for other +** purposes while a backup operation is underway or being initialized. +** ^If SQLite is compiled and configured to support threadsafe database +** connections, then the source database connection may be used concurrently +** from within other threads. +** +** However, the application must guarantee that the destination +** [database connection] is not passed to any other API (by any thread) after +** sqlite3_backup_init() is called and before the corresponding call to +** sqlite3_backup_finish(). SQLite does not currently check to see +** if the application incorrectly accesses the destination [database connection] +** and so no error code is reported, but the operations may malfunction +** nevertheless. Use of the destination database connection while a +** backup is in progress might also cause a mutex deadlock. +** +** If running in [shared cache mode], the application must +** guarantee that the shared cache used by the destination database +** is not accessed while the backup is running. In practice this means +** that the application must guarantee that the disk file being +** backed up to is not accessed by any connection within the process, +** not just the specific connection that was passed to sqlite3_backup_init(). +** +** The [sqlite3_backup] object itself is partially threadsafe. Multiple +** threads may safely make multiple concurrent calls to sqlite3_backup_step(). +** However, the sqlite3_backup_remaining() and sqlite3_backup_pagecount() +** APIs are not strictly speaking threadsafe. 
If they are invoked at the +** same time as another thread is invoking sqlite3_backup_step() it is +** possible that they return invalid values. +*/ +SQLITE_API sqlite3_backup *sqlite3_backup_init( + sqlite3 *pDest, /* Destination database handle */ + const char *zDestName, /* Destination database name */ + sqlite3 *pSource, /* Source database handle */ + const char *zSourceName /* Source database name */ +); +SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage); +SQLITE_API int sqlite3_backup_finish(sqlite3_backup *p); +SQLITE_API int sqlite3_backup_remaining(sqlite3_backup *p); +SQLITE_API int sqlite3_backup_pagecount(sqlite3_backup *p); + +/* +** CAPI3REF: Unlock Notification +** METHOD: sqlite3 +** +** ^When running in shared-cache mode, a database operation may fail with +** an [SQLITE_LOCKED] error if the required locks on the shared-cache or +** individual tables within the shared-cache cannot be obtained. See +** [SQLite Shared-Cache Mode] for a description of shared-cache locking. +** ^This API may be used to register a callback that SQLite will invoke +** when the connection currently holding the required lock relinquishes it. +** ^This API is only available if the library was compiled with the +** [SQLITE_ENABLE_UNLOCK_NOTIFY] C-preprocessor symbol defined. +** +** See Also: [Using the SQLite Unlock Notification Feature]. +** +** ^Shared-cache locks are released when a database connection concludes +** its current transaction, either by committing it or rolling it back. +** +** ^When a connection (known as the blocked connection) fails to obtain a +** shared-cache lock and SQLITE_LOCKED is returned to the caller, the +** identity of the database connection (the blocking connection) that +** has locked the required resource is stored internally. 
^After an +** application receives an SQLITE_LOCKED error, it may call the +** sqlite3_unlock_notify() method with the blocked connection handle as +** the first argument to register for a callback that will be invoked +** when the blocking connection's current transaction is concluded. ^The +** callback is invoked from within the [sqlite3_step] or [sqlite3_close] +** call that concludes the blocking connection's transaction. +** +** ^(If sqlite3_unlock_notify() is called in a multi-threaded application, +** there is a chance that the blocking connection will have already +** concluded its transaction by the time sqlite3_unlock_notify() is invoked. +** If this happens, then the specified callback is invoked immediately, +** from within the call to sqlite3_unlock_notify().)^ +** +** ^If the blocked connection is attempting to obtain a write-lock on a +** shared-cache table, and more than one other connection currently holds +** a read-lock on the same table, then SQLite arbitrarily selects one of +** the other connections to use as the blocking connection. +** +** ^(There may be at most one unlock-notify callback registered by a +** blocked connection. If sqlite3_unlock_notify() is called when the +** blocked connection already has a registered unlock-notify callback, +** then the new callback replaces the old.)^ ^If sqlite3_unlock_notify() is +** called with a NULL pointer as its second argument, then any existing +** unlock-notify callback is canceled. ^The blocked connection's +** unlock-notify callback may also be canceled by closing the blocked +** connection using [sqlite3_close()]. +** +** The unlock-notify callback is not reentrant. If an application invokes +** any sqlite3_xxx API functions from within an unlock-notify callback, a +** crash or deadlock may be the result. +** +** ^Unless deadlock is detected (see below), sqlite3_unlock_notify() always +** returns SQLITE_OK. 
+** +** Callback Invocation Details +** +** When an unlock-notify callback is registered, the application provides a +** single void* pointer that is passed to the callback when it is invoked. +** However, the signature of the callback function allows SQLite to pass +** it an array of void* context pointers. The first argument passed to +** an unlock-notify callback is a pointer to an array of void* pointers, +** and the second is the number of entries in the array. +** +** When a blocking connection's transaction is concluded, there may be +** more than one blocked connection that has registered for an unlock-notify +** callback. ^If two or more such blocked connections have specified the +** same callback function, then instead of invoking the callback function +** multiple times, it is invoked once with the set of void* context pointers +** specified by the blocked connections bundled together into an array. +** This gives the application an opportunity to prioritize any actions +** related to the set of unblocked database connections. +** +** Deadlock Detection +** +** Assuming that after registering for an unlock-notify callback a +** database waits for the callback to be issued before taking any further +** action (a reasonable assumption), then using this API may cause the +** application to deadlock. For example, if connection X is waiting for +** connection Y's transaction to be concluded, and similarly connection +** Y is waiting on connection X's transaction, then neither connection +** will proceed and the system may remain deadlocked indefinitely. +** +** To avoid this scenario, the sqlite3_unlock_notify() performs deadlock +** detection. ^If a given call to sqlite3_unlock_notify() would put the +** system in a deadlocked state, then SQLITE_LOCKED is returned and no +** unlock-notify callback is registered. 
The system is said to be in +** a deadlocked state if connection A has registered for an unlock-notify +** callback on the conclusion of connection B's transaction, and connection +** B has itself registered for an unlock-notify callback when connection +** A's transaction is concluded. ^Indirect deadlock is also detected, so +** the system is also considered to be deadlocked if connection B has +** registered for an unlock-notify callback on the conclusion of connection +** C's transaction, where connection C is waiting on connection A. ^Any +** number of levels of indirection are allowed. +** +** The "DROP TABLE" Exception +** +** When a call to [sqlite3_step()] returns SQLITE_LOCKED, it is almost +** always appropriate to call sqlite3_unlock_notify(). There is however, +** one exception. When executing a "DROP TABLE" or "DROP INDEX" statement, +** SQLite checks if there are any currently executing SELECT statements +** that belong to the same connection. If there are, SQLITE_LOCKED is +** returned. In this case there is no "blocking connection", so invoking +** sqlite3_unlock_notify() results in the unlock-notify callback being +** invoked immediately. If the application then re-attempts the "DROP TABLE" +** or "DROP INDEX" query, an infinite loop might be the result. +** +** One way around this problem is to check the extended error code returned +** by an sqlite3_step() call. ^(If there is a blocking connection, then the +** extended error code is set to SQLITE_LOCKED_SHAREDCACHE. 
Otherwise, in +** the special "DROP TABLE/INDEX" case, the extended error code is just +** SQLITE_LOCKED.)^ +*/ +SQLITE_API int sqlite3_unlock_notify( + sqlite3 *pBlocked, /* Waiting connection */ + void (*xNotify)(void **apArg, int nArg), /* Callback function to invoke */ + void *pNotifyArg /* Argument to pass to xNotify */ +); + + +/* +** CAPI3REF: String Comparison +** +** ^The [sqlite3_stricmp()] and [sqlite3_strnicmp()] APIs allow applications +** and extensions to compare the contents of two buffers containing UTF-8 +** strings in a case-independent fashion, using the same definition of "case +** independence" that SQLite uses internally when comparing identifiers. +*/ +SQLITE_API int sqlite3_stricmp(const char *, const char *); +SQLITE_API int sqlite3_strnicmp(const char *, const char *, int); + +/* +** CAPI3REF: String Globbing +* +** ^The [sqlite3_strglob(P,X)] interface returns zero if and only if +** string X matches the [GLOB] pattern P. +** ^The definition of [GLOB] pattern matching used in +** [sqlite3_strglob(P,X)] is the same as for the "X GLOB P" operator in the +** SQL dialect understood by SQLite. ^The [sqlite3_strglob(P,X)] function +** is case sensitive. +** +** Note that this routine returns zero on a match and non-zero if the strings +** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()]. +** +** See also: [sqlite3_strlike()]. +*/ +SQLITE_API int sqlite3_strglob(const char *zGlob, const char *zStr); + +/* +** CAPI3REF: String LIKE Matching +* +** ^The [sqlite3_strlike(P,X,E)] interface returns zero if and only if +** string X matches the [LIKE] pattern P with escape character E. +** ^The definition of [LIKE] pattern matching used in +** [sqlite3_strlike(P,X,E)] is the same as for the "X LIKE P ESCAPE E" +** operator in the SQL dialect understood by SQLite. ^For "X LIKE P" without +** the ESCAPE clause, set the E parameter of [sqlite3_strlike(P,X,E)] to 0. 
+** ^As with the LIKE operator, the [sqlite3_strlike(P,X,E)] function is case +** insensitive - equivalent upper and lower case ASCII characters match +** one another. +** +** ^The [sqlite3_strlike(P,X,E)] function matches Unicode characters, though +** only ASCII characters are case folded. +** +** Note that this routine returns zero on a match and non-zero if the strings +** do not match, the same as [sqlite3_stricmp()] and [sqlite3_strnicmp()]. +** +** See also: [sqlite3_strglob()]. +*/ +SQLITE_API int sqlite3_strlike(const char *zGlob, const char *zStr, unsigned int cEsc); + +/* +** CAPI3REF: Error Logging Interface +** +** ^The [sqlite3_log()] interface writes a message into the [error log] +** established by the [SQLITE_CONFIG_LOG] option to [sqlite3_config()]. +** ^If logging is enabled, the zFormat string and subsequent arguments are +** used with [sqlite3_snprintf()] to generate the final output string. +** +** The sqlite3_log() interface is intended for use by extensions such as +** virtual tables, collating functions, and SQL functions. While there is +** nothing to prevent an application from calling sqlite3_log(), doing so +** is considered bad form. +** +** The zFormat string must not be NULL. +** +** To avoid deadlocks and other threading problems, the sqlite3_log() routine +** will not use dynamically allocated memory. The log message is stored in +** a fixed-length buffer on the stack. If the log message is longer than +** a few hundred characters, it will be truncated to the length of the +** buffer. +*/ +SQLITE_API void sqlite3_log(int iErrCode, const char *zFormat, ...); + +/* +** CAPI3REF: Write-Ahead Log Commit Hook +** METHOD: sqlite3 +** +** ^The [sqlite3_wal_hook()] function is used to register a callback that +** is invoked each time data is committed to a database in wal mode. 
+** +** ^(The callback is invoked by SQLite after the commit has taken place and +** the associated write-lock on the database released)^, so the implementation +** may read, write or [checkpoint] the database as required. +** +** ^The first parameter passed to the callback function when it is invoked +** is a copy of the third parameter passed to sqlite3_wal_hook() when +** registering the callback. ^The second is a copy of the database handle. +** ^The third parameter is the name of the database that was written to - +** either "main" or the name of an [ATTACH]-ed database. ^The fourth parameter +** is the number of pages currently in the write-ahead log file, +** including those that were just committed. +** +** The callback function should normally return [SQLITE_OK]. ^If an error +** code is returned, that error will propagate back up through the +** SQLite code base to cause the statement that provoked the callback +** to report an error, though the commit will have still occurred. If the +** callback returns [SQLITE_ROW] or [SQLITE_DONE], or if it returns a value +** that does not correspond to any valid SQLite error code, the results +** are undefined. +** +** A single database handle may have at most a single write-ahead log callback +** registered at one time. ^Calling [sqlite3_wal_hook()] replaces any +** previously registered write-ahead log callback. ^The return value is +** a copy of the third parameter from the previous call, if any, or 0. +** ^Note that the [sqlite3_wal_autocheckpoint()] interface and the +** [wal_autocheckpoint pragma] both invoke [sqlite3_wal_hook()] and will +** overwrite any prior [sqlite3_wal_hook()] settings. 
+*/ +SQLITE_API void *sqlite3_wal_hook( + sqlite3*, + int(*)(void *,sqlite3*,const char*,int), + void* +); + +/* +** CAPI3REF: Configure an auto-checkpoint +** METHOD: sqlite3 +** +** ^The [sqlite3_wal_autocheckpoint(D,N)] is a wrapper around +** [sqlite3_wal_hook()] that causes any database on [database connection] D +** to automatically [checkpoint] +** after committing a transaction if there are N or +** more frames in the [write-ahead log] file. ^Passing zero or +** a negative value as the nFrame parameter disables automatic +** checkpoints entirely. +** +** ^The callback registered by this function replaces any existing callback +** registered using [sqlite3_wal_hook()]. ^Likewise, registering a callback +** using [sqlite3_wal_hook()] disables the automatic checkpoint mechanism +** configured by this function. +** +** ^The [wal_autocheckpoint pragma] can be used to invoke this interface +** from SQL. +** +** ^Checkpoints initiated by this mechanism are +** [sqlite3_wal_checkpoint_v2|PASSIVE]. +** +** ^Every new [database connection] defaults to having the auto-checkpoint +** enabled with a threshold of 1000 or [SQLITE_DEFAULT_WAL_AUTOCHECKPOINT] +** pages. The use of this interface +** is only necessary if the default setting is found to be suboptimal +** for a particular application. +*/ +SQLITE_API int sqlite3_wal_autocheckpoint(sqlite3 *db, int N); + +/* +** CAPI3REF: Checkpoint a database +** METHOD: sqlite3 +** +** ^(The sqlite3_wal_checkpoint(D,X) is equivalent to +** [sqlite3_wal_checkpoint_v2](D,X,[SQLITE_CHECKPOINT_PASSIVE],0,0).)^ +** +** In brief, sqlite3_wal_checkpoint(D,X) causes the content in the +** [write-ahead log] for database X on [database connection] D to be +** transferred into the database file and for the write-ahead log to +** be reset. See the [checkpointing] documentation for addition +** information. +** +** This interface used to be the only way to cause a checkpoint to +** occur. 
But then the newer and more powerful [sqlite3_wal_checkpoint_v2()] +** interface was added. This interface is retained for backwards +** compatibility and as a convenience for applications that need to manually +** start a callback but which do not need the full power (and corresponding +** complication) of [sqlite3_wal_checkpoint_v2()]. +*/ +SQLITE_API int sqlite3_wal_checkpoint(sqlite3 *db, const char *zDb); + +/* +** CAPI3REF: Checkpoint a database +** METHOD: sqlite3 +** +** ^(The sqlite3_wal_checkpoint_v2(D,X,M,L,C) interface runs a checkpoint +** operation on database X of [database connection] D in mode M. Status +** information is written back into integers pointed to by L and C.)^ +** ^(The M parameter must be a valid [checkpoint mode]:)^ +** +**
+**
SQLITE_CHECKPOINT_PASSIVE
+** ^Checkpoint as many frames as possible without waiting for any database +** readers or writers to finish, then sync the database file if all frames +** in the log were checkpointed. ^The [busy-handler callback] +** is never invoked in the SQLITE_CHECKPOINT_PASSIVE mode. +** ^On the other hand, passive mode might leave the checkpoint unfinished +** if there are concurrent readers or writers. +** +**
SQLITE_CHECKPOINT_FULL
+** ^This mode blocks (it invokes the +** [sqlite3_busy_handler|busy-handler callback]) until there is no +** database writer and all readers are reading from the most recent database +** snapshot. ^It then checkpoints all frames in the log file and syncs the +** database file. ^This mode blocks new database writers while it is pending, +** but new database readers are allowed to continue unimpeded. +** +**
SQLITE_CHECKPOINT_RESTART
+** ^This mode works the same way as SQLITE_CHECKPOINT_FULL with the addition +** that after checkpointing the log file it blocks (calls the +** [busy-handler callback]) +** until all readers are reading from the database file only. ^This ensures +** that the next writer will restart the log file from the beginning. +** ^Like SQLITE_CHECKPOINT_FULL, this mode blocks new +** database writer attempts while it is pending, but does not impede readers. +** +**
SQLITE_CHECKPOINT_TRUNCATE
+** ^This mode works the same way as SQLITE_CHECKPOINT_RESTART with the +** addition that it also truncates the log file to zero bytes just prior +** to a successful return. +**
+** +** ^If pnLog is not NULL, then *pnLog is set to the total number of frames in +** the log file or to -1 if the checkpoint could not run because +** of an error or because the database is not in [WAL mode]. ^If pnCkpt is not +** NULL,then *pnCkpt is set to the total number of checkpointed frames in the +** log file (including any that were already checkpointed before the function +** was called) or to -1 if the checkpoint could not run due to an error or +** because the database is not in WAL mode. ^Note that upon successful +** completion of an SQLITE_CHECKPOINT_TRUNCATE, the log file will have been +** truncated to zero bytes and so both *pnLog and *pnCkpt will be set to zero. +** +** ^All calls obtain an exclusive "checkpoint" lock on the database file. ^If +** any other process is running a checkpoint operation at the same time, the +** lock cannot be obtained and SQLITE_BUSY is returned. ^Even if there is a +** busy-handler configured, it will not be invoked in this case. +** +** ^The SQLITE_CHECKPOINT_FULL, RESTART and TRUNCATE modes also obtain the +** exclusive "writer" lock on the database file. ^If the writer lock cannot be +** obtained immediately, and a busy-handler is configured, it is invoked and +** the writer lock retried until either the busy-handler returns 0 or the lock +** is successfully obtained. ^The busy-handler is also invoked while waiting for +** database readers as described above. ^If the busy-handler returns 0 before +** the writer lock is obtained or while waiting for database readers, the +** checkpoint operation proceeds from that point in the same way as +** SQLITE_CHECKPOINT_PASSIVE - checkpointing as many frames as possible +** without blocking any further. ^SQLITE_BUSY is returned in this case. +** +** ^If parameter zDb is NULL or points to a zero length string, then the +** specified operation is attempted on all WAL databases [attached] to +** [database connection] db. 
In this case the +** values written to output parameters *pnLog and *pnCkpt are undefined. ^If +** an SQLITE_BUSY error is encountered when processing one or more of the +** attached WAL databases, the operation is still attempted on any remaining +** attached databases and SQLITE_BUSY is returned at the end. ^If any other +** error occurs while processing an attached database, processing is abandoned +** and the error code is returned to the caller immediately. ^If no error +** (SQLITE_BUSY or otherwise) is encountered while processing the attached +** databases, SQLITE_OK is returned. +** +** ^If database zDb is the name of an attached database that is not in WAL +** mode, SQLITE_OK is returned and both *pnLog and *pnCkpt set to -1. ^If +** zDb is not NULL (or a zero length string) and is not the name of any +** attached database, SQLITE_ERROR is returned to the caller. +** +** ^Unless it returns SQLITE_MISUSE, +** the sqlite3_wal_checkpoint_v2() interface +** sets the error information that is queried by +** [sqlite3_errcode()] and [sqlite3_errmsg()]. +** +** ^The [PRAGMA wal_checkpoint] command can be used to invoke this interface +** from SQL. +*/ +SQLITE_API int sqlite3_wal_checkpoint_v2( + sqlite3 *db, /* Database handle */ + const char *zDb, /* Name of attached database (or NULL) */ + int eMode, /* SQLITE_CHECKPOINT_* value */ + int *pnLog, /* OUT: Size of WAL log in frames */ + int *pnCkpt /* OUT: Total number of frames checkpointed */ +); + +/* +** CAPI3REF: Checkpoint Mode Values +** KEYWORDS: {checkpoint mode} +** +** These constants define all valid values for the "checkpoint mode" passed +** as the third parameter to the [sqlite3_wal_checkpoint_v2()] interface. +** See the [sqlite3_wal_checkpoint_v2()] documentation for details on the +** meaning of each of these checkpoint modes. 
+*/ +#define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */ +#define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */ +#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for readers */ +#define SQLITE_CHECKPOINT_TRUNCATE 3 /* Like RESTART but also truncate WAL */ + +/* +** CAPI3REF: Virtual Table Interface Configuration +** +** This function may be called by either the [xConnect] or [xCreate] method +** of a [virtual table] implementation to configure +** various facets of the virtual table interface. +** +** If this interface is invoked outside the context of an xConnect or +** xCreate virtual table method then the behavior is undefined. +** +** In the call sqlite3_vtab_config(D,C,...) the D parameter is the +** [database connection] in which the virtual table is being created and +** which is passed in as the first argument to the [xConnect] or [xCreate] +** method that is invoking sqlite3_vtab_config(). The C parameter is one +** of the [virtual table configuration options]. The presence and meaning +** of parameters after C depend on which [virtual table configuration option] +** is used. +*/ +SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); + +/* +** CAPI3REF: Virtual Table Configuration Options +** KEYWORDS: {virtual table configuration options} +** KEYWORDS: {virtual table configuration option} +** +** These macros define the various options to the +** [sqlite3_vtab_config()] interface that [virtual table] implementations +** can use to customize and optimize their behavior. +** +**
+** [[SQLITE_VTAB_CONSTRAINT_SUPPORT]] +**
SQLITE_VTAB_CONSTRAINT_SUPPORT
+**
Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_CONSTRAINT_SUPPORT,X) are supported, +** where X is an integer. If X is zero, then the [virtual table] whose +** [xCreate] or [xConnect] method invoked [sqlite3_vtab_config()] does not +** support constraints. In this configuration (which is the default) if +** a call to the [xUpdate] method returns [SQLITE_CONSTRAINT], then the entire +** statement is rolled back as if [ON CONFLICT | OR ABORT] had been +** specified as part of the users SQL statement, regardless of the actual +** ON CONFLICT mode specified. +** +** If X is non-zero, then the virtual table implementation guarantees +** that if [xUpdate] returns [SQLITE_CONSTRAINT], it will do so before +** any modifications to internal or persistent data structures have been made. +** If the [ON CONFLICT] mode is ABORT, FAIL, IGNORE or ROLLBACK, SQLite +** is able to roll back a statement or database transaction, and abandon +** or continue processing the current SQL statement as appropriate. +** If the ON CONFLICT mode is REPLACE and the [xUpdate] method returns +** [SQLITE_CONSTRAINT], SQLite handles this as if the ON CONFLICT mode +** had been ABORT. +** +** Virtual table implementations that are required to handle OR REPLACE +** must do so within the [xUpdate] method. If a call to the +** [sqlite3_vtab_on_conflict()] function indicates that the current ON +** CONFLICT policy is REPLACE, the virtual table implementation should +** silently replace the appropriate rows within the xUpdate callback and +** return SQLITE_OK. Or, if this is not possible, it may return +** SQLITE_CONSTRAINT, in which case SQLite falls back to OR ABORT +** constraint handling. +**
+** +** [[SQLITE_VTAB_DIRECTONLY]]
SQLITE_VTAB_DIRECTONLY
+**
Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within +** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** prohibit that virtual table from being used from within triggers and +** views. +**
+** +** [[SQLITE_VTAB_INNOCUOUS]]
SQLITE_VTAB_INNOCUOUS
+**
Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within +** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** identify that virtual table as being safe to use from within triggers +** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the +** virtual table can do no serious harm even if it is controlled by a +** malicious hacker. Developers should avoid setting the SQLITE_VTAB_INNOCUOUS +** flag unless absolutely necessary. +**
+** +** [[SQLITE_VTAB_USES_ALL_SCHEMAS]]
SQLITE_VTAB_USES_ALL_SCHEMAS
+**
Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_USES_ALL_SCHEMAS) from within +** the [xConnect] or [xCreate] methods of a [virtual table] implementation +** instruct the query planner to begin at least a read transaction on +** all schemas ("main", "temp", and any ATTACH-ed databases) whenever the +** virtual table is used. +**
+**
+*/ +#define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 +#define SQLITE_VTAB_INNOCUOUS 2 +#define SQLITE_VTAB_DIRECTONLY 3 +#define SQLITE_VTAB_USES_ALL_SCHEMAS 4 + +/* +** CAPI3REF: Determine The Virtual Table Conflict Policy +** +** This function may only be called from within a call to the [xUpdate] method +** of a [virtual table] implementation for an INSERT or UPDATE operation. ^The +** value returned is one of [SQLITE_ROLLBACK], [SQLITE_IGNORE], [SQLITE_FAIL], +** [SQLITE_ABORT], or [SQLITE_REPLACE], according to the [ON CONFLICT] mode +** of the SQL statement that triggered the call to the [xUpdate] method of the +** [virtual table]. +*/ +SQLITE_API int sqlite3_vtab_on_conflict(sqlite3 *); + +/* +** CAPI3REF: Determine If Virtual Table Column Access Is For UPDATE +** +** If the sqlite3_vtab_nochange(X) routine is called within the [xColumn] +** method of a [virtual table], then it might return true if the +** column is being fetched as part of an UPDATE operation during which the +** column value will not change. The virtual table implementation can use +** this hint as permission to substitute a return value that is less +** expensive to compute and that the corresponding +** [xUpdate] method understands as a "no-change" value. +** +** If the [xColumn] method calls sqlite3_vtab_nochange() and finds that +** the column is not changed by the UPDATE statement, then the xColumn +** method can optionally return without setting a result, without calling +** any of the [sqlite3_result_int|sqlite3_result_xxxxx() interfaces]. +** In that case, [sqlite3_value_nochange(X)] will return true for the +** same column in the [xUpdate] method. +** +** The sqlite3_vtab_nochange() routine is an optimization. Virtual table +** implementations should continue to give a correct answer even if the +** sqlite3_vtab_nochange() interface were to always return false. 
In the +** current implementation, the sqlite3_vtab_nochange() interface always +** returns false for the enhanced [UPDATE FROM] statement. +*/ +SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*); + +/* +** CAPI3REF: Determine The Collation For a Virtual Table Constraint +** METHOD: sqlite3_index_info +** +** This function may only be called from within a call to the [xBestIndex] +** method of a [virtual table]. This function returns a pointer to a string +** that is the name of the appropriate collation sequence to use for text +** comparisons on the constraint identified by its arguments. +** +** The first argument must be the pointer to the [sqlite3_index_info] object +** that is the first parameter to the xBestIndex() method. The second argument +** must be an index into the aConstraint[] array belonging to the +** sqlite3_index_info structure passed to xBestIndex. +** +** Important: +** The first parameter must be the same pointer that is passed into the +** xBestIndex() method. The first parameter may not be a pointer to a +** different [sqlite3_index_info] object, even an exact copy. +** +** The return value is computed as follows: +** +**
    +**
  1. If the constraint comes from a WHERE clause expression that contains +** a [COLLATE operator], then the name of the collation specified by +** that COLLATE operator is returned. +**

  2. If there is no COLLATE operator, but the column that is the subject +** of the constraint specifies an alternative collating sequence via +** a [COLLATE clause] on the column definition within the CREATE TABLE +** statement that was passed into [sqlite3_declare_vtab()], then the +** name of that alternative collating sequence is returned. +**

  3. Otherwise, "BINARY" is returned. +**

+*/ +SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); + +/* +** CAPI3REF: Determine if a virtual table query is DISTINCT +** METHOD: sqlite3_index_info +** +** This API may only be used from within an [xBestIndex|xBestIndex method] +** of a [virtual table] implementation. The result of calling this +** interface from outside of xBestIndex() is undefined and probably harmful. +** +** ^The sqlite3_vtab_distinct() interface returns an integer between 0 and +** 3. The integer returned by sqlite3_vtab_distinct() +** gives the virtual table additional information about how the query +** planner wants the output to be ordered. As long as the virtual table +** can meet the ordering requirements of the query planner, it may set +** the "orderByConsumed" flag. +** +**
  1. +** ^If the sqlite3_vtab_distinct() interface returns 0, that means +** that the query planner needs the virtual table to return all rows in the +** sort order defined by the "nOrderBy" and "aOrderBy" fields of the +** [sqlite3_index_info] object. This is the default expectation. If the +** virtual table outputs all rows in sorted order, then it is always safe for +** the xBestIndex method to set the "orderByConsumed" flag, regardless of +** the return value from sqlite3_vtab_distinct(). +**

  2. +** ^(If the sqlite3_vtab_distinct() interface returns 1, that means +** that the query planner does not need the rows to be returned in sorted order +** as long as all rows with the same values in all columns identified by the +** "aOrderBy" field are adjacent.)^ This mode is used when the query planner +** is doing a GROUP BY. +**

  3. +** ^(If the sqlite3_vtab_distinct() interface returns 2, that means +** that the query planner does not need the rows returned in any particular +** order, as long as rows with the same values in all "aOrderBy" columns +** are adjacent.)^ ^(Furthermore, only a single row for each particular +** combination of values in the columns identified by the "aOrderBy" field +** needs to be returned.)^ ^It is always ok for two or more rows with the same +** values in all "aOrderBy" columns to be returned, as long as all such rows +** are adjacent. ^The virtual table may, if it chooses, omit extra rows +** that have the same value for all columns identified by "aOrderBy". +** ^However omitting the extra rows is optional. +** This mode is used for a DISTINCT query. +**

  4. +** ^(If the sqlite3_vtab_distinct() interface returns 3, that means +** that the query planner needs only distinct rows but it does need the +** rows to be sorted.)^ ^The virtual table implementation is free to omit +** rows that are identical in all aOrderBy columns, if it wants to, but +** it is not required to omit any rows. This mode is used for queries +** that have both DISTINCT and ORDER BY clauses. +**

+** +** ^For the purposes of comparing virtual table output values to see if the +** values are the same value for sorting purposes, two NULL values are considered +** to be the same. In other words, the comparison operator is "IS" +** (or "IS NOT DISTINCT FROM") and not "==". +** +** If a virtual table implementation is unable to meet the requirements +** specified above, then it must not set the "orderByConsumed" flag in the +** [sqlite3_index_info] object or an incorrect answer may result. +** +** ^A virtual table implementation is always free to return rows in any order +** it wants, as long as the "orderByConsumed" flag is not set. ^When +** the "orderByConsumed" flag is unset, the query planner will add extra +** [bytecode] to ensure that the final results returned by the SQL query are +** ordered correctly. The use of the "orderByConsumed" flag and the +** sqlite3_vtab_distinct() interface is merely an optimization. ^Careful +** use of the sqlite3_vtab_distinct() interface and the "orderByConsumed" +** flag might help queries against a virtual table to run faster. Being +** overly aggressive and setting the "orderByConsumed" flag when it is not +** valid to do so, on the other hand, might cause SQLite to return incorrect +** results. +*/ +SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info*); + +/* +** CAPI3REF: Identify and handle IN constraints in xBestIndex +** +** This interface may only be used from within an +** [xBestIndex|xBestIndex() method] of a [virtual table] implementation. +** The result of invoking this interface from any other context is +** undefined and probably harmful. +** +** ^(A constraint on a virtual table of the form +** "[IN operator|column IN (...)]" is +** communicated to the xBestIndex method as a +** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use +** this constraint, it must set the corresponding +** aConstraintUsage[].argvIndex to a positive integer. 
^(Then, under +** the usual mode of handling IN operators, SQLite generates [bytecode] +** that invokes the [xFilter|xFilter() method] once for each value +** on the right-hand side of the IN operator.)^ Thus the virtual table +** only sees a single value from the right-hand side of the IN operator +** at a time. +** +** In some cases, however, it would be advantageous for the virtual +** table to see all values on the right-hand side of the IN operator all at +** once. The sqlite3_vtab_in() interface facilitates this in two ways: +** +**
    +**
  1. +** ^A call to sqlite3_vtab_in(P,N,-1) will return true (non-zero) +** if and only if the [sqlite3_index_info|P->aConstraint][N] constraint +** is an [IN operator] that can be processed all at once. ^In other words, +** sqlite3_vtab_in() with -1 in the third argument is a mechanism +** by which the virtual table can ask SQLite if all-at-once processing +** of the IN operator is even possible. +** +**

  2. +** ^A call to sqlite3_vtab_in(P,N,F) with F==1 or F==0 indicates +** to SQLite that the virtual table does or does not want to process +** the IN operator all-at-once, respectively. ^Thus when the third +** parameter (F) is non-negative, this interface is the mechanism by +** which the virtual table tells SQLite how it wants to process the +** IN operator. +**

+** +** ^The sqlite3_vtab_in(P,N,F) interface can be invoked multiple times +** within the same xBestIndex method call. ^For any given P,N pair, +** the return value from sqlite3_vtab_in(P,N,F) will always be the same +** within the same xBestIndex call. ^If the interface returns true +** (non-zero), that means that the constraint is an IN operator +** that can be processed all-at-once. ^If the constraint is not an IN +** operator or cannot be processed all-at-once, then the interface returns +** false. +** +** ^(All-at-once processing of the IN operator is selected if both of the +** following conditions are met: +** +**
    +**
  1. The P->aConstraintUsage[N].argvIndex value is set to a positive +** integer. This is how the virtual table tells SQLite that it wants to +** use the N-th constraint. +** +**

  2. The last call to sqlite3_vtab_in(P,N,F) for which F was +** non-negative had F>=1. +**

)^ +** +** ^If either or both of the conditions above are false, then SQLite uses +** the traditional one-at-a-time processing strategy for the IN constraint. +** ^If both conditions are true, then the argvIndex-th parameter to the +** xFilter method will be an [sqlite3_value] that appears to be NULL, +** but which can be passed to [sqlite3_vtab_in_first()] and +** [sqlite3_vtab_in_next()] to find all values on the right-hand side +** of the IN constraint. +*/ +SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle); + +/* +** CAPI3REF: Find all elements on the right-hand side of an IN constraint. +** +** These interfaces are only useful from within the +** [xFilter|xFilter() method] of a [virtual table] implementation. +** The result of invoking these interfaces from any other context +** is undefined and probably harmful. +** +** The X parameter in a call to sqlite3_vtab_in_first(X,P) or +** sqlite3_vtab_in_next(X,P) should be one of the parameters to the +** xFilter method which invokes these routines, and specifically +** a parameter that was previously selected for all-at-once IN constraint +** processing using the [sqlite3_vtab_in()] interface in the +** [xBestIndex|xBestIndex method]. ^(If the X parameter is not +** an xFilter argument that was selected for all-at-once IN constraint +** processing, then these routines return [SQLITE_ERROR].)^ +** +** ^(Use these routines to access all values on the right-hand side +** of the IN constraint using code like the following: +** +**
+**    for(rc=sqlite3_vtab_in_first(pList, &pVal);
+**        rc==SQLITE_OK && pVal;
+**        rc=sqlite3_vtab_in_next(pList, &pVal)
+**    ){
+**      // do something with pVal
+**    }
+**    if( rc!=SQLITE_OK ){
+**      // an error has occurred
+**    }
+** 
)^ +** +** ^On success, the sqlite3_vtab_in_first(X,P) and sqlite3_vtab_in_next(X,P) +** routines return SQLITE_OK and set *P to point to the first or next value +** on the RHS of the IN constraint. ^If there are no more values on the +** right hand side of the IN constraint, then *P is set to NULL and these +** routines return [SQLITE_DONE]. ^The return value might be +** some other value, such as SQLITE_NOMEM, in the event of a malfunction. +** +** The *ppOut values returned by these routines are only valid until the +** next call to either of these routines or until the end of the xFilter +** method from which these routines were called. If the virtual table +** implementation needs to retain the *ppOut values for longer, it must make +** copies. The *ppOut values are [protected sqlite3_value|protected]. +*/ +SQLITE_API int sqlite3_vtab_in_first(sqlite3_value *pVal, sqlite3_value **ppOut); +SQLITE_API int sqlite3_vtab_in_next(sqlite3_value *pVal, sqlite3_value **ppOut); + +/* +** CAPI3REF: Constraint values in xBestIndex() +** METHOD: sqlite3_index_info +** +** This API may only be used from within the [xBestIndex|xBestIndex method] +** of a [virtual table] implementation. The result of calling this interface +** from outside of an xBestIndex method are undefined and probably harmful. +** +** ^When the sqlite3_vtab_rhs_value(P,J,V) interface is invoked from within +** the [xBestIndex] method of a [virtual table] implementation, with P being +** a copy of the [sqlite3_index_info] object pointer passed into xBestIndex and +** J being a 0-based index into P->aConstraint[], then this routine +** attempts to set *V to the value of the right-hand operand of +** that constraint if the right-hand operand is known. ^If the +** right-hand operand is not known, then *V is set to a NULL pointer. +** ^The sqlite3_vtab_rhs_value(P,J,V) interface returns SQLITE_OK if +** and only if *V is set to a value. 
^The sqlite3_vtab_rhs_value(P,J,V) +** interface returns SQLITE_NOTFOUND if the right-hand side of the J-th +** constraint is not available. ^The sqlite3_vtab_rhs_value() interface +** can return a result code other than SQLITE_OK or SQLITE_NOTFOUND if +** something goes wrong. +** +** The sqlite3_vtab_rhs_value() interface is usually only successful if +** the right-hand operand of a constraint is a literal value in the original +** SQL statement. If the right-hand operand is an expression or a reference +** to some other column or a [host parameter], then sqlite3_vtab_rhs_value() +** will probably return [SQLITE_NOTFOUND]. +** +** ^(Some constraints, such as [SQLITE_INDEX_CONSTRAINT_ISNULL] and +** [SQLITE_INDEX_CONSTRAINT_ISNOTNULL], have no right-hand operand. For such +** constraints, sqlite3_vtab_rhs_value() always returns SQLITE_NOTFOUND.)^ +** +** ^The [sqlite3_value] object returned in *V is a protected sqlite3_value +** and remains valid for the duration of the xBestIndex method call. +** ^When xBestIndex returns, the sqlite3_value object returned by +** sqlite3_vtab_rhs_value() is automatically deallocated. +** +** The "_rhs_" in the name of this routine is an abbreviation for +** "Right-Hand Side". +*/ +SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **ppVal); + +/* +** CAPI3REF: Conflict resolution modes +** KEYWORDS: {conflict resolution mode} +** +** These constants are returned by [sqlite3_vtab_on_conflict()] to +** inform a [virtual table] implementation what the [ON CONFLICT] mode +** is for the SQL statement being evaluated. +** +** Note that the [SQLITE_IGNORE] constant is also used as a potential +** return value from the [sqlite3_set_authorizer()] callback and that +** [SQLITE_ABORT] is also a [result code]. 
+*/ +#define SQLITE_ROLLBACK 1 +/* #define SQLITE_IGNORE 2 // Also used by sqlite3_authorizer() callback */ +#define SQLITE_FAIL 3 +/* #define SQLITE_ABORT 4 // Also an error code */ +#define SQLITE_REPLACE 5 + +/* +** CAPI3REF: Prepared Statement Scan Status Opcodes +** KEYWORDS: {scanstatus options} +** +** The following constants can be used for the T parameter to the +** [sqlite3_stmt_scanstatus(S,X,T,V)] interface. Each constant designates a +** different metric for sqlite3_stmt_scanstatus() to return. +** +** When the value returned to V is a string, space to hold that string is +** managed by the prepared statement S and will be automatically freed when +** S is finalized. +** +** Not all values are available for all query elements. When a value is +** not available, the output variable is set to -1 if the value is numeric, +** or to NULL if it is a string (SQLITE_SCANSTAT_NAME). +** +**
+** [[SQLITE_SCANSTAT_NLOOP]]
SQLITE_SCANSTAT_NLOOP
+**
^The [sqlite3_int64] variable pointed to by the V parameter will be +** set to the total number of times that the X-th loop has run.
+** +** [[SQLITE_SCANSTAT_NVISIT]]
SQLITE_SCANSTAT_NVISIT
+**
^The [sqlite3_int64] variable pointed to by the V parameter will be set +** to the total number of rows examined by all iterations of the X-th loop.
+** +** [[SQLITE_SCANSTAT_EST]]
SQLITE_SCANSTAT_EST
+**
^The "double" variable pointed to by the V parameter will be set to the +** query planner's estimate for the average number of rows output from each +** iteration of the X-th loop. If the query planner's estimates was accurate, +** then this value will approximate the quotient NVISIT/NLOOP and the +** product of this value for all prior loops with the same SELECTID will +** be the NLOOP value for the current loop. +** +** [[SQLITE_SCANSTAT_NAME]]
SQLITE_SCANSTAT_NAME
+**
^The "const char *" variable pointed to by the V parameter will be set +** to a zero-terminated UTF-8 string containing the name of the index or table +** used for the X-th loop. +** +** [[SQLITE_SCANSTAT_EXPLAIN]]
SQLITE_SCANSTAT_EXPLAIN
+**
^The "const char *" variable pointed to by the V parameter will be set +** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN] +** description for the X-th loop. +** +** [[SQLITE_SCANSTAT_SELECTID]]
SQLITE_SCANSTAT_SELECTID
+**
^The "int" variable pointed to by the V parameter will be set to the +** id for the X-th query plan element. The id value is unique within the +** statement. The select-id is the same value as is output in the first +** column of an [EXPLAIN QUERY PLAN] query. +** +** [[SQLITE_SCANSTAT_PARENTID]]
SQLITE_SCANSTAT_PARENTID
+**
The "int" variable pointed to by the V parameter will be set to the +** id of the parent of the current query element, if applicable, or +** to zero if the query element has no parent. This is the same value as +** returned in the second column of an [EXPLAIN QUERY PLAN] query. +** +** [[SQLITE_SCANSTAT_NCYCLE]]
SQLITE_SCANSTAT_NCYCLE
+**
The sqlite3_int64 output value is set to the number of cycles, +** according to the processor time-stamp counter, that elapsed while the +** query element was being processed. This value is not available for +** all query elements - if it is unavailable the output variable is +** set to -1. +**
+*/ +#define SQLITE_SCANSTAT_NLOOP 0 +#define SQLITE_SCANSTAT_NVISIT 1 +#define SQLITE_SCANSTAT_EST 2 +#define SQLITE_SCANSTAT_NAME 3 +#define SQLITE_SCANSTAT_EXPLAIN 4 +#define SQLITE_SCANSTAT_SELECTID 5 +#define SQLITE_SCANSTAT_PARENTID 6 +#define SQLITE_SCANSTAT_NCYCLE 7 + +/* +** CAPI3REF: Prepared Statement Scan Status +** METHOD: sqlite3_stmt +** +** These interfaces return information about the predicted and measured +** performance for pStmt. Advanced applications can use this +** interface to compare the predicted and the measured performance and +** issue warnings and/or rerun [ANALYZE] if discrepancies are found. +** +** Since this interface is expected to be rarely used, it is only +** available if SQLite is compiled using the [SQLITE_ENABLE_STMT_SCANSTATUS] +** compile-time option. +** +** The "iScanStatusOp" parameter determines which status information to return. +** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior +** of this interface is undefined. ^The requested measurement is written into +** a variable pointed to by the "pOut" parameter. +** +** The "flags" parameter must be passed a mask of flags. At present only +** one flag is defined - SQLITE_SCANSTAT_COMPLEX. If SQLITE_SCANSTAT_COMPLEX +** is specified, then status information is available for all elements +** of a query plan that are reported by "EXPLAIN QUERY PLAN" output. If +** SQLITE_SCANSTAT_COMPLEX is not specified, then only query plan elements +** that correspond to query loops (the "SCAN..." and "SEARCH..." elements of +** the EXPLAIN QUERY PLAN output) are available. Invoking API +** sqlite3_stmt_scanstatus() is equivalent to calling +** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter. +** +** Parameter "idx" identifies the specific query element to retrieve statistics +** for. Query elements are numbered starting from zero. A value of -1 may be +** to query for statistics regarding the entire query. 
^If idx is out of range +** - less than -1 or greater than or equal to the total number of query +** elements used to implement the statement - a non-zero value is returned and +** the variable that pOut points to is unchanged. +** +** See also: [sqlite3_stmt_scanstatus_reset()] +*/ +SQLITE_API int sqlite3_stmt_scanstatus( + sqlite3_stmt *pStmt, /* Prepared statement for which info desired */ + int idx, /* Index of loop to report on */ + int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ + void *pOut /* Result written here */ +); +SQLITE_API int sqlite3_stmt_scanstatus_v2( + sqlite3_stmt *pStmt, /* Prepared statement for which info desired */ + int idx, /* Index of loop to report on */ + int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ + int flags, /* Mask of flags defined below */ + void *pOut /* Result written here */ +); + +/* +** CAPI3REF: Prepared Statement Scan Status +** KEYWORDS: {scan status flags} +*/ +#define SQLITE_SCANSTAT_COMPLEX 0x0001 + +/* +** CAPI3REF: Zero Scan-Status Counters +** METHOD: sqlite3_stmt +** +** ^Zero all [sqlite3_stmt_scanstatus()] related event counters. +** +** This API is only available if the library is built with pre-processor +** symbol [SQLITE_ENABLE_STMT_SCANSTATUS] defined. +*/ +SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt*); + +/* +** CAPI3REF: Flush caches to disk mid-transaction +** METHOD: sqlite3 +** +** ^If a write-transaction is open on [database connection] D when the +** [sqlite3_db_cacheflush(D)] interface invoked, any dirty +** pages in the pager-cache that are not currently in use are written out +** to disk. A dirty page may be in use if a database cursor created by an +** active SQL statement is reading from it, or if it is page 1 of a database +** file (page 1 is always "in use"). ^The [sqlite3_db_cacheflush(D)] +** interface flushes caches for all schemas - "main", "temp", and +** any [attached] databases. 
+** +** ^If this function needs to obtain extra database locks before dirty pages +** can be flushed to disk, it does so. ^If those locks cannot be obtained +** immediately and there is a busy-handler callback configured, it is invoked +** in the usual manner. ^If the required lock still cannot be obtained, then +** the database is skipped and an attempt made to flush any dirty pages +** belonging to the next (if any) database. ^If any databases are skipped +** because locks cannot be obtained, but no other error occurs, this +** function returns SQLITE_BUSY. +** +** ^If any other error occurs while flushing dirty pages to disk (for +** example an IO error or out-of-memory condition), then processing is +** abandoned and an SQLite [error code] is returned to the caller immediately. +** +** ^Otherwise, if no error occurs, [sqlite3_db_cacheflush()] returns SQLITE_OK. +** +** ^This function does not set the database handle error code or message +** returned by the [sqlite3_errcode()] and [sqlite3_errmsg()] functions. +*/ +SQLITE_API int sqlite3_db_cacheflush(sqlite3*); + +/* +** CAPI3REF: The pre-update hook. +** METHOD: sqlite3 +** +** ^These interfaces are only available if SQLite is compiled using the +** [SQLITE_ENABLE_PREUPDATE_HOOK] compile-time option. +** +** ^The [sqlite3_preupdate_hook()] interface registers a callback function +** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation +** on a database table. +** ^At most one preupdate hook may be registered at a time on a single +** [database connection]; each call to [sqlite3_preupdate_hook()] overrides +** the previous setting. +** ^The preupdate hook is disabled by invoking [sqlite3_preupdate_hook()] +** with a NULL pointer as the second parameter. +** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as +** the first parameter to callbacks. 
+** +** ^The preupdate hook only fires for changes to real database tables; the +** preupdate hook is not invoked for changes to [virtual tables] or to +** system tables like sqlite_sequence or sqlite_stat1. +** +** ^The second parameter to the preupdate callback is a pointer to +** the [database connection] that registered the preupdate hook. +** ^The third parameter to the preupdate callback is one of the constants +** [SQLITE_INSERT], [SQLITE_DELETE], or [SQLITE_UPDATE] to identify the +** kind of update operation that is about to occur. +** ^(The fourth parameter to the preupdate callback is the name of the +** database within the database connection that is being modified. This +** will be "main" for the main database or "temp" for TEMP tables or +** the name given after the AS keyword in the [ATTACH] statement for attached +** databases.)^ +** ^The fifth parameter to the preupdate callback is the name of the +** table that is being modified. +** +** For an UPDATE or DELETE operation on a [rowid table], the sixth +** parameter passed to the preupdate callback is the initial [rowid] of the +** row being modified or deleted. For an INSERT operation on a rowid table, +** or any operation on a WITHOUT ROWID table, the value of the sixth +** parameter is undefined. For an INSERT or UPDATE on a rowid table the +** seventh parameter is the final rowid value of the row being inserted +** or updated. The value of the seventh parameter passed to the callback +** function is not defined for operations on WITHOUT ROWID tables, or for +** DELETE operations on rowid tables. +** +** ^The sqlite3_preupdate_hook(D,C,P) function returns the P argument from +** the previous call on the same [database connection] D, or NULL for +** the first call on D. +** +** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], +** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces +** provide additional information about a preupdate event. 
These routines +** may only be called from within a preupdate callback. Invoking any of +** these routines from outside of a preupdate callback or with a +** [database connection] pointer that is different from the one supplied +** to the preupdate callback results in undefined and probably undesirable +** behavior. +** +** ^The [sqlite3_preupdate_count(D)] interface returns the number of columns +** in the row that is being inserted, updated, or deleted. +** +** ^The [sqlite3_preupdate_old(D,N,P)] interface writes into P a pointer to +** a [protected sqlite3_value] that contains the value of the Nth column of +** the table row before it is updated. The N parameter must be between 0 +** and one less than the number of columns or the behavior will be +** undefined. This must only be used within SQLITE_UPDATE and SQLITE_DELETE +** preupdate callbacks; if it is used by an SQLITE_INSERT callback then the +** behavior is undefined. The [sqlite3_value] that P points to +** will be destroyed when the preupdate callback returns. +** +** ^The [sqlite3_preupdate_new(D,N,P)] interface writes into P a pointer to +** a [protected sqlite3_value] that contains the value of the Nth column of +** the table row after it is updated. The N parameter must be between 0 +** and one less than the number of columns or the behavior will be +** undefined. This must only be used within SQLITE_INSERT and SQLITE_UPDATE +** preupdate callbacks; if it is used by an SQLITE_DELETE callback then the +** behavior is undefined. The [sqlite3_value] that P points to +** will be destroyed when the preupdate callback returns. +** +** ^The [sqlite3_preupdate_depth(D)] interface returns 0 if the preupdate +** callback was invoked as a result of a direct insert, update, or delete +** operation; or 1 for inserts, updates, or deletes invoked by top-level +** triggers; or 2 for changes resulting from triggers called by top-level +** triggers; and so forth. 
+** +** When the [sqlite3_blob_write()] API is used to update a blob column, +** the pre-update hook is invoked with SQLITE_DELETE. This is because +** in this case the new values are not available. In this case, when a +** callback made with op==SQLITE_DELETE is actually a write using the +** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns +** the index of the column being written. In other cases, where the +** pre-update hook is being invoked for some other reason, including a +** regular DELETE, sqlite3_preupdate_blobwrite() returns -1. +** +** See also: [sqlite3_update_hook()] +*/ +#if defined(SQLITE_ENABLE_PREUPDATE_HOOK) +SQLITE_API void *sqlite3_preupdate_hook( + sqlite3 *db, + void(*xPreUpdate)( + void *pCtx, /* Copy of third arg to preupdate_hook() */ + sqlite3 *db, /* Database handle */ + int op, /* SQLITE_UPDATE, DELETE or INSERT */ + char const *zDb, /* Database name */ + char const *zName, /* Table name */ + sqlite3_int64 iKey1, /* Rowid of row about to be deleted/updated */ + sqlite3_int64 iKey2 /* New rowid value (for a rowid UPDATE) */ + ), + void* +); +SQLITE_API int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **); +SQLITE_API int sqlite3_preupdate_count(sqlite3 *); +SQLITE_API int sqlite3_preupdate_depth(sqlite3 *); +SQLITE_API int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **); +SQLITE_API int sqlite3_preupdate_blobwrite(sqlite3 *); +#endif + +/* +** CAPI3REF: Low-level system error code +** METHOD: sqlite3 +** +** ^Attempt to return the underlying operating system error code or error +** number that caused the most recent I/O error or failure to open a file. +** The return value is OS-dependent. For example, on unix systems, after +** [sqlite3_open_v2()] returns [SQLITE_CANTOPEN], this interface could be +** called to get back the underlying "errno" that caused the problem, such +** as ENOSPC, EAUTH, EISDIR, and so forth. 
+*/ +SQLITE_API int sqlite3_system_errno(sqlite3*); + +/* +** CAPI3REF: Database Snapshot +** KEYWORDS: {snapshot} {sqlite3_snapshot} +** +** An instance of the snapshot object records the state of a [WAL mode] +** database for some specific point in history. +** +** In [WAL mode], multiple [database connections] that are open on the +** same database file can each be reading a different historical version +** of the database file. When a [database connection] begins a read +** transaction, that connection sees an unchanging copy of the database +** as it existed for the point in time when the transaction first started. +** Subsequent changes to the database from other connections are not seen +** by the reader until a new read transaction is started. +** +** The sqlite3_snapshot object records state information about an historical +** version of the database file so that it is possible to later open a new read +** transaction that sees that historical version of the database rather than +** the most recent version. +*/ +typedef struct sqlite3_snapshot { + unsigned char hidden[48]; +} sqlite3_snapshot; + +/* +** CAPI3REF: Record A Database Snapshot +** CONSTRUCTOR: sqlite3_snapshot +** +** ^The [sqlite3_snapshot_get(D,S,P)] interface attempts to make a +** new [sqlite3_snapshot] object that records the current state of +** schema S in database connection D. ^On success, the +** [sqlite3_snapshot_get(D,S,P)] interface writes a pointer to the newly +** created [sqlite3_snapshot] object into *P and returns SQLITE_OK. +** If there is not already a read-transaction open on schema S when +** this function is called, one is opened automatically. +** +** The following must be true for this function to succeed. If any of +** the following statements are false when sqlite3_snapshot_get() is +** called, SQLITE_ERROR is returned. The final value of *P is undefined +** in this case. +** +**
    +**
  • The database handle must not be in [autocommit mode]. +** +**
  • Schema S of [database connection] D must be a [WAL mode] database. +** +**
  • There must not be a write transaction open on schema S of database +** connection D. +** +**
  • One or more transactions must have been written to the current wal +** file since it was created on disk (by any connection). This means +** that a snapshot cannot be taken on a wal mode database with no wal +** file immediately after it is first opened. At least one transaction +** must be written to it first. +**
+** +** This function may also return SQLITE_NOMEM. If it is called with the +** database handle in autocommit mode but fails for some other reason, +** whether or not a read transaction is opened on schema S is undefined. +** +** The [sqlite3_snapshot] object returned from a successful call to +** [sqlite3_snapshot_get()] must be freed using [sqlite3_snapshot_free()] +** to avoid a memory leak. +** +** The [sqlite3_snapshot_get()] interface is only available when the +** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_get( + sqlite3 *db, + const char *zSchema, + sqlite3_snapshot **ppSnapshot +); + +/* +** CAPI3REF: Start a read transaction on an historical snapshot +** METHOD: sqlite3_snapshot +** +** ^The [sqlite3_snapshot_open(D,S,P)] interface either starts a new read +** transaction or upgrades an existing one for schema S of +** [database connection] D such that the read transaction refers to +** historical [snapshot] P, rather than the most recent change to the +** database. ^The [sqlite3_snapshot_open()] interface returns SQLITE_OK +** on success or an appropriate [error code] if it fails. +** +** ^In order to succeed, the database connection must not be in +** [autocommit mode] when [sqlite3_snapshot_open(D,S,P)] is called. If there +** is already a read transaction open on schema S, then the database handle +** must have no active statements (SELECT statements that have been passed +** to sqlite3_step() but not sqlite3_reset() or sqlite3_finalize()). +** SQLITE_ERROR is returned if either of these conditions is violated, or +** if schema S does not exist, or if the snapshot object is invalid. +** +** ^A call to sqlite3_snapshot_open() will fail to open if the specified +** snapshot has been overwritten by a [checkpoint]. In this case +** SQLITE_ERROR_SNAPSHOT is returned. 
+** +** If there is already a read transaction open when this function is +** invoked, then the same read transaction remains open (on the same +** database snapshot) if SQLITE_ERROR, SQLITE_BUSY or SQLITE_ERROR_SNAPSHOT +** is returned. If another error code - for example SQLITE_PROTOCOL or an +** SQLITE_IOERR error code - is returned, then the final state of the +** read transaction is undefined. If SQLITE_OK is returned, then the +** read transaction is now open on database snapshot P. +** +** ^(A call to [sqlite3_snapshot_open(D,S,P)] will fail if the +** database connection D does not know that the database file for +** schema S is in [WAL mode]. A database connection might not know +** that the database file is in [WAL mode] if there has been no prior +** I/O on that database connection, or if the database entered [WAL mode] +** after the most recent I/O on the database connection.)^ +** (Hint: Run "[PRAGMA application_id]" against a newly opened +** database connection in order to make it ready to use snapshots.) +** +** The [sqlite3_snapshot_open()] interface is only available when the +** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_open( + sqlite3 *db, + const char *zSchema, + sqlite3_snapshot *pSnapshot +); + +/* +** CAPI3REF: Destroy a snapshot +** DESTRUCTOR: sqlite3_snapshot +** +** ^The [sqlite3_snapshot_free(P)] interface destroys [sqlite3_snapshot] P. +** The application must eventually free every [sqlite3_snapshot] object +** using this routine to avoid a memory leak. +** +** The [sqlite3_snapshot_free()] interface is only available when the +** [SQLITE_ENABLE_SNAPSHOT] compile-time option is used. +*/ +SQLITE_API SQLITE_EXPERIMENTAL void sqlite3_snapshot_free(sqlite3_snapshot*); + +/* +** CAPI3REF: Compare the ages of two snapshot handles. +** METHOD: sqlite3_snapshot +** +** The sqlite3_snapshot_cmp(P1, P2) interface is used to compare the ages +** of two valid snapshot handles. 
+** +** If the two snapshot handles are not associated with the same database +** file, the result of the comparison is undefined. +** +** Additionally, the result of the comparison is only valid if both of the +** snapshot handles were obtained by calling sqlite3_snapshot_get() since the +** last time the wal file was deleted. The wal file is deleted when the +** database is changed back to rollback mode or when the number of database +** clients drops to zero. If either snapshot handle was obtained before the +** wal file was last deleted, the value returned by this function +** is undefined. +** +** Otherwise, this API returns a negative value if P1 refers to an older +** snapshot than P2, zero if the two handles refer to the same database +** snapshot, and a positive value if P1 is a newer snapshot than P2. +** +** This interface is only available if SQLite is compiled with the +** [SQLITE_ENABLE_SNAPSHOT] option. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp( + sqlite3_snapshot *p1, + sqlite3_snapshot *p2 +); + +/* +** CAPI3REF: Recover snapshots from a wal file +** METHOD: sqlite3_snapshot +** +** If a [WAL file] remains on disk after all database connections close +** (either through the use of the [SQLITE_FCNTL_PERSIST_WAL] [file control] +** or because the last process to have the database opened exited without +** calling [sqlite3_close()]) and a new connection is subsequently opened +** on that database and [WAL file], the [sqlite3_snapshot_open()] interface +** will only be able to open the last transaction added to the WAL file +** even though the WAL file contains other valid transactions. +** +** This function attempts to scan the WAL file associated with database zDb +** of database handle db and make all valid snapshots available to +** sqlite3_snapshot_open(). It is an error if there is already a read +** transaction open on the database, or if the database is not a WAL mode +** database. 
+** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +** +** This interface is only available if SQLite is compiled with the +** [SQLITE_ENABLE_SNAPSHOT] option. +*/ +SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb); + +/* +** CAPI3REF: Serialize a database +** +** The sqlite3_serialize(D,S,P,F) interface returns a pointer to memory +** that is a serialization of the S database on [database connection] D. +** If P is not a NULL pointer, then the size of the database in bytes +** is written into *P. +** +** For an ordinary on-disk database file, the serialization is just a +** copy of the disk file. For an in-memory database or a "TEMP" database, +** the serialization is the same sequence of bytes which would be written +** to disk if that database where backed up to disk. +** +** The usual case is that sqlite3_serialize() copies the serialization of +** the database into memory obtained from [sqlite3_malloc64()] and returns +** a pointer to that memory. The caller is responsible for freeing the +** returned value to avoid a memory leak. However, if the F argument +** contains the SQLITE_SERIALIZE_NOCOPY bit, then no memory allocations +** are made, and the sqlite3_serialize() function will return a pointer +** to the contiguous memory representation of the database that SQLite +** is currently using for that database, or NULL if the no such contiguous +** memory representation of the database exists. A contiguous memory +** representation of the database will usually only exist if there has +** been a prior call to [sqlite3_deserialize(D,S,...)] with the same +** values of D and S. +** The size of the database is written into *P even if the +** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy +** of the database exists. 
+** +** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set, +** the returned buffer content will remain accessible and unchanged +** until either the next write operation on the connection or when +** the connection is closed, and applications must not modify the +** buffer. If the bit had been clear, the returned buffer will not +** be accessed by SQLite after the call. +** +** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the +** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory +** allocation error occurs. +** +** This interface is omitted if SQLite is compiled with the +** [SQLITE_OMIT_DESERIALIZE] option. +*/ +SQLITE_API unsigned char *sqlite3_serialize( + sqlite3 *db, /* The database connection */ + const char *zSchema, /* Which DB to serialize. ex: "main", "temp", ... */ + sqlite3_int64 *piSize, /* Write size of the DB here, if not NULL */ + unsigned int mFlags /* Zero or more SQLITE_SERIALIZE_* flags */ +); + +/* +** CAPI3REF: Flags for sqlite3_serialize +** +** Zero or more of the following constants can be OR-ed together for +** the F argument to [sqlite3_serialize(D,S,P,F)]. +** +** SQLITE_SERIALIZE_NOCOPY means that [sqlite3_serialize()] will return +** a pointer to contiguous in-memory database that it is currently using, +** without making a copy of the database. If SQLite is not currently using +** a contiguous in-memory database, then this option causes +** [sqlite3_serialize()] to return a NULL pointer. SQLite will only be +** using a contiguous in-memory database if it has been initialized by a +** prior call to [sqlite3_deserialize()]. +*/ +#define SQLITE_SERIALIZE_NOCOPY 0x001 /* Do no memory allocations */ + +/* +** CAPI3REF: Deserialize a database +** +** The sqlite3_deserialize(D,S,P,N,M,F) interface causes the +** [database connection] D to disconnect from database S and then +** reopen S as an in-memory database based on the serialization contained +** in P. 
The serialized database P is N bytes in size. M is the size of +** the buffer P, which might be larger than N. If M is larger than N, and +** the SQLITE_DESERIALIZE_READONLY bit is not set in F, then SQLite is +** permitted to add content to the in-memory database as long as the total +** size does not exceed M bytes. +** +** If the SQLITE_DESERIALIZE_FREEONCLOSE bit is set in F, then SQLite will +** invoke sqlite3_free() on the serialization buffer when the database +** connection closes. If the SQLITE_DESERIALIZE_RESIZEABLE bit is set, then +** SQLite will try to increase the buffer size using sqlite3_realloc64() +** if writes on the database cause it to grow larger than M bytes. +** +** Applications must not modify the buffer P or invalidate it before +** the database connection D is closed. +** +** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the +** database is currently in a read transaction or is involved in a backup +** operation. +** +** It is not possible to deserialized into the TEMP database. If the +** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the +** function returns SQLITE_ERROR. +** +** The deserialized database should not be in [WAL mode]. If the database +** is in WAL mode, then any attempt to use the database file will result +** in an [SQLITE_CANTOPEN] error. The application can set the +** [file format version numbers] (bytes 18 and 19) of the input database P +** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the +** database file into rollback mode and work around this limitation. +** +** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the +** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then +** [sqlite3_free()] is invoked on argument P prior to returning. +** +** This interface is omitted if SQLite is compiled with the +** [SQLITE_OMIT_DESERIALIZE] option. 
+*/ +SQLITE_API int sqlite3_deserialize( + sqlite3 *db, /* The database connection */ + const char *zSchema, /* Which DB to reopen with the deserialization */ + unsigned char *pData, /* The serialized database content */ + sqlite3_int64 szDb, /* Number bytes in the deserialization */ + sqlite3_int64 szBuf, /* Total size of buffer pData[] */ + unsigned mFlags /* Zero or more SQLITE_DESERIALIZE_* flags */ +); + +/* +** CAPI3REF: Flags for sqlite3_deserialize() +** +** The following are allowed values for 6th argument (the F argument) to +** the [sqlite3_deserialize(D,S,P,N,M,F)] interface. +** +** The SQLITE_DESERIALIZE_FREEONCLOSE means that the database serialization +** in the P argument is held in memory obtained from [sqlite3_malloc64()] +** and that SQLite should take ownership of this memory and automatically +** free it when it has finished using it. Without this flag, the caller +** is responsible for freeing any dynamically allocated memory. +** +** The SQLITE_DESERIALIZE_RESIZEABLE flag means that SQLite is allowed to +** grow the size of the database using calls to [sqlite3_realloc64()]. This +** flag should only be used if SQLITE_DESERIALIZE_FREEONCLOSE is also used. +** Without this flag, the deserialized database cannot increase in size beyond +** the number of bytes specified by the M parameter. +** +** The SQLITE_DESERIALIZE_READONLY flag means that the deserialized database +** should be treated as read-only. +*/ +#define SQLITE_DESERIALIZE_FREEONCLOSE 1 /* Call sqlite3_free() on close */ +#define SQLITE_DESERIALIZE_RESIZEABLE 2 /* Resize using sqlite3_realloc64() */ +#define SQLITE_DESERIALIZE_READONLY 4 /* Database is read-only */ + +/* +** Undo the hack that converts floating point types to integer for +** builds on processors without floating point support. 
+*/ +#ifdef SQLITE_OMIT_FLOATING_POINT +# undef double +#endif + +#if defined(__wasi__) +# undef SQLITE_WASI +# define SQLITE_WASI 1 +# undef SQLITE_OMIT_WAL +# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */ +# ifndef SQLITE_OMIT_LOAD_EXTENSION +# define SQLITE_OMIT_LOAD_EXTENSION +# endif +# ifndef SQLITE_THREADSAFE +# define SQLITE_THREADSAFE 0 +# endif +#endif + +#ifdef __cplusplus +} /* End of the 'extern "C"' block */ +#endif +#endif /* SQLITE3_H */ + +/******** Begin file sqlite3rtree.h *********/ +/* +** 2010 August 30 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +*/ + +#ifndef _SQLITE3RTREE_H_ +#define _SQLITE3RTREE_H_ + + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct sqlite3_rtree_geometry sqlite3_rtree_geometry; +typedef struct sqlite3_rtree_query_info sqlite3_rtree_query_info; + +/* The double-precision datatype used by RTree depends on the +** SQLITE_RTREE_INT_ONLY compile-time option. +*/ +#ifdef SQLITE_RTREE_INT_ONLY + typedef sqlite3_int64 sqlite3_rtree_dbl; +#else + typedef double sqlite3_rtree_dbl; +#endif + +/* +** Register a geometry callback named zGeom that can be used as part of an +** R-Tree geometry query as follows: +** +** SELECT ... FROM WHERE MATCH $zGeom(... params ...) +*/ +SQLITE_API int sqlite3_rtree_geometry_callback( + sqlite3 *db, + const char *zGeom, + int (*xGeom)(sqlite3_rtree_geometry*, int, sqlite3_rtree_dbl*,int*), + void *pContext +); + + +/* +** A pointer to a structure of the following type is passed as the first +** argument to callbacks registered using rtree_geometry_callback(). 
+*/ +struct sqlite3_rtree_geometry { + void *pContext; /* Copy of pContext passed to s_r_g_c() */ + int nParam; /* Size of array aParam[] */ + sqlite3_rtree_dbl *aParam; /* Parameters passed to SQL geom function */ + void *pUser; /* Callback implementation user data */ + void (*xDelUser)(void *); /* Called by SQLite to clean up pUser */ +}; + +/* +** Register a 2nd-generation geometry callback named zScore that can be +** used as part of an R-Tree geometry query as follows: +** +** SELECT ... FROM WHERE MATCH $zQueryFunc(... params ...) +*/ +SQLITE_API int sqlite3_rtree_query_callback( + sqlite3 *db, + const char *zQueryFunc, + int (*xQueryFunc)(sqlite3_rtree_query_info*), + void *pContext, + void (*xDestructor)(void*) +); + + +/* +** A pointer to a structure of the following type is passed as the +** argument to scored geometry callback registered using +** sqlite3_rtree_query_callback(). +** +** Note that the first 5 fields of this structure are identical to +** sqlite3_rtree_geometry. This structure is a subclass of +** sqlite3_rtree_geometry. 
+*/ +struct sqlite3_rtree_query_info { + void *pContext; /* pContext from when function registered */ + int nParam; /* Number of function parameters */ + sqlite3_rtree_dbl *aParam; /* value of function parameters */ + void *pUser; /* callback can use this, if desired */ + void (*xDelUser)(void*); /* function to free pUser */ + sqlite3_rtree_dbl *aCoord; /* Coordinates of node or entry to check */ + unsigned int *anQueue; /* Number of pending entries in the queue */ + int nCoord; /* Number of coordinates */ + int iLevel; /* Level of current node or entry */ + int mxLevel; /* The largest iLevel value in the tree */ + sqlite3_int64 iRowid; /* Rowid for current entry */ + sqlite3_rtree_dbl rParentScore; /* Score of parent node */ + int eParentWithin; /* Visibility of parent node */ + int eWithin; /* OUT: Visibility */ + sqlite3_rtree_dbl rScore; /* OUT: Write the score here */ + /* The following fields are only available in 3.8.11 and later */ + sqlite3_value **apSqlParam; /* Original SQL values of parameters */ +}; + +/* +** Allowed values for sqlite3_rtree_query.eWithin and .eParentWithin. +*/ +#define NOT_WITHIN 0 /* Object completely outside of query region */ +#define PARTLY_WITHIN 1 /* Object partially overlaps query region */ +#define FULLY_WITHIN 2 /* Object fully contained within query region */ + + +#ifdef __cplusplus +} /* end of the 'extern "C"' block */ +#endif + +#endif /* ifndef _SQLITE3RTREE_H_ */ + +/******** End of sqlite3rtree.h *********/ +/******** Begin file sqlite3session.h *********/ + +#if !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) +#define __SQLITESESSION_H_ 1 + +/* +** Make sure we can call this stuff from C++. +*/ +#ifdef __cplusplus +extern "C" { +#endif + + +/* +** CAPI3REF: Session Object Handle +** +** An instance of this object is a [session] that can be used to +** record changes to a database. 
+*/ +typedef struct sqlite3_session sqlite3_session; + +/* +** CAPI3REF: Changeset Iterator Handle +** +** An instance of this object acts as a cursor for iterating +** over the elements of a [changeset] or [patchset]. +*/ +typedef struct sqlite3_changeset_iter sqlite3_changeset_iter; + +/* +** CAPI3REF: Create A New Session Object +** CONSTRUCTOR: sqlite3_session +** +** Create a new session object attached to database handle db. If successful, +** a pointer to the new object is written to *ppSession and SQLITE_OK is +** returned. If an error occurs, *ppSession is set to NULL and an SQLite +** error code (e.g. SQLITE_NOMEM) is returned. +** +** It is possible to create multiple session objects attached to a single +** database handle. +** +** Session objects created using this function should be deleted using the +** [sqlite3session_delete()] function before the database handle that they +** are attached to is itself closed. If the database handle is closed before +** the session object is deleted, then the results of calling any session +** module function, including [sqlite3session_delete()] on the session object +** are undefined. +** +** Because the session module uses the [sqlite3_preupdate_hook()] API, it +** is not possible for an application to register a pre-update hook on a +** database handle that has one or more session objects attached. Nor is +** it possible to create a session object attached to a database handle for +** which a pre-update hook is already defined. The results of attempting +** either of these things are undefined. +** +** The session object will be used to create changesets for tables in +** database zDb, where zDb is either "main", or "temp", or the name of an +** attached database. It is not an error if database zDb is not attached +** to the database when the session object is created. +*/ +SQLITE_API int sqlite3session_create( + sqlite3 *db, /* Database handle */ + const char *zDb, /* Name of db (e.g. 
"main") */ + sqlite3_session **ppSession /* OUT: New session object */ +); + +/* +** CAPI3REF: Delete A Session Object +** DESTRUCTOR: sqlite3_session +** +** Delete a session object previously allocated using +** [sqlite3session_create()]. Once a session object has been deleted, the +** results of attempting to use pSession with any other session module +** function are undefined. +** +** Session objects must be deleted before the database handle to which they +** are attached is closed. Refer to the documentation for +** [sqlite3session_create()] for details. +*/ +SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); + +/* +** CAPI3REF: Configure a Session Object +** METHOD: sqlite3_session +** +** This method is used to configure a session object after it has been +** created. At present the only valid values for the second parameter are +** [SQLITE_SESSION_OBJCONFIG_SIZE] and [SQLITE_SESSION_OBJCONFIG_ROWID]. +** +*/ +SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); + +/* +** CAPI3REF: Options for sqlite3session_object_config +** +** The following values may passed as the the 2nd parameter to +** sqlite3session_object_config(). +** +**
SQLITE_SESSION_OBJCONFIG_SIZE
+** This option is used to set, clear or query the flag that enables +** the [sqlite3session_changeset_size()] API. Because it imposes some +** computational overhead, this API is disabled by default. Argument +** pArg must point to a value of type (int). If the value is initially +** 0, then the sqlite3session_changeset_size() API is disabled. If it +** is greater than 0, then the same API is enabled. Or, if the initial +** value is less than zero, no change is made. In all cases the (int) +** variable is set to 1 if the sqlite3session_changeset_size() API is +** enabled following the current call, or 0 otherwise. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. +** +**
SQLITE_SESSION_OBJCONFIG_ROWID
+** This option is used to set, clear or query the flag that enables +** collection of data for tables with no explicit PRIMARY KEY. +** +** Normally, tables with no explicit PRIMARY KEY are simply ignored +** by the sessions module. However, if this flag is set, it behaves +** as if such tables have a column "_rowid_ INTEGER PRIMARY KEY" inserted +** as their leftmost columns. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. +*/ +#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_ROWID 2 + +/* +** CAPI3REF: Enable Or Disable A Session Object +** METHOD: sqlite3_session +** +** Enable or disable the recording of changes by a session object. When +** enabled, a session object records changes made to the database. When +** disabled - it does not. A newly created session object is enabled. +** Refer to the documentation for [sqlite3session_changeset()] for further +** details regarding how enabling and disabling a session object affects +** the eventual changesets. +** +** Passing zero to this function disables the session. Passing a value +** greater than zero enables it. Passing a value less than zero is a +** no-op, and may be used to query the current state of the session. +** +** The return value indicates the final state of the session object: 0 if +** the session is disabled, or 1 if it is enabled. +*/ +SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable); + +/* +** CAPI3REF: Set Or Clear the Indirect Change Flag +** METHOD: sqlite3_session +** +** Each change recorded by a session object is marked as either direct or +** indirect. A change is marked as indirect if either: +** +**
    +**
  • The session object "indirect" flag is set when the change is +** made, or +**
  • The change is made by an SQL trigger or foreign key action +** instead of directly as a result of a users SQL statement. +**
+** +** If a single row is affected by more than one operation within a session, +** then the change is considered indirect if all operations meet the criteria +** for an indirect change above, or direct otherwise. +** +** This function is used to set, clear or query the session object indirect +** flag. If the second argument passed to this function is zero, then the +** indirect flag is cleared. If it is greater than zero, the indirect flag +** is set. Passing a value less than zero does not modify the current value +** of the indirect flag, and may be used to query the current state of the +** indirect flag for the specified session object. +** +** The return value indicates the final state of the indirect flag: 0 if +** it is clear, or 1 if it is set. +*/ +SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect); + +/* +** CAPI3REF: Attach A Table To A Session Object +** METHOD: sqlite3_session +** +** If argument zTab is not NULL, then it is the name of a table to attach +** to the session object passed as the first argument. All subsequent changes +** made to the table while the session object is enabled will be recorded. See +** documentation for [sqlite3session_changeset()] for further details. +** +** Or, if argument zTab is NULL, then changes are recorded for all tables +** in the database. If additional tables are added to the database (by +** executing "CREATE TABLE" statements) after this call is made, changes for +** the new tables are also recorded. +** +** Changes can only be recorded for tables that have a PRIMARY KEY explicitly +** defined as part of their CREATE TABLE statement. It does not matter if the +** PRIMARY KEY is an "INTEGER PRIMARY KEY" (rowid alias) or not. The PRIMARY +** KEY may consist of a single column, or may be a composite key. +** +** It is not an error if the named table does not exist in the database. Nor +** is it an error if the named table does not have a PRIMARY KEY. 
However, +** no changes will be recorded in either of these scenarios. +** +** Changes are not recorded for individual rows that have NULL values stored +** in one or more of their PRIMARY KEY columns. +** +** SQLITE_OK is returned if the call completes without error. Or, if an error +** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned. +** +**

Special sqlite_stat1 Handling

+** +** As of SQLite version 3.22.0, the "sqlite_stat1" table is an exception to +** some of the rules above. In SQLite, the schema of sqlite_stat1 is: +**
+**        CREATE TABLE sqlite_stat1(tbl,idx,stat)
+**  
+** +** Even though sqlite_stat1 does not have a PRIMARY KEY, changes are +** recorded for it as if the PRIMARY KEY is (tbl,idx). Additionally, changes +** are recorded for rows for which (idx IS NULL) is true. However, for such +** rows a zero-length blob (SQL value X'') is stored in the changeset or +** patchset instead of a NULL value. This allows such changesets to be +** manipulated by legacy implementations of sqlite3changeset_invert(), +** concat() and similar. +** +** The sqlite3changeset_apply() function automatically converts the +** zero-length blob back to a NULL value when updating the sqlite_stat1 +** table. However, if the application calls sqlite3changeset_new(), +** sqlite3changeset_old() or sqlite3changeset_conflict on a changeset +** iterator directly (including on a changeset iterator passed to a +** conflict-handler callback) then the X'' value is returned. The application +** must translate X'' to NULL itself if required. +** +** Legacy (older than 3.22.0) versions of the sessions module cannot capture +** changes made to the sqlite_stat1 table. Legacy versions of the +** sqlite3changeset_apply() function silently ignore any modifications to the +** sqlite_stat1 table that are part of a changeset or patchset. +*/ +SQLITE_API int sqlite3session_attach( + sqlite3_session *pSession, /* Session object */ + const char *zTab /* Table name */ +); + +/* +** CAPI3REF: Set a table filter on a Session Object. +** METHOD: sqlite3_session +** +** The second argument (xFilter) is the "filter callback". For changes to rows +** in tables that are not attached to the Session object, the filter is called +** to determine whether changes to the table's rows should be tracked or not. +** If xFilter returns 0, changes are not tracked. Note that once a table is +** attached, xFilter will not be called again. 
+*/ +SQLITE_API void sqlite3session_table_filter( + sqlite3_session *pSession, /* Session object */ + int(*xFilter)( + void *pCtx, /* Copy of third arg to _filter_table() */ + const char *zTab /* Table name */ + ), + void *pCtx /* First argument passed to xFilter */ +); + +/* +** CAPI3REF: Generate A Changeset From A Session Object +** METHOD: sqlite3_session +** +** Obtain a changeset containing changes to the tables attached to the +** session object passed as the first argument. If successful, +** set *ppChangeset to point to a buffer containing the changeset +** and *pnChangeset to the size of the changeset in bytes before returning +** SQLITE_OK. If an error occurs, set both *ppChangeset and *pnChangeset to +** zero and return an SQLite error code. +** +** A changeset consists of zero or more INSERT, UPDATE and/or DELETE changes, +** each representing a change to a single row of an attached table. An INSERT +** change contains the values of each field of a new database row. A DELETE +** contains the original values of each field of a deleted database row. An +** UPDATE change contains the original values of each field of an updated +** database row along with the updated values for each updated non-primary-key +** column. It is not possible for an UPDATE change to represent a change that +** modifies the values of primary key columns. If such a change is made, it +** is represented in a changeset as a DELETE followed by an INSERT. +** +** Changes are not recorded for rows that have NULL values stored in one or +** more of their PRIMARY KEY columns. If such a row is inserted or deleted, +** no corresponding change is present in the changesets returned by this +** function. If an existing row with one or more NULL values stored in +** PRIMARY KEY columns is updated so that all PRIMARY KEY columns are non-NULL, +** only an INSERT is appears in the changeset. 
Similarly, if an existing row +** with non-NULL PRIMARY KEY values is updated so that one or more of its +** PRIMARY KEY columns are set to NULL, the resulting changeset contains a +** DELETE change only. +** +** The contents of a changeset may be traversed using an iterator created +** using the [sqlite3changeset_start()] API. A changeset may be applied to +** a database with a compatible schema using the [sqlite3changeset_apply()] +** API. +** +** Within a changeset generated by this function, all changes related to a +** single table are grouped together. In other words, when iterating through +** a changeset or when applying a changeset to a database, all changes related +** to a single table are processed before moving on to the next table. Tables +** are sorted in the same order in which they were attached (or auto-attached) +** to the sqlite3_session object. The order in which the changes related to +** a single table are stored is undefined. +** +** Following a successful call to this function, it is the responsibility of +** the caller to eventually free the buffer that *ppChangeset points to using +** [sqlite3_free()]. +** +**

Changeset Generation

+** +** Once a table has been attached to a session object, the session object +** records the primary key values of all new rows inserted into the table. +** It also records the original primary key and other column values of any +** deleted or updated rows. For each unique primary key value, data is only +** recorded once - the first time a row with said primary key is inserted, +** updated or deleted in the lifetime of the session. +** +** There is one exception to the previous paragraph: when a row is inserted, +** updated or deleted, if one or more of its primary key columns contain a +** NULL value, no record of the change is made. +** +** The session object therefore accumulates two types of records - those +** that consist of primary key values only (created when the user inserts +** a new record) and those that consist of the primary key values and the +** original values of other table columns (created when the users deletes +** or updates a record). +** +** When this function is called, the requested changeset is created using +** both the accumulated records and the current contents of the database +** file. Specifically: +** +**
    +**
  • For each record generated by an insert, the database is queried +** for a row with a matching primary key. If one is found, an INSERT +** change is added to the changeset. If no such row is found, no change +** is added to the changeset. +** +**
  • For each record generated by an update or delete, the database is +** queried for a row with a matching primary key. If such a row is +** found and one or more of the non-primary key fields have been +** modified from their original values, an UPDATE change is added to +** the changeset. Or, if no such row is found in the table, a DELETE +** change is added to the changeset. If there is a row with a matching +** primary key in the database, but all fields contain their original +** values, no change is added to the changeset. +**
+** +** This means, amongst other things, that if a row is inserted and then later +** deleted while a session object is active, neither the insert nor the delete +** will be present in the changeset. Or if a row is deleted and then later a +** row with the same primary key values inserted while a session object is +** active, the resulting changeset will contain an UPDATE change instead of +** a DELETE and an INSERT. +** +** When a session object is disabled (see the [sqlite3session_enable()] API), +** it does not accumulate records when rows are inserted, updated or deleted. +** This may appear to have some counter-intuitive effects if a single row +** is written to more than once during a session. For example, if a row +** is inserted while a session object is enabled, then later deleted while +** the same session object is disabled, no INSERT record will appear in the +** changeset, even though the delete took place while the session was disabled. +** Or, if one field of a row is updated while a session is disabled, and +** another field of the same row is updated while the session is enabled, the +** resulting changeset will contain an UPDATE change that updates both fields. +*/ +SQLITE_API int sqlite3session_changeset( + sqlite3_session *pSession, /* Session object */ + int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */ + void **ppChangeset /* OUT: Buffer containing changeset */ +); + +/* +** CAPI3REF: Return An Upper-limit For The Size Of The Changeset +** METHOD: sqlite3_session +** +** By default, this function always returns 0. For it to return +** a useful result, the sqlite3_session object must have been configured +** to enable this API using sqlite3session_object_config() with the +** SQLITE_SESSION_OBJCONFIG_SIZE verb. +** +** When enabled, this function returns an upper limit, in bytes, for the size +** of the changeset that might be produced if sqlite3session_changeset() were +** called. 
The final changeset size might be equal to or smaller than the +** size in bytes returned by this function. +*/ +SQLITE_API sqlite3_int64 sqlite3session_changeset_size(sqlite3_session *pSession); + +/* +** CAPI3REF: Load The Difference Between Tables Into A Session +** METHOD: sqlite3_session +** +** If it is not already attached to the session object passed as the first +** argument, this function attaches table zTbl in the same manner as the +** [sqlite3session_attach()] function. If zTbl does not exist, or if it +** does not have a primary key, this function is a no-op (but does not return +** an error). +** +** Argument zFromDb must be the name of a database ("main", "temp" etc.) +** attached to the same database handle as the session object that contains +** a table compatible with the table attached to the session by this function. +** A table is considered compatible if it: +** +**
    +**
  • Has the same name, +**
  • Has the same set of columns declared in the same order, and +**
  • Has the same PRIMARY KEY definition. +**
+** +** If the tables are not compatible, SQLITE_SCHEMA is returned. If the tables +** are compatible but do not have any PRIMARY KEY columns, it is not an error +** but no changes are added to the session object. As with other session +** APIs, tables without PRIMARY KEYs are simply ignored. +** +** This function adds a set of changes to the session object that could be +** used to update the table in database zFrom (call this the "from-table") +** so that its content is the same as the table attached to the session +** object (call this the "to-table"). Specifically: +** +**
    +**
  • For each row (primary key) that exists in the to-table but not in +** the from-table, an INSERT record is added to the session object. +** +**
  • For each row (primary key) that exists in the from-table but not in +** the to-table, a DELETE record is added to the session object. +** +**
  • For each row (primary key) that exists in both tables, but features +** different non-PK values in each, an UPDATE record is added to the +** session. +**
+** +** To clarify, if this function is called and then a changeset constructed +** using [sqlite3session_changeset()], then after applying that changeset to +** database zFrom the contents of the two compatible tables would be +** identical. +** +** It is an error if database zFrom does not exist or does not contain the +** required compatible table. +** +** If the operation is successful, SQLITE_OK is returned. Otherwise, an SQLite +** error code. In this case, if argument pzErrMsg is not NULL, *pzErrMsg +** may be set to point to a buffer containing an English language error +** message. It is the responsibility of the caller to free this buffer using +** sqlite3_free(). +*/ +SQLITE_API int sqlite3session_diff( + sqlite3_session *pSession, + const char *zFromDb, + const char *zTbl, + char **pzErrMsg +); + + +/* +** CAPI3REF: Generate A Patchset From A Session Object +** METHOD: sqlite3_session +** +** The differences between a patchset and a changeset are that: +** +**
    +**
  • DELETE records consist of the primary key fields only. The +** original values of other fields are omitted. +**
  • The original values of any modified fields are omitted from +** UPDATE records. +**
+** +** A patchset blob may be used with up to date versions of all +** sqlite3changeset_xxx API functions except for sqlite3changeset_invert(), +** which returns SQLITE_CORRUPT if it is passed a patchset. Similarly, +** attempting to use a patchset blob with old versions of the +** sqlite3changeset_xxx APIs also provokes an SQLITE_CORRUPT error. +** +** Because the non-primary key "old.*" fields are omitted, no +** SQLITE_CHANGESET_DATA conflicts can be detected or reported if a patchset +** is passed to the sqlite3changeset_apply() API. Other conflict types work +** in the same way as for changesets. +** +** Changes within a patchset are ordered in the same way as for changesets +** generated by the sqlite3session_changeset() function (i.e. all changes for +** a single table are grouped together, tables appear in the order in which +** they were attached to the session object). +*/ +SQLITE_API int sqlite3session_patchset( + sqlite3_session *pSession, /* Session object */ + int *pnPatchset, /* OUT: Size of buffer at *ppPatchset */ + void **ppPatchset /* OUT: Buffer containing patchset */ +); + +/* +** CAPI3REF: Test if a changeset has recorded any changes. +** +** Return non-zero if no changes to attached tables have been recorded by +** the session object passed as the first argument. Otherwise, if one or +** more changes have been recorded, return zero. +** +** Even if this function returns zero, it is possible that calling +** [sqlite3session_changeset()] on the session handle may still return a +** changeset that contains no changes. This can happen when a row in +** an attached table is modified and then later on the original values +** are restored. However, if this function returns non-zero, then it is +** guaranteed that a call to sqlite3session_changeset() will return a +** changeset containing zero changes. 
+*/ +SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession); + +/* +** CAPI3REF: Query for the amount of heap memory used by a session object. +** +** This API returns the total amount of heap memory in bytes currently +** used by the session object passed as the only argument. +*/ +SQLITE_API sqlite3_int64 sqlite3session_memory_used(sqlite3_session *pSession); + +/* +** CAPI3REF: Create An Iterator To Traverse A Changeset +** CONSTRUCTOR: sqlite3_changeset_iter +** +** Create an iterator used to iterate through the contents of a changeset. +** If successful, *pp is set to point to the iterator handle and SQLITE_OK +** is returned. Otherwise, if an error occurs, *pp is set to zero and an +** SQLite error code is returned. +** +** The following functions can be used to advance and query a changeset +** iterator created by this function: +** +**
    +**
  • [sqlite3changeset_next()] +**
  • [sqlite3changeset_op()] +**
  • [sqlite3changeset_new()] +**
  • [sqlite3changeset_old()] +**
+** +** It is the responsibility of the caller to eventually destroy the iterator +** by passing it to [sqlite3changeset_finalize()]. The buffer containing the +** changeset (pChangeset) must remain valid until after the iterator is +** destroyed. +** +** Assuming the changeset blob was created by one of the +** [sqlite3session_changeset()], [sqlite3changeset_concat()] or +** [sqlite3changeset_invert()] functions, all changes within the changeset +** that apply to a single table are grouped together. This means that when +** an application iterates through a changeset using an iterator created by +** this function, all changes that relate to a single table are visited +** consecutively. There is no chance that the iterator will visit a change +** the applies to table X, then one for table Y, and then later on visit +** another change for table X. +** +** The behavior of sqlite3changeset_start_v2() and its streaming equivalent +** may be modified by passing a combination of +** [SQLITE_CHANGESETSTART_INVERT | supported flags] as the 4th parameter. +** +** Note that the sqlite3changeset_start_v2() API is still experimental +** and therefore subject to change. +*/ +SQLITE_API int sqlite3changeset_start( + sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */ + int nChangeset, /* Size of changeset blob in bytes */ + void *pChangeset /* Pointer to blob containing changeset */ +); +SQLITE_API int sqlite3changeset_start_v2( + sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */ + int nChangeset, /* Size of changeset blob in bytes */ + void *pChangeset, /* Pointer to blob containing changeset */ + int flags /* SESSION_CHANGESETSTART_* flags */ +); + +/* +** CAPI3REF: Flags for sqlite3changeset_start_v2 +** +** The following flags may passed via the 4th parameter to +** [sqlite3changeset_start_v2] and [sqlite3changeset_start_v2_strm]: +** +**
SQLITE_CHANGESETSTART_INVERT
+** Invert the changeset while iterating through it. This is equivalent to +** inverting a changeset using sqlite3changeset_invert() before applying it. +** It is an error to specify this flag with a patchset. +*/ +#define SQLITE_CHANGESETSTART_INVERT 0x0002 + + +/* +** CAPI3REF: Advance A Changeset Iterator +** METHOD: sqlite3_changeset_iter +** +** This function may only be used with iterators created by the function +** [sqlite3changeset_start()]. If it is called on an iterator passed to +** a conflict-handler callback by [sqlite3changeset_apply()], SQLITE_MISUSE +** is returned and the call has no effect. +** +** Immediately after an iterator is created by sqlite3changeset_start(), it +** does not point to any change in the changeset. Assuming the changeset +** is not empty, the first call to this function advances the iterator to +** point to the first change in the changeset. Each subsequent call advances +** the iterator to point to the next change in the changeset (if any). If +** no error occurs and the iterator points to a valid change after a call +** to sqlite3changeset_next() has advanced it, SQLITE_ROW is returned. +** Otherwise, if all changes in the changeset have already been visited, +** SQLITE_DONE is returned. +** +** If an error occurs, an SQLite error code is returned. Possible error +** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or +** SQLITE_NOMEM. +*/ +SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter); + +/* +** CAPI3REF: Obtain The Current Operation From A Changeset Iterator +** METHOD: sqlite3_changeset_iter +** +** The pIter argument passed to this function may either be an iterator +** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator +** created by [sqlite3changeset_start()]. In the latter case, the most recent +** call to [sqlite3changeset_next()] must have returned [SQLITE_ROW]. If this +** is not the case, this function returns [SQLITE_MISUSE]. 
+** +** Arguments pOp, pnCol and pzTab may not be NULL. Upon return, three +** outputs are set through these pointers: +** +** *pOp is set to one of [SQLITE_INSERT], [SQLITE_DELETE] or [SQLITE_UPDATE], +** depending on the type of change that the iterator currently points to; +** +** *pnCol is set to the number of columns in the table affected by the change; and +** +** *pzTab is set to point to a nul-terminated utf-8 encoded string containing +** the name of the table affected by the current change. The buffer remains +** valid until either sqlite3changeset_next() is called on the iterator +** or until the conflict-handler function returns. +** +** If pbIndirect is not NULL, then *pbIndirect is set to true (1) if the change +** is an indirect change, or false (0) otherwise. See the documentation for +** [sqlite3session_indirect()] for a description of direct and indirect +** changes. +** +** If no error occurs, SQLITE_OK is returned. If an error does occur, an +** SQLite error code is returned. The values of the output variables may not +** be trusted in this case. +*/ +SQLITE_API int sqlite3changeset_op( + sqlite3_changeset_iter *pIter, /* Iterator object */ + const char **pzTab, /* OUT: Pointer to table name */ + int *pnCol, /* OUT: Number of columns in table */ + int *pOp, /* OUT: SQLITE_INSERT, DELETE or UPDATE */ + int *pbIndirect /* OUT: True for an 'indirect' change */ +); + +/* +** CAPI3REF: Obtain The Primary Key Definition Of A Table +** METHOD: sqlite3_changeset_iter +** +** For each modified table, a changeset includes the following: +** +**
    +**
  • The number of columns in the table, and +**
  • Which of those columns make up the table's PRIMARY KEY. +**
+** +** This function is used to find which columns comprise the PRIMARY KEY of +** the table modified by the change that iterator pIter currently points to. +** If successful, *pabPK is set to point to an array of nCol entries, where +** nCol is the number of columns in the table. Elements of *pabPK are set to +** 0x01 if the corresponding column is part of the tables primary key, or +** 0x00 if it is not. +** +** If argument pnCol is not NULL, then *pnCol is set to the number of columns +** in the table. +** +** If this function is called when the iterator does not point to a valid +** entry, SQLITE_MISUSE is returned and the output variables zeroed. Otherwise, +** SQLITE_OK is returned and the output variables populated as described +** above. +*/ +SQLITE_API int sqlite3changeset_pk( + sqlite3_changeset_iter *pIter, /* Iterator object */ + unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */ + int *pnCol /* OUT: Number of entries in output array */ +); + +/* +** CAPI3REF: Obtain old.* Values From A Changeset Iterator +** METHOD: sqlite3_changeset_iter +** +** The pIter argument passed to this function may either be an iterator +** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator +** created by [sqlite3changeset_start()]. In the latter case, the most recent +** call to [sqlite3changeset_next()] must have returned SQLITE_ROW. +** Furthermore, it may only be called if the type of change that the iterator +** currently points to is either [SQLITE_DELETE] or [SQLITE_UPDATE]. Otherwise, +** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL. +** +** Argument iVal must be greater than or equal to 0, and less than the number +** of columns in the table affected by the current change. Otherwise, +** [SQLITE_RANGE] is returned and *ppValue is set to NULL. 
+** +** If successful, this function sets *ppValue to point to a protected +** sqlite3_value object containing the iVal'th value from the vector of +** original row values stored as part of the UPDATE or DELETE change and +** returns SQLITE_OK. The name of the function comes from the fact that this +** is similar to the "old.*" columns available to update or delete triggers. +** +** If some other error occurs (e.g. an OOM condition), an SQLite error code +** is returned and *ppValue is set to NULL. +*/ +SQLITE_API int sqlite3changeset_old( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int iVal, /* Column number */ + sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */ +); + +/* +** CAPI3REF: Obtain new.* Values From A Changeset Iterator +** METHOD: sqlite3_changeset_iter +** +** The pIter argument passed to this function may either be an iterator +** passed to a conflict-handler by [sqlite3changeset_apply()], or an iterator +** created by [sqlite3changeset_start()]. In the latter case, the most recent +** call to [sqlite3changeset_next()] must have returned SQLITE_ROW. +** Furthermore, it may only be called if the type of change that the iterator +** currently points to is either [SQLITE_UPDATE] or [SQLITE_INSERT]. Otherwise, +** this function returns [SQLITE_MISUSE] and sets *ppValue to NULL. +** +** Argument iVal must be greater than or equal to 0, and less than the number +** of columns in the table affected by the current change. Otherwise, +** [SQLITE_RANGE] is returned and *ppValue is set to NULL. +** +** If successful, this function sets *ppValue to point to a protected +** sqlite3_value object containing the iVal'th value from the vector of +** new row values stored as part of the UPDATE or INSERT change and +** returns SQLITE_OK. If the change is an UPDATE and does not include +** a new value for the requested column, *ppValue is set to NULL and +** SQLITE_OK returned. 
The name of the function comes from the fact that +** this is similar to the "new.*" columns available to update or delete +** triggers. +** +** If some other error occurs (e.g. an OOM condition), an SQLite error code +** is returned and *ppValue is set to NULL. +*/ +SQLITE_API int sqlite3changeset_new( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int iVal, /* Column number */ + sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */ +); + +/* +** CAPI3REF: Obtain Conflicting Row Values From A Changeset Iterator +** METHOD: sqlite3_changeset_iter +** +** This function should only be used with iterator objects passed to a +** conflict-handler callback by [sqlite3changeset_apply()] with either +** [SQLITE_CHANGESET_DATA] or [SQLITE_CHANGESET_CONFLICT]. If this function +** is called on any other iterator, [SQLITE_MISUSE] is returned and *ppValue +** is set to NULL. +** +** Argument iVal must be greater than or equal to 0, and less than the number +** of columns in the table affected by the current change. Otherwise, +** [SQLITE_RANGE] is returned and *ppValue is set to NULL. +** +** If successful, this function sets *ppValue to point to a protected +** sqlite3_value object containing the iVal'th value from the +** "conflicting row" associated with the current conflict-handler callback +** and returns SQLITE_OK. +** +** If some other error occurs (e.g. an OOM condition), an SQLite error code +** is returned and *ppValue is set to NULL. +*/ +SQLITE_API int sqlite3changeset_conflict( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int iVal, /* Column number */ + sqlite3_value **ppValue /* OUT: Value from conflicting row */ +); + +/* +** CAPI3REF: Determine The Number Of Foreign Key Constraint Violations +** METHOD: sqlite3_changeset_iter +** +** This function may only be called with an iterator passed to an +** SQLITE_CHANGESET_FOREIGN_KEY conflict handler callback. 
In this case +** it sets the output variable to the total number of known foreign key +** violations in the destination database and returns SQLITE_OK. +** +** In all other cases this function returns SQLITE_MISUSE. +*/ +SQLITE_API int sqlite3changeset_fk_conflicts( + sqlite3_changeset_iter *pIter, /* Changeset iterator */ + int *pnOut /* OUT: Number of FK violations */ +); + + +/* +** CAPI3REF: Finalize A Changeset Iterator +** METHOD: sqlite3_changeset_iter +** +** This function is used to finalize an iterator allocated with +** [sqlite3changeset_start()]. +** +** This function should only be called on iterators created using the +** [sqlite3changeset_start()] function. If an application calls this +** function with an iterator passed to a conflict-handler by +** [sqlite3changeset_apply()], [SQLITE_MISUSE] is immediately returned and the +** call has no effect. +** +** If an error was encountered within a call to an sqlite3changeset_xxx() +** function (for example an [SQLITE_CORRUPT] in [sqlite3changeset_next()] or an +** [SQLITE_NOMEM] in [sqlite3changeset_new()]) then an error code corresponding +** to that error is returned by this function. Otherwise, SQLITE_OK is +** returned. This is to allow the following pattern (pseudo-code): +** +**
+**   sqlite3changeset_start();
+**   while( SQLITE_ROW==sqlite3changeset_next() ){
+**     // Do something with change.
+**   }
+**   rc = sqlite3changeset_finalize();
+**   if( rc!=SQLITE_OK ){
+**     // An error has occurred
+**   }
+** 
+*/ +SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter); + +/* +** CAPI3REF: Invert A Changeset +** +** This function is used to "invert" a changeset object. Applying an inverted +** changeset to a database reverses the effects of applying the uninverted +** changeset. Specifically: +** +**
    +**
  • Each DELETE change is changed to an INSERT, and +**
  • Each INSERT change is changed to a DELETE, and +**
  • For each UPDATE change, the old.* and new.* values are exchanged. +**
+** +** This function does not change the order in which changes appear within +** the changeset. It merely reverses the sense of each individual change. +** +** If successful, a pointer to a buffer containing the inverted changeset +** is stored in *ppOut, the size of the same buffer is stored in *pnOut, and +** SQLITE_OK is returned. If an error occurs, both *pnOut and *ppOut are +** zeroed and an SQLite error code returned. +** +** It is the responsibility of the caller to eventually call sqlite3_free() +** on the *ppOut pointer to free the buffer allocation following a successful +** call to this function. +** +** WARNING/TODO: This function currently assumes that the input is a valid +** changeset. If it is not, the results are undefined. +*/ +SQLITE_API int sqlite3changeset_invert( + int nIn, const void *pIn, /* Input changeset */ + int *pnOut, void **ppOut /* OUT: Inverse of input */ +); + +/* +** CAPI3REF: Concatenate Two Changeset Objects +** +** This function is used to concatenate two changesets, A and B, into a +** single changeset. The result is a changeset equivalent to applying +** changeset A followed by changeset B. +** +** This function combines the two input changesets using an +** sqlite3_changegroup object. Calling it produces similar results as the +** following code fragment: +** +**
+**   sqlite3_changegroup *pGrp;
+**   rc = sqlite3_changegroup_new(&pGrp);
+**   if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nA, pA);
+**   if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, nB, pB);
+**   if( rc==SQLITE_OK ){
+**     rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
+**   }else{
+**     *ppOut = 0;
+**     *pnOut = 0;
+**   }
+** 
+** +** Refer to the sqlite3_changegroup documentation below for details. +*/ +SQLITE_API int sqlite3changeset_concat( + int nA, /* Number of bytes in buffer pA */ + void *pA, /* Pointer to buffer containing changeset A */ + int nB, /* Number of bytes in buffer pB */ + void *pB, /* Pointer to buffer containing changeset B */ + int *pnOut, /* OUT: Number of bytes in output changeset */ + void **ppOut /* OUT: Buffer containing output changeset */ +); + + +/* +** CAPI3REF: Upgrade the Schema of a Changeset/Patchset +*/ +SQLITE_API int sqlite3changeset_upgrade( + sqlite3 *db, + const char *zDb, + int nIn, const void *pIn, /* Input changeset */ + int *pnOut, void **ppOut /* OUT: Inverse of input */ +); + + + +/* +** CAPI3REF: Changegroup Handle +** +** A changegroup is an object used to combine two or more +** [changesets] or [patchsets] +*/ +typedef struct sqlite3_changegroup sqlite3_changegroup; + +/* +** CAPI3REF: Create A New Changegroup Object +** CONSTRUCTOR: sqlite3_changegroup +** +** An sqlite3_changegroup object is used to combine two or more changesets +** (or patchsets) into a single changeset (or patchset). A single changegroup +** object may combine changesets or patchsets, but not both. The output is +** always in the same format as the input. +** +** If successful, this function returns SQLITE_OK and populates (*pp) with +** a pointer to a new sqlite3_changegroup object before returning. The caller +** should eventually free the returned object using a call to +** sqlite3changegroup_delete(). If an error occurs, an SQLite error code +** (i.e. SQLITE_NOMEM) is returned and *pp is set to NULL. +** +** The usual usage pattern for an sqlite3_changegroup object is as follows: +** +**
    +**
  • It is created using a call to sqlite3changegroup_new(). +** +**
  • Zero or more changesets (or patchsets) are added to the object +** by calling sqlite3changegroup_add(). +** +**
  • The result of combining all input changesets together is obtained +** by the application via a call to sqlite3changegroup_output(). +** +**
  • The object is deleted using a call to sqlite3changegroup_delete(). +**
+** +** Any number of calls to add() and output() may be made between the calls to +** new() and delete(), and in any order. +** +** As well as the regular sqlite3changegroup_add() and +** sqlite3changegroup_output() functions, also available are the streaming +** versions sqlite3changegroup_add_strm() and sqlite3changegroup_output_strm(). +*/ +SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp); + +/* +** CAPI3REF: Add a Schema to a Changegroup +** METHOD: sqlite3_changegroup_schema +** +** This method may be used to optionally enforce the rule that the changesets +** added to the changegroup handle must match the schema of database zDb +** ("main", "temp", or the name of an attached database). If +** sqlite3changegroup_add() is called to add a changeset that is not compatible +** with the configured schema, SQLITE_SCHEMA is returned and the changegroup +** object is left in an undefined state. +** +** A changeset schema is considered compatible with the database schema in +** the same way as for sqlite3changeset_apply(). Specifically, for each +** table in the changeset, there exists a database table with: +** +**
    +**
  • The name identified by the changeset, and +**
  • at least as many columns as recorded in the changeset, and +**
  • the primary key columns in the same position as recorded in +** the changeset. +**
+** +** The output of the changegroup object always has the same schema as the +** database nominated using this function. In cases where changesets passed +** to sqlite3changegroup_add() have fewer columns than the corresponding table +** in the database schema, these are filled in using the default column +** values from the database schema. This makes it possible to combine +** changesets that have different numbers of columns for a single table +** within a changegroup, provided that they are otherwise compatible. +*/ +SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb); + +/* +** CAPI3REF: Add A Changeset To A Changegroup +** METHOD: sqlite3_changegroup +** +** Add all changes within the changeset (or patchset) in buffer pData (size +** nData bytes) to the changegroup. +** +** If the buffer contains a patchset, then all prior calls to this function +** on the same changegroup object must also have specified patchsets. Or, if +** the buffer contains a changeset, so must have the earlier calls to this +** function. Otherwise, SQLITE_ERROR is returned and no changes are added +** to the changegroup. +** +** Rows within the changeset and changegroup are identified by the values in +** their PRIMARY KEY columns. A change in the changeset is considered to +** apply to the same row as a change already present in the changegroup if +** the two rows have the same primary key. +** +** Changes to rows that do not already appear in the changegroup are +** simply copied into it. Or, if both the new changeset and the changegroup +** contain changes that apply to a single row, the final contents of the +** changegroup depends on the type of each change, as follows: +** +** +** +** +**
Existing Change New Change Output Change +**
INSERT INSERT +** The new change is ignored. This case does not occur if the new +** changeset was recorded immediately after the changesets already +** added to the changegroup. +**
INSERT UPDATE +** The INSERT change remains in the changegroup. The values in the +** INSERT change are modified as if the row was inserted by the +** existing change and then updated according to the new change. +**
INSERT DELETE +** The existing INSERT is removed from the changegroup. The DELETE is +** not added. +**
UPDATE INSERT +** The new change is ignored. This case does not occur if the new +** changeset was recorded immediately after the changesets already +** added to the changegroup. +**
UPDATE UPDATE +** The existing UPDATE remains within the changegroup. It is amended +** so that the accompanying values are as if the row was updated once +** by the existing change and then again by the new change. +**
UPDATE DELETE +** The existing UPDATE is replaced by the new DELETE within the +** changegroup. +**
DELETE INSERT +** If one or more of the column values in the row inserted by the +** new change differ from those in the row deleted by the existing +** change, the existing DELETE is replaced by an UPDATE within the +** changegroup. Otherwise, if the inserted row is exactly the same +** as the deleted row, the existing DELETE is simply discarded. +**
DELETE UPDATE +** The new change is ignored. This case does not occur if the new +** changeset was recorded immediately after the changesets already +** added to the changegroup. +**
DELETE DELETE +** The new change is ignored. This case does not occur if the new +** changeset was recorded immediately after the changesets already +** added to the changegroup. +**
+** +** If the new changeset contains changes to a table that is already present +** in the changegroup, then the number of columns and the position of the +** primary key columns for the table must be consistent. If this is not the +** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup +** object has been configured with a database schema using the +** sqlite3changegroup_schema() API, then it is possible to combine changesets +** with different numbers of columns for a single table, provided that +** they are otherwise compatible. +** +** If the input changeset appears to be corrupt and the corruption is +** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition +** occurs during processing, this function returns SQLITE_NOMEM. +** +** In all cases, if an error occurs the state of the final contents of the +** changegroup is undefined. If no error occurs, SQLITE_OK is returned. +*/ +SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); + +/* +** CAPI3REF: Obtain A Composite Changeset From A Changegroup +** METHOD: sqlite3_changegroup +** +** Obtain a buffer containing a changeset (or patchset) representing the +** current contents of the changegroup. If the inputs to the changegroup +** were themselves changesets, the output is a changeset. Or, if the +** inputs were patchsets, the output is also a patchset. +** +** As with the output of the sqlite3session_changeset() and +** sqlite3session_patchset() functions, all changes related to a single +** table are grouped together in the output of this function. Tables appear +** in the same order as for the very first changeset added to the changegroup. +** If the second or subsequent changesets added to the changegroup contain +** changes for tables that do not appear in the first changeset, they are +** appended onto the end of the output changeset, again in the order in +** which they are first encountered. 
+** +** If an error occurs, an SQLite error code is returned and the output +** variables (*pnData) and (*ppData) are set to 0. Otherwise, SQLITE_OK +** is returned and the output variables are set to the size of and a +** pointer to the output buffer, respectively. In this case it is the +** responsibility of the caller to eventually free the buffer using a +** call to sqlite3_free(). +*/ +SQLITE_API int sqlite3changegroup_output( + sqlite3_changegroup*, + int *pnData, /* OUT: Size of output buffer in bytes */ + void **ppData /* OUT: Pointer to output buffer */ +); + +/* +** CAPI3REF: Delete A Changegroup Object +** DESTRUCTOR: sqlite3_changegroup +*/ +SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup*); + +/* +** CAPI3REF: Apply A Changeset To A Database +** +** Apply a changeset or patchset to a database. These functions attempt to +** update the "main" database attached to handle db with the changes found in +** the changeset passed via the second and third arguments. +** +** The fourth argument (xFilter) passed to these functions is the "filter +** callback". If it is not NULL, then for each table affected by at least one +** change in the changeset, the filter callback is invoked with +** the table name as the second argument, and a copy of the context pointer +** passed as the sixth argument as the first. If the "filter callback" +** returns zero, then no attempt is made to apply any changes to the table. +** Otherwise, if the return value is non-zero or the xFilter argument to +** is NULL, all changes related to the table are attempted. +** +** For each table that is not excluded by the filter callback, this function +** tests that the target database contains a compatible table. A table is +** considered compatible if all of the following are true: +** +**
    +**
  • The table has the same name as the name recorded in the +** changeset, and +**
<li> The table has at least as many columns as recorded in the +** changeset, and +**
<li> The table has primary key columns in the same position as +** recorded in the changeset. +**
+** +** If there is no compatible table, it is not an error, but none of the +** changes associated with the table are applied. A warning message is issued +** via the sqlite3_log() mechanism with the error code SQLITE_SCHEMA. At most +** one such warning is issued for each table in the changeset. +** +** For each change for which there is a compatible table, an attempt is made +** to modify the table contents according to the UPDATE, INSERT or DELETE +** change. If a change cannot be applied cleanly, the conflict handler +** function passed as the fifth argument to sqlite3changeset_apply() may be +** invoked. A description of exactly when the conflict handler is invoked for +** each type of change is below. +** +** Unlike the xFilter argument, xConflict may not be passed NULL. The results +** of passing anything other than a valid function pointer as the xConflict +** argument are undefined. +** +** Each time the conflict handler function is invoked, it must return one +** of [SQLITE_CHANGESET_OMIT], [SQLITE_CHANGESET_ABORT] or +** [SQLITE_CHANGESET_REPLACE]. SQLITE_CHANGESET_REPLACE may only be returned +** if the second argument passed to the conflict handler is either +** SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If the conflict-handler +** returns an illegal value, any changes already made are rolled back and +** the call to sqlite3changeset_apply() returns SQLITE_MISUSE. Different +** actions are taken by sqlite3changeset_apply() depending on the value +** returned by each invocation of the conflict-handler function. Refer to +** the documentation for the three +** [SQLITE_CHANGESET_OMIT|available return values] for details. +** +**
+**
<dt>DELETE Changes<dd>
+** For each DELETE change, the function checks if the target database +** contains a row with the same primary key value (or values) as the +** original row values stored in the changeset. If it does, and the values +** stored in all non-primary key columns also match the values stored in +** the changeset the row is deleted from the target database. +** +** If a row with matching primary key values is found, but one or more of +** the non-primary key fields contains a value different from the original +** row value stored in the changeset, the conflict-handler function is +** invoked with [SQLITE_CHANGESET_DATA] as the second argument. If the +** database table has more columns than are recorded in the changeset, +** only the values of those non-primary key fields are compared against +** the current database contents - any trailing database table columns +** are ignored. +** +** If no row with matching primary key values is found in the database, +** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND] +** passed as the second argument. +** +** If the DELETE operation is attempted, but SQLite returns SQLITE_CONSTRAINT +** (which can only happen if a foreign key constraint is violated), the +** conflict-handler function is invoked with [SQLITE_CHANGESET_CONSTRAINT] +** passed as the second argument. This includes the case where the DELETE +** operation is attempted because an earlier call to the conflict handler +** function returned [SQLITE_CHANGESET_REPLACE]. +** +**
<dt>INSERT Changes<dd>
+** For each INSERT change, an attempt is made to insert the new row into +** the database. If the changeset row contains fewer fields than the +** database table, the trailing fields are populated with their default +** values. +** +** If the attempt to insert the row fails because the database already +** contains a row with the same primary key values, the conflict handler +** function is invoked with the second argument set to +** [SQLITE_CHANGESET_CONFLICT]. +** +** If the attempt to insert the row fails because of some other constraint +** violation (e.g. NOT NULL or UNIQUE), the conflict handler function is +** invoked with the second argument set to [SQLITE_CHANGESET_CONSTRAINT]. +** This includes the case where the INSERT operation is re-attempted because +** an earlier call to the conflict handler function returned +** [SQLITE_CHANGESET_REPLACE]. +** +**
<dt>UPDATE Changes<dd>
+** For each UPDATE change, the function checks if the target database +** contains a row with the same primary key value (or values) as the +** original row values stored in the changeset. If it does, and the values +** stored in all modified non-primary key columns also match the values +** stored in the changeset the row is updated within the target database. +** +** If a row with matching primary key values is found, but one or more of +** the modified non-primary key fields contains a value different from an +** original row value stored in the changeset, the conflict-handler function +** is invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since +** UPDATE changes only contain values for non-primary key fields that are +** to be modified, only those fields need to match the original values to +** avoid the SQLITE_CHANGESET_DATA conflict-handler callback. +** +** If no row with matching primary key values is found in the database, +** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND] +** passed as the second argument. +** +** If the UPDATE operation is attempted, but SQLite returns +** SQLITE_CONSTRAINT, the conflict-handler function is invoked with +** [SQLITE_CHANGESET_CONSTRAINT] passed as the second argument. +** This includes the case where the UPDATE operation is attempted after +** an earlier call to the conflict handler function returned +** [SQLITE_CHANGESET_REPLACE]. +**
+** +** It is safe to execute SQL statements, including those that write to the +** table that the callback related to, from within the xConflict callback. +** This can be used to further customize the application's conflict +** resolution strategy. +** +** All changes made by these functions are enclosed in a savepoint transaction. +** If any other error (aside from a constraint failure when attempting to +** write to the target database) occurs, then the savepoint transaction is +** rolled back, restoring the target database to its original state, and an +** SQLite error code returned. +** +** If the output parameters (ppRebase) and (pnRebase) are non-NULL and +** the input is a changeset (not a patchset), then sqlite3changeset_apply_v2() +** may set (*ppRebase) to point to a "rebase" that may be used with the +** sqlite3_rebaser APIs buffer before returning. In this case (*pnRebase) +** is set to the size of the buffer in bytes. It is the responsibility of the +** caller to eventually free any such buffer using sqlite3_free(). The buffer +** is only allocated and populated if one or more conflicts were encountered +** while applying the patchset. See comments surrounding the sqlite3_rebaser +** APIs for further details. +** +** The behavior of sqlite3changeset_apply_v2() and its streaming equivalent +** may be modified by passing a combination of +** [SQLITE_CHANGESETAPPLY_NOSAVEPOINT | supported flags] as the 9th parameter. +** +** Note that the sqlite3changeset_apply_v2() API is still experimental +** and therefore subject to change. 
+*/ +SQLITE_API int sqlite3changeset_apply( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx /* First argument passed to xConflict */ +); +SQLITE_API int sqlite3changeset_apply_v2( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int nChangeset, /* Size of changeset in bytes */ + void *pChangeset, /* Changeset blob */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase, /* OUT: Rebase data */ + int flags /* SESSION_CHANGESETAPPLY_* flags */ +); + +/* +** CAPI3REF: Flags for sqlite3changeset_apply_v2 +** +** The following flags may passed via the 9th parameter to +** [sqlite3changeset_apply_v2] and [sqlite3changeset_apply_v2_strm]: +** +**
+**
<dt>SQLITE_CHANGESETAPPLY_NOSAVEPOINT<dd>
+** Usually, the sessions module encloses all operations performed by +** a single call to apply_v2() or apply_v2_strm() in a [SAVEPOINT]. The +** SAVEPOINT is committed if the changeset or patchset is successfully +** applied, or rolled back if an error occurs. Specifying this flag +** causes the sessions module to omit this savepoint. In this case, if the +** caller has an open transaction or savepoint when apply_v2() is called, +** it may revert the partially applied changeset by rolling it back. +** +**
<dt>SQLITE_CHANGESETAPPLY_INVERT<dd>
+** Invert the changeset before applying it. This is equivalent to inverting +** a changeset using sqlite3changeset_invert() before applying it. It is +** an error to specify this flag with a patchset. +** +**
<dt>SQLITE_CHANGESETAPPLY_IGNORENOOP<dd>
+** Do not invoke the conflict handler callback for any changes that +** would not actually modify the database even if they were applied. +** Specifically, this means that the conflict handler is not invoked +** for: +**
    +**
<li> a delete change if the row being deleted cannot be found, +**
<li> an update change if the modified fields are already set to +** their new values in the conflicting row, or +**
<li> an insert change if all fields of the conflicting row match +** the row being inserted. +**
+** +**
<dt>SQLITE_CHANGESETAPPLY_FKNOACTION<dd>
+** If this flag it set, then all foreign key constraints in the target +** database behave as if they were declared with "ON UPDATE NO ACTION ON +** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL +** or SET DEFAULT. +*/ +#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001 +#define SQLITE_CHANGESETAPPLY_INVERT 0x0002 +#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004 +#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008 + +/* +** CAPI3REF: Constants Passed To The Conflict Handler +** +** Values that may be passed as the second argument to a conflict-handler. +** +**
+**
<dt>SQLITE_CHANGESET_DATA<dd>
+** The conflict handler is invoked with CHANGESET_DATA as the second argument +** when processing a DELETE or UPDATE change if a row with the required +** PRIMARY KEY fields is present in the database, but one or more other +** (non primary-key) fields modified by the update do not contain the +** expected "before" values. +** +** The conflicting row, in this case, is the database row with the matching +** primary key. +** +**
<dt>SQLITE_CHANGESET_NOTFOUND<dd>
+** The conflict handler is invoked with CHANGESET_NOTFOUND as the second +** argument when processing a DELETE or UPDATE change if a row with the +** required PRIMARY KEY fields is not present in the database. +** +** There is no conflicting row in this case. The results of invoking the +** sqlite3changeset_conflict() API are undefined. +** +**
<dt>SQLITE_CHANGESET_CONFLICT<dd>
+** CHANGESET_CONFLICT is passed as the second argument to the conflict +** handler while processing an INSERT change if the operation would result +** in duplicate primary key values. +** +** The conflicting row in this case is the database row with the matching +** primary key. +** +**
<dt>SQLITE_CHANGESET_FOREIGN_KEY<dd>
+** If foreign key handling is enabled, and applying a changeset leaves the +** database in a state containing foreign key violations, the conflict +** handler is invoked with CHANGESET_FOREIGN_KEY as the second argument +** exactly once before the changeset is committed. If the conflict handler +** returns CHANGESET_OMIT, the changes, including those that caused the +** foreign key constraint violation, are committed. Or, if it returns +** CHANGESET_ABORT, the changeset is rolled back. +** +** No current or conflicting row information is provided. The only function +** it is possible to call on the supplied sqlite3_changeset_iter handle +** is sqlite3changeset_fk_conflicts(). +** +**
<dt>SQLITE_CHANGESET_CONSTRAINT<dd>
+** If any other constraint violation occurs while applying a change (i.e. +** a UNIQUE, CHECK or NOT NULL constraint), the conflict handler is +** invoked with CHANGESET_CONSTRAINT as the second argument. +** +** There is no conflicting row in this case. The results of invoking the +** sqlite3changeset_conflict() API are undefined. +** +**
+*/ +#define SQLITE_CHANGESET_DATA 1 +#define SQLITE_CHANGESET_NOTFOUND 2 +#define SQLITE_CHANGESET_CONFLICT 3 +#define SQLITE_CHANGESET_CONSTRAINT 4 +#define SQLITE_CHANGESET_FOREIGN_KEY 5 + +/* +** CAPI3REF: Constants Returned By The Conflict Handler +** +** A conflict handler callback must return one of the following three values. +** +**
+**
<dt>SQLITE_CHANGESET_OMIT<dd>
+** If a conflict handler returns this value no special action is taken. The +** change that caused the conflict is not applied. The session module +** continues to the next change in the changeset. +** +**
<dt>SQLITE_CHANGESET_REPLACE<dd>
+** This value may only be returned if the second argument to the conflict +** handler was SQLITE_CHANGESET_DATA or SQLITE_CHANGESET_CONFLICT. If this +** is not the case, any changes applied so far are rolled back and the +** call to sqlite3changeset_apply() returns SQLITE_MISUSE. +** +** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_DATA conflict +** handler, then the conflicting row is either updated or deleted, depending +** on the type of change. +** +** If CHANGESET_REPLACE is returned by an SQLITE_CHANGESET_CONFLICT conflict +** handler, then the conflicting row is removed from the database and a +** second attempt to apply the change is made. If this second attempt fails, +** the original row is restored to the database before continuing. +** +**
<dt>SQLITE_CHANGESET_ABORT<dd>
+** If this value is returned, any changes applied so far are rolled back +** and the call to sqlite3changeset_apply() returns SQLITE_ABORT. +**
+*/ +#define SQLITE_CHANGESET_OMIT 0 +#define SQLITE_CHANGESET_REPLACE 1 +#define SQLITE_CHANGESET_ABORT 2 + +/* +** CAPI3REF: Rebasing changesets +** EXPERIMENTAL +** +** Suppose there is a site hosting a database in state S0. And that +** modifications are made that move that database to state S1 and a +** changeset recorded (the "local" changeset). Then, a changeset based +** on S0 is received from another site (the "remote" changeset) and +** applied to the database. The database is then in state +** (S1+"remote"), where the exact state depends on any conflict +** resolution decisions (OMIT or REPLACE) made while applying "remote". +** Rebasing a changeset is to update it to take those conflict +** resolution decisions into account, so that the same conflicts +** do not have to be resolved elsewhere in the network. +** +** For example, if both the local and remote changesets contain an +** INSERT of the same key on "CREATE TABLE t1(a PRIMARY KEY, b)": +** +** local: INSERT INTO t1 VALUES(1, 'v1'); +** remote: INSERT INTO t1 VALUES(1, 'v2'); +** +** and the conflict resolution is REPLACE, then the INSERT change is +** removed from the local changeset (it was overridden). Or, if the +** conflict resolution was "OMIT", then the local changeset is modified +** to instead contain: +** +** UPDATE t1 SET b = 'v2' WHERE a=1; +** +** Changes within the local changeset are rebased as follows: +** +**
+**
<dt>Local INSERT<dd>
+** This may only conflict with a remote INSERT. If the conflict +** resolution was OMIT, then add an UPDATE change to the rebased +** changeset. Or, if the conflict resolution was REPLACE, add +** nothing to the rebased changeset. +** +**
<dt>Local DELETE<dd>
+** This may conflict with a remote UPDATE or DELETE. In both cases the +** only possible resolution is OMIT. If the remote operation was a +** DELETE, then add no change to the rebased changeset. If the remote +** operation was an UPDATE, then the old.* fields of change are updated +** to reflect the new.* values in the UPDATE. +** +**
<dt>Local UPDATE<dd>
+** This may conflict with a remote UPDATE or DELETE. If it conflicts +** with a DELETE, and the conflict resolution was OMIT, then the update +** is changed into an INSERT. Any undefined values in the new.* record +** from the update change are filled in using the old.* values from +** the conflicting DELETE. Or, if the conflict resolution was REPLACE, +** the UPDATE change is simply omitted from the rebased changeset. +** +** If conflict is with a remote UPDATE and the resolution is OMIT, then +** the old.* values are rebased using the new.* values in the remote +** change. Or, if the resolution is REPLACE, then the change is copied +** into the rebased changeset with updates to columns also updated by +** the conflicting remote UPDATE removed. If this means no columns would +** be updated, the change is omitted. +**
+** +** A local change may be rebased against multiple remote changes +** simultaneously. If a single key is modified by multiple remote +** changesets, they are combined as follows before the local changeset +** is rebased: +** +**
    +**
<li> If there has been one or more REPLACE resolutions on a +** key, it is rebased according to a REPLACE. +** +**
<li> If there have been no REPLACE resolutions on a key, then +** the local changeset is rebased according to the most recent +** of the OMIT resolutions. +**
+** +** Note that conflict resolutions from multiple remote changesets are +** combined on a per-field basis, not per-row. This means that in the +** case of multiple remote UPDATE operations, some fields of a single +** local change may be rebased for REPLACE while others are rebased for +** OMIT. +** +** In order to rebase a local changeset, the remote changeset must first +** be applied to the local database using sqlite3changeset_apply_v2() and +** the buffer of rebase information captured. Then: +** +**
    +**
<li> An sqlite3_rebaser object is created by calling +** sqlite3rebaser_create(). +**
<li> The new object is configured with the rebase buffer obtained from +** sqlite3changeset_apply_v2() by calling sqlite3rebaser_configure(). +** If the local changeset is to be rebased against multiple remote +** changesets, then sqlite3rebaser_configure() should be called +** multiple times, in the same order that the multiple +** sqlite3changeset_apply_v2() calls were made. +**
<li> Each local changeset is rebased by calling sqlite3rebaser_rebase(). +**
<li> The sqlite3_rebaser object is deleted by calling +** sqlite3rebaser_delete(). +**
+*/ +typedef struct sqlite3_rebaser sqlite3_rebaser; + +/* +** CAPI3REF: Create a changeset rebaser object. +** EXPERIMENTAL +** +** Allocate a new changeset rebaser object. If successful, set (*ppNew) to +** point to the new object and return SQLITE_OK. Otherwise, if an error +** occurs, return an SQLite error code (e.g. SQLITE_NOMEM) and set (*ppNew) +** to NULL. +*/ +SQLITE_API int sqlite3rebaser_create(sqlite3_rebaser **ppNew); + +/* +** CAPI3REF: Configure a changeset rebaser object. +** EXPERIMENTAL +** +** Configure the changeset rebaser object to rebase changesets according +** to the conflict resolutions described by buffer pRebase (size nRebase +** bytes), which must have been obtained from a previous call to +** sqlite3changeset_apply_v2(). +*/ +SQLITE_API int sqlite3rebaser_configure( + sqlite3_rebaser*, + int nRebase, const void *pRebase +); + +/* +** CAPI3REF: Rebase a changeset +** EXPERIMENTAL +** +** Argument pIn must point to a buffer containing a changeset nIn bytes +** in size. This function allocates and populates a buffer with a copy +** of the changeset rebased according to the configuration of the +** rebaser object passed as the first argument. If successful, (*ppOut) +** is set to point to the new buffer containing the rebased changeset and +** (*pnOut) to its size in bytes and SQLITE_OK returned. It is the +** responsibility of the caller to eventually free the new buffer using +** sqlite3_free(). Otherwise, if an error occurs, (*ppOut) and (*pnOut) +** are set to zero and an SQLite error code returned. +*/ +SQLITE_API int sqlite3rebaser_rebase( + sqlite3_rebaser*, + int nIn, const void *pIn, + int *pnOut, void **ppOut +); + +/* +** CAPI3REF: Delete a changeset rebaser object. +** EXPERIMENTAL +** +** Delete the changeset rebaser object and all associated resources. There +** should be one call to this function for each successful invocation +** of sqlite3rebaser_create(). 
+*/ +SQLITE_API void sqlite3rebaser_delete(sqlite3_rebaser *p); + +/* +** CAPI3REF: Streaming Versions of API functions. +** +** The six streaming API xxx_strm() functions serve similar purposes to the +** corresponding non-streaming API functions: +** +** +** +**
<tr><th>Streaming function<th>Non-streaming equivalent
<tr><td>sqlite3changeset_apply_strm<td>[sqlite3changeset_apply] +**
<tr><td>sqlite3changeset_apply_strm_v2<td>[sqlite3changeset_apply_v2] +**
<tr><td>sqlite3changeset_concat_strm<td>[sqlite3changeset_concat] +**
<tr><td>sqlite3changeset_invert_strm<td>[sqlite3changeset_invert] +**
<tr><td>sqlite3changeset_start_strm<td>[sqlite3changeset_start] +**
<tr><td>sqlite3session_changeset_strm<td>[sqlite3session_changeset] +**
<tr><td>sqlite3session_patchset_strm<td>[sqlite3session_patchset] +**
+** +** Non-streaming functions that accept changesets (or patchsets) as input +** require that the entire changeset be stored in a single buffer in memory. +** Similarly, those that return a changeset or patchset do so by returning +** a pointer to a single large buffer allocated using sqlite3_malloc(). +** Normally this is convenient. However, if an application running in a +** low-memory environment is required to handle very large changesets, the +** large contiguous memory allocations required can become onerous. +** +** In order to avoid this problem, instead of a single large buffer, input +** is passed to a streaming API functions by way of a callback function that +** the sessions module invokes to incrementally request input data as it is +** required. In all cases, a pair of API function parameters such as +** +**
+**        int nChangeset,
+**        void *pChangeset,
+**  
+** +** Is replaced by: +** +**
+**        int (*xInput)(void *pIn, void *pData, int *pnData),
+**        void *pIn,
+**  
+** +** Each time the xInput callback is invoked by the sessions module, the first +** argument passed is a copy of the supplied pIn context pointer. The second +** argument, pData, points to a buffer (*pnData) bytes in size. Assuming no +** error occurs the xInput method should copy up to (*pnData) bytes of data +** into the buffer and set (*pnData) to the actual number of bytes copied +** before returning SQLITE_OK. If the input is completely exhausted, (*pnData) +** should be set to zero to indicate this. Or, if an error occurs, an SQLite +** error code should be returned. In all cases, if an xInput callback returns +** an error, all processing is abandoned and the streaming API function +** returns a copy of the error code to the caller. +** +** In the case of sqlite3changeset_start_strm(), the xInput callback may be +** invoked by the sessions module at any point during the lifetime of the +** iterator. If such an xInput callback returns an error, the iterator enters +** an error state, whereby all subsequent calls to iterator functions +** immediately fail with the same error code as returned by xInput. +** +** Similarly, streaming API functions that return changesets (or patchsets) +** return them in chunks by way of a callback function instead of via a +** pointer to a single large buffer. In this case, a pair of parameters such +** as: +** +**
+**        int *pnChangeset,
+**        void **ppChangeset,
+**  
+** +** Is replaced by: +** +**
+**        int (*xOutput)(void *pOut, const void *pData, int nData),
+**        void *pOut
+**  
+** +** The xOutput callback is invoked zero or more times to return data to +** the application. The first parameter passed to each call is a copy of the +** pOut pointer supplied by the application. The second parameter, pData, +** points to a buffer nData bytes in size containing the chunk of output +** data being returned. If the xOutput callback successfully processes the +** supplied data, it should return SQLITE_OK to indicate success. Otherwise, +** it should return some other SQLite error code. In this case processing +** is immediately abandoned and the streaming API function returns a copy +** of the xOutput error code to the application. +** +** The sessions module never invokes an xOutput callback with the third +** parameter set to a value less than or equal to zero. Other than this, +** no guarantees are made as to the size of the chunks of data returned. +*/ +SQLITE_API int sqlite3changeset_apply_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change and conflict */ + ), + void *pCtx /* First argument passed to xConflict */ +); +SQLITE_API int sqlite3changeset_apply_v2_strm( + sqlite3 *db, /* Apply change to "main" db of this handle */ + int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */ + void *pIn, /* First arg for xInput */ + int(*xFilter)( + void *pCtx, /* Copy of sixth arg to _apply() */ + const char *zTab /* Table name */ + ), + int(*xConflict)( + void *pCtx, /* Copy of sixth arg to _apply() */ + int eConflict, /* DATA, MISSING, CONFLICT, CONSTRAINT */ + sqlite3_changeset_iter *p /* Handle describing change 
and conflict */ + ), + void *pCtx, /* First argument passed to xConflict */ + void **ppRebase, int *pnRebase, + int flags +); +SQLITE_API int sqlite3changeset_concat_strm( + int (*xInputA)(void *pIn, void *pData, int *pnData), + void *pInA, + int (*xInputB)(void *pIn, void *pData, int *pnData), + void *pInB, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +SQLITE_API int sqlite3changeset_invert_strm( + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +SQLITE_API int sqlite3changeset_start_strm( + sqlite3_changeset_iter **pp, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn +); +SQLITE_API int sqlite3changeset_start_v2_strm( + sqlite3_changeset_iter **pp, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn, + int flags +); +SQLITE_API int sqlite3session_changeset_strm( + sqlite3_session *pSession, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +SQLITE_API int sqlite3session_patchset_strm( + sqlite3_session *pSession, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +SQLITE_API int sqlite3changegroup_add_strm(sqlite3_changegroup*, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn +); +SQLITE_API int sqlite3changegroup_output_strm(sqlite3_changegroup*, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); +SQLITE_API int sqlite3rebaser_rebase_strm( + sqlite3_rebaser *pRebaser, + int (*xInput)(void *pIn, void *pData, int *pnData), + void *pIn, + int (*xOutput)(void *pOut, const void *pData, int nData), + void *pOut +); + +/* +** CAPI3REF: Configure global parameters +** +** The sqlite3session_config() interface is used to make global configuration +** changes to the sessions module in order to tune it to the specific needs +** of the application. +** +** The sqlite3session_config() interface is not threadsafe. 
If it is invoked +** while any other thread is inside any other sessions method then the +** results are undefined. Furthermore, if it is invoked after any sessions +** related objects have been created, the results are also undefined. +** +** The first argument to the sqlite3session_config() function must be one +** of the SQLITE_SESSION_CONFIG_XXX constants defined below. The +** interpretation of the (void*) value passed as the second parameter and +** the effect of calling this function depends on the value of the first +** parameter. +** +**
+**
<dt>SQLITE_SESSION_CONFIG_STRMSIZE<dd>
+** By default, the sessions module streaming interfaces attempt to input +** and output data in approximately 1 KiB chunks. This operand may be used +** to set and query the value of this configuration setting. The pointer +** passed as the second argument must point to a value of type (int). +** If this value is greater than 0, it is used as the new streaming data +** chunk size for both input and output. Before returning, the (int) value +** pointed to by pArg is set to the final value of the streaming interface +** chunk size. +**
+** +** This function returns SQLITE_OK if successful, or an SQLite error code +** otherwise. +*/ +SQLITE_API int sqlite3session_config(int op, void *pArg); + +/* +** CAPI3REF: Values for sqlite3session_config(). +*/ +#define SQLITE_SESSION_CONFIG_STRMSIZE 1 + +/* +** Make sure we can call this stuff from C++. +*/ +#ifdef __cplusplus +} +#endif + +#endif /* !defined(__SQLITESESSION_H_) && defined(SQLITE_ENABLE_SESSION) */ + +/******** End of sqlite3session.h *********/ +/******** Begin file fts5.h *********/ +/* +** 2014 May 31 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** Interfaces to extend FTS5. Using the interfaces defined in this file, +** FTS5 may be extended with: +** +** * custom tokenizers, and +** * custom auxiliary functions. +*/ + + +#ifndef _FTS5_H +#define _FTS5_H + + +#ifdef __cplusplus +extern "C" { +#endif + +/************************************************************************* +** CUSTOM AUXILIARY FUNCTIONS +** +** Virtual table implementations may overload SQL functions by implementing +** the sqlite3_module.xFindFunction() method. 
+*/ + +typedef struct Fts5ExtensionApi Fts5ExtensionApi; +typedef struct Fts5Context Fts5Context; +typedef struct Fts5PhraseIter Fts5PhraseIter; + +typedef void (*fts5_extension_function)( + const Fts5ExtensionApi *pApi, /* API offered by current FTS version */ + Fts5Context *pFts, /* First arg to pass to pApi functions */ + sqlite3_context *pCtx, /* Context for returning result/error */ + int nVal, /* Number of values in apVal[] array */ + sqlite3_value **apVal /* Array of trailing arguments */ +); + +struct Fts5PhraseIter { + const unsigned char *a; + const unsigned char *b; +}; + +/* +** EXTENSION API FUNCTIONS +** +** xUserData(pFts): +** Return a copy of the context pointer the extension function was +** registered with. +** +** xColumnTotalSize(pFts, iCol, pnToken): +** If parameter iCol is less than zero, set output variable *pnToken +** to the total number of tokens in the FTS5 table. Or, if iCol is +** non-negative but less than the number of columns in the table, return +** the total number of tokens in column iCol, considering all rows in +** the FTS5 table. +** +** If parameter iCol is greater than or equal to the number of columns +** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g. +** an OOM condition or IO error), an appropriate SQLite error code is +** returned. +** +** xColumnCount(pFts): +** Return the number of columns in the table. +** +** xColumnSize(pFts, iCol, pnToken): +** If parameter iCol is less than zero, set output variable *pnToken +** to the total number of tokens in the current row. Or, if iCol is +** non-negative but less than the number of columns in the table, set +** *pnToken to the number of tokens in column iCol of the current row. +** +** If parameter iCol is greater than or equal to the number of columns +** in the table, SQLITE_RANGE is returned. Or, if an error occurs (e.g. +** an OOM condition or IO error), an appropriate SQLite error code is +** returned. 
+** +** This function may be quite inefficient if used with an FTS5 table +** created with the "columnsize=0" option. +** +** xColumnText: +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer +** containing the text in utf-8 encoding, (*pn) is set to the size in bytes +** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, +** if an error occurs, an SQLite error code is returned and the final values +** of (*pz) and (*pn) are undefined. +** +** xPhraseCount: +** Returns the number of phrases in the current query expression. +** +** xPhraseSize: +** If parameter iCol is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. +** +** xInstCount: +** Set *pnInst to the total number of occurrences of all phrases within +** the query within the current row. Return SQLITE_OK if successful, or +** an error code (i.e. SQLITE_NOMEM) if an error occurs. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. If the FTS5 table is created +** with either "detail=none" or "detail=column" and "content=" option +** (i.e. if it is a contentless table), then this API always returns 0. +** +** xInst: +** Query for the details of phrase match iIdx within the current row. +** Phrase matches are numbered starting from zero, so the iIdx argument +** should be greater than or equal to zero and smaller than the value +** output by xInstCount(). 
If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. +** +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol +** to the column in which it occurs and *piOff the token offset of the +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. +** +** xRowid: +** Returns the rowid of the current row. +** +** xTokenize: +** Tokenize text using the tokenizer belonging to the FTS5 table. +** +** xQueryPhrase(pFts5, iPhrase, pUserData, xCallback): +** This API function is used to query the FTS table for phrase iPhrase +** of the current query. Specifically, a query equivalent to: +** +** ... FROM ftstable WHERE ftstable MATCH $p ORDER BY rowid +** +** with $p set to a phrase equivalent to the phrase iPhrase of the +** current query is executed. Any column filter that applies to +** phrase iPhrase of the current query is included in $p. For each +** row visited, the callback function passed as the fourth argument +** is invoked. The context and API objects passed to the callback +** function may be used to access the properties of each matched row. +** Invoking Api.xUserData() returns a copy of the pointer passed as +** the third argument to pUserData. +** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** +** If the callback function returns any value other than SQLITE_OK, the +** query is abandoned and the xQueryPhrase function returns immediately. +** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK. +** Otherwise, the error code is propagated upwards. +** +** If the query runs to completion without incident, SQLITE_OK is returned. 
+** Or, if some error occurs before the query completes or is aborted by +** the callback, an SQLite error code is returned. +** +** +** xSetAuxdata(pFts5, pAux, xDelete) +** +** Save the pointer passed as the second argument as the extension function's +** "auxiliary data". The pointer may then be retrieved by the current or any +** future invocation of the same fts5 extension function made as part of +** the same MATCH query using the xGetAuxdata() API. +** +** Each extension function is allocated a single auxiliary data slot for +** each FTS query (MATCH expression). If the extension function is invoked +** more than once for a single FTS query, then all invocations share a +** single auxiliary data context. +** +** If there is already an auxiliary data pointer when this function is +** invoked, then it is replaced by the new pointer. If an xDelete callback +** was specified along with the original pointer, it is invoked at this +** point. +** +** The xDelete callback, if one is specified, is also invoked on the +** auxiliary data pointer after the FTS5 query has finished. +** +** If an error (e.g. an OOM condition) occurs within this function, +** the auxiliary data is set to NULL and an error code returned. If the +** xDelete parameter was not NULL, it is invoked on the auxiliary data +** pointer before returning. +** +** +** xGetAuxdata(pFts5, bClear) +** +** Returns the current auxiliary data pointer for the fts5 extension +** function. See the xSetAuxdata() method for details. +** +** If the bClear argument is non-zero, then the auxiliary data is cleared +** (set to NULL) before this function returns. In this case the xDelete, +** if any, is not invoked. +** +** +** xRowCount(pFts5, pnRow) +** +** This function is used to retrieve the total number of rows in the table. 
+** In other words, the same value that would be returned by: +** +** SELECT count(*) FROM ftstable; +** +** xPhraseFirst() +** This function is used, along with type Fts5PhraseIter and the xPhraseNext +** method, to iterate through all instances of a single query phrase within +** the current row. This is the same information as is accessible via the +** xInstCount/xInst APIs. While the xInstCount/xInst APIs are more convenient +** to use, this API may be faster under some circumstances. To iterate +** through instances of phrase iPhrase, use the following code: +** +** Fts5PhraseIter iter; +** int iCol, iOff; +** for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff); +** iCol>=0; +** pApi->xPhraseNext(pFts, &iter, &iCol, &iOff) +** ){ +** // An instance of phrase iPhrase at offset iOff of column iCol +** } +** +** The Fts5PhraseIter structure is defined above. Applications should not +** modify this structure directly - it should only be used as shown above +** with the xPhraseFirst() and xPhraseNext() API methods (and by +** xPhraseFirstColumn() and xPhraseNextColumn() as illustrated below). +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. If the FTS5 table is created +** with either "detail=none" or "detail=column" and "content=" option +** (i.e. if it is a contentless table), then this API always iterates +** through an empty set (all calls to xPhraseFirst() set iCol to -1). +** +** xPhraseNext() +** See xPhraseFirst above. +** +** xPhraseFirstColumn() +** This function and xPhraseNextColumn() are similar to the xPhraseFirst() +** and xPhraseNext() APIs described above. The difference is that instead +** of iterating through all instances of a phrase in the current row, these +** APIs are used to iterate through the set of columns in the current row +** that contain one or more instances of a specified phrase. 
For example: +** +** Fts5PhraseIter iter; +** int iCol; +** for(pApi->xPhraseFirstColumn(pFts, iPhrase, &iter, &iCol); +** iCol>=0; +** pApi->xPhraseNextColumn(pFts, &iter, &iCol) +** ){ +** // Column iCol contains at least one instance of phrase iPhrase +** } +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" option. If the FTS5 table is created with either +** "detail=none" "content=" option (i.e. if it is a contentless table), +** then this API always iterates through an empty set (all calls to +** xPhraseFirstColumn() set iCol to -1). +** +** The information accessed using this API and its companion +** xPhraseFirstColumn() may also be obtained using xPhraseFirst/xPhraseNext +** (or xInst/xInstCount). The chief advantage of this API is that it is +** significantly more efficient than those alternatives when used with +** "detail=column" tables. +** +** xPhraseNextColumn() +** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. +** +** If iPhrase or iToken are less than zero, or if iPhrase is greater than +** or equal to the number of phrases in the query as reported by +** xPhraseCount(), or if iToken is equal to or greater than the number of +** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken + are both zeroed. +** +** The output text is not a copy of the query text that specified the +** token. It is the output of the tokenizer module. For tokendata=1 +** tables, this includes any embedded 0x00 and trailing data. +** +** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase hit iIdx within the +** current row. 
If iIdx is less than zero or greater than or equal to the +** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, +** output variable (*ppToken) is set to point to a buffer containing the +** matching document token, and (*pnToken) to the size of that buffer in +** bytes. This API is not available if the specified token matches a +** prefix query term. In that case both output variables are always set +** to 0. +** +** The output text is not a copy of the document text that was tokenized. +** It is the output of the tokenizer module. For tokendata=1 tables, this +** includes any embedded 0x00 and trailing data. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. +*/ +struct Fts5ExtensionApi { + int iVersion; /* Currently always set to 3 */ + + void *(*xUserData)(Fts5Context*); + + int (*xColumnCount)(Fts5Context*); + int (*xRowCount)(Fts5Context*, sqlite3_int64 *pnRow); + int (*xColumnTotalSize)(Fts5Context*, int iCol, sqlite3_int64 *pnToken); + + int (*xTokenize)(Fts5Context*, + const char *pText, int nText, /* Text to tokenize */ + void *pCtx, /* Context passed to xToken() */ + int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ + ); + + int (*xPhraseCount)(Fts5Context*); + int (*xPhraseSize)(Fts5Context*, int iPhrase); + + int (*xInstCount)(Fts5Context*, int *pnInst); + int (*xInst)(Fts5Context*, int iIdx, int *piPhrase, int *piCol, int *piOff); + + sqlite3_int64 (*xRowid)(Fts5Context*); + int (*xColumnText)(Fts5Context*, int iCol, const char **pz, int *pn); + int (*xColumnSize)(Fts5Context*, int iCol, int *pnToken); + + int (*xQueryPhrase)(Fts5Context*, int iPhrase, void *pUserData, + int(*)(const Fts5ExtensionApi*,Fts5Context*,void*) + ); + int (*xSetAuxdata)(Fts5Context*, void *pAux, void(*xDelete)(void*)); + void *(*xGetAuxdata)(Fts5Context*, int bClear); + + int (*xPhraseFirst)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*, int*); + void 
(*xPhraseNext)(Fts5Context*, Fts5PhraseIter*, int *piCol, int *piOff); + + int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); + void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); + + /* Below this point are iVersion>=3 only */ + int (*xQueryToken)(Fts5Context*, + int iPhrase, int iToken, + const char **ppToken, int *pnToken + ); + int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); +}; + +/* +** CUSTOM AUXILIARY FUNCTIONS +*************************************************************************/ + +/************************************************************************* +** CUSTOM TOKENIZERS +** +** Applications may also register custom tokenizer types. A tokenizer +** is registered by providing fts5 with a populated instance of the +** following structure. All structure methods must be defined, setting +** any member of the fts5_tokenizer struct to NULL leads to undefined +** behaviour. The structure methods are expected to function as follows: +** +** xCreate: +** This function is used to allocate and initialize a tokenizer instance. +** A tokenizer instance is required to actually tokenize text. +** +** The first argument passed to this function is a copy of the (void*) +** pointer provided by the application when the fts5_tokenizer object +** was registered with FTS5 (the third argument to xCreateTokenizer()). +** The second and third arguments are an array of nul-terminated strings +** containing the tokenizer arguments, if any, specified following the +** tokenizer name as part of the CREATE VIRTUAL TABLE statement used +** to create the FTS5 table. +** +** The final argument is an output variable. If successful, (*ppOut) +** should be set to point to the new tokenizer handle and SQLITE_OK +** returned. If an error occurs, some value other than SQLITE_OK should +** be returned. In this case, fts5 assumes that the final value of *ppOut +** is undefined. 
+** +** xDelete: +** This function is invoked to delete a tokenizer handle previously +** allocated using xCreate(). Fts5 guarantees that this function will +** be invoked exactly once for each successful call to xCreate(). +** +** xTokenize: +** This function is expected to tokenize the nText byte string indicated +** by argument pText. pText may or may not be nul-terminated. The first +** argument passed to this function is a pointer to an Fts5Tokenizer object +** returned by an earlier call to xCreate(). +** +** The second argument indicates the reason that FTS5 is requesting +** tokenization of the supplied text. This is always one of the following +** four values: +** +**
+**   • FTS5_TOKENIZE_DOCUMENT - A document is being inserted into
+** or removed from the FTS table. The tokenizer is being invoked to
+** determine the set of tokens to add to (or delete from) the
+** FTS index.
+**
+**   • FTS5_TOKENIZE_QUERY - A MATCH query is being executed
+** against the FTS index. The tokenizer is being called to tokenize
+** a bareword or quoted string specified as part of the query.
+**
+**   • (FTS5_TOKENIZE_QUERY | FTS5_TOKENIZE_PREFIX) - Same as
+** FTS5_TOKENIZE_QUERY, except that the bareword or quoted string is
+** followed by a "*" character, indicating that the last token
+** returned by the tokenizer will be treated as a token prefix.
+**
+**   • FTS5_TOKENIZE_AUX - The tokenizer is being invoked to
+** satisfy an fts5_api.xTokenize() request made by an auxiliary
+** function. Or an fts5_api.xColumnSize() request made by the same
+** on a columnsize=0 database.
+**
+** +** For each token in the input string, the supplied callback xToken() must +** be invoked. The first argument to it should be a copy of the pointer +** passed as the second argument to xTokenize(). The third and fourth +** arguments are a pointer to a buffer containing the token text, and the +** size of the token in bytes. The 4th and 5th arguments are the byte offsets +** of the first byte of and first byte immediately following the text from +** which the token is derived within the input. +** +** The second argument passed to the xToken() callback ("tflags") should +** normally be set to 0. The exception is if the tokenizer supports +** synonyms. In this case see the discussion below for details. +** +** FTS5 assumes the xToken() callback is invoked for each token in the +** order that they occur within the input text. +** +** If an xToken() callback returns any value other than SQLITE_OK, then +** the tokenization should be abandoned and the xTokenize() method should +** immediately return a copy of the xToken() return value. Or, if the +** input buffer is exhausted, xTokenize() should return SQLITE_OK. Finally, +** if an error occurs with the xTokenize() implementation itself, it +** may abandon the tokenization and return any error code other than +** SQLITE_OK or SQLITE_DONE. +** +** SYNONYM SUPPORT +** +** Custom tokenizers may also support synonyms. Consider a case in which a +** user wishes to query for a phrase such as "first place". Using the +** built-in tokenizers, the FTS5 query 'first + place' will match instances +** of "first place" within the document set, but not alternative forms +** such as "1st place". In some applications, it would be better to match +** all instances of "first place" or "1st place" regardless of which form +** the user specified in the MATCH query text. +** +** There are several ways to approach this in FTS5: +** +**
+**   1. By mapping all synonyms to a single token. In this case, using
+** the above example, this means that the tokenizer returns the
+** same token for inputs "first" and "1st". Say that token is in
+** fact "first", so that when the user inserts the document "I won
+** 1st place" entries are added to the index for tokens "i", "won",
+** "first" and "place". If the user then queries for '1st + place',
+** the tokenizer substitutes "first" for "1st" and the query works
+** as expected.
+**
+**   2. By querying the index for all synonyms of each query term
+** separately. In this case, when tokenizing query text, the
+** tokenizer may provide multiple synonyms for a single term
+** within the document. FTS5 then queries the index for each
+** synonym individually. For example, faced with the query:
+**
+**
+** ... MATCH 'first place'
+**
+** the tokenizer offers both "1st" and "first" as synonyms for the
+** first token in the MATCH query and FTS5 effectively runs a query
+** similar to:
+**
+**
+** ... MATCH '(first OR 1st) place'
+**
+** except that, for the purposes of auxiliary functions, the query
+** still appears to contain just two phrases - "(first OR 1st)"
+** being treated as a single phrase.
+**
+**   3. By adding multiple synonyms for a single term to the FTS index.
+** Using this method, when tokenizing document text, the tokenizer
+** provides multiple synonyms for each token. So that when a
+** document such as "I won first place" is tokenized, entries are
+** added to the FTS index for "i", "won", "first", "1st" and
+** "place".
+**
+** This way, even if the tokenizer does not provide synonyms
+** when tokenizing query text (it should not - to do so would be
+** inefficient), it doesn't matter if the user queries for
+** 'first + place' or '1st + place', as there are entries in the
+** FTS index corresponding to both forms of the first token.
+**
+** +** Whether it is parsing document or query text, any call to xToken that +** specifies a tflags argument with the FTS5_TOKEN_COLOCATED bit +** is considered to supply a synonym for the previous token. For example, +** when parsing the document "I won first place", a tokenizer that supports +** synonyms would call xToken() 5 times, as follows: +** +** +** xToken(pCtx, 0, "i", 1, 0, 1); +** xToken(pCtx, 0, "won", 3, 2, 5); +** xToken(pCtx, 0, "first", 5, 6, 11); +** xToken(pCtx, FTS5_TOKEN_COLOCATED, "1st", 3, 6, 11); +** xToken(pCtx, 0, "place", 5, 12, 17); +** +** +** It is an error to specify the FTS5_TOKEN_COLOCATED flag the first time +** xToken() is called. Multiple synonyms may be specified for a single token +** by making multiple calls to xToken(FTS5_TOKEN_COLOCATED) in sequence. +** There is no limit to the number of synonyms that may be provided for a +** single token. +** +** In many cases, method (1) above is the best approach. It does not add +** extra data to the FTS index or require FTS5 to query for multiple terms, +** so it is efficient in terms of disk space and query speed. However, it +** does not support prefix queries very well. If, as suggested above, the +** token "first" is substituted for "1st" by the tokenizer, then the query: +** +** +** ... MATCH '1s*' +** +** will not match documents that contain the token "1st" (as the tokenizer +** will probably not map "1s" to any prefix of "first"). +** +** For full prefix support, method (3) may be preferred. In this case, +** because the index contains entries for both "first" and "1st", prefix +** queries such as 'fi*' or '1s*' will match correctly. However, because +** extra entries are added to the FTS index, this method uses more space +** within the database. +** +** Method (2) offers a midpoint between (1) and (3). 
Using this method, +** a query such as '1s*' will match documents that contain the literal +** token "1st", but not "first" (assuming the tokenizer is not able to +** provide synonyms for prefixes). However, a non-prefix query like '1st' +** will match against "1st" and "first". This method does not require +** extra disk space, as no extra entries are added to the FTS index. +** On the other hand, it may require more CPU cycles to run MATCH queries, +** as separate queries of the FTS index are required for each synonym. +** +** When using methods (2) or (3), it is important that the tokenizer only +** provide synonyms when tokenizing document text (method (3)) or query +** text (method (2)), not both. Doing so will not cause any errors, but is +** inefficient. +*/ +typedef struct Fts5Tokenizer Fts5Tokenizer; +typedef struct fts5_tokenizer fts5_tokenizer; +struct fts5_tokenizer { + int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + void (*xDelete)(Fts5Tokenizer*); + int (*xTokenize)(Fts5Tokenizer*, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + int (*xToken)( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input text */ + ) + ); +}; + +/* Flags that may be passed as the third argument to xTokenize() */ +#define FTS5_TOKENIZE_QUERY 0x0001 +#define FTS5_TOKENIZE_PREFIX 0x0002 +#define FTS5_TOKENIZE_DOCUMENT 0x0004 +#define FTS5_TOKENIZE_AUX 0x0008 + +/* Flags that may be passed by the tokenizer implementation back to FTS5 +** as the third argument to the supplied xToken callback. */ +#define FTS5_TOKEN_COLOCATED 0x0001 /* Same position as prev. 
token */ + +/* +** END OF CUSTOM TOKENIZERS +*************************************************************************/ + +/************************************************************************* +** FTS5 EXTENSION REGISTRATION API +*/ +typedef struct fts5_api fts5_api; +struct fts5_api { + int iVersion; /* Currently always set to 2 */ + + /* Create a new tokenizer */ + int (*xCreateTokenizer)( + fts5_api *pApi, + const char *zName, + void *pUserData, + fts5_tokenizer *pTokenizer, + void (*xDestroy)(void*) + ); + + /* Find an existing tokenizer */ + int (*xFindTokenizer)( + fts5_api *pApi, + const char *zName, + void **ppUserData, + fts5_tokenizer *pTokenizer + ); + + /* Create a new auxiliary function */ + int (*xCreateFunction)( + fts5_api *pApi, + const char *zName, + void *pUserData, + fts5_extension_function xFunction, + void (*xDestroy)(void*) + ); +}; + +/* +** END OF REGISTRATION API +*************************************************************************/ + +#ifdef __cplusplus +} /* end of the 'extern "C"' block */ +#endif + +#endif /* _FTS5_H */ + +/******** End of fts5.h *********/ diff --git a/llava_next/include/tclOOIntDecls.h b/llava_next/include/tclOOIntDecls.h new file mode 100644 index 0000000000000000000000000000000000000000..6a5cfd3baa28ba8a3640cfdf80f7d52efe32e8d3 --- /dev/null +++ b/llava_next/include/tclOOIntDecls.h @@ -0,0 +1,166 @@ +/* + * This file is (mostly) automatically generated from tclOO.decls. + */ + +#ifndef _TCLOOINTDECLS +#define _TCLOOINTDECLS + +/* !BEGIN!: Do not edit below this line. 
*/ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Exported function declarations: + */ + +/* 0 */ +TCLAPI Tcl_Object TclOOGetDefineCmdContext(Tcl_Interp *interp); +/* 1 */ +TCLAPI Tcl_Method TclOOMakeProcInstanceMethod(Tcl_Interp *interp, + Object *oPtr, int flags, Tcl_Obj *nameObj, + Tcl_Obj *argsObj, Tcl_Obj *bodyObj, + const Tcl_MethodType *typePtr, + void *clientData, Proc **procPtrPtr); +/* 2 */ +TCLAPI Tcl_Method TclOOMakeProcMethod(Tcl_Interp *interp, + Class *clsPtr, int flags, Tcl_Obj *nameObj, + const char *namePtr, Tcl_Obj *argsObj, + Tcl_Obj *bodyObj, + const Tcl_MethodType *typePtr, + void *clientData, Proc **procPtrPtr); +/* 3 */ +TCLAPI Method * TclOONewProcInstanceMethod(Tcl_Interp *interp, + Object *oPtr, int flags, Tcl_Obj *nameObj, + Tcl_Obj *argsObj, Tcl_Obj *bodyObj, + ProcedureMethod **pmPtrPtr); +/* 4 */ +TCLAPI Method * TclOONewProcMethod(Tcl_Interp *interp, Class *clsPtr, + int flags, Tcl_Obj *nameObj, + Tcl_Obj *argsObj, Tcl_Obj *bodyObj, + ProcedureMethod **pmPtrPtr); +/* 5 */ +TCLAPI int TclOOObjectCmdCore(Object *oPtr, Tcl_Interp *interp, + int objc, Tcl_Obj *const *objv, + int publicOnly, Class *startCls); +/* 6 */ +TCLAPI int TclOOIsReachable(Class *targetPtr, Class *startPtr); +/* 7 */ +TCLAPI Method * TclOONewForwardMethod(Tcl_Interp *interp, + Class *clsPtr, int isPublic, + Tcl_Obj *nameObj, Tcl_Obj *prefixObj); +/* 8 */ +TCLAPI Method * TclOONewForwardInstanceMethod(Tcl_Interp *interp, + Object *oPtr, int isPublic, Tcl_Obj *nameObj, + Tcl_Obj *prefixObj); +/* 9 */ +TCLAPI Tcl_Method TclOONewProcInstanceMethodEx(Tcl_Interp *interp, + Tcl_Object oPtr, + TclOO_PreCallProc *preCallPtr, + TclOO_PostCallProc *postCallPtr, + ProcErrorProc *errProc, void *clientData, + Tcl_Obj *nameObj, Tcl_Obj *argsObj, + Tcl_Obj *bodyObj, int flags, + void **internalTokenPtr); +/* 10 */ +TCLAPI Tcl_Method TclOONewProcMethodEx(Tcl_Interp *interp, + Tcl_Class clsPtr, + TclOO_PreCallProc *preCallPtr, + TclOO_PostCallProc *postCallPtr, + 
ProcErrorProc *errProc, void *clientData, + Tcl_Obj *nameObj, Tcl_Obj *argsObj, + Tcl_Obj *bodyObj, int flags, + void **internalTokenPtr); +/* 11 */ +TCLAPI int TclOOInvokeObject(Tcl_Interp *interp, + Tcl_Object object, Tcl_Class startCls, + int publicPrivate, int objc, + Tcl_Obj *const *objv); +/* 12 */ +TCLAPI void TclOOObjectSetFilters(Object *oPtr, int numFilters, + Tcl_Obj *const *filters); +/* 13 */ +TCLAPI void TclOOClassSetFilters(Tcl_Interp *interp, + Class *classPtr, int numFilters, + Tcl_Obj *const *filters); +/* 14 */ +TCLAPI void TclOOObjectSetMixins(Object *oPtr, int numMixins, + Class *const *mixins); +/* 15 */ +TCLAPI void TclOOClassSetMixins(Tcl_Interp *interp, + Class *classPtr, int numMixins, + Class *const *mixins); + +typedef struct TclOOIntStubs { + int magic; + void *hooks; + + Tcl_Object (*tclOOGetDefineCmdContext) (Tcl_Interp *interp); /* 0 */ + Tcl_Method (*tclOOMakeProcInstanceMethod) (Tcl_Interp *interp, Object *oPtr, int flags, Tcl_Obj *nameObj, Tcl_Obj *argsObj, Tcl_Obj *bodyObj, const Tcl_MethodType *typePtr, void *clientData, Proc **procPtrPtr); /* 1 */ + Tcl_Method (*tclOOMakeProcMethod) (Tcl_Interp *interp, Class *clsPtr, int flags, Tcl_Obj *nameObj, const char *namePtr, Tcl_Obj *argsObj, Tcl_Obj *bodyObj, const Tcl_MethodType *typePtr, void *clientData, Proc **procPtrPtr); /* 2 */ + Method * (*tclOONewProcInstanceMethod) (Tcl_Interp *interp, Object *oPtr, int flags, Tcl_Obj *nameObj, Tcl_Obj *argsObj, Tcl_Obj *bodyObj, ProcedureMethod **pmPtrPtr); /* 3 */ + Method * (*tclOONewProcMethod) (Tcl_Interp *interp, Class *clsPtr, int flags, Tcl_Obj *nameObj, Tcl_Obj *argsObj, Tcl_Obj *bodyObj, ProcedureMethod **pmPtrPtr); /* 4 */ + int (*tclOOObjectCmdCore) (Object *oPtr, Tcl_Interp *interp, int objc, Tcl_Obj *const *objv, int publicOnly, Class *startCls); /* 5 */ + int (*tclOOIsReachable) (Class *targetPtr, Class *startPtr); /* 6 */ + Method * (*tclOONewForwardMethod) (Tcl_Interp *interp, Class *clsPtr, int isPublic, Tcl_Obj *nameObj, 
Tcl_Obj *prefixObj); /* 7 */ + Method * (*tclOONewForwardInstanceMethod) (Tcl_Interp *interp, Object *oPtr, int isPublic, Tcl_Obj *nameObj, Tcl_Obj *prefixObj); /* 8 */ + Tcl_Method (*tclOONewProcInstanceMethodEx) (Tcl_Interp *interp, Tcl_Object oPtr, TclOO_PreCallProc *preCallPtr, TclOO_PostCallProc *postCallPtr, ProcErrorProc *errProc, void *clientData, Tcl_Obj *nameObj, Tcl_Obj *argsObj, Tcl_Obj *bodyObj, int flags, void **internalTokenPtr); /* 9 */ + Tcl_Method (*tclOONewProcMethodEx) (Tcl_Interp *interp, Tcl_Class clsPtr, TclOO_PreCallProc *preCallPtr, TclOO_PostCallProc *postCallPtr, ProcErrorProc *errProc, void *clientData, Tcl_Obj *nameObj, Tcl_Obj *argsObj, Tcl_Obj *bodyObj, int flags, void **internalTokenPtr); /* 10 */ + int (*tclOOInvokeObject) (Tcl_Interp *interp, Tcl_Object object, Tcl_Class startCls, int publicPrivate, int objc, Tcl_Obj *const *objv); /* 11 */ + void (*tclOOObjectSetFilters) (Object *oPtr, int numFilters, Tcl_Obj *const *filters); /* 12 */ + void (*tclOOClassSetFilters) (Tcl_Interp *interp, Class *classPtr, int numFilters, Tcl_Obj *const *filters); /* 13 */ + void (*tclOOObjectSetMixins) (Object *oPtr, int numMixins, Class *const *mixins); /* 14 */ + void (*tclOOClassSetMixins) (Tcl_Interp *interp, Class *classPtr, int numMixins, Class *const *mixins); /* 15 */ +} TclOOIntStubs; + +extern const TclOOIntStubs *tclOOIntStubsPtr; + +#ifdef __cplusplus +} +#endif + +#if defined(USE_TCLOO_STUBS) + +/* + * Inline function declarations: + */ + +#define TclOOGetDefineCmdContext \ + (tclOOIntStubsPtr->tclOOGetDefineCmdContext) /* 0 */ +#define TclOOMakeProcInstanceMethod \ + (tclOOIntStubsPtr->tclOOMakeProcInstanceMethod) /* 1 */ +#define TclOOMakeProcMethod \ + (tclOOIntStubsPtr->tclOOMakeProcMethod) /* 2 */ +#define TclOONewProcInstanceMethod \ + (tclOOIntStubsPtr->tclOONewProcInstanceMethod) /* 3 */ +#define TclOONewProcMethod \ + (tclOOIntStubsPtr->tclOONewProcMethod) /* 4 */ +#define TclOOObjectCmdCore \ + 
(tclOOIntStubsPtr->tclOOObjectCmdCore) /* 5 */ +#define TclOOIsReachable \ + (tclOOIntStubsPtr->tclOOIsReachable) /* 6 */ +#define TclOONewForwardMethod \ + (tclOOIntStubsPtr->tclOONewForwardMethod) /* 7 */ +#define TclOONewForwardInstanceMethod \ + (tclOOIntStubsPtr->tclOONewForwardInstanceMethod) /* 8 */ +#define TclOONewProcInstanceMethodEx \ + (tclOOIntStubsPtr->tclOONewProcInstanceMethodEx) /* 9 */ +#define TclOONewProcMethodEx \ + (tclOOIntStubsPtr->tclOONewProcMethodEx) /* 10 */ +#define TclOOInvokeObject \ + (tclOOIntStubsPtr->tclOOInvokeObject) /* 11 */ +#define TclOOObjectSetFilters \ + (tclOOIntStubsPtr->tclOOObjectSetFilters) /* 12 */ +#define TclOOClassSetFilters \ + (tclOOIntStubsPtr->tclOOClassSetFilters) /* 13 */ +#define TclOOObjectSetMixins \ + (tclOOIntStubsPtr->tclOOObjectSetMixins) /* 14 */ +#define TclOOClassSetMixins \ + (tclOOIntStubsPtr->tclOOClassSetMixins) /* 15 */ + +#endif /* defined(USE_TCLOO_STUBS) */ + +/* !END!: Do not edit above this line. */ + +#endif /* _TCLOOINTDECLS */ diff --git a/llava_next/include/term.h b/llava_next/include/term.h new file mode 100644 index 0000000000000000000000000000000000000000..02bbd6edef3c3bbb6ced1d0e40bbedf8f032de0f --- /dev/null +++ b/llava_next/include/term.h @@ -0,0 +1,893 @@ +/**************************************************************************** + * Copyright 2018-2020,2021 Thomas E. Dickey * + * Copyright 1998-2013,2017 Free Software Foundation, Inc. 
* + * * + * Permission is hereby granted, free of charge, to any person obtaining a * + * copy of this software and associated documentation files (the * + * "Software"), to deal in the Software without restriction, including * + * without limitation the rights to use, copy, modify, merge, publish, * + * distribute, distribute with modifications, sublicense, and/or sell * + * copies of the Software, and to permit persons to whom the Software is * + * furnished to do so, subject to the following conditions: * + * * + * The above copyright notice and this permission notice shall be included * + * in all copies or substantial portions of the Software. * + * * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * + * IN NO EVENT SHALL THE ABOVE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, * + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR * + * THE USE OR OTHER DEALINGS IN THE SOFTWARE. * + * * + * Except as contained in this notice, the name(s) of the above copyright * + * holders shall not be used in advertising or otherwise to promote the * + * sale, use or other dealings in this Software without prior written * + * authorization. * + ****************************************************************************/ + +/****************************************************************************/ +/* Author: Zeyd M. Ben-Halim 1992,1995 */ +/* and: Eric S. Raymond */ +/* and: Thomas E. 
Dickey 1995-on */ +/****************************************************************************/ + +/* $Id: MKterm.h.awk.in,v 1.82 2021/09/24 17:02:46 tom Exp $ */ + +/* +** term.h -- Definition of struct term +*/ + +#ifndef NCURSES_TERM_H_incl +#define NCURSES_TERM_H_incl 1 + +#undef NCURSES_VERSION +#define NCURSES_VERSION "6.4" + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* Make this file self-contained by providing defaults for the HAVE_TERMIO[S]_H + * definition (based on the system for which this was configured). + */ + +#ifndef __NCURSES_H + +typedef struct screen SCREEN; + +#if 1 +#undef NCURSES_SP_FUNCS +#define NCURSES_SP_FUNCS 20221231 +#undef NCURSES_SP_NAME +#define NCURSES_SP_NAME(name) name##_sp + +/* Define the sp-funcs helper function */ +#undef NCURSES_SP_OUTC +#define NCURSES_SP_OUTC NCURSES_SP_NAME(NCURSES_OUTC) +typedef int (*NCURSES_SP_OUTC)(SCREEN*, int); +#endif + +#endif /* __NCURSES_H */ + +#undef NCURSES_CONST +#define NCURSES_CONST const + +#undef NCURSES_SBOOL +#define NCURSES_SBOOL char + +#undef NCURSES_USE_DATABASE +#define NCURSES_USE_DATABASE 1 + +#undef NCURSES_USE_TERMCAP +#define NCURSES_USE_TERMCAP 1 + +#undef NCURSES_XNAMES +#define NCURSES_XNAMES 1 + +/* We will use these symbols to hide differences between + * termios/termio/sgttyb interfaces. 
+ */ +#undef TTY +#undef SET_TTY +#undef GET_TTY + +/* Assume POSIX termio if we have the header and function */ +/* #if HAVE_TERMIOS_H && HAVE_TCGETATTR */ +#if 1 && 1 + +#undef TERMIOS +#define TERMIOS 1 + +#include +#define TTY struct termios + +#else /* !HAVE_TERMIOS_H */ + +/* #if HAVE_TERMIO_H */ +#if 1 + +#undef TERMIOS +#define TERMIOS 1 + +#include +#define TTY struct termio + +#else /* !HAVE_TERMIO_H */ + +#if (defined(_WIN32) || defined(_WIN64)) +#if 0 +#include +#define TTY struct winconmode +#else +#include +#define TTY struct termios +#endif +#else +#undef TERMIOS +#include +#include +#define TTY struct sgttyb +#endif /* MINGW32 */ +#endif /* HAVE_TERMIO_H */ + +#endif /* HAVE_TERMIOS_H */ + +#ifdef TERMIOS +#define GET_TTY(fd, buf) tcgetattr(fd, buf) +#define SET_TTY(fd, buf) tcsetattr(fd, TCSADRAIN, buf) +#elif 0 && (defined(_WIN32) || defined(_WIN64)) +#define GET_TTY(fd, buf) _nc_console_getmode(_nc_console_fd2handle(fd),buf) +#define SET_TTY(fd, buf) _nc_console_setmode(_nc_console_fd2handle(fd),buf) +#else +#define GET_TTY(fd, buf) gtty(fd, buf) +#define SET_TTY(fd, buf) stty(fd, buf) +#endif + +#ifndef GCC_NORETURN +#define GCC_NORETURN /* nothing */ +#endif + +#define NAMESIZE 256 + +/* The cast works because TERMTYPE is the first data in TERMINAL */ +#define CUR ((TERMTYPE *)(cur_term))-> + +#define auto_left_margin CUR Booleans[0] +#define auto_right_margin CUR Booleans[1] +#define no_esc_ctlc CUR Booleans[2] +#define ceol_standout_glitch CUR Booleans[3] +#define eat_newline_glitch CUR Booleans[4] +#define erase_overstrike CUR Booleans[5] +#define generic_type CUR Booleans[6] +#define hard_copy CUR Booleans[7] +#define has_meta_key CUR Booleans[8] +#define has_status_line CUR Booleans[9] +#define insert_null_glitch CUR Booleans[10] +#define memory_above CUR Booleans[11] +#define memory_below CUR Booleans[12] +#define move_insert_mode CUR Booleans[13] +#define move_standout_mode CUR Booleans[14] +#define over_strike CUR Booleans[15] +#define 
status_line_esc_ok CUR Booleans[16] +#define dest_tabs_magic_smso CUR Booleans[17] +#define tilde_glitch CUR Booleans[18] +#define transparent_underline CUR Booleans[19] +#define xon_xoff CUR Booleans[20] +#define needs_xon_xoff CUR Booleans[21] +#define prtr_silent CUR Booleans[22] +#define hard_cursor CUR Booleans[23] +#define non_rev_rmcup CUR Booleans[24] +#define no_pad_char CUR Booleans[25] +#define non_dest_scroll_region CUR Booleans[26] +#define can_change CUR Booleans[27] +#define back_color_erase CUR Booleans[28] +#define hue_lightness_saturation CUR Booleans[29] +#define col_addr_glitch CUR Booleans[30] +#define cr_cancels_micro_mode CUR Booleans[31] +#define has_print_wheel CUR Booleans[32] +#define row_addr_glitch CUR Booleans[33] +#define semi_auto_right_margin CUR Booleans[34] +#define cpi_changes_res CUR Booleans[35] +#define lpi_changes_res CUR Booleans[36] +#define columns CUR Numbers[0] +#define init_tabs CUR Numbers[1] +#define lines CUR Numbers[2] +#define lines_of_memory CUR Numbers[3] +#define magic_cookie_glitch CUR Numbers[4] +#define padding_baud_rate CUR Numbers[5] +#define virtual_terminal CUR Numbers[6] +#define width_status_line CUR Numbers[7] +#define num_labels CUR Numbers[8] +#define label_height CUR Numbers[9] +#define label_width CUR Numbers[10] +#define max_attributes CUR Numbers[11] +#define maximum_windows CUR Numbers[12] +#define max_colors CUR Numbers[13] +#define max_pairs CUR Numbers[14] +#define no_color_video CUR Numbers[15] +#define buffer_capacity CUR Numbers[16] +#define dot_vert_spacing CUR Numbers[17] +#define dot_horz_spacing CUR Numbers[18] +#define max_micro_address CUR Numbers[19] +#define max_micro_jump CUR Numbers[20] +#define micro_col_size CUR Numbers[21] +#define micro_line_size CUR Numbers[22] +#define number_of_pins CUR Numbers[23] +#define output_res_char CUR Numbers[24] +#define output_res_line CUR Numbers[25] +#define output_res_horz_inch CUR Numbers[26] +#define output_res_vert_inch CUR Numbers[27] 
+#define print_rate CUR Numbers[28] +#define wide_char_size CUR Numbers[29] +#define buttons CUR Numbers[30] +#define bit_image_entwining CUR Numbers[31] +#define bit_image_type CUR Numbers[32] +#define back_tab CUR Strings[0] +#define bell CUR Strings[1] +#define carriage_return CUR Strings[2] +#define change_scroll_region CUR Strings[3] +#define clear_all_tabs CUR Strings[4] +#define clear_screen CUR Strings[5] +#define clr_eol CUR Strings[6] +#define clr_eos CUR Strings[7] +#define column_address CUR Strings[8] +#define command_character CUR Strings[9] +#define cursor_address CUR Strings[10] +#define cursor_down CUR Strings[11] +#define cursor_home CUR Strings[12] +#define cursor_invisible CUR Strings[13] +#define cursor_left CUR Strings[14] +#define cursor_mem_address CUR Strings[15] +#define cursor_normal CUR Strings[16] +#define cursor_right CUR Strings[17] +#define cursor_to_ll CUR Strings[18] +#define cursor_up CUR Strings[19] +#define cursor_visible CUR Strings[20] +#define delete_character CUR Strings[21] +#define delete_line CUR Strings[22] +#define dis_status_line CUR Strings[23] +#define down_half_line CUR Strings[24] +#define enter_alt_charset_mode CUR Strings[25] +#define enter_blink_mode CUR Strings[26] +#define enter_bold_mode CUR Strings[27] +#define enter_ca_mode CUR Strings[28] +#define enter_delete_mode CUR Strings[29] +#define enter_dim_mode CUR Strings[30] +#define enter_insert_mode CUR Strings[31] +#define enter_secure_mode CUR Strings[32] +#define enter_protected_mode CUR Strings[33] +#define enter_reverse_mode CUR Strings[34] +#define enter_standout_mode CUR Strings[35] +#define enter_underline_mode CUR Strings[36] +#define erase_chars CUR Strings[37] +#define exit_alt_charset_mode CUR Strings[38] +#define exit_attribute_mode CUR Strings[39] +#define exit_ca_mode CUR Strings[40] +#define exit_delete_mode CUR Strings[41] +#define exit_insert_mode CUR Strings[42] +#define exit_standout_mode CUR Strings[43] +#define exit_underline_mode CUR 
Strings[44] +#define flash_screen CUR Strings[45] +#define form_feed CUR Strings[46] +#define from_status_line CUR Strings[47] +#define init_1string CUR Strings[48] +#define init_2string CUR Strings[49] +#define init_3string CUR Strings[50] +#define init_file CUR Strings[51] +#define insert_character CUR Strings[52] +#define insert_line CUR Strings[53] +#define insert_padding CUR Strings[54] +#define key_backspace CUR Strings[55] +#define key_catab CUR Strings[56] +#define key_clear CUR Strings[57] +#define key_ctab CUR Strings[58] +#define key_dc CUR Strings[59] +#define key_dl CUR Strings[60] +#define key_down CUR Strings[61] +#define key_eic CUR Strings[62] +#define key_eol CUR Strings[63] +#define key_eos CUR Strings[64] +#define key_f0 CUR Strings[65] +#define key_f1 CUR Strings[66] +#define key_f10 CUR Strings[67] +#define key_f2 CUR Strings[68] +#define key_f3 CUR Strings[69] +#define key_f4 CUR Strings[70] +#define key_f5 CUR Strings[71] +#define key_f6 CUR Strings[72] +#define key_f7 CUR Strings[73] +#define key_f8 CUR Strings[74] +#define key_f9 CUR Strings[75] +#define key_home CUR Strings[76] +#define key_ic CUR Strings[77] +#define key_il CUR Strings[78] +#define key_left CUR Strings[79] +#define key_ll CUR Strings[80] +#define key_npage CUR Strings[81] +#define key_ppage CUR Strings[82] +#define key_right CUR Strings[83] +#define key_sf CUR Strings[84] +#define key_sr CUR Strings[85] +#define key_stab CUR Strings[86] +#define key_up CUR Strings[87] +#define keypad_local CUR Strings[88] +#define keypad_xmit CUR Strings[89] +#define lab_f0 CUR Strings[90] +#define lab_f1 CUR Strings[91] +#define lab_f10 CUR Strings[92] +#define lab_f2 CUR Strings[93] +#define lab_f3 CUR Strings[94] +#define lab_f4 CUR Strings[95] +#define lab_f5 CUR Strings[96] +#define lab_f6 CUR Strings[97] +#define lab_f7 CUR Strings[98] +#define lab_f8 CUR Strings[99] +#define lab_f9 CUR Strings[100] +#define meta_off CUR Strings[101] +#define meta_on CUR Strings[102] +#define 
newline CUR Strings[103] +#define pad_char CUR Strings[104] +#define parm_dch CUR Strings[105] +#define parm_delete_line CUR Strings[106] +#define parm_down_cursor CUR Strings[107] +#define parm_ich CUR Strings[108] +#define parm_index CUR Strings[109] +#define parm_insert_line CUR Strings[110] +#define parm_left_cursor CUR Strings[111] +#define parm_right_cursor CUR Strings[112] +#define parm_rindex CUR Strings[113] +#define parm_up_cursor CUR Strings[114] +#define pkey_key CUR Strings[115] +#define pkey_local CUR Strings[116] +#define pkey_xmit CUR Strings[117] +#define print_screen CUR Strings[118] +#define prtr_off CUR Strings[119] +#define prtr_on CUR Strings[120] +#define repeat_char CUR Strings[121] +#define reset_1string CUR Strings[122] +#define reset_2string CUR Strings[123] +#define reset_3string CUR Strings[124] +#define reset_file CUR Strings[125] +#define restore_cursor CUR Strings[126] +#define row_address CUR Strings[127] +#define save_cursor CUR Strings[128] +#define scroll_forward CUR Strings[129] +#define scroll_reverse CUR Strings[130] +#define set_attributes CUR Strings[131] +#define set_tab CUR Strings[132] +#define set_window CUR Strings[133] +#define tab CUR Strings[134] +#define to_status_line CUR Strings[135] +#define underline_char CUR Strings[136] +#define up_half_line CUR Strings[137] +#define init_prog CUR Strings[138] +#define key_a1 CUR Strings[139] +#define key_a3 CUR Strings[140] +#define key_b2 CUR Strings[141] +#define key_c1 CUR Strings[142] +#define key_c3 CUR Strings[143] +#define prtr_non CUR Strings[144] +#define char_padding CUR Strings[145] +#define acs_chars CUR Strings[146] +#define plab_norm CUR Strings[147] +#define key_btab CUR Strings[148] +#define enter_xon_mode CUR Strings[149] +#define exit_xon_mode CUR Strings[150] +#define enter_am_mode CUR Strings[151] +#define exit_am_mode CUR Strings[152] +#define xon_character CUR Strings[153] +#define xoff_character CUR Strings[154] +#define ena_acs CUR Strings[155] 
+#define label_on CUR Strings[156] +#define label_off CUR Strings[157] +#define key_beg CUR Strings[158] +#define key_cancel CUR Strings[159] +#define key_close CUR Strings[160] +#define key_command CUR Strings[161] +#define key_copy CUR Strings[162] +#define key_create CUR Strings[163] +#define key_end CUR Strings[164] +#define key_enter CUR Strings[165] +#define key_exit CUR Strings[166] +#define key_find CUR Strings[167] +#define key_help CUR Strings[168] +#define key_mark CUR Strings[169] +#define key_message CUR Strings[170] +#define key_move CUR Strings[171] +#define key_next CUR Strings[172] +#define key_open CUR Strings[173] +#define key_options CUR Strings[174] +#define key_previous CUR Strings[175] +#define key_print CUR Strings[176] +#define key_redo CUR Strings[177] +#define key_reference CUR Strings[178] +#define key_refresh CUR Strings[179] +#define key_replace CUR Strings[180] +#define key_restart CUR Strings[181] +#define key_resume CUR Strings[182] +#define key_save CUR Strings[183] +#define key_suspend CUR Strings[184] +#define key_undo CUR Strings[185] +#define key_sbeg CUR Strings[186] +#define key_scancel CUR Strings[187] +#define key_scommand CUR Strings[188] +#define key_scopy CUR Strings[189] +#define key_screate CUR Strings[190] +#define key_sdc CUR Strings[191] +#define key_sdl CUR Strings[192] +#define key_select CUR Strings[193] +#define key_send CUR Strings[194] +#define key_seol CUR Strings[195] +#define key_sexit CUR Strings[196] +#define key_sfind CUR Strings[197] +#define key_shelp CUR Strings[198] +#define key_shome CUR Strings[199] +#define key_sic CUR Strings[200] +#define key_sleft CUR Strings[201] +#define key_smessage CUR Strings[202] +#define key_smove CUR Strings[203] +#define key_snext CUR Strings[204] +#define key_soptions CUR Strings[205] +#define key_sprevious CUR Strings[206] +#define key_sprint CUR Strings[207] +#define key_sredo CUR Strings[208] +#define key_sreplace CUR Strings[209] +#define key_sright CUR 
Strings[210] +#define key_srsume CUR Strings[211] +#define key_ssave CUR Strings[212] +#define key_ssuspend CUR Strings[213] +#define key_sundo CUR Strings[214] +#define req_for_input CUR Strings[215] +#define key_f11 CUR Strings[216] +#define key_f12 CUR Strings[217] +#define key_f13 CUR Strings[218] +#define key_f14 CUR Strings[219] +#define key_f15 CUR Strings[220] +#define key_f16 CUR Strings[221] +#define key_f17 CUR Strings[222] +#define key_f18 CUR Strings[223] +#define key_f19 CUR Strings[224] +#define key_f20 CUR Strings[225] +#define key_f21 CUR Strings[226] +#define key_f22 CUR Strings[227] +#define key_f23 CUR Strings[228] +#define key_f24 CUR Strings[229] +#define key_f25 CUR Strings[230] +#define key_f26 CUR Strings[231] +#define key_f27 CUR Strings[232] +#define key_f28 CUR Strings[233] +#define key_f29 CUR Strings[234] +#define key_f30 CUR Strings[235] +#define key_f31 CUR Strings[236] +#define key_f32 CUR Strings[237] +#define key_f33 CUR Strings[238] +#define key_f34 CUR Strings[239] +#define key_f35 CUR Strings[240] +#define key_f36 CUR Strings[241] +#define key_f37 CUR Strings[242] +#define key_f38 CUR Strings[243] +#define key_f39 CUR Strings[244] +#define key_f40 CUR Strings[245] +#define key_f41 CUR Strings[246] +#define key_f42 CUR Strings[247] +#define key_f43 CUR Strings[248] +#define key_f44 CUR Strings[249] +#define key_f45 CUR Strings[250] +#define key_f46 CUR Strings[251] +#define key_f47 CUR Strings[252] +#define key_f48 CUR Strings[253] +#define key_f49 CUR Strings[254] +#define key_f50 CUR Strings[255] +#define key_f51 CUR Strings[256] +#define key_f52 CUR Strings[257] +#define key_f53 CUR Strings[258] +#define key_f54 CUR Strings[259] +#define key_f55 CUR Strings[260] +#define key_f56 CUR Strings[261] +#define key_f57 CUR Strings[262] +#define key_f58 CUR Strings[263] +#define key_f59 CUR Strings[264] +#define key_f60 CUR Strings[265] +#define key_f61 CUR Strings[266] +#define key_f62 CUR Strings[267] +#define key_f63 CUR 
Strings[268] +#define clr_bol CUR Strings[269] +#define clear_margins CUR Strings[270] +#define set_left_margin CUR Strings[271] +#define set_right_margin CUR Strings[272] +#define label_format CUR Strings[273] +#define set_clock CUR Strings[274] +#define display_clock CUR Strings[275] +#define remove_clock CUR Strings[276] +#define create_window CUR Strings[277] +#define goto_window CUR Strings[278] +#define hangup CUR Strings[279] +#define dial_phone CUR Strings[280] +#define quick_dial CUR Strings[281] +#define tone CUR Strings[282] +#define pulse CUR Strings[283] +#define flash_hook CUR Strings[284] +#define fixed_pause CUR Strings[285] +#define wait_tone CUR Strings[286] +#define user0 CUR Strings[287] +#define user1 CUR Strings[288] +#define user2 CUR Strings[289] +#define user3 CUR Strings[290] +#define user4 CUR Strings[291] +#define user5 CUR Strings[292] +#define user6 CUR Strings[293] +#define user7 CUR Strings[294] +#define user8 CUR Strings[295] +#define user9 CUR Strings[296] +#define orig_pair CUR Strings[297] +#define orig_colors CUR Strings[298] +#define initialize_color CUR Strings[299] +#define initialize_pair CUR Strings[300] +#define set_color_pair CUR Strings[301] +#define set_foreground CUR Strings[302] +#define set_background CUR Strings[303] +#define change_char_pitch CUR Strings[304] +#define change_line_pitch CUR Strings[305] +#define change_res_horz CUR Strings[306] +#define change_res_vert CUR Strings[307] +#define define_char CUR Strings[308] +#define enter_doublewide_mode CUR Strings[309] +#define enter_draft_quality CUR Strings[310] +#define enter_italics_mode CUR Strings[311] +#define enter_leftward_mode CUR Strings[312] +#define enter_micro_mode CUR Strings[313] +#define enter_near_letter_quality CUR Strings[314] +#define enter_normal_quality CUR Strings[315] +#define enter_shadow_mode CUR Strings[316] +#define enter_subscript_mode CUR Strings[317] +#define enter_superscript_mode CUR Strings[318] +#define enter_upward_mode CUR 
Strings[319] +#define exit_doublewide_mode CUR Strings[320] +#define exit_italics_mode CUR Strings[321] +#define exit_leftward_mode CUR Strings[322] +#define exit_micro_mode CUR Strings[323] +#define exit_shadow_mode CUR Strings[324] +#define exit_subscript_mode CUR Strings[325] +#define exit_superscript_mode CUR Strings[326] +#define exit_upward_mode CUR Strings[327] +#define micro_column_address CUR Strings[328] +#define micro_down CUR Strings[329] +#define micro_left CUR Strings[330] +#define micro_right CUR Strings[331] +#define micro_row_address CUR Strings[332] +#define micro_up CUR Strings[333] +#define order_of_pins CUR Strings[334] +#define parm_down_micro CUR Strings[335] +#define parm_left_micro CUR Strings[336] +#define parm_right_micro CUR Strings[337] +#define parm_up_micro CUR Strings[338] +#define select_char_set CUR Strings[339] +#define set_bottom_margin CUR Strings[340] +#define set_bottom_margin_parm CUR Strings[341] +#define set_left_margin_parm CUR Strings[342] +#define set_right_margin_parm CUR Strings[343] +#define set_top_margin CUR Strings[344] +#define set_top_margin_parm CUR Strings[345] +#define start_bit_image CUR Strings[346] +#define start_char_set_def CUR Strings[347] +#define stop_bit_image CUR Strings[348] +#define stop_char_set_def CUR Strings[349] +#define subscript_characters CUR Strings[350] +#define superscript_characters CUR Strings[351] +#define these_cause_cr CUR Strings[352] +#define zero_motion CUR Strings[353] +#define char_set_names CUR Strings[354] +#define key_mouse CUR Strings[355] +#define mouse_info CUR Strings[356] +#define req_mouse_pos CUR Strings[357] +#define get_mouse CUR Strings[358] +#define set_a_foreground CUR Strings[359] +#define set_a_background CUR Strings[360] +#define pkey_plab CUR Strings[361] +#define device_type CUR Strings[362] +#define code_set_init CUR Strings[363] +#define set0_des_seq CUR Strings[364] +#define set1_des_seq CUR Strings[365] +#define set2_des_seq CUR Strings[366] +#define 
set3_des_seq CUR Strings[367] +#define set_lr_margin CUR Strings[368] +#define set_tb_margin CUR Strings[369] +#define bit_image_repeat CUR Strings[370] +#define bit_image_newline CUR Strings[371] +#define bit_image_carriage_return CUR Strings[372] +#define color_names CUR Strings[373] +#define define_bit_image_region CUR Strings[374] +#define end_bit_image_region CUR Strings[375] +#define set_color_band CUR Strings[376] +#define set_page_length CUR Strings[377] +#define display_pc_char CUR Strings[378] +#define enter_pc_charset_mode CUR Strings[379] +#define exit_pc_charset_mode CUR Strings[380] +#define enter_scancode_mode CUR Strings[381] +#define exit_scancode_mode CUR Strings[382] +#define pc_term_options CUR Strings[383] +#define scancode_escape CUR Strings[384] +#define alt_scancode_esc CUR Strings[385] +#define enter_horizontal_hl_mode CUR Strings[386] +#define enter_left_hl_mode CUR Strings[387] +#define enter_low_hl_mode CUR Strings[388] +#define enter_right_hl_mode CUR Strings[389] +#define enter_top_hl_mode CUR Strings[390] +#define enter_vertical_hl_mode CUR Strings[391] +#define set_a_attributes CUR Strings[392] +#define set_pglen_inch CUR Strings[393] + +#define BOOLWRITE 37 +#define NUMWRITE 33 +#define STRWRITE 394 + +/* older synonyms for some capabilities */ +#define beehive_glitch no_esc_ctlc +#define teleray_glitch dest_tabs_magic_smso + +/* HPUX-11 uses this name rather than the standard one */ +#ifndef micro_char_size +#define micro_char_size micro_col_size +#endif + +#ifdef __INTERNAL_CAPS_VISIBLE +#define termcap_init2 CUR Strings[394] +#define termcap_reset CUR Strings[395] +#define magic_cookie_glitch_ul CUR Numbers[33] +#define backspaces_with_bs CUR Booleans[37] +#define crt_no_scrolling CUR Booleans[38] +#define no_correctly_working_cr CUR Booleans[39] +#define carriage_return_delay CUR Numbers[34] +#define new_line_delay CUR Numbers[35] +#define linefeed_if_not_lf CUR Strings[396] +#define backspace_if_not_bs CUR Strings[397] +#define 
gnu_has_meta_key CUR Booleans[40] +#define linefeed_is_newline CUR Booleans[41] +#define backspace_delay CUR Numbers[36] +#define horizontal_tab_delay CUR Numbers[37] +#define number_of_function_keys CUR Numbers[38] +#define other_non_function_keys CUR Strings[398] +#define arrow_key_map CUR Strings[399] +#define has_hardware_tabs CUR Booleans[42] +#define return_does_clr_eol CUR Booleans[43] +#define acs_ulcorner CUR Strings[400] +#define acs_llcorner CUR Strings[401] +#define acs_urcorner CUR Strings[402] +#define acs_lrcorner CUR Strings[403] +#define acs_ltee CUR Strings[404] +#define acs_rtee CUR Strings[405] +#define acs_btee CUR Strings[406] +#define acs_ttee CUR Strings[407] +#define acs_hline CUR Strings[408] +#define acs_vline CUR Strings[409] +#define acs_plus CUR Strings[410] +#define memory_lock CUR Strings[411] +#define memory_unlock CUR Strings[412] +#define box_chars_1 CUR Strings[413] +#endif /* __INTERNAL_CAPS_VISIBLE */ + + +/* + * Predefined terminfo array sizes + */ +#define BOOLCOUNT 44 +#define NUMCOUNT 39 +#define STRCOUNT 414 + +/* used by code for comparing entries */ +#define acs_chars_index 146 + +typedef struct termtype { /* in-core form of terminfo data */ + char *term_names; /* str_table offset of term names */ + char *str_table; /* pointer to string table */ + NCURSES_SBOOL *Booleans; /* array of boolean values */ + short *Numbers; /* array of integer values */ + char **Strings; /* array of string offsets */ + +#if NCURSES_XNAMES + char *ext_str_table; /* pointer to extended string table */ + char **ext_Names; /* corresponding names */ + + unsigned short num_Booleans;/* count total Booleans */ + unsigned short num_Numbers; /* count total Numbers */ + unsigned short num_Strings; /* count total Strings */ + + unsigned short ext_Booleans;/* count extensions to Booleans */ + unsigned short ext_Numbers; /* count extensions to Numbers */ + unsigned short ext_Strings; /* count extensions to Strings */ +#endif /* NCURSES_XNAMES */ + +} 
TERMTYPE; + +/* + * The only reason these structures are visible is for read-only use. + * Programs which modify the data are not, never were, portable across + * curses implementations. + * + * The first field in TERMINAL is used in macros. + * The remaining fields are private. + */ +#ifdef NCURSES_INTERNALS + +#undef TERMINAL +#define TERMINAL struct term +TERMINAL; + +typedef struct termtype2 { /* in-core form of terminfo data */ + char *term_names; /* str_table offset of term names */ + char *str_table; /* pointer to string table */ + NCURSES_SBOOL *Booleans; /* array of boolean values */ + int *Numbers; /* array of integer values */ + char **Strings; /* array of string offsets */ + +#if NCURSES_XNAMES + char *ext_str_table; /* pointer to extended string table */ + char **ext_Names; /* corresponding names */ + + unsigned short num_Booleans;/* count total Booleans */ + unsigned short num_Numbers; /* count total Numbers */ + unsigned short num_Strings; /* count total Strings */ + + unsigned short ext_Booleans;/* count extensions to Booleans */ + unsigned short ext_Numbers; /* count extensions to Numbers */ + unsigned short ext_Strings; /* count extensions to Strings */ +#endif /* NCURSES_XNAMES */ + +} TERMTYPE2; +#else + +typedef struct term { /* describe an actual terminal */ + TERMTYPE type; /* terminal type description */ +} TERMINAL; + +#endif /* NCURSES_INTERNALS */ + + +#if 0 && !0 +extern NCURSES_EXPORT_VAR(TERMINAL *) cur_term; +#elif 0 +NCURSES_WRAPPED_VAR(TERMINAL *, cur_term); +#define cur_term NCURSES_PUBLIC_VAR(cur_term()) +#else +extern NCURSES_EXPORT_VAR(TERMINAL *) cur_term; +#endif + +#if 0 || 0 +NCURSES_WRAPPED_VAR(NCURSES_CONST char * const *, boolnames); +NCURSES_WRAPPED_VAR(NCURSES_CONST char * const *, boolcodes); +NCURSES_WRAPPED_VAR(NCURSES_CONST char * const *, boolfnames); +NCURSES_WRAPPED_VAR(NCURSES_CONST char * const *, numnames); +NCURSES_WRAPPED_VAR(NCURSES_CONST char * const *, numcodes); +NCURSES_WRAPPED_VAR(NCURSES_CONST char * 
const *, numfnames); +NCURSES_WRAPPED_VAR(NCURSES_CONST char * const *, strnames); +NCURSES_WRAPPED_VAR(NCURSES_CONST char * const *, strcodes); +NCURSES_WRAPPED_VAR(NCURSES_CONST char * const *, strfnames); + +#define boolnames NCURSES_PUBLIC_VAR(boolnames()) +#define boolcodes NCURSES_PUBLIC_VAR(boolcodes()) +#define boolfnames NCURSES_PUBLIC_VAR(boolfnames()) +#define numnames NCURSES_PUBLIC_VAR(numnames()) +#define numcodes NCURSES_PUBLIC_VAR(numcodes()) +#define numfnames NCURSES_PUBLIC_VAR(numfnames()) +#define strnames NCURSES_PUBLIC_VAR(strnames()) +#define strcodes NCURSES_PUBLIC_VAR(strcodes()) +#define strfnames NCURSES_PUBLIC_VAR(strfnames()) + +#else + +extern NCURSES_EXPORT_VAR(NCURSES_CONST char * const ) boolnames[]; +extern NCURSES_EXPORT_VAR(NCURSES_CONST char * const ) boolcodes[]; +extern NCURSES_EXPORT_VAR(NCURSES_CONST char * const ) boolfnames[]; +extern NCURSES_EXPORT_VAR(NCURSES_CONST char * const ) numnames[]; +extern NCURSES_EXPORT_VAR(NCURSES_CONST char * const ) numcodes[]; +extern NCURSES_EXPORT_VAR(NCURSES_CONST char * const ) numfnames[]; +extern NCURSES_EXPORT_VAR(NCURSES_CONST char * const ) strnames[]; +extern NCURSES_EXPORT_VAR(NCURSES_CONST char * const ) strcodes[]; +extern NCURSES_EXPORT_VAR(NCURSES_CONST char * const ) strfnames[]; + +#endif + +/* + * These entrypoints are used only by the ncurses utilities such as tic. 
+ */ +#ifdef NCURSES_INTERNALS + +extern NCURSES_EXPORT(int) _nc_set_tty_mode (TTY *buf); +extern NCURSES_EXPORT(int) _nc_read_entry2 (const char * const, char * const, TERMTYPE2 *const); +extern NCURSES_EXPORT(int) _nc_read_file_entry (const char *const, TERMTYPE2 *); +extern NCURSES_EXPORT(int) _nc_read_termtype (TERMTYPE2 *, char *, int); +extern NCURSES_EXPORT(char *) _nc_first_name (const char *const); +extern NCURSES_EXPORT(int) _nc_name_match (const char *const, const char *const, const char *const); +extern NCURSES_EXPORT(char *) _nc_tiparm(int, const char *, ...); + +#endif /* NCURSES_INTERNALS */ + + +/* + * These entrypoints are used by tack 1.07. + */ +extern NCURSES_EXPORT(const TERMTYPE *) _nc_fallback (const char *); +extern NCURSES_EXPORT(int) _nc_read_entry (const char * const, char * const, TERMTYPE *const); + +/* + * Normal entry points + */ +extern NCURSES_EXPORT(TERMINAL *) set_curterm (TERMINAL *); +extern NCURSES_EXPORT(int) del_curterm (TERMINAL *); + +/* miscellaneous entry points */ +extern NCURSES_EXPORT(int) restartterm (NCURSES_CONST char *, int, int *); +extern NCURSES_EXPORT(int) setupterm (const char *,int,int *); + +/* terminfo entry points, also declared in curses.h */ +#if !defined(__NCURSES_H) +extern NCURSES_EXPORT(char *) tigetstr (const char *); +extern NCURSES_EXPORT_VAR(char) ttytype[]; +extern NCURSES_EXPORT(int) putp (const char *); +extern NCURSES_EXPORT(int) tigetflag (const char *); +extern NCURSES_EXPORT(int) tigetnum (const char *); + +#if 1 /* NCURSES_TPARM_VARARGS */ +extern NCURSES_EXPORT(char *) tparm (const char *, ...); /* special */ +#else +extern NCURSES_EXPORT(char *) tparm (const char *, long,long,long,long,long,long,long,long,long); /* special */ +#endif + +extern NCURSES_EXPORT(char *) tiparm (const char *, ...); /* special */ + +#endif /* __NCURSES_H */ + +/* termcap database emulation (XPG4 uses const only for 2nd param of tgetent) */ +#if !defined(NCURSES_TERMCAP_H_incl) +extern NCURSES_EXPORT(char *) 
tgetstr (const char *, char **); +extern NCURSES_EXPORT(char *) tgoto (const char *, int, int); +extern NCURSES_EXPORT(int) tgetent (char *, const char *); +extern NCURSES_EXPORT(int) tgetflag (const char *); +extern NCURSES_EXPORT(int) tgetnum (const char *); +extern NCURSES_EXPORT(int) tputs (const char *, int, int (*)(int)); +#endif /* NCURSES_TERMCAP_H_incl */ + +/* + * Include curses.h before term.h to enable these extensions. + */ +#if defined(NCURSES_SP_FUNCS) && (NCURSES_SP_FUNCS != 0) + +extern NCURSES_EXPORT(char *) NCURSES_SP_NAME(tigetstr) (SCREEN*, const char *); +extern NCURSES_EXPORT(int) NCURSES_SP_NAME(putp) (SCREEN*, const char *); +extern NCURSES_EXPORT(int) NCURSES_SP_NAME(tigetflag) (SCREEN*, const char *); +extern NCURSES_EXPORT(int) NCURSES_SP_NAME(tigetnum) (SCREEN*, const char *); + +#if 1 /* NCURSES_TPARM_VARARGS */ +extern NCURSES_EXPORT(char *) NCURSES_SP_NAME(tparm) (SCREEN*, const char *, ...); /* special */ +#else +extern NCURSES_EXPORT(char *) NCURSES_SP_NAME(tparm) (SCREEN*, const char *, long,long,long,long,long,long,long,long,long); /* special */ +#endif + +/* termcap database emulation (XPG4 uses const only for 2nd param of tgetent) */ +extern NCURSES_EXPORT(char *) NCURSES_SP_NAME(tgetstr) (SCREEN*, const char *, char **); +extern NCURSES_EXPORT(char *) NCURSES_SP_NAME(tgoto) (SCREEN*, const char *, int, int); +extern NCURSES_EXPORT(int) NCURSES_SP_NAME(tgetent) (SCREEN*, char *, const char *); +extern NCURSES_EXPORT(int) NCURSES_SP_NAME(tgetflag) (SCREEN*, const char *); +extern NCURSES_EXPORT(int) NCURSES_SP_NAME(tgetnum) (SCREEN*, const char *); +extern NCURSES_EXPORT(int) NCURSES_SP_NAME(tputs) (SCREEN*, const char *, int, NCURSES_SP_OUTC); + +extern NCURSES_EXPORT(TERMINAL *) NCURSES_SP_NAME(set_curterm) (SCREEN*, TERMINAL *); +extern NCURSES_EXPORT(int) NCURSES_SP_NAME(del_curterm) (SCREEN*, TERMINAL *); + +extern NCURSES_EXPORT(int) NCURSES_SP_NAME(restartterm) (SCREEN*, NCURSES_CONST char *, int, int *); +#endif /* 
NCURSES_SP_FUNCS */ + +/* + * Debugging features. + */ +extern GCC_NORETURN NCURSES_EXPORT(void) exit_terminfo(int); + +#ifdef __cplusplus +} +#endif + +#endif /* NCURSES_TERM_H_incl */ diff --git a/llava_next/include/tkMacOSXFont.h b/llava_next/include/tkMacOSXFont.h new file mode 100644 index 0000000000000000000000000000000000000000..7fc9265f80eeb102391c407a3eee786a09486dd6 --- /dev/null +++ b/llava_next/include/tkMacOSXFont.h @@ -0,0 +1,32 @@ +/* + * tkMacOSXFont.h -- + * + * Contains the Macintosh implementation of the platform-independent + * font package interface. + * + * Copyright (c) 1990-1994 The Regents of the University of California. + * Copyright (c) 1994-1997 Sun Microsystems, Inc. + * Copyright 2001-2009, Apple Inc. + * Copyright (c) 2006-2009 Daniel A. Steffen + * + * See the file "license.terms" for information on usage and redistribution + * of this file, and for a DISCLAIMER OF ALL WARRANTIES. + */ + +#ifndef TKMACOSXFONT_H +#define TKMACOSXFONT_H 1 + +#include "tkFont.h" + +#ifndef _TKMACINT +#include "tkMacOSXInt.h" +#endif + +/* + * Function prototypes + */ + +MODULE_SCOPE Tcl_Obj * TkMacOSXFontDescriptionForNSFontAndNSFontAttributes( + NSFont *nsFont, NSDictionary *nsAttributes); + +#endif /*TKMACOSXFONT_H*/ diff --git a/llava_next/include/tkMenubutton.h b/llava_next/include/tkMenubutton.h new file mode 100644 index 0000000000000000000000000000000000000000..a5a1d3ae171eff4748cef87f4206808ea9e199e8 --- /dev/null +++ b/llava_next/include/tkMenubutton.h @@ -0,0 +1,216 @@ +/* + * tkMenubutton.h -- + * + * Declarations of types and functions used to implement the menubutton + * widget. + * + * Copyright (c) 1996-1997 by Sun Microsystems, Inc. + * + * See the file "license.terms" for information on usage and redistribution of + * this file, and for a DISCLAIMER OF ALL WARRANTIES. 
+ */ + +#ifndef _TKMENUBUTTON +#define _TKMENUBUTTON + +#ifndef _TKINT +#include "tkInt.h" +#endif + +#ifndef _TKMENU +#include "tkMenu.h" +#endif + +/* + * Legal values for the "direction" field of TkMenubutton records. + */ + +enum direction { + DIRECTION_ABOVE, DIRECTION_BELOW, DIRECTION_FLUSH, + DIRECTION_LEFT, DIRECTION_RIGHT +}; + +/* + * Legal values for the "state" field of TkMenubutton records. + */ + +enum state { + STATE_ACTIVE, STATE_DISABLED, STATE_NORMAL +}; + +/* + * A data structure of the following type is kept for each widget managed by + * this file: + */ + +typedef struct { + Tk_Window tkwin; /* Window that embodies the widget. NULL means + * that the window has been destroyed but the + * data structures haven't yet been cleaned + * up. */ + Display *display; /* Display containing widget. Needed, among + * other things, so that resources can be + * freed up even after tkwin has gone away. */ + Tcl_Interp *interp; /* Interpreter associated with menubutton. */ + Tcl_Command widgetCmd; /* Token for menubutton's widget command. */ + Tk_OptionTable optionTable; /* Table that defines configuration options + * available for this widget. */ + char *menuName; /* Name of menu associated with widget. + * Malloc-ed. */ + + /* + * Information about what's displayed in the menu button: + */ + + char *text; /* Text to display in button (malloc'ed) or + * NULL. */ + int underline; /* Index of character to underline. */ + char *textVarName; /* Name of variable (malloc'ed) or NULL. If + * non-NULL, button displays the contents of + * this variable. */ + Pixmap bitmap; /* Bitmap to display or None. If not None then + * text and textVar and underline are + * ignored. */ + char *imageString; /* Name of image to display (malloc'ed), or + * NULL. If non-NULL, bitmap, text, and + * textVarName are ignored. */ + Tk_Image image; /* Image to display in window, or NULL if + * none. 
*/ + + /* + * Information used when displaying widget: + */ + + enum state state; /* State of button for display purposes: + * normal, active, or disabled. */ + Tk_3DBorder normalBorder; /* Structure used to draw 3-D border and + * background when window isn't active. NULL + * means no such border exists. */ + Tk_3DBorder activeBorder; /* Structure used to draw 3-D border and + * background when window is active. NULL + * means no such border exists. */ + int borderWidth; /* Width of border. */ + int relief; /* 3-d effect: TK_RELIEF_RAISED, etc. */ + int highlightWidth; /* Width in pixels of highlight to draw around + * widget when it has the focus. <= 0 means + * don't draw a highlight. */ + XColor *highlightBgColorPtr;/* Color for drawing traversal highlight area + * when highlight is off. */ + XColor *highlightColorPtr; /* Color for drawing traversal highlight. */ + int inset; /* Total width of all borders, including + * traversal highlight and 3-D border. + * Indicates how much interior stuff must be + * offset from outside edges to leave room for + * borders. */ + Tk_Font tkfont; /* Information about text font, or NULL. */ + XColor *normalFg; /* Foreground color in normal mode. */ + XColor *activeFg; /* Foreground color in active mode. NULL means + * use normalFg instead. */ + XColor *disabledFg; /* Foreground color when disabled. NULL means + * use normalFg with a 50% stipple instead. */ + GC normalTextGC; /* GC for drawing text in normal mode. */ + GC activeTextGC; /* GC for drawing text in active mode (NULL + * means use normalTextGC). */ + Pixmap gray; /* Pixmap for displaying disabled text/icon if + * disabledFg is NULL. */ + GC disabledGC; /* Used to produce disabled effect for + * text. */ + GC stippleGC; /* Used to produce disabled stipple effect for + * images when disabled. */ + int leftBearing; /* Distance from text origin to leftmost drawn + * pixel (positive means to right). */ + int rightBearing; /* Amount text sticks right from its + * origin. 
*/ + char *widthString; /* Value of -width option. Malloc'ed. */ + char *heightString; /* Value of -height option. Malloc'ed. */ + int width, height; /* If > 0, these specify dimensions to request + * for window, in characters for text and in + * pixels for bitmaps. In this case the actual + * size of the text string or bitmap is + * ignored in computing desired window + * size. */ + int wrapLength; /* Line length (in pixels) at which to wrap + * onto next line. <= 0 means don't wrap + * except at newlines. */ + int padX, padY; /* Extra space around text or bitmap (pixels + * on each side). */ + Tk_Anchor anchor; /* Where text/bitmap should be displayed + * inside window region. */ + Tk_Justify justify; /* Justification to use for multi-line + * text. */ + int textWidth; /* Width needed to display text as requested, + * in pixels. */ + int textHeight; /* Height needed to display text as requested, + * in pixels. */ + Tk_TextLayout textLayout; /* Saved text layout information. */ + int indicatorOn; /* Non-zero means display indicator; 0 means + * don't display. */ + int indicatorHeight; /* Height of indicator in pixels. This same + * amount of extra space is also left on each + * side of the indicator. 0 if no + * indicator. */ + int indicatorWidth; /* Width of indicator in pixels, including + * indicatorHeight in padding on each side. 0 + * if no indicator. */ + + /* + * Miscellaneous information: + */ + + int compound; /* Value of -compound option; specifies + * whether the menubutton should show both an + * image and text, and, if so, how. */ + enum direction direction; /* Direction for where to pop the menu. Valid + * directions are "above", "below", "left", + * "right", and "flush". "flush" means that + * the upper left corner of the menubutton is + * where the menu pops up. "above" and "below" + * will attempt to pop the menu completely + * above or below the menu respectively. 
+ * "left" and "right" will pop the menu left + * or right, and the active item will be next + * to the button. */ + Tk_Cursor cursor; /* Current cursor for window, or NULL. */ + char *takeFocus; /* Value of -takefocus option; not used in the + * C code, but used by keyboard traversal + * scripts. Malloc'ed, but may be NULL. */ + int flags; /* Various flags; see below for + * definitions. */ +} TkMenuButton; + +/* + * Flag bits for buttons: + * + * REDRAW_PENDING: Non-zero means a DoWhenIdle handler has + * already been queued to redraw this window. + * POSTED: Non-zero means that the menu associated with + * this button has been posted (typically because + * of an active button press). + * GOT_FOCUS: Non-zero means this button currently has the + * input focus. + */ + +#define REDRAW_PENDING 1 +#define POSTED 2 +#define GOT_FOCUS 4 + +/* + * The following constants define the dimensions of the cascade indicator, + * which is displayed if the "-indicatoron" option is true. The units for + * these options are 1/10 millimeters. + */ + +#define INDICATOR_WIDTH 40 +#define INDICATOR_HEIGHT 17 + +/* + * Declaration of procedures used in the implementation of the button widget. 
+ */ + +MODULE_SCOPE void TkpComputeMenuButtonGeometry(TkMenuButton *mbPtr); +MODULE_SCOPE TkMenuButton *TkpCreateMenuButton(Tk_Window tkwin); +MODULE_SCOPE void TkpDisplayMenuButton(ClientData clientData); +MODULE_SCOPE void TkpDestroyMenuButton(TkMenuButton *mbPtr); +MODULE_SCOPE void TkMenuButtonWorldChanged(ClientData instanceData); + +#endif /* _TKMENUBUTTON */ diff --git a/llava_next/include/tkUuid.h b/llava_next/include/tkUuid.h new file mode 100644 index 0000000000000000000000000000000000000000..0db00689e0df86a67bc5a9da0cb113374c9e5a82 --- /dev/null +++ b/llava_next/include/tkUuid.h @@ -0,0 +1,3 @@ +#define TK_VERSION_UUID \ +e987bb51b8fce99b545a408b5eb2cbcecedf6929ff1f7094e383666f02a5f556 + diff --git a/llava_next/share/info/history.info b/llava_next/share/info/history.info new file mode 100644 index 0000000000000000000000000000000000000000..a6799c38edeba8ffea535ff2e55f100c0d90fd70 --- /dev/null +++ b/llava_next/share/info/history.info @@ -0,0 +1,1426 @@ +This is history.info, produced by makeinfo version 6.8 from +history.texi. + +This document describes the GNU History library (version 8.2, 19 +September 2022), a programming tool that provides a consistent user +interface for recalling lines of previously typed input. + + Copyright (C) 1988-2022 Free Software Foundation, Inc. + + Permission is granted to copy, distribute and/or modify this + document under the terms of the GNU Free Documentation License, + Version 1.3 or any later version published by the Free Software + Foundation; with no Invariant Sections, no Front-Cover Texts, and + no Back-Cover Texts. A copy of the license is included in the + section entitled "GNU Free Documentation License". + +INFO-DIR-SECTION Libraries +START-INFO-DIR-ENTRY +* History: (history). The GNU history library API. 
+END-INFO-DIR-ENTRY + + +File: history.info, Node: Top, Next: Using History Interactively, Up: (dir) + +GNU History Library +******************* + +This document describes the GNU History library, a programming tool that +provides a consistent user interface for recalling lines of previously +typed input. + +* Menu: + +* Using History Interactively:: GNU History User's Manual. +* Programming with GNU History:: GNU History Programmer's Manual. +* GNU Free Documentation License:: License for copying this manual. +* Concept Index:: Index of concepts described in this manual. +* Function and Variable Index:: Index of externally visible functions + and variables. + + +File: history.info, Node: Using History Interactively, Next: Programming with GNU History, Prev: Top, Up: Top + +1 Using History Interactively +***************************** + +This chapter describes how to use the GNU History Library interactively, +from a user's standpoint. It should be considered a user's guide. For +information on using the GNU History Library in your own programs, *note +Programming with GNU History::. + +* Menu: + +* History Interaction:: What it feels like using History as a user. + + +File: history.info, Node: History Interaction, Up: Using History Interactively + +1.1 History Expansion +===================== + +The History library provides a history expansion feature that is similar +to the history expansion provided by 'csh'. This section describes the +syntax used to manipulate the history information. + + History expansions introduce words from the history list into the +input stream, making it easy to repeat commands, insert the arguments to +a previous command into the current input line, or fix errors in +previous commands quickly. + + History expansion takes place in two parts. The first is to +determine which line from the history list should be used during +substitution. The second is to select portions of that line for +inclusion into the current one. 
The line selected from the history is +called the "event", and the portions of that line that are acted upon +are called "words". Various "modifiers" are available to manipulate the +selected words. The line is broken into words in the same fashion that +Bash does, so that several words surrounded by quotes are considered one +word. History expansions are introduced by the appearance of the +history expansion character, which is '!' by default. + + History expansion implements shell-like quoting conventions: a +backslash can be used to remove the special handling for the next +character; single quotes enclose verbatim sequences of characters, and +can be used to inhibit history expansion; and characters enclosed within +double quotes may be subject to history expansion, since backslash can +escape the history expansion character, but single quotes may not, since +they are not treated specially within double quotes. + +* Menu: + +* Event Designators:: How to specify which history line to use. +* Word Designators:: Specifying which words are of interest. +* Modifiers:: Modifying the results of substitution. + + +File: history.info, Node: Event Designators, Next: Word Designators, Up: History Interaction + +1.1.1 Event Designators +----------------------- + +An event designator is a reference to a command line entry in the +history list. Unless the reference is absolute, events are relative to +the current position in the history list. + +'!' + Start a history substitution, except when followed by a space, tab, + the end of the line, or '='. + +'!N' + Refer to command line N. + +'!-N' + Refer to the command N lines back. + +'!!' + Refer to the previous command. This is a synonym for '!-1'. + +'!STRING' + Refer to the most recent command preceding the current position in + the history list starting with STRING. + +'!?STRING[?]' + Refer to the most recent command preceding the current position in + the history list containing STRING. The trailing '?' 
may be + omitted if the STRING is followed immediately by a newline. If + STRING is missing, the string from the most recent search is used; + it is an error if there is no previous search string. + +'^STRING1^STRING2^' + Quick Substitution. Repeat the last command, replacing STRING1 + with STRING2. Equivalent to '!!:s^STRING1^STRING2^'. + +'!#' + The entire command line typed so far. + + +File: history.info, Node: Word Designators, Next: Modifiers, Prev: Event Designators, Up: History Interaction + +1.1.2 Word Designators +---------------------- + +Word designators are used to select desired words from the event. A ':' +separates the event specification from the word designator. It may be +omitted if the word designator begins with a '^', '$', '*', '-', or '%'. +Words are numbered from the beginning of the line, with the first word +being denoted by 0 (zero). Words are inserted into the current line +separated by single spaces. + + For example, + +'!!' + designates the preceding command. When you type this, the + preceding command is repeated in toto. + +'!!:$' + designates the last argument of the preceding command. This may be + shortened to '!$'. + +'!fi:2' + designates the second argument of the most recent command starting + with the letters 'fi'. + + Here are the word designators: + +'0 (zero)' + The '0'th word. For many applications, this is the command word. + +'N' + The Nth word. + +'^' + The first argument; that is, word 1. + +'$' + The last argument. + +'%' + The first word matched by the most recent '?STRING?' search, if the + search string begins with a character that is part of a word. + +'X-Y' + A range of words; '-Y' abbreviates '0-Y'. + +'*' + All of the words, except the '0'th. This is a synonym for '1-$'. + It is not an error to use '*' if there is just one word in the + event; the empty string is returned in that case. + +'X*' + Abbreviates 'X-$' + +'X-' + Abbreviates 'X-$' like 'X*', but omits the last word. 
If 'x' is + missing, it defaults to 0. + + If a word designator is supplied without an event specification, the +previous command is used as the event. + + +File: history.info, Node: Modifiers, Prev: Word Designators, Up: History Interaction + +1.1.3 Modifiers +--------------- + +After the optional word designator, you can add a sequence of one or +more of the following modifiers, each preceded by a ':'. These modify, +or edit, the word or words selected from the history event. + +'h' + Remove a trailing pathname component, leaving only the head. + +'t' + Remove all leading pathname components, leaving the tail. + +'r' + Remove a trailing suffix of the form '.SUFFIX', leaving the + basename. + +'e' + Remove all but the trailing suffix. + +'p' + Print the new command but do not execute it. + +'s/OLD/NEW/' + Substitute NEW for the first occurrence of OLD in the event line. + Any character may be used as the delimiter in place of '/'. The + delimiter may be quoted in OLD and NEW with a single backslash. If + '&' appears in NEW, it is replaced by OLD. A single backslash will + quote the '&'. If OLD is null, it is set to the last OLD + substituted, or, if no previous history substitutions took place, + the last STRING in a !?STRING'[?]' search. If NEW is null, each + matching OLD is deleted. The final delimiter is optional if it is + the last character on the input line. + +'&' + Repeat the previous substitution. + +'g' +'a' + Cause changes to be applied over the entire event line. Used in + conjunction with 's', as in 'gs/OLD/NEW/', or with '&'. + +'G' + Apply the following 's' or '&' modifier once to each word in the + event. + + +File: history.info, Node: Programming with GNU History, Next: GNU Free Documentation License, Prev: Using History Interactively, Up: Top + +2 Programming with GNU History +****************************** + +This chapter describes how to interface programs that you write with the +GNU History Library. It should be considered a technical guide. 
For +information on the interactive use of GNU History, *note Using History +Interactively::. + +* Menu: + +* Introduction to History:: What is the GNU History library for? +* History Storage:: How information is stored. +* History Functions:: Functions that you can use. +* History Variables:: Variables that control behaviour. +* History Programming Example:: Example of using the GNU History Library. + + +File: history.info, Node: Introduction to History, Next: History Storage, Up: Programming with GNU History + +2.1 Introduction to History +=========================== + +Many programs read input from the user a line at a time. The GNU +History library is able to keep track of those lines, associate +arbitrary data with each line, and utilize information from previous +lines in composing new ones. + + A programmer using the History library has available functions for +remembering lines on a history list, associating arbitrary data with a +line, removing lines from the list, searching through the list for a +line containing an arbitrary text string, and referencing any line in +the list directly. In addition, a history "expansion" function is +available which provides for a consistent user interface across +different programs. + + The user using programs written with the History library has the +benefit of a consistent user interface with a set of well-known commands +for manipulating the text of previous lines and using that text in new +commands. The basic history manipulation commands are similar to the +history substitution provided by 'csh'. + + The programmer can also use the Readline library, which includes some +history manipulation by default, and has the added advantage of command +line editing. + + Before declaring any functions using any functionality the History +library provides in other code, an application writer should include the +file '' in any file that uses the History library's +features. 
It supplies extern declarations for all of the library's +public functions and variables, and declares all of the public data +structures. + + +File: history.info, Node: History Storage, Next: History Functions, Prev: Introduction to History, Up: Programming with GNU History + +2.2 History Storage +=================== + +The history list is an array of history entries. A history entry is +declared as follows: + + typedef void *histdata_t; + + typedef struct _hist_entry { + char *line; + char *timestamp; + histdata_t data; + } HIST_ENTRY; + + The history list itself might therefore be declared as + + HIST_ENTRY **the_history_list; + + The state of the History library is encapsulated into a single +structure: + + /* + * A structure used to pass around the current state of the history. + */ + typedef struct _hist_state { + HIST_ENTRY **entries; /* Pointer to the entries themselves. */ + int offset; /* The location pointer within this array. */ + int length; /* Number of elements within this array. */ + int size; /* Number of slots allocated to this array. */ + int flags; + } HISTORY_STATE; + + If the flags member includes 'HS_STIFLED', the history has been +stifled. + + +File: history.info, Node: History Functions, Next: History Variables, Prev: History Storage, Up: Programming with GNU History + +2.3 History Functions +===================== + +This section describes the calling sequence for the various functions +exported by the GNU History library. + +* Menu: + +* Initializing History and State Management:: Functions to call when you + want to use history in a + program. +* History List Management:: Functions used to manage the list + of history entries. +* Information About the History List:: Functions returning information about + the history list. +* Moving Around the History List:: Functions used to change the position + in the history list. +* Searching the History List:: Functions to search the history list + for entries containing a string. 
+* Managing the History File:: Functions that read and write a file + containing the history list. +* History Expansion:: Functions to perform csh-like history + expansion. + + +File: history.info, Node: Initializing History and State Management, Next: History List Management, Up: History Functions + +2.3.1 Initializing History and State Management +----------------------------------------------- + +This section describes functions used to initialize and manage the state +of the History library when you want to use the history functions in +your program. + + -- Function: void using_history (void) + Begin a session in which the history functions might be used. This + initializes the interactive variables. + + -- Function: HISTORY_STATE * history_get_history_state (void) + Return a structure describing the current state of the input + history. + + -- Function: void history_set_history_state (HISTORY_STATE *state) + Set the state of the history list according to STATE. + + +File: history.info, Node: History List Management, Next: Information About the History List, Prev: Initializing History and State Management, Up: History Functions + +2.3.2 History List Management +----------------------------- + +These functions manage individual entries on the history list, or set +parameters managing the list itself. + + -- Function: void add_history (const char *string) + Place STRING at the end of the history list. The associated data + field (if any) is set to 'NULL'. If the maximum number of history + entries has been set using 'stifle_history()', and the new number + of history entries would exceed that maximum, the oldest history + entry is removed. + + -- Function: void add_history_time (const char *string) + Change the time stamp associated with the most recent history entry + to STRING. + + -- Function: HIST_ENTRY * remove_history (int which) + Remove history entry at offset WHICH from the history. 
The removed + element is returned so you can free the line, data, and containing + structure. + + -- Function: histdata_t free_history_entry (HIST_ENTRY *histent) + Free the history entry HISTENT and any history library private data + associated with it. Returns the application-specific data so the + caller can dispose of it. + + -- Function: HIST_ENTRY * replace_history_entry (int which, const char + *line, histdata_t data) + Make the history entry at offset WHICH have LINE and DATA. This + returns the old entry so the caller can dispose of any + application-specific data. In the case of an invalid WHICH, a + 'NULL' pointer is returned. + + -- Function: void clear_history (void) + Clear the history list by deleting all the entries. + + -- Function: void stifle_history (int max) + Stifle the history list, remembering only the last MAX entries. + The history list will contain only MAX entries at a time. + + -- Function: int unstifle_history (void) + Stop stifling the history. This returns the previously-set maximum + number of history entries (as set by 'stifle_history()'). The + value is positive if the history was stifled, negative if it + wasn't. + + -- Function: int history_is_stifled (void) + Returns non-zero if the history is stifled, zero if it is not. + + +File: history.info, Node: Information About the History List, Next: Moving Around the History List, Prev: History List Management, Up: History Functions + +2.3.3 Information About the History List +---------------------------------------- + +These functions return information about the entire history list or +individual list entries. + + -- Function: HIST_ENTRY ** history_list (void) + Return a 'NULL' terminated array of 'HIST_ENTRY *' which is the + current input history. Element 0 of this list is the beginning of + time. If there is no history, return 'NULL'. + + -- Function: int where_history (void) + Returns the offset of the current history element. 
+ + -- Function: HIST_ENTRY * current_history (void) + Return the history entry at the current position, as determined by + 'where_history()'. If there is no entry there, return a 'NULL' + pointer. + + -- Function: HIST_ENTRY * history_get (int offset) + Return the history entry at position OFFSET. The range of valid + values of OFFSET starts at 'history_base' and ends at + HISTORY_LENGTH - 1 (*note History Variables::). If there is no + entry there, or if OFFSET is outside the valid range, return a + 'NULL' pointer. + + -- Function: time_t history_get_time (HIST_ENTRY *entry) + Return the time stamp associated with the history entry ENTRY. If + the timestamp is missing or invalid, return 0. + + -- Function: int history_total_bytes (void) + Return the number of bytes that the primary history entries are + using. This function returns the sum of the lengths of all the + lines in the history. + + +File: history.info, Node: Moving Around the History List, Next: Searching the History List, Prev: Information About the History List, Up: History Functions + +2.3.4 Moving Around the History List +------------------------------------ + +These functions allow the current index into the history list to be set +or changed. + + -- Function: int history_set_pos (int pos) + Set the current history offset to POS, an absolute index into the + list. Returns 1 on success, 0 if POS is less than zero or greater + than the number of history entries. + + -- Function: HIST_ENTRY * previous_history (void) + Back up the current history offset to the previous history entry, + and return a pointer to that entry. If there is no previous entry, + return a 'NULL' pointer. + + -- Function: HIST_ENTRY * next_history (void) + If the current history offset refers to a valid history entry, + increment the current history offset. If the possibly-incremented + history offset refers to a valid history entry, return a pointer to + that entry; otherwise, return a 'NULL' pointer. 
+ + +File: history.info, Node: Searching the History List, Next: Managing the History File, Prev: Moving Around the History List, Up: History Functions + +2.3.5 Searching the History List +-------------------------------- + +These functions allow searching of the history list for entries +containing a specific string. Searching may be performed both forward +and backward from the current history position. The search may be +"anchored", meaning that the string must match at the beginning of the +history entry. + + -- Function: int history_search (const char *string, int direction) + Search the history for STRING, starting at the current history + offset. If DIRECTION is less than 0, then the search is through + previous entries, otherwise through subsequent entries. If STRING + is found, then the current history index is set to that history + entry, and the value returned is the offset in the line of the + entry where STRING was found. Otherwise, nothing is changed, and a + -1 is returned. + + -- Function: int history_search_prefix (const char *string, int + direction) + Search the history for STRING, starting at the current history + offset. The search is anchored: matching lines must begin with + STRING. If DIRECTION is less than 0, then the search is through + previous entries, otherwise through subsequent entries. If STRING + is found, then the current history index is set to that entry, and + the return value is 0. Otherwise, nothing is changed, and a -1 is + returned. + + -- Function: int history_search_pos (const char *string, int direction, + int pos) + Search for STRING in the history list, starting at POS, an absolute + index into the list. If DIRECTION is negative, the search proceeds + backward from POS, otherwise forward. Returns the absolute index + of the history element where STRING was found, or -1 otherwise. 
+ + +File: history.info, Node: Managing the History File, Next: History Expansion, Prev: Searching the History List, Up: History Functions + +2.3.6 Managing the History File +------------------------------- + +The History library can read the history from and write it to a file. +This section documents the functions for managing a history file. + + -- Function: int read_history (const char *filename) + Add the contents of FILENAME to the history list, a line at a time. + If FILENAME is 'NULL', then read from '~/.history'. Returns 0 if + successful, or 'errno' if not. + + -- Function: int read_history_range (const char *filename, int from, + int to) + Read a range of lines from FILENAME, adding them to the history + list. Start reading at line FROM and end at TO. If FROM is zero, + start at the beginning. If TO is less than FROM, then read until + the end of the file. If FILENAME is 'NULL', then read from + '~/.history'. Returns 0 if successful, or 'errno' if not. + + -- Function: int write_history (const char *filename) + Write the current history to FILENAME, overwriting FILENAME if + necessary. If FILENAME is 'NULL', then write the history list to + '~/.history'. Returns 0 on success, or 'errno' on a read or write + error. + + -- Function: int append_history (int nelements, const char *filename) + Append the last NELEMENTS of the history list to FILENAME. If + FILENAME is 'NULL', then append to '~/.history'. Returns 0 on + success, or 'errno' on a read or write error. + + -- Function: int history_truncate_file (const char *filename, int + nlines) + Truncate the history file FILENAME, leaving only the last NLINES + lines. If FILENAME is 'NULL', then '~/.history' is truncated. + Returns 0 on success, or 'errno' on failure. + + +File: history.info, Node: History Expansion, Prev: Managing the History File, Up: History Functions + +2.3.7 History Expansion +----------------------- + +These functions implement history expansion. 
+ + -- Function: int history_expand (char *string, char **output) + Expand STRING, placing the result into OUTPUT, a pointer to a + string (*note History Interaction::). Returns: + '0' + If no expansions took place (or, if the only change in the + text was the removal of escape characters preceding the + history expansion character); + '1' + if expansions did take place; + '-1' + if there was an error in expansion; + '2' + if the returned line should be displayed, but not executed, as + with the ':p' modifier (*note Modifiers::). + + If an error occurred in expansion, then OUTPUT contains a + descriptive error message. + + -- Function: char * get_history_event (const char *string, int *cindex, + int qchar) + Returns the text of the history event beginning at STRING + + *CINDEX. *CINDEX is modified to point to after the event + specifier. At function entry, CINDEX points to the index into + STRING where the history event specification begins. QCHAR is a + character that is allowed to end the event specification in + addition to the "normal" terminating characters. + + -- Function: char ** history_tokenize (const char *string) + Return an array of tokens parsed out of STRING, much as the shell + might. The tokens are split on the characters in the + HISTORY_WORD_DELIMITERS variable, and shell quoting conventions are + obeyed as described below. + + -- Function: char * history_arg_extract (int first, int last, const + char *string) + Extract a string segment consisting of the FIRST through LAST + arguments present in STRING. Arguments are split using + 'history_tokenize'. + + +File: history.info, Node: History Variables, Next: History Programming Example, Prev: History Functions, Up: Programming with GNU History + +2.4 History Variables +===================== + +This section describes the externally-visible variables exported by the +GNU History Library. + + -- Variable: int history_base + The logical offset of the first entry in the history list. 
+ + -- Variable: int history_length + The number of entries currently stored in the history list. + + -- Variable: int history_max_entries + The maximum number of history entries. This must be changed using + 'stifle_history()'. + + -- Variable: int history_write_timestamps + If non-zero, timestamps are written to the history file, so they + can be preserved between sessions. The default value is 0, meaning + that timestamps are not saved. + + The current timestamp format uses the value of HISTORY_COMMENT_CHAR + to delimit timestamp entries in the history file. If that variable + does not have a value (the default), timestamps will not be + written. + + -- Variable: char history_expansion_char + The character that introduces a history event. The default is '!'. + Setting this to 0 inhibits history expansion. + + -- Variable: char history_subst_char + The character that invokes word substitution if found at the start + of a line. The default is '^'. + + -- Variable: char history_comment_char + During tokenization, if this character is seen as the first + character of a word, then it and all subsequent characters up to a + newline are ignored, suppressing history expansion for the + remainder of the line. This is disabled by default. + + -- Variable: char * history_word_delimiters + The characters that separate tokens for 'history_tokenize()'. The + default value is '" \t\n()<>;&|"'. + + -- Variable: char * history_search_delimiter_chars + The list of additional characters which can delimit a history + search string, in addition to space, TAB, ':' and '?' in the case + of a substring search. The default is empty. + + -- Variable: char * history_no_expand_chars + The list of characters which inhibit history expansion if found + immediately following HISTORY_EXPANSION_CHAR. The default is + space, tab, newline, carriage return, and '='. 
+ + -- Variable: int history_quotes_inhibit_expansion + If non-zero, the history expansion code implements shell-like + quoting: single-quoted words are not scanned for the history + expansion character or the history comment character, and + double-quoted words may have history expansion performed, since + single quotes are not special within double quotes. The default + value is 0. + + -- Variable: int history_quoting_state + An application may set this variable to indicate that the current + line being expanded is subject to existing quoting. If set to ''', + the history expansion function will assume that the line is + single-quoted and inhibit expansion until it reads an unquoted + closing single quote; if set to '"', history expansion will assume + the line is double quoted until it reads an unquoted closing double + quote. If set to zero, the default, the history expansion function + will assume the line is not quoted and treat quote characters + within the line as described above. This is only effective if + HISTORY_QUOTES_INHIBIT_EXPANSION is set. + + -- Variable: rl_linebuf_func_t * history_inhibit_expansion_function + This should be set to the address of a function that takes two + arguments: a 'char *' (STRING) and an 'int' index into that string + (I). It should return a non-zero value if the history expansion + starting at STRING[I] should not be performed; zero if the + expansion should be done. It is intended for use by applications + like Bash that use the history expansion character for additional + purposes. By default, this variable is set to 'NULL'. + + +File: history.info, Node: History Programming Example, Prev: History Variables, Up: Programming with GNU History + +2.5 History Programming Example +=============================== + +The following program demonstrates simple use of the GNU History +Library. 
+
+     #include <stdio.h>
+     #include <readline/history.h>
+
+     main (argc, argv)
+          int argc;
+          char **argv;
+     {
+       char line[1024], *t;
+       int len, done = 0;
+
+       line[0] = 0;
+
+       using_history ();
+       while (!done)
+         {
+           printf ("history$ ");
+           fflush (stdout);
+           t = fgets (line, sizeof (line) - 1, stdin);
+           if (t && *t)
+             {
+               len = strlen (t);
+               if (t[len - 1] == '\n')
+                 t[len - 1] = '\0';
+             }
+
+           if (!t)
+             strcpy (line, "quit");
+
+           if (line[0])
+             {
+               char *expansion;
+               int result;
+
+               result = history_expand (line, &expansion);
+               if (result)
+                 fprintf (stderr, "%s\n", expansion);
+
+               if (result < 0 || result == 2)
+                 {
+                   free (expansion);
+                   continue;
+                 }
+
+               add_history (expansion);
+               strncpy (line, expansion, sizeof (line) - 1);
+               free (expansion);
+             }
+
+           if (strcmp (line, "quit") == 0)
+             done = 1;
+           else if (strcmp (line, "save") == 0)
+             write_history ("history_file");
+           else if (strcmp (line, "read") == 0)
+             read_history ("history_file");
+           else if (strcmp (line, "list") == 0)
+             {
+               register HIST_ENTRY **the_list;
+               register int i;
+
+               the_list = history_list ();
+               if (the_list)
+                 for (i = 0; the_list[i]; i++)
+                   printf ("%d: %s\n", i + history_base, the_list[i]->line);
+             }
+           else if (strncmp (line, "delete", 6) == 0)
+             {
+               int which;
+               if ((sscanf (line + 6, "%d", &which)) == 1)
+                 {
+                   HIST_ENTRY *entry = remove_history (which);
+                   if (!entry)
+                     fprintf (stderr, "No such entry %d\n", which);
+                   else
+                     {
+                       free (entry->line);
+                       free (entry);
+                     }
+                 }
+               else
+                 {
+                   fprintf (stderr, "non-numeric arg given to `delete'\n");
+                 }
+             }
+         }
+     }
+
+
+File: history.info,  Node: GNU Free Documentation License,  Next: Concept Index,  Prev: Programming with GNU History,  Up: Top
+
+Appendix A GNU Free Documentation License
+*****************************************
+
+                     Version 1.3, 3 November 2008
+
+     Copyright (C) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc.
+     <http://fsf.org/>
+
+     Everyone is permitted to copy and distribute verbatim copies
+     of this license document, but changing it is not allowed.
+
+  0. 
PREAMBLE + + The purpose of this License is to make a manual, textbook, or other + functional and useful document "free" in the sense of freedom: to + assure everyone the effective freedom to copy and redistribute it, + with or without modifying it, either commercially or + noncommercially. Secondarily, this License preserves for the + author and publisher a way to get credit for their work, while not + being considered responsible for modifications made by others. + + This License is a kind of "copyleft", which means that derivative + works of the document must themselves be free in the same sense. + It complements the GNU General Public License, which is a copyleft + license designed for free software. + + We have designed this License in order to use it for manuals for + free software, because free software needs free documentation: a + free program should come with manuals providing the same freedoms + that the software does. But this License is not limited to + software manuals; it can be used for any textual work, regardless + of subject matter or whether it is published as a printed book. We + recommend this License principally for works whose purpose is + instruction or reference. + + 1. APPLICABILITY AND DEFINITIONS + + This License applies to any manual or other work, in any medium, + that contains a notice placed by the copyright holder saying it can + be distributed under the terms of this License. Such a notice + grants a world-wide, royalty-free license, unlimited in duration, + to use that work under the conditions stated herein. The + "Document", below, refers to any such manual or work. Any member + of the public is a licensee, and is addressed as "you". You accept + the license if you copy, modify or distribute the work in a way + requiring permission under copyright law. 
+ + A "Modified Version" of the Document means any work containing the + Document or a portion of it, either copied verbatim, or with + modifications and/or translated into another language. + + A "Secondary Section" is a named appendix or a front-matter section + of the Document that deals exclusively with the relationship of the + publishers or authors of the Document to the Document's overall + subject (or to related matters) and contains nothing that could + fall directly within that overall subject. (Thus, if the Document + is in part a textbook of mathematics, a Secondary Section may not + explain any mathematics.) The relationship could be a matter of + historical connection with the subject or with related matters, or + of legal, commercial, philosophical, ethical or political position + regarding them. + + The "Invariant Sections" are certain Secondary Sections whose + titles are designated, as being those of Invariant Sections, in the + notice that says that the Document is released under this License. + If a section does not fit the above definition of Secondary then it + is not allowed to be designated as Invariant. The Document may + contain zero Invariant Sections. If the Document does not identify + any Invariant Sections then there are none. + + The "Cover Texts" are certain short passages of text that are + listed, as Front-Cover Texts or Back-Cover Texts, in the notice + that says that the Document is released under this License. A + Front-Cover Text may be at most 5 words, and a Back-Cover Text may + be at most 25 words. 
+ + A "Transparent" copy of the Document means a machine-readable copy, + represented in a format whose specification is available to the + general public, that is suitable for revising the document + straightforwardly with generic text editors or (for images composed + of pixels) generic paint programs or (for drawings) some widely + available drawing editor, and that is suitable for input to text + formatters or for automatic translation to a variety of formats + suitable for input to text formatters. A copy made in an otherwise + Transparent file format whose markup, or absence of markup, has + been arranged to thwart or discourage subsequent modification by + readers is not Transparent. An image format is not Transparent if + used for any substantial amount of text. A copy that is not + "Transparent" is called "Opaque". + + Examples of suitable formats for Transparent copies include plain + ASCII without markup, Texinfo input format, LaTeX input format, + SGML or XML using a publicly available DTD, and standard-conforming + simple HTML, PostScript or PDF designed for human modification. + Examples of transparent image formats include PNG, XCF and JPG. + Opaque formats include proprietary formats that can be read and + edited only by proprietary word processors, SGML or XML for which + the DTD and/or processing tools are not generally available, and + the machine-generated HTML, PostScript or PDF produced by some word + processors for output purposes only. + + The "Title Page" means, for a printed book, the title page itself, + plus such following pages as are needed to hold, legibly, the + material this License requires to appear in the title page. For + works in formats which do not have any title page as such, "Title + Page" means the text near the most prominent appearance of the + work's title, preceding the beginning of the body of the text. + + The "publisher" means any person or entity that distributes copies + of the Document to the public. 
+ + A section "Entitled XYZ" means a named subunit of the Document + whose title either is precisely XYZ or contains XYZ in parentheses + following text that translates XYZ in another language. (Here XYZ + stands for a specific section name mentioned below, such as + "Acknowledgements", "Dedications", "Endorsements", or "History".) + To "Preserve the Title" of such a section when you modify the + Document means that it remains a section "Entitled XYZ" according + to this definition. + + The Document may include Warranty Disclaimers next to the notice + which states that this License applies to the Document. These + Warranty Disclaimers are considered to be included by reference in + this License, but only as regards disclaiming warranties: any other + implication that these Warranty Disclaimers may have is void and + has no effect on the meaning of this License. + + 2. VERBATIM COPYING + + You may copy and distribute the Document in any medium, either + commercially or noncommercially, provided that this License, the + copyright notices, and the license notice saying this License + applies to the Document are reproduced in all copies, and that you + add no other conditions whatsoever to those of this License. You + may not use technical measures to obstruct or control the reading + or further copying of the copies you make or distribute. However, + you may accept compensation in exchange for copies. If you + distribute a large enough number of copies you must also follow the + conditions in section 3. + + You may also lend copies, under the same conditions stated above, + and you may publicly display copies. + + 3. 
COPYING IN QUANTITY + + If you publish printed copies (or copies in media that commonly + have printed covers) of the Document, numbering more than 100, and + the Document's license notice requires Cover Texts, you must + enclose the copies in covers that carry, clearly and legibly, all + these Cover Texts: Front-Cover Texts on the front cover, and + Back-Cover Texts on the back cover. Both covers must also clearly + and legibly identify you as the publisher of these copies. The + front cover must present the full title with all words of the title + equally prominent and visible. You may add other material on the + covers in addition. Copying with changes limited to the covers, as + long as they preserve the title of the Document and satisfy these + conditions, can be treated as verbatim copying in other respects. + + If the required texts for either cover are too voluminous to fit + legibly, you should put the first ones listed (as many as fit + reasonably) on the actual cover, and continue the rest onto + adjacent pages. + + If you publish or distribute Opaque copies of the Document + numbering more than 100, you must either include a machine-readable + Transparent copy along with each Opaque copy, or state in or with + each Opaque copy a computer-network location from which the general + network-using public has access to download using public-standard + network protocols a complete Transparent copy of the Document, free + of added material. If you use the latter option, you must take + reasonably prudent steps, when you begin distribution of Opaque + copies in quantity, to ensure that this Transparent copy will + remain thus accessible at the stated location until at least one + year after the last time you distribute an Opaque copy (directly or + through your agents or retailers) of that edition to the public. 
+ + It is requested, but not required, that you contact the authors of + the Document well before redistributing any large number of copies, + to give them a chance to provide you with an updated version of the + Document. + + 4. MODIFICATIONS + + You may copy and distribute a Modified Version of the Document + under the conditions of sections 2 and 3 above, provided that you + release the Modified Version under precisely this License, with the + Modified Version filling the role of the Document, thus licensing + distribution and modification of the Modified Version to whoever + possesses a copy of it. In addition, you must do these things in + the Modified Version: + + A. Use in the Title Page (and on the covers, if any) a title + distinct from that of the Document, and from those of previous + versions (which should, if there were any, be listed in the + History section of the Document). You may use the same title + as a previous version if the original publisher of that + version gives permission. + + B. List on the Title Page, as authors, one or more persons or + entities responsible for authorship of the modifications in + the Modified Version, together with at least five of the + principal authors of the Document (all of its principal + authors, if it has fewer than five), unless they release you + from this requirement. + + C. State on the Title page the name of the publisher of the + Modified Version, as the publisher. + + D. Preserve all the copyright notices of the Document. + + E. Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + + F. Include, immediately after the copyright notices, a license + notice giving the public permission to use the Modified + Version under the terms of this License, in the form shown in + the Addendum below. + + G. Preserve in that license notice the full lists of Invariant + Sections and required Cover Texts given in the Document's + license notice. + + H. 
Include an unaltered copy of this License. + + I. Preserve the section Entitled "History", Preserve its Title, + and add to it an item stating at least the title, year, new + authors, and publisher of the Modified Version as given on the + Title Page. If there is no section Entitled "History" in the + Document, create one stating the title, year, authors, and + publisher of the Document as given on its Title Page, then add + an item describing the Modified Version as stated in the + previous sentence. + + J. Preserve the network location, if any, given in the Document + for public access to a Transparent copy of the Document, and + likewise the network locations given in the Document for + previous versions it was based on. These may be placed in the + "History" section. You may omit a network location for a work + that was published at least four years before the Document + itself, or if the original publisher of the version it refers + to gives permission. + + K. For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section + all the substance and tone of each of the contributor + acknowledgements and/or dedications given therein. + + L. Preserve all the Invariant Sections of the Document, unaltered + in their text and in their titles. Section numbers or the + equivalent are not considered part of the section titles. + + M. Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + + N. Do not retitle any existing section to be Entitled + "Endorsements" or to conflict in title with any Invariant + Section. + + O. Preserve any Warranty Disclaimers. + + If the Modified Version includes new front-matter sections or + appendices that qualify as Secondary Sections and contain no + material copied from the Document, you may at your option designate + some or all of these sections as invariant. 
To do this, add their + titles to the list of Invariant Sections in the Modified Version's + license notice. These titles must be distinct from any other + section titles. + + You may add a section Entitled "Endorsements", provided it contains + nothing but endorsements of your Modified Version by various + parties--for example, statements of peer review or that the text + has been approved by an organization as the authoritative + definition of a standard. + + You may add a passage of up to five words as a Front-Cover Text, + and a passage of up to 25 words as a Back-Cover Text, to the end of + the list of Cover Texts in the Modified Version. Only one passage + of Front-Cover Text and one of Back-Cover Text may be added by (or + through arrangements made by) any one entity. If the Document + already includes a cover text for the same cover, previously added + by you or by arrangement made by the same entity you are acting on + behalf of, you may not add another; but you may replace the old + one, on explicit permission from the previous publisher that added + the old one. + + The author(s) and publisher(s) of the Document do not by this + License give permission to use their names for publicity for or to + assert or imply endorsement of any Modified Version. + + 5. COMBINING DOCUMENTS + + You may combine the Document with other documents released under + this License, under the terms defined in section 4 above for + modified versions, provided that you include in the combination all + of the Invariant Sections of all of the original documents, + unmodified, and list them all as Invariant Sections of your + combined work in its license notice, and that you preserve all + their Warranty Disclaimers. + + The combined work need only contain one copy of this License, and + multiple identical Invariant Sections may be replaced with a single + copy. 
If there are multiple Invariant Sections with the same name + but different contents, make the title of each such section unique + by adding at the end of it, in parentheses, the name of the + original author or publisher of that section if known, or else a + unique number. Make the same adjustment to the section titles in + the list of Invariant Sections in the license notice of the + combined work. + + In the combination, you must combine any sections Entitled + "History" in the various original documents, forming one section + Entitled "History"; likewise combine any sections Entitled + "Acknowledgements", and any sections Entitled "Dedications". You + must delete all sections Entitled "Endorsements." + + 6. COLLECTIONS OF DOCUMENTS + + You may make a collection consisting of the Document and other + documents released under this License, and replace the individual + copies of this License in the various documents with a single copy + that is included in the collection, provided that you follow the + rules of this License for verbatim copying of each of the documents + in all other respects. + + You may extract a single document from such a collection, and + distribute it individually under this License, provided you insert + a copy of this License into the extracted document, and follow this + License in all other respects regarding verbatim copying of that + document. + + 7. AGGREGATION WITH INDEPENDENT WORKS + + A compilation of the Document or its derivatives with other + separate and independent documents or works, in or on a volume of a + storage or distribution medium, is called an "aggregate" if the + copyright resulting from the compilation is not used to limit the + legal rights of the compilation's users beyond what the individual + works permit. When the Document is included in an aggregate, this + License does not apply to the other works in the aggregate which + are not themselves derivative works of the Document. 
+ + If the Cover Text requirement of section 3 is applicable to these + copies of the Document, then if the Document is less than one half + of the entire aggregate, the Document's Cover Texts may be placed + on covers that bracket the Document within the aggregate, or the + electronic equivalent of covers if the Document is in electronic + form. Otherwise they must appear on printed covers that bracket + the whole aggregate. + + 8. TRANSLATION + + Translation is considered a kind of modification, so you may + distribute translations of the Document under the terms of section + 4. Replacing Invariant Sections with translations requires special + permission from their copyright holders, but you may include + translations of some or all Invariant Sections in addition to the + original versions of these Invariant Sections. You may include a + translation of this License, and all the license notices in the + Document, and any Warranty Disclaimers, provided that you also + include the original English version of this License and the + original versions of those notices and disclaimers. In case of a + disagreement between the translation and the original version of + this License or a notice or disclaimer, the original version will + prevail. + + If a section in the Document is Entitled "Acknowledgements", + "Dedications", or "History", the requirement (section 4) to + Preserve its Title (section 1) will typically require changing the + actual title. + + 9. TERMINATION + + You may not copy, modify, sublicense, or distribute the Document + except as expressly provided under this License. Any attempt + otherwise to copy, modify, sublicense, or distribute it is void, + and will automatically terminate your rights under this License. 
+
+     However, if you cease all violation of this License, then your
+     license from a particular copyright holder is reinstated (a)
+     provisionally, unless and until the copyright holder explicitly and
+     finally terminates your license, and (b) permanently, if the
+     copyright holder fails to notify you of the violation by some
+     reasonable means prior to 60 days after the cessation.
+
+     Moreover, your license from a particular copyright holder is
+     reinstated permanently if the copyright holder notifies you of the
+     violation by some reasonable means, this is the first time you have
+     received notice of violation of this License (for any work) from
+     that copyright holder, and you cure the violation prior to 30 days
+     after your receipt of the notice.
+
+     Termination of your rights under this section does not terminate
+     the licenses of parties who have received copies or rights from you
+     under this License.  If your rights have been terminated and not
+     permanently reinstated, receipt of a copy of some or all of the
+     same material does not give you any rights to use it.
+
+ 10. FUTURE REVISIONS OF THIS LICENSE
+
+     The Free Software Foundation may publish new, revised versions of
+     the GNU Free Documentation License from time to time.  Such new
+     versions will be similar in spirit to the present version, but may
+     differ in detail to address new problems or concerns.  See
+     <http://www.gnu.org/licenses/>.
+
+     Each version of the License is given a distinguishing version
+     number.  If the Document specifies that a particular numbered
+     version of this License "or any later version" applies to it, you
+     have the option of following the terms and conditions either of
+     that specified version or of any later version that has been
+     published (not as a draft) by the Free Software Foundation.  If the
+     Document does not specify a version number of this License, you may
+     choose any version ever published (not as a draft) by the Free
+     Software Foundation. 
If the Document specifies that a proxy can + decide which future versions of this License can be used, that + proxy's public statement of acceptance of a version permanently + authorizes you to choose that version for the Document. + + 11. RELICENSING + + "Massive Multiauthor Collaboration Site" (or "MMC Site") means any + World Wide Web server that publishes copyrightable works and also + provides prominent facilities for anybody to edit those works. A + public wiki that anybody can edit is an example of such a server. + A "Massive Multiauthor Collaboration" (or "MMC") contained in the + site means any set of copyrightable works thus published on the MMC + site. + + "CC-BY-SA" means the Creative Commons Attribution-Share Alike 3.0 + license published by Creative Commons Corporation, a not-for-profit + corporation with a principal place of business in San Francisco, + California, as well as future copyleft versions of that license + published by that same organization. + + "Incorporate" means to publish or republish a Document, in whole or + in part, as part of another Document. + + An MMC is "eligible for relicensing" if it is licensed under this + License, and if all works that were first published under this + License somewhere other than this MMC, and subsequently + incorporated in whole or in part into the MMC, (1) had no cover + texts or invariant sections, and (2) were thus incorporated prior + to November 1, 2008. + + The operator of an MMC Site may republish an MMC contained in the + site under CC-BY-SA on the same site at any time before August 1, + 2009, provided the MMC is eligible for relicensing. + +ADDENDUM: How to use this License for your documents +==================================================== + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and license +notices just after the title page: + + Copyright (C) YEAR YOUR NAME. 
+ Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.3 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover + Texts. A copy of the license is included in the section entitled ``GNU + Free Documentation License''. + + If you have Invariant Sections, Front-Cover Texts and Back-Cover +Texts, replace the "with...Texts." line with this: + + with the Invariant Sections being LIST THEIR TITLES, with + the Front-Cover Texts being LIST, and with the Back-Cover Texts + being LIST. + + If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + + If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of free +software license, such as the GNU General Public License, to permit +their use in free software. + + +File: history.info, Node: Concept Index, Next: Function and Variable Index, Prev: GNU Free Documentation License, Up: Top + +Appendix B Concept Index +************************ + +[index] +* Menu: + +* anchored search: Searching the History List. + (line 10) +* event designators: Event Designators. (line 6) +* history events: Event Designators. (line 8) +* history expansion: History Interaction. (line 6) +* History Searching: Searching the History List. + (line 6) + + +File: history.info, Node: Function and Variable Index, Prev: Concept Index, Up: Top + +Appendix C Function and Variable Index +************************************** + +[index] +* Menu: + +* add_history: History List Management. + (line 9) +* add_history_time: History List Management. + (line 16) +* append_history: Managing the History File. + (line 28) +* clear_history: History List Management. + (line 37) +* current_history: Information About the History List. 
+ (line 17) +* free_history_entry: History List Management. + (line 25) +* get_history_event: History Expansion. (line 26) +* history_arg_extract: History Expansion. (line 41) +* history_base: History Variables. (line 9) +* history_comment_char: History Variables. (line 37) +* history_expand: History Expansion. (line 8) +* history_expansion_char: History Variables. (line 29) +* history_get: Information About the History List. + (line 22) +* history_get_history_state: Initializing History and State Management. + (line 14) +* history_get_time: Information About the History List. + (line 29) +* history_inhibit_expansion_function: History Variables. (line 77) +* history_is_stifled: History List Management. + (line 50) +* history_length: History Variables. (line 12) +* history_list: Information About the History List. + (line 9) +* history_max_entries: History Variables. (line 15) +* history_no_expand_chars: History Variables. (line 52) +* history_quotes_inhibit_expansion: History Variables. (line 57) +* history_quoting_state: History Variables. (line 65) +* history_search: Searching the History List. + (line 12) +* history_search_delimiter_chars: History Variables. (line 47) +* history_search_pos: Searching the History List. + (line 31) +* history_search_prefix: Searching the History List. + (line 21) +* history_set_history_state: Initializing History and State Management. + (line 18) +* history_set_pos: Moving Around the History List. + (line 9) +* history_subst_char: History Variables. (line 33) +* history_tokenize: History Expansion. (line 35) +* history_total_bytes: Information About the History List. + (line 33) +* history_truncate_file: Managing the History File. + (line 33) +* history_word_delimiters: History Variables. (line 43) +* history_write_timestamps: History Variables. (line 19) +* next_history: Moving Around the History List. + (line 19) +* previous_history: Moving Around the History List. + (line 14) +* read_history: Managing the History File. 
+ (line 9) +* read_history_range: Managing the History File. + (line 14) +* remove_history: History List Management. + (line 20) +* replace_history_entry: History List Management. + (line 30) +* stifle_history: History List Management. + (line 40) +* unstifle_history: History List Management. + (line 44) +* using_history: Initializing History and State Management. + (line 10) +* where_history: Information About the History List. + (line 14) +* write_history: Managing the History File. + (line 22) + + + +Tag Table: +Node: Top850 +Node: Using History Interactively1495 +Node: History Interaction2003 +Node: Event Designators3901 +Node: Word Designators5175 +Node: Modifiers6935 +Node: Programming with GNU History8477 +Node: Introduction to History9221 +Node: History Storage10899 +Node: History Functions12034 +Node: Initializing History and State Management13023 +Node: History List Management13835 +Node: Information About the History List16129 +Node: Moving Around the History List17743 +Node: Searching the History List18836 +Node: Managing the History File20761 +Node: History Expansion22581 +Node: History Variables24510 +Node: History Programming Example28490 +Node: GNU Free Documentation License31167 +Node: Concept Index56339 +Node: Function and Variable Index57044 + +End Tag Table + + +Local Variables: +coding: utf-8 +End: diff --git a/llava_next/share/info/libgomp.info b/llava_next/share/info/libgomp.info new file mode 100644 index 0000000000000000000000000000000000000000..661b7438af879af3619034ce01ae6b17bc9eeda3 --- /dev/null +++ b/llava_next/share/info/libgomp.info @@ -0,0 +1,5266 @@ +This is libgomp.info, produced by makeinfo version 6.8 from +libgomp.texi. + +Copyright (C) 2006-2021 Free Software Foundation, Inc. 
+ + Permission is granted to copy, distribute and/or modify this document +under the terms of the GNU Free Documentation License, Version 1.3 or +any later version published by the Free Software Foundation; with the +Invariant Sections being "Funding Free Software", the Front-Cover texts +being (a) (see below), and with the Back-Cover Texts being (b) (see +below). A copy of the license is included in the section entitled "GNU +Free Documentation License". + + (a) The FSF's Front-Cover Text is: + + A GNU Manual + + (b) The FSF's Back-Cover Text is: + + You have freedom to copy and modify this GNU Manual, like GNU +software. Copies published by the Free Software Foundation raise funds +for GNU development. +INFO-DIR-SECTION GNU Libraries +START-INFO-DIR-ENTRY +* libgomp: (libgomp). GNU Offloading and Multi Processing Runtime Library. +END-INFO-DIR-ENTRY + + This manual documents libgomp, the GNU Offloading and Multi +Processing Runtime library. This is the GNU implementation of the +OpenMP and OpenACC APIs for parallel and accelerator programming in +C/C++ and Fortran. + + Published by the Free Software Foundation 51 Franklin Street, Fifth +Floor Boston, MA 02110-1301 USA + + Copyright (C) 2006-2021 Free Software Foundation, Inc. + + Permission is granted to copy, distribute and/or modify this document +under the terms of the GNU Free Documentation License, Version 1.3 or +any later version published by the Free Software Foundation; with the +Invariant Sections being "Funding Free Software", the Front-Cover texts +being (a) (see below), and with the Back-Cover Texts being (b) (see +below). A copy of the license is included in the section entitled "GNU +Free Documentation License". + + (a) The FSF's Front-Cover Text is: + + A GNU Manual + + (b) The FSF's Back-Cover Text is: + + You have freedom to copy and modify this GNU Manual, like GNU +software. Copies published by the Free Software Foundation raise funds +for GNU development. 
+ + +File: libgomp.info, Node: Top, Next: Enabling OpenMP, Up: (dir) + +Introduction +************ + +This manual documents the usage of libgomp, the GNU Offloading and Multi +Processing Runtime Library. This includes the GNU implementation of the +OpenMP (https://www.openmp.org) Application Programming Interface (API) +for multi-platform shared-memory parallel programming in C/C++ and +Fortran, and the GNU implementation of the OpenACC +(https://www.openacc.org) Application Programming Interface (API) for +offloading of code to accelerator devices in C/C++ and Fortran. + + Originally, libgomp implemented the GNU OpenMP Runtime Library. +Based on this, support for OpenACC and offloading (both OpenACC and +OpenMP 4's target construct) has been added later on, and the library's +name changed to GNU Offloading and Multi Processing Runtime Library. + +* Menu: + +* Enabling OpenMP:: How to enable OpenMP for your applications. +* OpenMP Runtime Library Routines: Runtime Library Routines. + The OpenMP runtime application programming + interface. +* OpenMP Environment Variables: Environment Variables. + Influencing OpenMP runtime behavior with + environment variables. +* Enabling OpenACC:: How to enable OpenACC for your + applications. +* OpenACC Runtime Library Routines:: The OpenACC runtime application + programming interface. +* OpenACC Environment Variables:: Influencing OpenACC runtime behavior with + environment variables. +* CUDA Streams Usage:: Notes on the implementation of + asynchronous operations. +* OpenACC Library Interoperability:: OpenACC library interoperability with the + NVIDIA CUBLAS library. +* OpenACC Profiling Interface:: +* The libgomp ABI:: Notes on the external ABI presented by libgomp. +* Reporting Bugs:: How to report bugs in the GNU Offloading and + Multi Processing Runtime Library. +* Copying:: GNU general public license says + how you can copy and share libgomp. +* GNU Free Documentation License:: + How you can copy and share this manual. 
+* Funding:: How to help assure continued work for free + software. +* Library Index:: Index of this documentation. + + +File: libgomp.info, Node: Enabling OpenMP, Next: Runtime Library Routines, Up: Top + +1 Enabling OpenMP +***************** + +To activate the OpenMP extensions for C/C++ and Fortran, the +compile-time flag '-fopenmp' must be specified. This enables the OpenMP +directive '#pragma omp' in C/C++ and '!$omp' directives in free form, +'c$omp', '*$omp' and '!$omp' directives in fixed form, '!$' conditional +compilation sentinels in free form and 'c$', '*$' and '!$' sentinels in +fixed form, for Fortran. The flag also arranges for automatic linking +of the OpenMP runtime library (*note Runtime Library Routines::). + + A complete description of all OpenMP directives accepted may be found +in the OpenMP Application Program Interface (https://www.openmp.org) +manual, version 4.5. + + +File: libgomp.info, Node: Runtime Library Routines, Next: Environment Variables, Prev: Enabling OpenMP, Up: Top + +2 OpenMP Runtime Library Routines +********************************* + +The runtime routines described here are defined by Section 3 of the +OpenMP specification in version 4.5. The routines are structured in +following three parts: + +* Menu: + +Control threads, processors and the parallel environment. They have C +linkage, and do not throw exceptions. 
+ +* omp_get_active_level:: Number of active parallel regions +* omp_get_ancestor_thread_num:: Ancestor thread ID +* omp_get_cancellation:: Whether cancellation support is enabled +* omp_get_default_device:: Get the default device for target regions +* omp_get_dynamic:: Dynamic teams setting +* omp_get_initial_device:: Device number of host device +* omp_get_level:: Number of parallel regions +* omp_get_max_active_levels:: Current maximum number of active regions +* omp_get_max_task_priority:: Maximum task priority value that can be set +* omp_get_max_threads:: Maximum number of threads of parallel region +* omp_get_nested:: Nested parallel regions +* omp_get_num_devices:: Number of target devices +* omp_get_num_procs:: Number of processors online +* omp_get_num_teams:: Number of teams +* omp_get_num_threads:: Size of the active team +* omp_get_proc_bind:: Whether theads may be moved between CPUs +* omp_get_schedule:: Obtain the runtime scheduling method +* omp_get_supported_active_levels:: Maximum number of active regions supported +* omp_get_team_num:: Get team number +* omp_get_team_size:: Number of threads in a team +* omp_get_thread_limit:: Maximum number of threads +* omp_get_thread_num:: Current thread ID +* omp_in_parallel:: Whether a parallel region is active +* omp_in_final:: Whether in final or included task region +* omp_is_initial_device:: Whether executing on the host device +* omp_set_default_device:: Set the default device for target regions +* omp_set_dynamic:: Enable/disable dynamic teams +* omp_set_max_active_levels:: Limits the number of active parallel regions +* omp_set_nested:: Enable/disable nested parallel regions +* omp_set_num_threads:: Set upper team size limit +* omp_set_schedule:: Set the runtime scheduling method + +Initialize, set, test, unset and destroy simple and nested locks. 
+ +* omp_init_lock:: Initialize simple lock +* omp_set_lock:: Wait for and set simple lock +* omp_test_lock:: Test and set simple lock if available +* omp_unset_lock:: Unset simple lock +* omp_destroy_lock:: Destroy simple lock +* omp_init_nest_lock:: Initialize nested lock +* omp_set_nest_lock:: Wait for and set simple lock +* omp_test_nest_lock:: Test and set nested lock if available +* omp_unset_nest_lock:: Unset nested lock +* omp_destroy_nest_lock:: Destroy nested lock + +Portable, thread-based, wall clock timer. + +* omp_get_wtick:: Get timer precision. +* omp_get_wtime:: Elapsed wall clock time. + +Support for event objects. + +* omp_fulfill_event:: Fulfill and destroy an OpenMP event. + + +File: libgomp.info, Node: omp_get_active_level, Next: omp_get_ancestor_thread_num, Up: Runtime Library Routines + +2.1 'omp_get_active_level' - Number of parallel regions +======================================================= + +_Description_: + This function returns the nesting level for the active parallel + blocks, which enclose the calling call. + +_C/C++_ + _Prototype_: 'int omp_get_active_level(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_active_level()' + +_See also_: + *note omp_get_level::, *note omp_get_max_active_levels::, *note + omp_set_max_active_levels:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.20. + + +File: libgomp.info, Node: omp_get_ancestor_thread_num, Next: omp_get_cancellation, Prev: omp_get_active_level, Up: Runtime Library Routines + +2.2 'omp_get_ancestor_thread_num' - Ancestor thread ID +====================================================== + +_Description_: + This function returns the thread identification number for the + given nesting level of the current thread. For values of LEVEL + outside zero to 'omp_get_level' -1 is returned; if LEVEL is + 'omp_get_level' the result is identical to 'omp_get_thread_num'. 
+ +_C/C++_ + _Prototype_: 'int omp_get_ancestor_thread_num(int level);' + +_Fortran_: + _Interface_: 'integer function omp_get_ancestor_thread_num(level)' + 'integer level' + +_See also_: + *note omp_get_level::, *note omp_get_thread_num::, *note + omp_get_team_size:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.18. + + +File: libgomp.info, Node: omp_get_cancellation, Next: omp_get_default_device, Prev: omp_get_ancestor_thread_num, Up: Runtime Library Routines + +2.3 'omp_get_cancellation' - Whether cancellation support is enabled +==================================================================== + +_Description_: + This function returns 'true' if cancellation is activated, 'false' + otherwise. Here, 'true' and 'false' represent their + language-specific counterparts. Unless 'OMP_CANCELLATION' is set + true, cancellations are deactivated. + +_C/C++_: + _Prototype_: 'int omp_get_cancellation(void);' + +_Fortran_: + _Interface_: 'logical function omp_get_cancellation()' + +_See also_: + *note OMP_CANCELLATION:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.9. + + +File: libgomp.info, Node: omp_get_default_device, Next: omp_get_dynamic, Prev: omp_get_cancellation, Up: Runtime Library Routines + +2.4 'omp_get_default_device' - Get the default device for target regions +======================================================================== + +_Description_: + Get the default device for target regions without device clause. + +_C/C++_: + _Prototype_: 'int omp_get_default_device(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_default_device()' + +_See also_: + *note OMP_DEFAULT_DEVICE::, *note omp_set_default_device:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.30. 
+ + +File: libgomp.info, Node: omp_get_dynamic, Next: omp_get_initial_device, Prev: omp_get_default_device, Up: Runtime Library Routines + +2.5 'omp_get_dynamic' - Dynamic teams setting +============================================= + +_Description_: + This function returns 'true' if enabled, 'false' otherwise. Here, + 'true' and 'false' represent their language-specific counterparts. + + The dynamic team setting may be initialized at startup by the + 'OMP_DYNAMIC' environment variable or at runtime using + 'omp_set_dynamic'. If undefined, dynamic adjustment is disabled by + default. + +_C/C++_: + _Prototype_: 'int omp_get_dynamic(void);' + +_Fortran_: + _Interface_: 'logical function omp_get_dynamic()' + +_See also_: + *note omp_set_dynamic::, *note OMP_DYNAMIC:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.8. + + +File: libgomp.info, Node: omp_get_initial_device, Next: omp_get_level, Prev: omp_get_dynamic, Up: Runtime Library Routines + +2.6 'omp_get_initial_device' - Return device number of initial device +===================================================================== + +_Description_: + This function returns a device number that represents the host + device. For OpenMP 5.1, this must be equal to the value returned + by the 'omp_get_num_devices' function. + +_C/C++_ + _Prototype_: 'int omp_get_initial_device(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_initial_device()' + +_See also_: + *note omp_get_num_devices:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.35. + + +File: libgomp.info, Node: omp_get_level, Next: omp_get_max_active_levels, Prev: omp_get_initial_device, Up: Runtime Library Routines + +2.7 'omp_get_level' - Obtain the current nesting level +====================================================== + +_Description_: + This function returns the nesting level for the parallel blocks, + which enclose the calling call. 
+ +_C/C++_ + _Prototype_: 'int omp_get_level(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_level()' + +_See also_: + *note omp_get_active_level:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.17. + + +File: libgomp.info, Node: omp_get_max_active_levels, Next: omp_get_max_task_priority, Prev: omp_get_level, Up: Runtime Library Routines + +2.8 'omp_get_max_active_levels' - Current maximum number of active regions +========================================================================== + +_Description_: + This function obtains the maximum allowed number of nested, active + parallel regions. + +_C/C++_ + _Prototype_: 'int omp_get_max_active_levels(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_max_active_levels()' + +_See also_: + *note omp_set_max_active_levels::, *note omp_get_active_level:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.16. + + +File: libgomp.info, Node: omp_get_max_task_priority, Next: omp_get_max_threads, Prev: omp_get_max_active_levels, Up: Runtime Library Routines + +2.9 'omp_get_max_task_priority' - Maximum priority value +======================================================== + +that can be set for tasks. +_Description_: + This function obtains the maximum allowed priority number for + tasks. + +_C/C++_ + _Prototype_: 'int omp_get_max_task_priority(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_max_task_priority()' + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.29. + + +File: libgomp.info, Node: omp_get_max_threads, Next: omp_get_nested, Prev: omp_get_max_task_priority, Up: Runtime Library Routines + +2.10 'omp_get_max_threads' - Maximum number of threads of parallel region +========================================================================= + +_Description_: + Return the maximum number of threads used for the current parallel + region that does not use the clause 'num_threads'. 
+ +_C/C++_: + _Prototype_: 'int omp_get_max_threads(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_max_threads()' + +_See also_: + *note omp_set_num_threads::, *note omp_set_dynamic::, *note + omp_get_thread_limit:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.3. + + +File: libgomp.info, Node: omp_get_nested, Next: omp_get_num_devices, Prev: omp_get_max_threads, Up: Runtime Library Routines + +2.11 'omp_get_nested' - Nested parallel regions +=============================================== + +_Description_: + This function returns 'true' if nested parallel regions are + enabled, 'false' otherwise. Here, 'true' and 'false' represent + their language-specific counterparts. + + The state of nested parallel regions at startup depends on several + environment variables. If 'OMP_MAX_ACTIVE_LEVELS' is defined and + is set to greater than one, then nested parallel regions will be + enabled. If not defined, then the value of the 'OMP_NESTED' + environment variable will be followed if defined. If neither are + defined, then if either 'OMP_NUM_THREADS' or 'OMP_PROC_BIND' are + defined with a list of more than one value, then nested parallel + regions are enabled. If none of these are defined, then nested + parallel regions are disabled by default. + + Nested parallel regions can be enabled or disabled at runtime using + 'omp_set_nested', or by setting the maximum number of nested + regions with 'omp_set_max_active_levels' to one to disable, or + above one to enable. + +_C/C++_: + _Prototype_: 'int omp_get_nested(void);' + +_Fortran_: + _Interface_: 'logical function omp_get_nested()' + +_See also_: + *note omp_set_max_active_levels::, *note omp_set_nested::, *note + OMP_MAX_ACTIVE_LEVELS::, *note OMP_NESTED:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.11. 
+ + +File: libgomp.info, Node: omp_get_num_devices, Next: omp_get_num_procs, Prev: omp_get_nested, Up: Runtime Library Routines + +2.12 'omp_get_num_devices' - Number of target devices +===================================================== + +_Description_: + Returns the number of target devices. + +_C/C++_: + _Prototype_: 'int omp_get_num_devices(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_num_devices()' + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.31. + + +File: libgomp.info, Node: omp_get_num_procs, Next: omp_get_num_teams, Prev: omp_get_num_devices, Up: Runtime Library Routines + +2.13 'omp_get_num_procs' - Number of processors online +====================================================== + +_Description_: + Returns the number of processors online on that device. + +_C/C++_: + _Prototype_: 'int omp_get_num_procs(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_num_procs()' + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.5. + + +File: libgomp.info, Node: omp_get_num_teams, Next: omp_get_num_threads, Prev: omp_get_num_procs, Up: Runtime Library Routines + +2.14 'omp_get_num_teams' - Number of teams +========================================== + +_Description_: + Returns the number of teams in the current team region. + +_C/C++_: + _Prototype_: 'int omp_get_num_teams(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_num_teams()' + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.32. + + +File: libgomp.info, Node: omp_get_num_threads, Next: omp_get_proc_bind, Prev: omp_get_num_teams, Up: Runtime Library Routines + +2.15 'omp_get_num_threads' - Size of the active team +==================================================== + +_Description_: + Returns the number of threads in the current team. In a sequential + section of the program 'omp_get_num_threads' returns 1. 
+ + The default team size may be initialized at startup by the + 'OMP_NUM_THREADS' environment variable. At runtime, the size of + the current team may be set either by the 'NUM_THREADS' clause or + by 'omp_set_num_threads'. If none of the above were used to define + a specific value and 'OMP_DYNAMIC' is disabled, one thread per CPU + online is used. + +_C/C++_: + _Prototype_: 'int omp_get_num_threads(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_num_threads()' + +_See also_: + *note omp_get_max_threads::, *note omp_set_num_threads::, *note + OMP_NUM_THREADS:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.2. + + +File: libgomp.info, Node: omp_get_proc_bind, Next: omp_get_schedule, Prev: omp_get_num_threads, Up: Runtime Library Routines + +2.16 'omp_get_proc_bind' - Whether threads may be moved between CPUs +==================================================================== + +_Description_: + This function returns the currently active thread affinity policy, + which is set via 'OMP_PROC_BIND'. Possible values are + 'omp_proc_bind_false', 'omp_proc_bind_true', + 'omp_proc_bind_master', 'omp_proc_bind_close' and + 'omp_proc_bind_spread'. + +_C/C++_: + _Prototype_: 'omp_proc_bind_t omp_get_proc_bind(void);' + +_Fortran_: + _Interface_: 'integer(kind=omp_proc_bind_kind) function + omp_get_proc_bind()' + +_See also_: + *note OMP_PROC_BIND::, *note OMP_PLACES::, *note + GOMP_CPU_AFFINITY::, + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.22. + + +File: libgomp.info, Node: omp_get_schedule, Next: omp_get_supported_active_levels, Prev: omp_get_proc_bind, Up: Runtime Library Routines + +2.17 'omp_get_schedule' - Obtain the runtime scheduling method +============================================================== + +_Description_: + Obtain the runtime scheduling method.
The KIND argument will be + set to the value 'omp_sched_static', 'omp_sched_dynamic', + 'omp_sched_guided' or 'omp_sched_auto'. The second argument, + CHUNK_SIZE, is set to the chunk size. + +_C/C++_ + _Prototype_: 'void omp_get_schedule(omp_sched_t *kind, int + *chunk_size);' + +_Fortran_: + _Interface_: 'subroutine omp_get_schedule(kind, chunk_size)' + 'integer(kind=omp_sched_kind) kind' + 'integer chunk_size' + +_See also_: + *note omp_set_schedule::, *note OMP_SCHEDULE:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.13. + + +File: libgomp.info, Node: omp_get_supported_active_levels, Next: omp_get_team_num, Prev: omp_get_schedule, Up: Runtime Library Routines + +2.18 'omp_get_supported_active_levels' - Maximum number of active regions supported +=================================================================================== + +_Description_: + This function returns the maximum number of nested, active parallel + regions supported by this implementation. + +_C/C++_ + _Prototype_: 'int omp_get_supported_active_levels(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_supported_active_levels()' + +_See also_: + *note omp_get_max_active_levels::, *note + omp_set_max_active_levels:: + +_Reference_: + OpenMP specification v5.0 (https://www.openmp.org), Section 3.2.15. + + +File: libgomp.info, Node: omp_get_team_num, Next: omp_get_team_size, Prev: omp_get_supported_active_levels, Up: Runtime Library Routines + +2.19 'omp_get_team_num' - Get team number +========================================= + +_Description_: + Returns the team number of the calling thread. + +_C/C++_: + _Prototype_: 'int omp_get_team_num(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_team_num()' + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.33. 
+ + +File: libgomp.info, Node: omp_get_team_size, Next: omp_get_thread_limit, Prev: omp_get_team_num, Up: Runtime Library Routines + +2.20 'omp_get_team_size' - Number of threads in a team +====================================================== + +_Description_: + This function returns the number of threads in a thread team to + which either the current thread or its ancestor belongs. For + values of LEVEL outside zero to 'omp_get_level', -1 is returned; if + LEVEL is zero, 1 is returned, and for 'omp_get_level', the result + is identical to 'omp_get_num_threads'. + +_C/C++_: + _Prototype_: 'int omp_get_team_size(int level);' + +_Fortran_: + _Interface_: 'integer function omp_get_team_size(level)' + 'integer level' + +_See also_: + *note omp_get_num_threads::, *note omp_get_level::, *note + omp_get_ancestor_thread_num:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.19. + + +File: libgomp.info, Node: omp_get_thread_limit, Next: omp_get_thread_num, Prev: omp_get_team_size, Up: Runtime Library Routines + +2.21 'omp_get_thread_limit' - Maximum number of threads +======================================================= + +_Description_: + Return the maximum number of threads of the program. + +_C/C++_: + _Prototype_: 'int omp_get_thread_limit(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_thread_limit()' + +_See also_: + *note omp_get_max_threads::, *note OMP_THREAD_LIMIT:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.14. + + +File: libgomp.info, Node: omp_get_thread_num, Next: omp_in_parallel, Prev: omp_get_thread_limit, Up: Runtime Library Routines + +2.22 'omp_get_thread_num' - Current thread ID +============================================= + +_Description_: + Returns a unique thread identification number within the current + team. In a sequential parts of the program, 'omp_get_thread_num' + always returns 0. 
In parallel regions the return value varies from + 0 to 'omp_get_num_threads'-1 inclusive. The return value of the + master thread of a team is always 0. + +_C/C++_: + _Prototype_: 'int omp_get_thread_num(void);' + +_Fortran_: + _Interface_: 'integer function omp_get_thread_num()' + +_See also_: + *note omp_get_num_threads::, *note omp_get_ancestor_thread_num:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.4. + + +File: libgomp.info, Node: omp_in_parallel, Next: omp_in_final, Prev: omp_get_thread_num, Up: Runtime Library Routines + +2.23 'omp_in_parallel' - Whether a parallel region is active +============================================================ + +_Description_: + This function returns 'true' if currently running in parallel, + 'false' otherwise. Here, 'true' and 'false' represent their + language-specific counterparts. + +_C/C++_: + _Prototype_: 'int omp_in_parallel(void);' + +_Fortran_: + _Interface_: 'logical function omp_in_parallel()' + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.6. + + +File: libgomp.info, Node: omp_in_final, Next: omp_is_initial_device, Prev: omp_in_parallel, Up: Runtime Library Routines + +2.24 'omp_in_final' - Whether in final or included task region +============================================================== + +_Description_: + This function returns 'true' if currently running in a final or + included task region, 'false' otherwise. Here, 'true' and 'false' + represent their language-specific counterparts. + +_C/C++_: + _Prototype_: 'int omp_in_final(void);' + +_Fortran_: + _Interface_: 'logical function omp_in_final()' + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.21. 
+ + +File: libgomp.info, Node: omp_is_initial_device, Next: omp_set_default_device, Prev: omp_in_final, Up: Runtime Library Routines + +2.25 'omp_is_initial_device' - Whether executing on the host device +=================================================================== + +_Description_: + This function returns 'true' if currently running on the host + device, 'false' otherwise. Here, 'true' and 'false' represent + their language-specific counterparts. + +_C/C++_: + _Prototype_: 'int omp_is_initial_device(void);' + +_Fortran_: + _Interface_: 'logical function omp_is_initial_device()' + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.34. + + +File: libgomp.info, Node: omp_set_default_device, Next: omp_set_dynamic, Prev: omp_is_initial_device, Up: Runtime Library Routines + +2.26 'omp_set_default_device' - Set the default device for target regions +========================================================================= + +_Description_: + Set the default device for target regions without device clause. + The argument shall be a nonnegative device number. + +_C/C++_: + _Prototype_: 'void omp_set_default_device(int device_num);' + +_Fortran_: + _Interface_: 'subroutine omp_set_default_device(device_num)' + 'integer device_num' + +_See also_: + *note OMP_DEFAULT_DEVICE::, *note omp_get_default_device:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.29. + + +File: libgomp.info, Node: omp_set_dynamic, Next: omp_set_max_active_levels, Prev: omp_set_default_device, Up: Runtime Library Routines + +2.27 'omp_set_dynamic' - Enable/disable dynamic teams +===================================================== + +_Description_: + Enable or disable the dynamic adjustment of the number of threads + within a team. The function takes the language-specific equivalent + of 'true' and 'false', where 'true' enables dynamic adjustment of + team sizes and 'false' disables it. 
+ +_C/C++_: + _Prototype_: 'void omp_set_dynamic(int dynamic_threads);' + +_Fortran_: + _Interface_: 'subroutine omp_set_dynamic(dynamic_threads)' + 'logical, intent(in) :: dynamic_threads' + +_See also_: + *note OMP_DYNAMIC::, *note omp_get_dynamic:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.7. + + +File: libgomp.info, Node: omp_set_max_active_levels, Next: omp_set_nested, Prev: omp_set_dynamic, Up: Runtime Library Routines + +2.28 'omp_set_max_active_levels' - Limits the number of active parallel regions +=============================================================================== + +_Description_: + This function limits the maximum allowed number of nested, active + parallel regions. MAX_LEVELS must be less or equal to the value + returned by 'omp_get_supported_active_levels'. + +_C/C++_ + _Prototype_: 'void omp_set_max_active_levels(int max_levels);' + +_Fortran_: + _Interface_: 'subroutine omp_set_max_active_levels(max_levels)' + 'integer max_levels' + +_See also_: + *note omp_get_max_active_levels::, *note omp_get_active_level::, + *note omp_get_supported_active_levels:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.15. + + +File: libgomp.info, Node: omp_set_nested, Next: omp_set_num_threads, Prev: omp_set_max_active_levels, Up: Runtime Library Routines + +2.29 'omp_set_nested' - Enable/disable nested parallel regions +============================================================== + +_Description_: + Enable or disable nested parallel regions, i.e., whether team + members are allowed to create new teams. The function takes the + language-specific equivalent of 'true' and 'false', where 'true' + enables dynamic adjustment of team sizes and 'false' disables it. + + Enabling nested parallel regions will also set the maximum number + of active nested regions to the maximum supported. Disabling + nested parallel regions will set the maximum number of active + nested regions to one. 
+ +_C/C++_: + _Prototype_: 'void omp_set_nested(int nested);' + +_Fortran_: + _Interface_: 'subroutine omp_set_nested(nested)' + 'logical, intent(in) :: nested' + +_See also_: + *note omp_get_nested::, *note omp_set_max_active_levels::, *note + OMP_MAX_ACTIVE_LEVELS::, *note OMP_NESTED:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.10. + + +File: libgomp.info, Node: omp_set_num_threads, Next: omp_set_schedule, Prev: omp_set_nested, Up: Runtime Library Routines + +2.30 'omp_set_num_threads' - Set upper team size limit +====================================================== + +_Description_: + Specifies the number of threads used by default in subsequent + parallel sections, if those do not specify a 'num_threads' clause. + The argument of 'omp_set_num_threads' shall be a positive integer. + +_C/C++_: + _Prototype_: 'void omp_set_num_threads(int num_threads);' + +_Fortran_: + _Interface_: 'subroutine omp_set_num_threads(num_threads)' + 'integer, intent(in) :: num_threads' + +_See also_: + *note OMP_NUM_THREADS::, *note omp_get_num_threads::, *note + omp_get_max_threads:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.1. + + +File: libgomp.info, Node: omp_set_schedule, Next: omp_init_lock, Prev: omp_set_num_threads, Up: Runtime Library Routines + +2.31 'omp_set_schedule' - Set the runtime scheduling method +=========================================================== + +_Description_: + Sets the runtime scheduling method. The KIND argument can have the + value 'omp_sched_static', 'omp_sched_dynamic', 'omp_sched_guided' + or 'omp_sched_auto'. Except for 'omp_sched_auto', the chunk size + is set to the value of CHUNK_SIZE if positive, or to the default + value if zero or negative. For 'omp_sched_auto' the CHUNK_SIZE + argument is ignored. 
+ +_C/C++_ + _Prototype_: 'void omp_set_schedule(omp_sched_t kind, int + chunk_size);' + +_Fortran_: + _Interface_: 'subroutine omp_set_schedule(kind, chunk_size)' + 'integer(kind=omp_sched_kind) kind' + 'integer chunk_size' + +_See also_: + *note omp_get_schedule:: *note OMP_SCHEDULE:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.2.12. + + +File: libgomp.info, Node: omp_init_lock, Next: omp_set_lock, Prev: omp_set_schedule, Up: Runtime Library Routines + +2.32 'omp_init_lock' - Initialize simple lock +============================================= + +_Description_: + Initialize a simple lock. After initialization, the lock is in an + unlocked state. + +_C/C++_: + _Prototype_: 'void omp_init_lock(omp_lock_t *lock);' + +_Fortran_: + _Interface_: 'subroutine omp_init_lock(svar)' + 'integer(omp_lock_kind), intent(out) :: svar' + +_See also_: + *note omp_destroy_lock:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.3.1. + + +File: libgomp.info, Node: omp_set_lock, Next: omp_test_lock, Prev: omp_init_lock, Up: Runtime Library Routines + +2.33 'omp_set_lock' - Wait for and set simple lock +================================================== + +_Description_: + Before setting a simple lock, the lock variable must be initialized + by 'omp_init_lock'. The calling thread is blocked until the lock + is available. If the lock is already held by the current thread, a + deadlock occurs. + +_C/C++_: + _Prototype_: 'void omp_set_lock(omp_lock_t *lock);' + +_Fortran_: + _Interface_: 'subroutine omp_set_lock(svar)' + 'integer(omp_lock_kind), intent(inout) :: svar' + +_See also_: + *note omp_init_lock::, *note omp_test_lock::, *note + omp_unset_lock:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.3.4. 
+ + +File: libgomp.info, Node: omp_test_lock, Next: omp_unset_lock, Prev: omp_set_lock, Up: Runtime Library Routines + +2.34 'omp_test_lock' - Test and set simple lock if available +============================================================ + +_Description_: + Before setting a simple lock, the lock variable must be initialized + by 'omp_init_lock'. Contrary to 'omp_set_lock', 'omp_test_lock' + does not block if the lock is not available. This function returns + 'true' upon success, 'false' otherwise. Here, 'true' and 'false' + represent their language-specific counterparts. + +_C/C++_: + _Prototype_: 'int omp_test_lock(omp_lock_t *lock);' + +_Fortran_: + _Interface_: 'logical function omp_test_lock(svar)' + 'integer(omp_lock_kind), intent(inout) :: svar' + +_See also_: + *note omp_init_lock::, *note omp_set_lock::, *note omp_set_lock:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.3.6. + + +File: libgomp.info, Node: omp_unset_lock, Next: omp_destroy_lock, Prev: omp_test_lock, Up: Runtime Library Routines + +2.35 'omp_unset_lock' - Unset simple lock +========================================= + +_Description_: + A simple lock about to be unset must have been locked by + 'omp_set_lock' or 'omp_test_lock' before. In addition, the lock + must be held by the thread calling 'omp_unset_lock'. Then, the + lock becomes unlocked. If one or more threads attempted to set the + lock before, one of them is chosen to, again, set the lock to + itself. + +_C/C++_: + _Prototype_: 'void omp_unset_lock(omp_lock_t *lock);' + +_Fortran_: + _Interface_: 'subroutine omp_unset_lock(svar)' + 'integer(omp_lock_kind), intent(inout) :: svar' + +_See also_: + *note omp_set_lock::, *note omp_test_lock:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.3.5. 
+ + +File: libgomp.info, Node: omp_destroy_lock, Next: omp_init_nest_lock, Prev: omp_unset_lock, Up: Runtime Library Routines + +2.36 'omp_destroy_lock' - Destroy simple lock +============================================= + +_Description_: + Destroy a simple lock. In order to be destroyed, a simple lock + must be in the unlocked state. + +_C/C++_: + _Prototype_: 'void omp_destroy_lock(omp_lock_t *lock);' + +_Fortran_: + _Interface_: 'subroutine omp_destroy_lock(svar)' + 'integer(omp_lock_kind), intent(inout) :: svar' + +_See also_: + *note omp_init_lock:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.3.3. + + +File: libgomp.info, Node: omp_init_nest_lock, Next: omp_set_nest_lock, Prev: omp_destroy_lock, Up: Runtime Library Routines + +2.37 'omp_init_nest_lock' - Initialize nested lock +================================================== + +_Description_: + Initialize a nested lock. After initialization, the lock is in an + unlocked state and the nesting count is set to zero. + +_C/C++_: + _Prototype_: 'void omp_init_nest_lock(omp_nest_lock_t *lock);' + +_Fortran_: + _Interface_: 'subroutine omp_init_nest_lock(nvar)' + 'integer(omp_nest_lock_kind), intent(out) :: nvar' + +_See also_: + *note omp_destroy_nest_lock:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.3.1. + + +File: libgomp.info, Node: omp_set_nest_lock, Next: omp_test_nest_lock, Prev: omp_init_nest_lock, Up: Runtime Library Routines + +2.38 'omp_set_nest_lock' - Wait for and set nested lock +======================================================= + +_Description_: + Before setting a nested lock, the lock variable must be initialized + by 'omp_init_nest_lock'. The calling thread is blocked until the + lock is available. If the lock is already held by the current + thread, the nesting count for the lock is incremented. 
+
+_C/C++_:
+     _Prototype_: 'void omp_set_nest_lock(omp_nest_lock_t *lock);'
+
+_Fortran_:
+     _Interface_: 'subroutine omp_set_nest_lock(nvar)'
+          'integer(omp_nest_lock_kind), intent(inout) :: nvar'
+
+_See also_:
+     *note omp_init_nest_lock::, *note omp_unset_nest_lock::
+
+_Reference_:
+     OpenMP specification v4.5 (https://www.openmp.org), Section 3.3.4.
+
+
+File: libgomp.info, Node: omp_test_nest_lock, Next: omp_unset_nest_lock, Prev: omp_set_nest_lock, Up: Runtime Library Routines
+
+2.39 'omp_test_nest_lock' - Test and set nested lock if available
+=================================================================
+
+_Description_:
+     Before setting a nested lock, the lock variable must be initialized
+     by 'omp_init_nest_lock'.  Contrary to 'omp_set_nest_lock',
+     'omp_test_nest_lock' does not block if the lock is not available.
+     If the lock is already held by the current thread, the new nesting
+     count is returned.  Otherwise, the return value equals zero.
+
+_C/C++_:
+     _Prototype_: 'int omp_test_nest_lock(omp_nest_lock_t *lock);'
+
+_Fortran_:
+     _Interface_: 'logical function omp_test_nest_lock(nvar)'
+          'integer(omp_nest_lock_kind), intent(inout) :: nvar'
+
+_See also_:
+     *note omp_init_nest_lock::, *note omp_set_nest_lock::, *note
+     omp_unset_nest_lock::
+
+_Reference_:
+     OpenMP specification v4.5 (https://www.openmp.org), Section 3.3.6.
+
+
+File: libgomp.info, Node: omp_unset_nest_lock, Next: omp_destroy_nest_lock, Prev: omp_test_nest_lock, Up: Runtime Library Routines
+
+2.40 'omp_unset_nest_lock' - Unset nested lock
+==============================================
+
+_Description_:
+     A nested lock about to be unset must have been locked by
+     'omp_set_nest_lock' or 'omp_test_nest_lock' before.  In
+     addition, the lock must be held by the thread calling
+     'omp_unset_nest_lock'.  If the nesting count drops to zero, the
+     lock becomes unlocked.  If one or more threads attempted to set
+     the lock before, one of them is chosen to, again, set the lock to
+     itself.
+ +_C/C++_: + _Prototype_: 'void omp_unset_nest_lock(omp_nest_lock_t *lock);' + +_Fortran_: + _Interface_: 'subroutine omp_unset_nest_lock(nvar)' + 'integer(omp_nest_lock_kind), intent(inout) :: nvar' + +_See also_: + *note omp_set_nest_lock:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.3.5. + + +File: libgomp.info, Node: omp_destroy_nest_lock, Next: omp_get_wtick, Prev: omp_unset_nest_lock, Up: Runtime Library Routines + +2.41 'omp_destroy_nest_lock' - Destroy nested lock +================================================== + +_Description_: + Destroy a nested lock. In order to be destroyed, a nested lock + must be in the unlocked state and its nesting count must equal + zero. + +_C/C++_: + _Prototype_: 'void omp_destroy_nest_lock(omp_nest_lock_t *);' + +_Fortran_: + _Interface_: 'subroutine omp_destroy_nest_lock(nvar)' + 'integer(omp_nest_lock_kind), intent(inout) :: nvar' + +_See also_: + *note omp_init_lock:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.3.3. + + +File: libgomp.info, Node: omp_get_wtick, Next: omp_get_wtime, Prev: omp_destroy_nest_lock, Up: Runtime Library Routines + +2.42 'omp_get_wtick' - Get timer precision +========================================== + +_Description_: + Gets the timer precision, i.e., the number of seconds between two + successive clock ticks. + +_C/C++_: + _Prototype_: 'double omp_get_wtick(void);' + +_Fortran_: + _Interface_: 'double precision function omp_get_wtick()' + +_See also_: + *note omp_get_wtime:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 3.4.2. + + +File: libgomp.info, Node: omp_get_wtime, Next: omp_fulfill_event, Prev: omp_get_wtick, Up: Runtime Library Routines + +2.43 'omp_get_wtime' - Elapsed wall clock time +============================================== + +_Description_: + Elapsed wall clock time in seconds. 
The time is measured per
+     thread; no guarantee can be made that two distinct threads measure
+     the same time.  Time is measured from some "time in the past",
+     which is an arbitrary time guaranteed not to change during the
+     execution of the program.
+
+_C/C++_:
+     _Prototype_: 'double omp_get_wtime(void);'
+
+_Fortran_:
+     _Interface_: 'double precision function omp_get_wtime()'
+
+_See also_:
+     *note omp_get_wtick::
+
+_Reference_:
+     OpenMP specification v4.5 (https://www.openmp.org), Section 3.4.1.
+
+
+File: libgomp.info, Node: omp_fulfill_event, Prev: omp_get_wtime, Up: Runtime Library Routines
+
+2.44 'omp_fulfill_event' - Fulfill and destroy an OpenMP event
+==============================================================
+
+_Description_:
+     Fulfill the event associated with the event handle argument.
+     Currently, it is only used to fulfill events generated by detach
+     clauses on task constructs - the effect of fulfilling the event is
+     to allow the task to complete.
+
+     The result of calling 'omp_fulfill_event' with an event handle
+     other than that generated by a detach clause is undefined.  Calling
+     it with an event handle that has already been fulfilled is also
+     undefined.
+
+_C/C++_:
+     _Prototype_: 'void omp_fulfill_event(omp_event_handle_t event);'
+
+_Fortran_:
+     _Interface_: 'subroutine omp_fulfill_event(event)'
+          'integer (kind=omp_event_handle_kind) :: event'
+
+_Reference_:
+     OpenMP specification v5.0 (https://www.openmp.org), Section 3.5.1.
+
+
+File: libgomp.info, Node: Environment Variables, Next: Enabling OpenACC, Prev: Runtime Library Routines, Up: Top
+
+3 OpenMP Environment Variables
+******************************
+
+The environment variables beginning with 'OMP_' are defined by
+section 4 of the OpenMP specification in version 4.5, while those
+beginning with 'GOMP_' are GNU extensions.
+ +* Menu: + +* OMP_CANCELLATION:: Set whether cancellation is activated +* OMP_DISPLAY_ENV:: Show OpenMP version and environment variables +* OMP_DEFAULT_DEVICE:: Set the device used in target regions +* OMP_DYNAMIC:: Dynamic adjustment of threads +* OMP_MAX_ACTIVE_LEVELS:: Set the maximum number of nested parallel regions +* OMP_MAX_TASK_PRIORITY:: Set the maximum task priority value +* OMP_NESTED:: Nested parallel regions +* OMP_NUM_THREADS:: Specifies the number of threads to use +* OMP_PROC_BIND:: Whether theads may be moved between CPUs +* OMP_PLACES:: Specifies on which CPUs the theads should be placed +* OMP_STACKSIZE:: Set default thread stack size +* OMP_SCHEDULE:: How threads are scheduled +* OMP_TARGET_OFFLOAD:: Controls offloading behaviour +* OMP_THREAD_LIMIT:: Set the maximum number of threads +* OMP_WAIT_POLICY:: How waiting threads are handled +* GOMP_CPU_AFFINITY:: Bind threads to specific CPUs +* GOMP_DEBUG:: Enable debugging output +* GOMP_STACKSIZE:: Set default thread stack size +* GOMP_SPINCOUNT:: Set the busy-wait spin count +* GOMP_RTEMS_THREAD_POOLS:: Set the RTEMS specific thread pools + + +File: libgomp.info, Node: OMP_CANCELLATION, Next: OMP_DISPLAY_ENV, Up: Environment Variables + +3.1 'OMP_CANCELLATION' - Set whether cancellation is activated +============================================================== + +_Description_: + If set to 'TRUE', the cancellation is activated. If set to 'FALSE' + or if unset, cancellation is disabled and the 'cancel' construct is + ignored. 
+ +_See also_: + *note omp_get_cancellation:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 4.11 + + +File: libgomp.info, Node: OMP_DISPLAY_ENV, Next: OMP_DEFAULT_DEVICE, Prev: OMP_CANCELLATION, Up: Environment Variables + +3.2 'OMP_DISPLAY_ENV' - Show OpenMP version and environment variables +===================================================================== + +_Description_: + If set to 'TRUE', the OpenMP version number and the values + associated with the OpenMP environment variables are printed to + 'stderr'. If set to 'VERBOSE', it additionally shows the value of + the environment variables which are GNU extensions. If undefined + or set to 'FALSE', this information will not be shown. + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 4.12 + + +File: libgomp.info, Node: OMP_DEFAULT_DEVICE, Next: OMP_DYNAMIC, Prev: OMP_DISPLAY_ENV, Up: Environment Variables + +3.3 'OMP_DEFAULT_DEVICE' - Set the device used in target regions +================================================================ + +_Description_: + Set to choose the device which is used in a 'target' region, unless + the value is overridden by 'omp_set_default_device' or by a + 'device' clause. The value shall be the nonnegative device number. + If no device with the given device number exists, the code is + executed on the host. If unset, device number 0 will be used. + +_See also_: + *note omp_get_default_device::, *note omp_set_default_device::, + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 4.13 + + +File: libgomp.info, Node: OMP_DYNAMIC, Next: OMP_MAX_ACTIVE_LEVELS, Prev: OMP_DEFAULT_DEVICE, Up: Environment Variables + +3.4 'OMP_DYNAMIC' - Dynamic adjustment of threads +================================================= + +_Description_: + Enable or disable the dynamic adjustment of the number of threads + within a team. The value of this environment variable shall be + 'TRUE' or 'FALSE'. 
If undefined, dynamic adjustment is disabled by
+     default.
+
+_See also_:
+     *note omp_set_dynamic::
+
+_Reference_:
+     OpenMP specification v4.5 (https://www.openmp.org), Section 4.3
+
+
+File: libgomp.info, Node: OMP_MAX_ACTIVE_LEVELS, Next: OMP_MAX_TASK_PRIORITY, Prev: OMP_DYNAMIC, Up: Environment Variables
+
+3.5 'OMP_MAX_ACTIVE_LEVELS' - Set the maximum number of nested parallel regions
+===============================================================================
+
+_Description_:
+     Specifies the initial value for the maximum number of nested
+     parallel regions.  The value of this variable shall be a positive
+     integer.  If undefined, then if 'OMP_NESTED' is defined and set to
+     true, or if 'OMP_NUM_THREADS' or 'OMP_PROC_BIND' are defined and
+     set to a list with more than one item, the maximum number of nested
+     parallel regions will be initialized to the largest number
+     supported, otherwise it will be set to one.
+
+_See also_:
+     *note omp_set_max_active_levels::, *note OMP_NESTED::
+
+_Reference_:
+     OpenMP specification v4.5 (https://www.openmp.org), Section 4.9
+
+
+File: libgomp.info, Node: OMP_MAX_TASK_PRIORITY, Next: OMP_NESTED, Prev: OMP_MAX_ACTIVE_LEVELS, Up: Environment Variables
+
+3.6 'OMP_MAX_TASK_PRIORITY' - Set the maximum task priority value
+=================================================================
+
+_Description_:
+     Specifies the initial value for the maximum priority value that can
+     be set for a task.  The value of this variable shall be a
+     non-negative integer, and zero is allowed.  If undefined, the
+     default priority is 0.
+
+_See also_:
+     *note omp_get_max_task_priority::
+
+_Reference_:
+     OpenMP specification v4.5 (https://www.openmp.org), Section 4.14
+
+
+File: libgomp.info, Node: OMP_NESTED, Next: OMP_NUM_THREADS, Prev: OMP_MAX_TASK_PRIORITY, Up: Environment Variables
+
+3.7 'OMP_NESTED' - Nested parallel regions
+==========================================
+
+_Description_:
+     Enable or disable nested parallel regions, i.e., whether team
+     members are allowed to create new teams.  The value of this
+     environment variable shall be 'TRUE' or 'FALSE'.  If set to 'TRUE',
+     the number of maximum active nested regions supported will by
+     default be set to the maximum supported, otherwise it will be set
+     to one.  If 'OMP_MAX_ACTIVE_LEVELS' is defined, its setting will
+     override this setting.  If both are undefined, nested parallel
+     regions are enabled if 'OMP_NUM_THREADS' or 'OMP_PROC_BIND' are
+     defined to a list with more than one item, otherwise they are
+     disabled by default.
+
+_See also_:
+     *note omp_set_max_active_levels::, *note omp_set_nested::
+
+_Reference_:
+     OpenMP specification v4.5 (https://www.openmp.org), Section 4.6
+
+
+File: libgomp.info, Node: OMP_NUM_THREADS, Next: OMP_PROC_BIND, Prev: OMP_NESTED, Up: Environment Variables
+
+3.8 'OMP_NUM_THREADS' - Specifies the number of threads to use
+==============================================================
+
+_Description_:
+     Specifies the default number of threads to use in parallel regions.
+     The value of this variable shall be a comma-separated list of
+     positive integers; the value specifies the number of threads to use
+     for the corresponding nested level.  Specifying more than one item
+     in the list will automatically enable nesting by default.  If
+     undefined, one thread per CPU is used.
+
+_See also_:
+     *note omp_set_num_threads::, *note OMP_NESTED::
+
+_Reference_:
+     OpenMP specification v4.5 (https://www.openmp.org), Section 4.2
+
+
+File: libgomp.info, Node: OMP_PROC_BIND, Next: OMP_PLACES, Prev: OMP_NUM_THREADS, Up: Environment Variables
+
+3.9 'OMP_PROC_BIND' - Whether threads may be moved between CPUs
+===============================================================
+
+_Description_:
+     Specifies whether threads may be moved between processors.  If set
+     to 'TRUE', OpenMP threads should not be moved; if set to 'FALSE'
+     they may be moved.  Alternatively, a comma separated list with the
+     values 'MASTER', 'CLOSE' and 'SPREAD' can be used to specify the
+     thread affinity policy for the corresponding nesting level.  With
+     'MASTER' the worker threads are in the same place partition as the
+     master thread.  With 'CLOSE' those are kept close to the master
+     thread in contiguous place partitions.  And with 'SPREAD' a sparse
+     distribution across the place partitions is used.  Specifying more
+     than one item in the list will automatically enable nesting by
+     default.
+
+     When undefined, 'OMP_PROC_BIND' defaults to 'TRUE' when
+     'OMP_PLACES' or 'GOMP_CPU_AFFINITY' is set and 'FALSE' otherwise.
+
+_See also_:
+     *note omp_get_proc_bind::, *note GOMP_CPU_AFFINITY::, *note
+     OMP_NESTED::, *note OMP_PLACES::
+
+_Reference_:
+     OpenMP specification v4.5 (https://www.openmp.org), Section 4.4
+
+
+File: libgomp.info, Node: OMP_PLACES, Next: OMP_STACKSIZE, Prev: OMP_PROC_BIND, Up: Environment Variables
+
+3.10 'OMP_PLACES' - Specifies on which CPUs the threads should be placed
+========================================================================
+
+_Description_:
+     The thread placement can be either specified using an abstract name
+     or by an explicit list of the places.  The abstract names
+     'threads', 'cores' and 'sockets' can be optionally followed by a
+     positive number in parentheses, which denotes how many places
+     shall be created.
With 'threads' each place corresponds to a
+     single hardware thread; 'cores' to a single core with the
+     corresponding number of hardware threads; and with 'sockets' the
+     place corresponds to a single socket.  The resulting placement can
+     be shown by setting the 'OMP_DISPLAY_ENV' environment variable.
+
+     Alternatively, the placement can be specified explicitly as a
+     comma-separated list of places.  A place is specified by a set of
+     nonnegative numbers in curly braces, denoting the
+     hardware threads.  The hardware threads belonging to a place can
+     either be specified as a comma-separated list of nonnegative thread
+     numbers or using an interval.  Multiple places can also be either
+     specified by a comma-separated list of places or by an interval.
+     To specify an interval, a colon followed by the count is placed
+     after the hardware thread number or the place.  Optionally,
+     the length can be followed by a colon and the stride number -
+     otherwise a unit stride is assumed.  For instance, the following
+     specifies the same places list: '"{0,1,2}, {3,4,6}, {7,8,9},
+     {10,11,12}"'; '"{0:3}, {3:3}, {7:3}, {10:3}"'; and '"{0:2}:4:3"'.
+
+     If 'OMP_PLACES' and 'GOMP_CPU_AFFINITY' are unset and
+     'OMP_PROC_BIND' is either unset or 'false', threads may be moved
+     between CPUs following no placement policy.
+
+_See also_:
+     *note OMP_PROC_BIND::, *note GOMP_CPU_AFFINITY::, *note
+     omp_get_proc_bind::, *note OMP_DISPLAY_ENV::
+
+_Reference_:
+     OpenMP specification v4.5 (https://www.openmp.org), Section 4.5
+
+
+File: libgomp.info, Node: OMP_STACKSIZE, Next: OMP_SCHEDULE, Prev: OMP_PLACES, Up: Environment Variables
+
+3.11 'OMP_STACKSIZE' - Set default thread stack size
+====================================================
+
+_Description_:
+     Set the default thread stack size in kilobytes, unless the number
+     is suffixed by 'B', 'K', 'M' or 'G', in which case the size is,
+     respectively, in bytes, kilobytes, megabytes or gigabytes.
This is + different from 'pthread_attr_setstacksize' which gets the number of + bytes as an argument. If the stack size cannot be set due to + system constraints, an error is reported and the initial stack size + is left unchanged. If undefined, the stack size is system + dependent. + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 4.7 + + +File: libgomp.info, Node: OMP_SCHEDULE, Next: OMP_TARGET_OFFLOAD, Prev: OMP_STACKSIZE, Up: Environment Variables + +3.12 'OMP_SCHEDULE' - How threads are scheduled +=============================================== + +_Description_: + Allows to specify 'schedule type' and 'chunk size'. The value of + the variable shall have the form: 'type[,chunk]' where 'type' is + one of 'static', 'dynamic', 'guided' or 'auto' The optional 'chunk' + size shall be a positive integer. If undefined, dynamic scheduling + and a chunk size of 1 is used. + +_See also_: + *note omp_set_schedule:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Sections + 2.7.1.1 and 4.1 + + +File: libgomp.info, Node: OMP_TARGET_OFFLOAD, Next: OMP_THREAD_LIMIT, Prev: OMP_SCHEDULE, Up: Environment Variables + +3.13 'OMP_TARGET_OFFLOAD' - Controls offloading behaviour +========================================================= + +_Description_: + Specifies the behaviour with regard to offloading code to a device. + This variable can be set to one of three values - 'MANDATORY', + 'DISABLED' or 'DEFAULT'. + + If set to 'MANDATORY', the program will terminate with an error if + the offload device is not present or is not supported. If set to + 'DISABLED', then offloading is disabled and all code will run on + the host. If set to 'DEFAULT', the program will try offloading to + the device first, then fall back to running code on the host if it + cannot. + + If undefined, then the program will behave as if 'DEFAULT' was set. 
+ +_Reference_: + OpenMP specification v5.0 (https://www.openmp.org), Section 6.17 + + +File: libgomp.info, Node: OMP_THREAD_LIMIT, Next: OMP_WAIT_POLICY, Prev: OMP_TARGET_OFFLOAD, Up: Environment Variables + +3.14 'OMP_THREAD_LIMIT' - Set the maximum number of threads +=========================================================== + +_Description_: + Specifies the number of threads to use for the whole program. The + value of this variable shall be a positive integer. If undefined, + the number of threads is not limited. + +_See also_: + *note OMP_NUM_THREADS::, *note omp_get_thread_limit:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 4.10 + + +File: libgomp.info, Node: OMP_WAIT_POLICY, Next: GOMP_CPU_AFFINITY, Prev: OMP_THREAD_LIMIT, Up: Environment Variables + +3.15 'OMP_WAIT_POLICY' - How waiting threads are handled +======================================================== + +_Description_: + Specifies whether waiting threads should be active or passive. If + the value is 'PASSIVE', waiting threads should not consume CPU + power while waiting; while the value is 'ACTIVE' specifies that + they should. If undefined, threads wait actively for a short time + before waiting passively. + +_See also_: + *note GOMP_SPINCOUNT:: + +_Reference_: + OpenMP specification v4.5 (https://www.openmp.org), Section 4.8 + + +File: libgomp.info, Node: GOMP_CPU_AFFINITY, Next: GOMP_DEBUG, Prev: OMP_WAIT_POLICY, Up: Environment Variables + +3.16 'GOMP_CPU_AFFINITY' - Bind threads to specific CPUs +======================================================== + +_Description_: + Binds threads to specific CPUs. The variable should contain a + space-separated or comma-separated list of CPUs. This list may + contain different kinds of entries: either single CPU numbers in + any order, a range of CPUs (M-N) or a range with some stride + (M-N:S). CPU numbers are zero based. 
For example, + 'GOMP_CPU_AFFINITY="0 3 1-2 4-15:2"' will bind the initial thread + to CPU 0, the second to CPU 3, the third to CPU 1, the fourth to + CPU 2, the fifth to CPU 4, the sixth through tenth to CPUs 6, 8, + 10, 12, and 14 respectively and then start assigning back from the + beginning of the list. 'GOMP_CPU_AFFINITY=0' binds all threads to + CPU 0. + + There is no libgomp library routine to determine whether a CPU + affinity specification is in effect. As a workaround, + language-specific library functions, e.g., 'getenv' in C or + 'GET_ENVIRONMENT_VARIABLE' in Fortran, may be used to query the + setting of the 'GOMP_CPU_AFFINITY' environment variable. A defined + CPU affinity on startup cannot be changed or disabled during the + runtime of the application. + + If both 'GOMP_CPU_AFFINITY' and 'OMP_PROC_BIND' are set, + 'OMP_PROC_BIND' has a higher precedence. If neither has been set + and 'OMP_PROC_BIND' is unset, or when 'OMP_PROC_BIND' is set to + 'FALSE', the host system will handle the assignment of threads to + CPUs. + +_See also_: + *note OMP_PLACES::, *note OMP_PROC_BIND:: + + +File: libgomp.info, Node: GOMP_DEBUG, Next: GOMP_STACKSIZE, Prev: GOMP_CPU_AFFINITY, Up: Environment Variables + +3.17 'GOMP_DEBUG' - Enable debugging output +=========================================== + +_Description_: + Enable debugging output. The variable should be set to '0' + (disabled, also the default if not set), or '1' (enabled). + + If enabled, some debugging output will be printed during execution. + This is currently not specified in more detail, and subject to + change. + + +File: libgomp.info, Node: GOMP_STACKSIZE, Next: GOMP_SPINCOUNT, Prev: GOMP_DEBUG, Up: Environment Variables + +3.18 'GOMP_STACKSIZE' - Set default thread stack size +===================================================== + +_Description_: + Set the default thread stack size in kilobytes. This is different + from 'pthread_attr_setstacksize' which gets the number of bytes as + an argument. 
If the stack size cannot be set due to system
+     constraints, an error is reported and the initial stack size is
+     left unchanged.  If undefined, the stack size is system dependent.
+
+_See also_:
+     *note OMP_STACKSIZE::
+
+_Reference_:
+     GCC Patches Mailinglist
+     (https://gcc.gnu.org/ml/gcc-patches/2006-06/msg00493.html), GCC
+     Patches Mailinglist
+     (https://gcc.gnu.org/ml/gcc-patches/2006-06/msg00496.html)
+
+
+File: libgomp.info, Node: GOMP_SPINCOUNT, Next: GOMP_RTEMS_THREAD_POOLS, Prev: GOMP_STACKSIZE, Up: Environment Variables
+
+3.19 'GOMP_SPINCOUNT' - Set the busy-wait spin count
+====================================================
+
+_Description_:
+     Determines how long a thread waits actively consuming CPU
+     power before waiting passively without consuming CPU power.  The
+     value may be either 'INFINITE' or 'INFINITY' to always wait actively
+     or an integer which gives the number of spins of the busy-wait
+     loop.  The integer may optionally be followed by the following
+     suffixes acting as multiplication factors: 'k' (kilo, thousand),
+     'M' (mega, million), 'G' (giga, billion), or 'T' (tera, trillion).
+     If undefined, 0 is used when 'OMP_WAIT_POLICY' is 'PASSIVE',
+     300,000 is used when 'OMP_WAIT_POLICY' is undefined and 30 billion
+     is used when 'OMP_WAIT_POLICY' is 'ACTIVE'.  If there are more
+     OpenMP threads than available CPUs, 1000 and 100 spins are used for
+     'OMP_WAIT_POLICY' being 'ACTIVE' or undefined, respectively; unless
+     the 'GOMP_SPINCOUNT' is lower or 'OMP_WAIT_POLICY' is 'PASSIVE'.
+
+_See also_:
+     *note OMP_WAIT_POLICY::
+
+
+File: libgomp.info, Node: GOMP_RTEMS_THREAD_POOLS, Prev: GOMP_SPINCOUNT, Up: Environment Variables
+
+3.20 'GOMP_RTEMS_THREAD_POOLS' - Set the RTEMS specific thread pools
+====================================================================
+
+_Description_:
+     This environment variable is only used on the RTEMS real-time
+     operating system.  It determines the scheduler instance specific
+     thread pools.
The format for 'GOMP_RTEMS_THREAD_POOLS' is a list + of optional '<thread-pool-count>[$<priority>]@<scheduler-name>' + configurations separated by ':' where: + * '<thread-pool-count>' is the thread pool count for this + scheduler instance. + * '$<priority>' is an optional priority for the worker threads + of a thread pool according to 'pthread_setschedparam'. In + case a priority value is omitted, then a worker thread will + inherit the priority of the OpenMP master thread that created + it. The priority of the worker thread is not changed after + creation, even if a new OpenMP master thread using the worker + has a different priority. + * '@<scheduler-name>' is the scheduler instance name according + to the RTEMS application configuration. + In case no thread pool configuration is specified for a scheduler + instance, then each OpenMP master thread of this scheduler instance + will use its own dynamically allocated thread pool. To limit the + worker thread count of the thread pools, each OpenMP master thread + must call 'omp_set_num_threads'. +_Example_: + Let's suppose we have three scheduler instances 'IO', 'WRK0', and + 'WRK1' with 'GOMP_RTEMS_THREAD_POOLS' set to '"1@WRK0:3$4@WRK1"'. + Then there are no thread pool restrictions for scheduler instance + 'IO'. In the scheduler instance 'WRK0' there is one thread pool + available. Since no priority is specified for this scheduler + instance, the worker thread inherits the priority of the OpenMP + master thread that created it. In the scheduler instance 'WRK1' + there are three thread pools available and their worker threads run + at priority four. + + +File: libgomp.info, Node: Enabling OpenACC, Next: OpenACC Runtime Library Routines, Prev: Environment Variables, Up: Top + +4 Enabling OpenACC +****************** + +To activate the OpenACC extensions for C/C++ and Fortran, the +compile-time flag '-fopenacc' must be specified. 
This enables the +OpenACC directive '#pragma acc' in C/C++ and '!$acc' directives in free +form, 'c$acc', '*$acc' and '!$acc' directives in fixed form, '!$' +conditional compilation sentinels in free form and 'c$', '*$' and '!$' +sentinels in fixed form, for Fortran. The flag also arranges for +automatic linking of the OpenACC runtime library (*note OpenACC Runtime +Library Routines::). + + See <https://gcc.gnu.org/wiki/OpenACC> for more information. + + A complete description of all OpenACC directives accepted may be +found in the OpenACC (https://www.openacc.org) Application Programming +Interface manual, version 2.6. + + +File: libgomp.info, Node: OpenACC Runtime Library Routines, Next: OpenACC Environment Variables, Prev: Enabling OpenACC, Up: Top + +5 OpenACC Runtime Library Routines +********************************** + +The runtime routines described here are defined by section 3 of the +OpenACC specifications in version 2.6. They have C linkage, and do not +throw exceptions. Generally, they are available only for the host, with +the exception of 'acc_on_device', which is available for both the host +and the acceleration device. + +* Menu: + +* acc_get_num_devices:: Get number of devices for the given device + type. +* acc_set_device_type:: Set type of device accelerator to use. +* acc_get_device_type:: Get type of device accelerator to be used. +* acc_set_device_num:: Set device number to use. +* acc_get_device_num:: Get device number to be used. +* acc_get_property:: Get device property. +* acc_async_test:: Tests for completion of a specific asynchronous + operation. +* acc_async_test_all:: Tests for completion of all asynchronous + operations. +* acc_wait:: Wait for completion of a specific asynchronous + operation. +* acc_wait_all:: Waits for completion of all asynchronous + operations. +* acc_wait_all_async:: Wait for completion of all asynchronous + operations. +* acc_wait_async:: Wait for completion of asynchronous operations. 
+* acc_init:: Initialize runtime for a specific device type. +* acc_shutdown:: Shuts down the runtime for a specific device + type. +* acc_on_device:: Whether executing on a particular device +* acc_malloc:: Allocate device memory. +* acc_free:: Free device memory. +* acc_copyin:: Allocate device memory and copy host memory to + it. +* acc_present_or_copyin:: If the data is not present on the device, + allocate device memory and copy from host + memory. +* acc_create:: Allocate device memory and map it to host + memory. +* acc_present_or_create:: If the data is not present on the device, + allocate device memory and map it to host + memory. +* acc_copyout:: Copy device memory to host memory. +* acc_delete:: Free device memory. +* acc_update_device:: Update device memory from mapped host memory. +* acc_update_self:: Update host memory from mapped device memory. +* acc_map_data:: Map previously allocated device memory to host + memory. +* acc_unmap_data:: Unmap device memory from host memory. +* acc_deviceptr:: Get device pointer associated with specific + host address. +* acc_hostptr:: Get host pointer associated with specific + device address. +* acc_is_present:: Indicate whether host variable / array is + present on device. +* acc_memcpy_to_device:: Copy host memory to device memory. +* acc_memcpy_from_device:: Copy device memory to host memory. +* acc_attach:: Let device pointer point to device-pointer target. +* acc_detach:: Let device pointer point to host-pointer target. + +API routines for target platforms. + +* acc_get_current_cuda_device:: Get CUDA device handle. +* acc_get_current_cuda_context::Get CUDA context handle. +* acc_get_cuda_stream:: Get CUDA stream handle. +* acc_set_cuda_stream:: Set CUDA stream handle. + +API routines for the OpenACC Profiling Interface. + +* acc_prof_register:: Register callbacks. +* acc_prof_unregister:: Unregister callbacks. +* acc_prof_lookup:: Obtain inquiry functions. +* acc_register_library:: Library registration. 
+ + +File: libgomp.info, Node: acc_get_num_devices, Next: acc_set_device_type, Up: OpenACC Runtime Library Routines + +5.1 'acc_get_num_devices' - Get number of devices for given device type +======================================================================= + +_Description_ + This function returns a value indicating the number of devices + available for the device type specified in DEVICETYPE. + +_C/C++_: + _Prototype_: 'int acc_get_num_devices(acc_device_t devicetype);' + +_Fortran_: + _Interface_: 'integer function acc_get_num_devices(devicetype)' + 'integer(kind=acc_device_kind) devicetype' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.1. + + +File: libgomp.info, Node: acc_set_device_type, Next: acc_get_device_type, Prev: acc_get_num_devices, Up: OpenACC Runtime Library Routines + +5.2 'acc_set_device_type' - Set type of device accelerator to use. +================================================================== + +_Description_ + This function indicates to the runtime library which device type, + specified in DEVICETYPE, to use when executing a parallel or + kernels region. + +_C/C++_: + _Prototype_: 'acc_set_device_type(acc_device_t devicetype);' + +_Fortran_: + _Interface_: 'subroutine acc_set_device_type(devicetype)' + 'integer(kind=acc_device_kind) devicetype' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.2. + + +File: libgomp.info, Node: acc_get_device_type, Next: acc_set_device_num, Prev: acc_set_device_type, Up: OpenACC Runtime Library Routines + +5.3 'acc_get_device_type' - Get type of device accelerator to be used. +====================================================================== + +_Description_ + This function returns what device type will be used when executing + a parallel or kernels region. 
+ + This function returns 'acc_device_none' if 'acc_get_device_type' is + called from 'acc_ev_device_init_start', 'acc_ev_device_init_end' + callbacks of the OpenACC Profiling Interface (*note OpenACC + Profiling Interface::), that is, if the device is currently being + initialized. + +_C/C++_: + _Prototype_: 'acc_device_t acc_get_device_type(void);' + +_Fortran_: + _Interface_: 'function acc_get_device_type(void)' + 'integer(kind=acc_device_kind) acc_get_device_type' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.3. + + +File: libgomp.info, Node: acc_set_device_num, Next: acc_get_device_num, Prev: acc_get_device_type, Up: OpenACC Runtime Library Routines + +5.4 'acc_set_device_num' - Set device number to use. +==================================================== + +_Description_ + This function will indicate to the runtime which device number, + specified by DEVICENUM, associated with the specified device type + DEVICETYPE. + +_C/C++_: + _Prototype_: 'acc_set_device_num(int devicenum, acc_device_t + devicetype);' + +_Fortran_: + _Interface_: 'subroutine acc_set_device_num(devicenum, devicetype)' + 'integer devicenum' + 'integer(kind=acc_device_kind) devicetype' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.4. + + +File: libgomp.info, Node: acc_get_device_num, Next: acc_get_property, Prev: acc_set_device_num, Up: OpenACC Runtime Library Routines + +5.5 'acc_get_device_num' - Get device number to be used. +======================================================== + +_Description_ + This function returns which device number associated with the + specified device type DEVICETYPE, will be used when executing a + parallel or kernels region. 
+ +_C/C++_: + _Prototype_: 'int acc_get_device_num(acc_device_t devicetype);' + +_Fortran_: + _Interface_: 'function acc_get_device_num(devicetype)' + 'integer(kind=acc_device_kind) devicetype' + 'integer acc_get_device_num' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.5. + + +File: libgomp.info, Node: acc_get_property, Next: acc_async_test, Prev: acc_get_device_num, Up: OpenACC Runtime Library Routines + +5.6 'acc_get_property' - Get device property. +============================================= + +_Description_ + These routines return the value of the specified PROPERTY for the + device being queried according to DEVICENUM and DEVICETYPE. + Integer-valued and string-valued properties are returned by + 'acc_get_property' and 'acc_get_property_string' respectively. The + Fortran 'acc_get_property_string' subroutine returns the string + retrieved in its fourth argument while the remaining entry points + are functions, which pass the return value as their result. + + Note for Fortran, only: the OpenACC technical committee corrected + and, hence, modified the interface introduced in OpenACC 2.6. The + kind-value parameter 'acc_device_property' has been renamed to + 'acc_device_property_kind' for consistency and the return type of + the 'acc_get_property' function is now a 'c_size_t' integer instead + of a 'acc_device_property' integer. The parameter + 'acc_device_property' will continue to be provided, but might be + removed in a future version of GCC. 
+ +_C/C++_: + _Prototype_: 'size_t acc_get_property(int devicenum, acc_device_t + devicetype, acc_device_property_t property);' + _Prototype_: 'const char *acc_get_property_string(int devicenum, + acc_device_t devicetype, acc_device_property_t + property);' + +_Fortran_: + _Interface_: 'function acc_get_property(devicenum, devicetype, + property)' + _Interface_: 'subroutine acc_get_property_string(devicenum, + devicetype, property, string)' + 'use ISO_C_Binding, only: c_size_t' + 'integer devicenum' + 'integer(kind=acc_device_kind) devicetype' + 'integer(kind=acc_device_property_kind) property' + 'integer(kind=c_size_t) acc_get_property' + 'character(*) string' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.6. + + +File: libgomp.info, Node: acc_async_test, Next: acc_async_test_all, Prev: acc_get_property, Up: OpenACC Runtime Library Routines + +5.7 'acc_async_test' - Test for completion of a specific asynchronous operation. +================================================================================ + +_Description_ + This function tests for completion of the asynchronous operation + specified in ARG. In C/C++, a non-zero value will be returned to + indicate the specified asynchronous operation has completed. While + Fortran will return a 'true'. If the asynchronous operation has + not completed, C/C++ returns a zero and Fortran returns a 'false'. + +_C/C++_: + _Prototype_: 'int acc_async_test(int arg);' + +_Fortran_: + _Interface_: 'function acc_async_test(arg)' + 'integer(kind=acc_handle_kind) arg' + 'logical acc_async_test' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.9. + + +File: libgomp.info, Node: acc_async_test_all, Next: acc_wait, Prev: acc_async_test, Up: OpenACC Runtime Library Routines + +5.8 'acc_async_test_all' - Tests for completion of all asynchronous operations. 
+=============================================================================== + +_Description_ + This function tests for completion of all asynchronous operations. + In C/C++, a non-zero value will be returned to indicate all + asynchronous operations have completed. While Fortran will return + a 'true'. If any asynchronous operation has not completed, C/C++ + returns a zero and Fortran returns a 'false'. + +_C/C++_: + _Prototype_: 'int acc_async_test_all(void);' + +_Fortran_: + _Interface_: 'function acc_async_test_all()' + 'logical acc_async_test_all' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.10. + + +File: libgomp.info, Node: acc_wait, Next: acc_wait_all, Prev: acc_async_test_all, Up: OpenACC Runtime Library Routines + +5.9 'acc_wait' - Wait for completion of a specific asynchronous operation. +========================================================================== + +_Description_ + This function waits for completion of the asynchronous operation + specified in ARG. + +_C/C++_: + _Prototype_: 'acc_wait(arg);' + _Prototype 'acc_async_wait(arg);' + (OpenACC 1.0 + compatibility)_: + +_Fortran_: + _Interface_: 'subroutine acc_wait(arg)' + 'integer(acc_handle_kind) arg' + _Interface 'subroutine acc_async_wait(arg)' + (OpenACC 1.0 + compatibility)_: + 'integer(acc_handle_kind) arg' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.11. + + +File: libgomp.info, Node: acc_wait_all, Next: acc_wait_all_async, Prev: acc_wait, Up: OpenACC Runtime Library Routines + +5.10 'acc_wait_all' - Waits for completion of all asynchronous operations. +========================================================================== + +_Description_ + This function waits for the completion of all asynchronous + operations. 
+ +_C/C++_: + _Prototype_: 'acc_wait_all(void);' + _Prototype 'acc_async_wait_all(void);' + (OpenACC 1.0 + compatibility)_: + +_Fortran_: + _Interface_: 'subroutine acc_wait_all()' + _Interface 'subroutine acc_async_wait_all()' + (OpenACC 1.0 + compatibility)_: + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.13. + + +File: libgomp.info, Node: acc_wait_all_async, Next: acc_wait_async, Prev: acc_wait_all, Up: OpenACC Runtime Library Routines + +5.11 'acc_wait_all_async' - Wait for completion of all asynchronous operations. +=============================================================================== + +_Description_ + This function enqueues a wait operation on the queue ASYNC for any + and all asynchronous operations that have been previously enqueued + on any queue. + +_C/C++_: + _Prototype_: 'acc_wait_all_async(int async);' + +_Fortran_: + _Interface_: 'subroutine acc_wait_all_async(async)' + 'integer(acc_handle_kind) async' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.14. + + +File: libgomp.info, Node: acc_wait_async, Next: acc_init, Prev: acc_wait_all_async, Up: OpenACC Runtime Library Routines + +5.12 'acc_wait_async' - Wait for completion of asynchronous operations. +======================================================================= + +_Description_ + This function enqueues a wait operation on queue ASYNC for any and + all asynchronous operations enqueued on queue ARG. + +_C/C++_: + _Prototype_: 'acc_wait_async(int arg, int async);' + +_Fortran_: + _Interface_: 'subroutine acc_wait_async(arg, async)' + 'integer(acc_handle_kind) arg, async' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.12. + + +File: libgomp.info, Node: acc_init, Next: acc_shutdown, Prev: acc_wait_async, Up: OpenACC Runtime Library Routines + +5.13 'acc_init' - Initialize runtime for a specific device type. 
+================================================================ + +_Description_ + This function initializes the runtime for the device type specified + in DEVICETYPE. + +_C/C++_: + _Prototype_: 'acc_init(acc_device_t devicetype);' + +_Fortran_: + _Interface_: 'subroutine acc_init(devicetype)' + 'integer(acc_device_kind) devicetype' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.7. + + +File: libgomp.info, Node: acc_shutdown, Next: acc_on_device, Prev: acc_init, Up: OpenACC Runtime Library Routines + +5.14 'acc_shutdown' - Shuts down the runtime for a specific device type. +======================================================================== + +_Description_ + This function shuts down the runtime for the device type specified + in DEVICETYPE. + +_C/C++_: + _Prototype_: 'acc_shutdown(acc_device_t devicetype);' + +_Fortran_: + _Interface_: 'subroutine acc_shutdown(devicetype)' + 'integer(acc_device_kind) devicetype' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.8. + + +File: libgomp.info, Node: acc_on_device, Next: acc_malloc, Prev: acc_shutdown, Up: OpenACC Runtime Library Routines + +5.15 'acc_on_device' - Whether executing on a particular device +=============================================================== + +_Description_: + This function returns whether the program is executing on a + particular device specified in DEVICETYPE. In C/C++ a non-zero + value is returned to indicate the device is executing on the + specified device type. In Fortran, 'true' will be returned. If + the program is not executing on the specified device type C/C++ + will return a zero, while Fortran will return 'false'. 
+ +_C/C++_: + _Prototype_: 'acc_on_device(acc_device_t devicetype);' + +_Fortran_: + _Interface_: 'function acc_on_device(devicetype)' + 'integer(acc_device_kind) devicetype' + 'logical acc_on_device' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.17. + + +File: libgomp.info, Node: acc_malloc, Next: acc_free, Prev: acc_on_device, Up: OpenACC Runtime Library Routines + +5.16 'acc_malloc' - Allocate device memory. +=========================================== + +_Description_ + This function allocates LEN bytes of device memory. It returns the + device address of the allocated memory. + +_C/C++_: + _Prototype_: 'd_void* acc_malloc(size_t len);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.18. + + +File: libgomp.info, Node: acc_free, Next: acc_copyin, Prev: acc_malloc, Up: OpenACC Runtime Library Routines + +5.17 'acc_free' - Free device memory. +===================================== + +_Description_ + Free previously allocated device memory at the device address 'a'. + +_C/C++_: + _Prototype_: 'acc_free(d_void *a);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.19. + + +File: libgomp.info, Node: acc_copyin, Next: acc_present_or_copyin, Prev: acc_free, Up: OpenACC Runtime Library Routines + +5.18 'acc_copyin' - Allocate device memory and copy host memory to it. +====================================================================== + +_Description_ + In C/C++, this function allocates LEN bytes of device memory and + maps it to the specified host address in A. The device address of + the newly allocated device memory is returned. + + In Fortran, two (2) forms are supported. In the first form, A + specifies a contiguous array section. The second form A specifies + a variable or array element and LEN specifies the length in bytes. 
+ +_C/C++_: + _Prototype_: 'void *acc_copyin(h_void *a, size_t len);' + _Prototype_: 'void *acc_copyin_async(h_void *a, size_t len, int + async);' + +_Fortran_: + _Interface_: 'subroutine acc_copyin(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_copyin(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + _Interface_: 'subroutine acc_copyin_async(a, async)' + 'type, dimension(:[,:]...) :: a' + 'integer(acc_handle_kind) :: async' + _Interface_: 'subroutine acc_copyin_async(a, len, async)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + 'integer(acc_handle_kind) :: async' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.20. + + +File: libgomp.info, Node: acc_present_or_copyin, Next: acc_create, Prev: acc_copyin, Up: OpenACC Runtime Library Routines + +5.19 'acc_present_or_copyin' - If the data is not present on the device, allocate device memory and copy from host memory. +========================================================================================================================== + +_Description_ + This function tests if the host data specified by A and of length + LEN is present or not. If it is not present, then device memory + will be allocated and the host memory copied. The device address + of the newly allocated device memory is returned. + + In Fortran, two (2) forms are supported. In the first form, A + specifies a contiguous array section. The second form A specifies + a variable or array element and LEN specifies the length in bytes. + + Note that 'acc_present_or_copyin' and 'acc_pcopyin' exist for + backward compatibility with OpenACC 2.0; use *note acc_copyin:: + instead. + +_C/C++_: + _Prototype_: 'void *acc_present_or_copyin(h_void *a, size_t len);' + _Prototype_: 'void *acc_pcopyin(h_void *a, size_t len);' + +_Fortran_: + _Interface_: 'subroutine acc_present_or_copyin(a)' + 'type, dimension(:[,:]...) 
:: a' + _Interface_: 'subroutine acc_present_or_copyin(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + _Interface_: 'subroutine acc_pcopyin(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_pcopyin(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.20. + + +File: libgomp.info, Node: acc_create, Next: acc_present_or_create, Prev: acc_present_or_copyin, Up: OpenACC Runtime Library Routines + +5.20 'acc_create' - Allocate device memory and map it to host memory. +===================================================================== + +_Description_ + This function allocates device memory and maps it to host memory + specified by the host address A with a length of LEN bytes. In + C/C++, the function returns the device address of the allocated + device memory. + + In Fortran, two (2) forms are supported. In the first form, A + specifies a contiguous array section. The second form A specifies + a variable or array element and LEN specifies the length in bytes. + +_C/C++_: + _Prototype_: 'void *acc_create(h_void *a, size_t len);' + _Prototype_: 'void *acc_create_async(h_void *a, size_t len, int + async);' + +_Fortran_: + _Interface_: 'subroutine acc_create(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_create(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + _Interface_: 'subroutine acc_create_async(a, async)' + 'type, dimension(:[,:]...) :: a' + 'integer(acc_handle_kind) :: async' + _Interface_: 'subroutine acc_create_async(a, len, async)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + 'integer(acc_handle_kind) :: async' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.21. 
+ + +File: libgomp.info, Node: acc_present_or_create, Next: acc_copyout, Prev: acc_create, Up: OpenACC Runtime Library Routines + +5.21 'acc_present_or_create' - If the data is not present on the device, allocate device memory and map it to host memory. +========================================================================================================================== + +_Description_ + This function tests if the host data specified by A and of length + LEN is present or not. If it is not present, then device memory + will be allocated and mapped to host memory. In C/C++, the device + address of the newly allocated device memory is returned. + + In Fortran, two (2) forms are supported. In the first form, A + specifies a contiguous array section. The second form A specifies + a variable or array element and LEN specifies the length in bytes. + + Note that 'acc_present_or_create' and 'acc_pcreate' exist for + backward compatibility with OpenACC 2.0; use *note acc_create:: + instead. + +_C/C++_: + _Prototype_: 'void *acc_present_or_create(h_void *a, size_t len)' + _Prototype_: 'void *acc_pcreate(h_void *a, size_t len)' + +_Fortran_: + _Interface_: 'subroutine acc_present_or_create(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_present_or_create(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + _Interface_: 'subroutine acc_pcreate(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_pcreate(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.21. + + +File: libgomp.info, Node: acc_copyout, Next: acc_delete, Prev: acc_present_or_create, Up: OpenACC Runtime Library Routines + +5.22 'acc_copyout' - Copy device memory to host memory. 
+======================================================= + +_Description_ + This function copies mapped device memory to host memory which is + specified by host address A for a length LEN bytes in C/C++. + + In Fortran, two (2) forms are supported. In the first form, A + specifies a contiguous array section. The second form A specifies + a variable or array element and LEN specifies the length in bytes. + +_C/C++_: + _Prototype_: 'acc_copyout(h_void *a, size_t len);' + _Prototype_: 'acc_copyout_async(h_void *a, size_t len, int async);' + _Prototype_: 'acc_copyout_finalize(h_void *a, size_t len);' + _Prototype_: 'acc_copyout_finalize_async(h_void *a, size_t len, int + async);' + +_Fortran_: + _Interface_: 'subroutine acc_copyout(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_copyout(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + _Interface_: 'subroutine acc_copyout_async(a, async)' + 'type, dimension(:[,:]...) :: a' + 'integer(acc_handle_kind) :: async' + _Interface_: 'subroutine acc_copyout_async(a, len, async)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + 'integer(acc_handle_kind) :: async' + _Interface_: 'subroutine acc_copyout_finalize(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_copyout_finalize(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + _Interface_: 'subroutine acc_copyout_finalize_async(a, async)' + 'type, dimension(:[,:]...) :: a' + 'integer(acc_handle_kind) :: async' + _Interface_: 'subroutine acc_copyout_finalize_async(a, len, async)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + 'integer(acc_handle_kind) :: async' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.22. + + +File: libgomp.info, Node: acc_delete, Next: acc_update_device, Prev: acc_copyout, Up: OpenACC Runtime Library Routines + +5.23 'acc_delete' - Free device memory. 
+======================================= + +_Description_ + This function frees previously allocated device memory specified by + the device address A and the length of LEN bytes. + + In Fortran, two (2) forms are supported. In the first form, A + specifies a contiguous array section. The second form A specifies + a variable or array element and LEN specifies the length in bytes. + +_C/C++_: + _Prototype_: 'acc_delete(h_void *a, size_t len);' + _Prototype_: 'acc_delete_async(h_void *a, size_t len, int async);' + _Prototype_: 'acc_delete_finalize(h_void *a, size_t len);' + _Prototype_: 'acc_delete_finalize_async(h_void *a, size_t len, int + async);' + +_Fortran_: + _Interface_: 'subroutine acc_delete(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_delete(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + _Interface_: 'subroutine acc_delete_async(a, async)' + 'type, dimension(:[,:]...) :: a' + 'integer(acc_handle_kind) :: async' + _Interface_: 'subroutine acc_delete_async(a, len, async)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + 'integer(acc_handle_kind) :: async' + _Interface_: 'subroutine acc_delete_finalize(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_delete_finalize(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + _Interface_: 'subroutine acc_delete_async_finalize(a, async)' + 'type, dimension(:[,:]...) :: a' + 'integer(acc_handle_kind) :: async' + _Interface_: 'subroutine acc_delete_async_finalize(a, len, async)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + 'integer(acc_handle_kind) :: async' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.23. + + +File: libgomp.info, Node: acc_update_device, Next: acc_update_self, Prev: acc_delete, Up: OpenACC Runtime Library Routines + +5.24 'acc_update_device' - Update device memory from mapped host memory. 
+======================================================================== + +_Description_ + This function updates the device copy from the previously mapped + host memory. The host memory is specified with the host address A + and a length of LEN bytes. + + In Fortran, two (2) forms are supported. In the first form, A + specifies a contiguous array section. The second form A specifies + a variable or array element and LEN specifies the length in bytes. + +_C/C++_: + _Prototype_: 'acc_update_device(h_void *a, size_t len);' + _Prototype_: 'acc_update_device(h_void *a, size_t len, async);' + +_Fortran_: + _Interface_: 'subroutine acc_update_device(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_update_device(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + _Interface_: 'subroutine acc_update_device_async(a, async)' + 'type, dimension(:[,:]...) :: a' + 'integer(acc_handle_kind) :: async' + _Interface_: 'subroutine acc_update_device_async(a, len, async)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + 'integer(acc_handle_kind) :: async' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.24. + + +File: libgomp.info, Node: acc_update_self, Next: acc_map_data, Prev: acc_update_device, Up: OpenACC Runtime Library Routines + +5.25 'acc_update_self' - Update host memory from mapped device memory. +====================================================================== + +_Description_ + This function updates the host copy from the previously mapped + device memory. The host memory is specified with the host address + A and a length of LEN bytes. + + In Fortran, two (2) forms are supported. In the first form, A + specifies a contiguous array section. The second form A specifies + a variable or array element and LEN specifies the length in bytes. 
+ +_C/C++_: + _Prototype_: 'acc_update_self(h_void *a, size_t len);' + _Prototype_: 'acc_update_self_async(h_void *a, size_t len, int + async);' + +_Fortran_: + _Interface_: 'subroutine acc_update_self(a)' + 'type, dimension(:[,:]...) :: a' + _Interface_: 'subroutine acc_update_self(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + _Interface_: 'subroutine acc_update_self_async(a, async)' + 'type, dimension(:[,:]...) :: a' + 'integer(acc_handle_kind) :: async' + _Interface_: 'subroutine acc_update_self_async(a, len, async)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + 'integer(acc_handle_kind) :: async' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.25. + + +File: libgomp.info, Node: acc_map_data, Next: acc_unmap_data, Prev: acc_update_self, Up: OpenACC Runtime Library Routines + +5.26 'acc_map_data' - Map previously allocated device memory to host memory. +============================================================================ + +_Description_ + This function maps previously allocated device and host memory. + The device memory is specified with the device address D. The host + memory is specified with the host address H and a length of LEN. + +_C/C++_: + _Prototype_: 'acc_map_data(h_void *h, d_void *d, size_t len);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.26. + + +File: libgomp.info, Node: acc_unmap_data, Next: acc_deviceptr, Prev: acc_map_data, Up: OpenACC Runtime Library Routines + +5.27 'acc_unmap_data' - Unmap device memory from host memory. +============================================================= + +_Description_ + This function unmaps previously mapped device and host memory. The + latter specified by H. + +_C/C++_: + _Prototype_: 'acc_unmap_data(h_void *h);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.27. 
+ + +File: libgomp.info, Node: acc_deviceptr, Next: acc_hostptr, Prev: acc_unmap_data, Up: OpenACC Runtime Library Routines + +5.28 'acc_deviceptr' - Get device pointer associated with specific host address. +================================================================================ + +_Description_ + This function returns the device address that has been mapped to + the host address specified by H. + +_C/C++_: + _Prototype_: 'void *acc_deviceptr(h_void *h);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.28. + + +File: libgomp.info, Node: acc_hostptr, Next: acc_is_present, Prev: acc_deviceptr, Up: OpenACC Runtime Library Routines + +5.29 'acc_hostptr' - Get host pointer associated with specific device address. +============================================================================== + +_Description_ + This function returns the host address that has been mapped to the + device address specified by D. + +_C/C++_: + _Prototype_: 'void *acc_hostptr(d_void *d);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.29. + + +File: libgomp.info, Node: acc_is_present, Next: acc_memcpy_to_device, Prev: acc_hostptr, Up: OpenACC Runtime Library Routines + +5.30 'acc_is_present' - Indicate whether host variable / array is present on device. +==================================================================================== + +_Description_ + This function indicates whether the specified host address in A and + a length of LEN bytes is present on the device. In C/C++, a + non-zero value is returned to indicate the presence of the mapped + memory on the device. A zero is returned to indicate the memory is + not mapped on the device. + + In Fortran, two (2) forms are supported. In the first form, A + specifies a contiguous array section. The second form A specifies + a variable or array element and LEN specifies the length in bytes. 
+ If the host memory is mapped to device memory, then a 'true' is + returned. Otherwise, a 'false' is returned to indicate the mapped + memory is not present. + +_C/C++_: + _Prototype_: 'int acc_is_present(h_void *a, size_t len);' + +_Fortran_: + _Interface_: 'function acc_is_present(a)' + 'type, dimension(:[,:]...) :: a' + 'logical acc_is_present' + _Interface_: 'function acc_is_present(a, len)' + 'type, dimension(:[,:]...) :: a' + 'integer len' + 'logical acc_is_present' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.30. + + +File: libgomp.info, Node: acc_memcpy_to_device, Next: acc_memcpy_from_device, Prev: acc_is_present, Up: OpenACC Runtime Library Routines + +5.31 'acc_memcpy_to_device' - Copy host memory to device memory. +================================================================ + +_Description_ + This function copies host memory specified by host address of SRC + to device memory specified by the device address DEST for a length + of BYTES bytes. + +_C/C++_: + _Prototype_: 'acc_memcpy_to_device(d_void *dest, h_void *src, size_t + bytes);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.31. + + +File: libgomp.info, Node: acc_memcpy_from_device, Next: acc_attach, Prev: acc_memcpy_to_device, Up: OpenACC Runtime Library Routines + +5.32 'acc_memcpy_from_device' - Copy device memory to host memory. +================================================================== + +_Description_ + This function copies host memory specified by host address of SRC + from device memory specified by the device address DEST for a + length of BYTES bytes. + +_C/C++_: + _Prototype_: 'acc_memcpy_from_device(d_void *dest, h_void *src, + size_t bytes);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.32. 
+ + +File: libgomp.info, Node: acc_attach, Next: acc_detach, Prev: acc_memcpy_from_device, Up: OpenACC Runtime Library Routines + +5.33 'acc_attach' - Let device pointer point to device-pointer target. +====================================================================== + +_Description_ + This function updates a pointer on the device from pointing to a + host-pointer address to pointing to the corresponding device data. + +_C/C++_: + _Prototype_: 'acc_attach(h_void **ptr);' + _Prototype_: 'acc_attach_async(h_void **ptr, int async);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.34. + + +File: libgomp.info, Node: acc_detach, Next: acc_get_current_cuda_device, Prev: acc_attach, Up: OpenACC Runtime Library Routines + +5.34 'acc_detach' - Let device pointer point to host-pointer target. +==================================================================== + +_Description_ + This function updates a pointer on the device from pointing to a + device-pointer address to pointing to the corresponding host data. + +_C/C++_: + _Prototype_: 'acc_detach(h_void **ptr);' + _Prototype_: 'acc_detach_async(h_void **ptr, int async);' + _Prototype_: 'acc_detach_finalize(h_void **ptr);' + _Prototype_: 'acc_detach_finalize_async(h_void **ptr, int async);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + 3.2.35. + + +File: libgomp.info, Node: acc_get_current_cuda_device, Next: acc_get_current_cuda_context, Prev: acc_detach, Up: OpenACC Runtime Library Routines + +5.35 'acc_get_current_cuda_device' - Get CUDA device handle. +============================================================ + +_Description_ + This function returns the CUDA device handle. This handle is the + same as used by the CUDA Runtime or Driver API's. + +_C/C++_: + _Prototype_: 'void *acc_get_current_cuda_device(void);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + A.2.1.1. 
+ + +File: libgomp.info, Node: acc_get_current_cuda_context, Next: acc_get_cuda_stream, Prev: acc_get_current_cuda_device, Up: OpenACC Runtime Library Routines + +5.36 'acc_get_current_cuda_context' - Get CUDA context handle. +============================================================== + +_Description_ + This function returns the CUDA context handle. This handle is the + same as used by the CUDA Runtime or Driver API's. + +_C/C++_: + _Prototype_: 'void *acc_get_current_cuda_context(void);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + A.2.1.2. + + +File: libgomp.info, Node: acc_get_cuda_stream, Next: acc_set_cuda_stream, Prev: acc_get_current_cuda_context, Up: OpenACC Runtime Library Routines + +5.37 'acc_get_cuda_stream' - Get CUDA stream handle. +==================================================== + +_Description_ + This function returns the CUDA stream handle for the queue ASYNC. + This handle is the same as used by the CUDA Runtime or Driver + API's. + +_C/C++_: + _Prototype_: 'void *acc_get_cuda_stream(int async);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + A.2.1.3. + + +File: libgomp.info, Node: acc_set_cuda_stream, Next: acc_prof_register, Prev: acc_get_cuda_stream, Up: OpenACC Runtime Library Routines + +5.38 'acc_set_cuda_stream' - Set CUDA stream handle. +==================================================== + +_Description_ + This function associates the stream handle specified by STREAM with + the queue ASYNC. + + This cannot be used to change the stream handle associated with + 'acc_async_sync'. + + The return value is not specified. + +_C/C++_: + _Prototype_: 'int acc_set_cuda_stream(int async, void *stream);' + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section + A.2.1.4. 
+ + +File: libgomp.info, Node: acc_prof_register, Next: acc_prof_unregister, Prev: acc_set_cuda_stream, Up: OpenACC Runtime Library Routines + +5.39 'acc_prof_register' - Register callbacks. +============================================== + +_Description_: + This function registers callbacks. + +_C/C++_: + _Prototype_: 'void acc_prof_register (acc_event_t, acc_prof_callback, + acc_register_t);' + +_See also_: + *note OpenACC Profiling Interface:: + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section 5.3. + + +File: libgomp.info, Node: acc_prof_unregister, Next: acc_prof_lookup, Prev: acc_prof_register, Up: OpenACC Runtime Library Routines + +5.40 'acc_prof_unregister' - Unregister callbacks. +================================================== + +_Description_: + This function unregisters callbacks. + +_C/C++_: + _Prototype_: 'void acc_prof_unregister (acc_event_t, + acc_prof_callback, acc_register_t);' + +_See also_: + *note OpenACC Profiling Interface:: + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section 5.3. + + +File: libgomp.info, Node: acc_prof_lookup, Next: acc_register_library, Prev: acc_prof_unregister, Up: OpenACC Runtime Library Routines + +5.41 'acc_prof_lookup' - Obtain inquiry functions. +================================================== + +_Description_: + Function to obtain inquiry functions. + +_C/C++_: + _Prototype_: 'acc_query_fn acc_prof_lookup (const char *);' + +_See also_: + *note OpenACC Profiling Interface:: + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section 5.3. + + +File: libgomp.info, Node: acc_register_library, Prev: acc_prof_lookup, Up: OpenACC Runtime Library Routines + +5.42 'acc_register_library' - Library registration. +=================================================== + +_Description_: + Function for library registration. 
+ +_C/C++_: + _Prototype_: 'void acc_register_library (acc_prof_reg, acc_prof_reg, + acc_prof_lookup_func);' + +_See also_: + *note OpenACC Profiling Interface::, *note ACC_PROFLIB:: + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section 5.3. + + +File: libgomp.info, Node: OpenACC Environment Variables, Next: CUDA Streams Usage, Prev: OpenACC Runtime Library Routines, Up: Top + +6 OpenACC Environment Variables +******************************* + +The variables 'ACC_DEVICE_TYPE' and 'ACC_DEVICE_NUM' are defined by +section 4 of the OpenACC specification in version 2.0. The variable +'ACC_PROFLIB' is defined by section 4 of the OpenACC specification in +version 2.6. The variable 'GCC_ACC_NOTIFY' is used for diagnostic +purposes. + +* Menu: + +* ACC_DEVICE_TYPE:: +* ACC_DEVICE_NUM:: +* ACC_PROFLIB:: +* GCC_ACC_NOTIFY:: + + +File: libgomp.info, Node: ACC_DEVICE_TYPE, Next: ACC_DEVICE_NUM, Up: OpenACC Environment Variables + +6.1 'ACC_DEVICE_TYPE' +===================== + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section 4.1. + + +File: libgomp.info, Node: ACC_DEVICE_NUM, Next: ACC_PROFLIB, Prev: ACC_DEVICE_TYPE, Up: OpenACC Environment Variables + +6.2 'ACC_DEVICE_NUM' +==================== + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section 4.2. + + +File: libgomp.info, Node: ACC_PROFLIB, Next: GCC_ACC_NOTIFY, Prev: ACC_DEVICE_NUM, Up: OpenACC Environment Variables + +6.3 'ACC_PROFLIB' +================= + +_See also_: + *note acc_register_library::, *note OpenACC Profiling Interface:: + +_Reference_: + OpenACC specification v2.6 (https://www.openacc.org), section 4.3. + + +File: libgomp.info, Node: GCC_ACC_NOTIFY, Prev: ACC_PROFLIB, Up: OpenACC Environment Variables + +6.4 'GCC_ACC_NOTIFY' +==================== + +_Description_: + Print debug information pertaining to the accelerator. 
+ + +File: libgomp.info, Node: CUDA Streams Usage, Next: OpenACC Library Interoperability, Prev: OpenACC Environment Variables, Up: Top + +7 CUDA Streams Usage +******************** + +This applies to the 'nvptx' plugin only. + + The library provides elements that perform asynchronous movement of +data and asynchronous operation of computing constructs. This +asynchronous functionality is implemented by making use of CUDA +streams(1). + + The primary means by that the asynchronous functionality is accessed +is through the use of those OpenACC directives which make use of the +'async' and 'wait' clauses. When the 'async' clause is first used with +a directive, it creates a CUDA stream. If an 'async-argument' is used +with the 'async' clause, then the stream is associated with the +specified 'async-argument'. + + Following the creation of an association between a CUDA stream and +the 'async-argument' of an 'async' clause, both the 'wait' clause and +the 'wait' directive can be used. When either the clause or directive +is used after stream creation, it creates a rendezvous point whereby +execution waits until all operations associated with the +'async-argument', that is, stream, have completed. + + Normally, the management of the streams that are created as a result +of using the 'async' clause, is done without any intervention by the +caller. This implies the association between the 'async-argument' and +the CUDA stream will be maintained for the lifetime of the program. +However, this association can be changed through the use of the library +function 'acc_set_cuda_stream'. When the function 'acc_set_cuda_stream' +is called, the CUDA stream that was originally associated with the +'async' clause will be destroyed. Caution should be taken when changing +the association as subsequent references to the 'async-argument' refer +to a different CUDA stream. 
+ + ---------- Footnotes ---------- + + (1) See "Stream Management" in "CUDA Driver API", TRM-06703-001, +Version 5.5, for additional information + + +File: libgomp.info, Node: OpenACC Library Interoperability, Next: OpenACC Profiling Interface, Prev: CUDA Streams Usage, Up: Top + +8 OpenACC Library Interoperability +********************************** + +8.1 Introduction +================ + +The OpenACC library uses the CUDA Driver API, and may interact with +programs that use the Runtime library directly, or another library based +on the Runtime library, e.g., CUBLAS(1). This chapter describes the use +cases and what changes are required in order to use both the OpenACC +library and the CUBLAS and Runtime libraries within a program. + +8.2 First invocation: NVIDIA CUBLAS library API +=============================================== + +In this first use case (see below), a function in the CUBLAS library is +called prior to any of the functions in the OpenACC library. More +specifically, the function 'cublasCreate()'. + + When invoked, the function initializes the library and allocates the +hardware resources on the host and the device on behalf of the caller. +Once the initialization and allocation has completed, a handle is +returned to the caller. The OpenACC library also requires +initialization and allocation of hardware resources. Since the CUBLAS +library has already allocated the hardware resources for the device, all +that is left to do is to initialize the OpenACC library and acquire the +hardware resources on the host. + + Prior to calling the OpenACC function that initializes the library +and allocate the host hardware resources, you need to acquire the device +number that was allocated during the call to 'cublasCreate()'. The +invoking of the runtime library function 'cudaGetDevice()' accomplishes +this. Once acquired, the device number is passed along with the device +type as parameters to the OpenACC library function +'acc_set_device_num()'. 
+ + Once the call to 'acc_set_device_num()' has completed, the OpenACC +library uses the context that was created during the call to +'cublasCreate()'. In other words, both libraries will be sharing the +same context. + + /* Create the handle */ + s = cublasCreate(&h); + if (s != CUBLAS_STATUS_SUCCESS) + { + fprintf(stderr, "cublasCreate failed %d\n", s); + exit(EXIT_FAILURE); + } + + /* Get the device number */ + e = cudaGetDevice(&dev); + if (e != cudaSuccess) + { + fprintf(stderr, "cudaGetDevice failed %d\n", e); + exit(EXIT_FAILURE); + } + + /* Initialize OpenACC library and use device 'dev' */ + acc_set_device_num(dev, acc_device_nvidia); + + Use Case 1 + +8.3 First invocation: OpenACC library API +========================================= + +In this second use case (see below), a function in the OpenACC library +is called prior to any of the functions in the CUBLAS library. More +specifically, the function 'acc_set_device_num()'. + + In the use case presented here, the function 'acc_set_device_num()' +is used to both initialize the OpenACC library and allocate the hardware +resources on the host and the device. In the call to the function, the +call parameters specify which device to use and what device type to use, +i.e., 'acc_device_nvidia'. It should be noted that this is but one +method to initialize the OpenACC library and allocate the appropriate +hardware resources. Other methods are available through the use of +environment variables and these will be discussed in the next section. + + Once the call to 'acc_set_device_num()' has completed, other OpenACC +functions can be called as seen with multiple calls being made to +'acc_copyin()'. In addition, calls can be made to functions in the +CUBLAS library. In the use case a call to 'cublasCreate()' is made +subsequent to the calls to 'acc_copyin()'. 
As seen in the previous use +case, a call to 'cublasCreate()' initializes the CUBLAS library and +allocates the hardware resources on the host and the device. However, +since the device has already been allocated, 'cublasCreate()' will only +initialize the CUBLAS library and allocate the appropriate hardware +resources on the host. The context that was created as part of the +OpenACC initialization is shared with the CUBLAS library, similarly to +the first use case. + + dev = 0; + + acc_set_device_num(dev, acc_device_nvidia); + + /* Copy the first set to the device */ + d_X = acc_copyin(&h_X[0], N * sizeof (float)); + if (d_X == NULL) + { + fprintf(stderr, "copyin error h_X\n"); + exit(EXIT_FAILURE); + } + + /* Copy the second set to the device */ + d_Y = acc_copyin(&h_Y1[0], N * sizeof (float)); + if (d_Y == NULL) + { + fprintf(stderr, "copyin error h_Y1\n"); + exit(EXIT_FAILURE); + } + + /* Create the handle */ + s = cublasCreate(&h); + if (s != CUBLAS_STATUS_SUCCESS) + { + fprintf(stderr, "cublasCreate failed %d\n", s); + exit(EXIT_FAILURE); + } + + /* Perform saxpy using CUBLAS library function */ + s = cublasSaxpy(h, N, &alpha, d_X, 1, d_Y, 1); + if (s != CUBLAS_STATUS_SUCCESS) + { + fprintf(stderr, "cublasSaxpy failed %d\n", s); + exit(EXIT_FAILURE); + } + + /* Copy the results from the device */ + acc_memcpy_from_device(&h_Y1[0], d_Y, N * sizeof (float)); + + Use Case 2 + +8.4 OpenACC library and environment variables +============================================= + +There are two environment variables associated with the OpenACC library +that may be used to control the device type and device number: +'ACC_DEVICE_TYPE' and 'ACC_DEVICE_NUM', respectively. These two +environment variables can be used as an alternative to calling +'acc_set_device_num()'. As seen in the second use case, the device type +and device number were specified using 'acc_set_device_num()'. 
If +however, the aforementioned environment variables were set, then the +call to 'acc_set_device_num()' would not be required. + + The use of the environment variables is only relevant when an OpenACC +function is called prior to a call to 'cudaCreate()'. If 'cudaCreate()' +is called prior to a call to an OpenACC function, then you must call +'acc_set_device_num()'(2) + + ---------- Footnotes ---------- + + (1) See section 2.26, "Interactions with the CUDA Driver API" in +"CUDA Runtime API", Version 5.5, and section 2.27, "VDPAU +Interoperability", in "CUDA Driver API", TRM-06703-001, Version 5.5, for +additional information on library interoperability. + + (2) More complete information about 'ACC_DEVICE_TYPE' and +'ACC_DEVICE_NUM' can be found in sections 4.1 and 4.2 of the OpenACC +(https://www.openacc.org) Application Programming Interface”, Version +2.6. + + +File: libgomp.info, Node: OpenACC Profiling Interface, Next: The libgomp ABI, Prev: OpenACC Library Interoperability, Up: Top + +9 OpenACC Profiling Interface +***************************** + +9.1 Implementation Status and Implementation-Defined Behavior +============================================================= + +We're implementing the OpenACC Profiling Interface as defined by the +OpenACC 2.6 specification. We're clarifying some aspects here as +_implementation-defined behavior_, while they're still under discussion +within the OpenACC Technical Committee. + + This implementation is tuned to keep the performance impact as low as +possible for the (very common) case that the Profiling Interface is not +enabled. This is relevant, as the Profiling Interface affects all the +_hot_ code paths (in the target code, not in the offloaded code). 
Users +of the OpenACC Profiling Interface can be expected to understand that +performance will be impacted to some degree once the Profiling Interface +has gotten enabled: for example, because of the _runtime_ (libgomp) +calling into a third-party _library_ for every event that has been +registered. + + We're not yet accounting for the fact that 'OpenACC events may occur +during event processing'. We just handle one case specially, as +required by CUDA 9.0 'nvprof', that 'acc_get_device_type' (*note +acc_get_device_type::) may be called from 'acc_ev_device_init_start', +'acc_ev_device_init_end' callbacks. + + We're not yet implementing initialization via a +'acc_register_library' function that is either statically linked in, or +dynamically via 'LD_PRELOAD'. Initialization via 'acc_register_library' +functions dynamically loaded via the 'ACC_PROFLIB' environment variable +does work, as does directly calling 'acc_prof_register', +'acc_prof_unregister', 'acc_prof_lookup'. + + As currently there are no inquiry functions defined, calls to +'acc_prof_lookup' will always return 'NULL'. + + There aren't separate _start_, _stop_ events defined for the event +types 'acc_ev_create', 'acc_ev_delete', 'acc_ev_alloc', 'acc_ev_free'. +It's not clear if these should be triggered before or after the actual +device-specific call is made. We trigger them after. + + Remarks about data provided to callbacks: + +'acc_prof_info.event_type' + It's not clear if for _nested_ event callbacks (for example, + 'acc_ev_enqueue_launch_start' as part of a parent compute + construct), this should be set for the nested event + ('acc_ev_enqueue_launch_start'), or if the value of the parent + construct should remain ('acc_ev_compute_construct_start'). In + this implementation, the value will generally correspond to the + innermost nested event type. 
+ +'acc_prof_info.device_type' + + * For 'acc_ev_compute_construct_start', and in presence of an + 'if' clause with _false_ argument, this will still refer to + the offloading device type. It's not clear if that's the + expected behavior. + + * Complementary to the item before, for + 'acc_ev_compute_construct_end', this is set to + 'acc_device_host' in presence of an 'if' clause with _false_ + argument. It's not clear if that's the expected behavior. + +'acc_prof_info.thread_id' + Always '-1'; not yet implemented. + +'acc_prof_info.async' + + * Not yet implemented correctly for + 'acc_ev_compute_construct_start'. + + * In a compute construct, for host-fallback + execution/'acc_device_host' it will always be + 'acc_async_sync'. It's not clear if that's the expected + behavior. + + * For 'acc_ev_device_init_start' and 'acc_ev_device_init_end', + it will always be 'acc_async_sync'. It's not clear if that's + the expected behavior. + +'acc_prof_info.async_queue' + There is no 'limited number of asynchronous queues' in libgomp. + This will always have the same value as 'acc_prof_info.async'. + +'acc_prof_info.src_file' + Always 'NULL'; not yet implemented. + +'acc_prof_info.func_name' + Always 'NULL'; not yet implemented. + +'acc_prof_info.line_no' + Always '-1'; not yet implemented. + +'acc_prof_info.end_line_no' + Always '-1'; not yet implemented. + +'acc_prof_info.func_line_no' + Always '-1'; not yet implemented. + +'acc_prof_info.func_end_line_no' + Always '-1'; not yet implemented. + +'acc_event_info.event_type', 'acc_event_info.*.event_type' + Relating to 'acc_prof_info.event_type' discussed above, in this + implementation, this will always be the same value as + 'acc_prof_info.event_type'. + +'acc_event_info.*.parent_construct' + + * Will be 'acc_construct_parallel' for all OpenACC compute + constructs as well as many OpenACC Runtime API calls; should + be the one matching the actual construct, or + 'acc_construct_runtime_api', respectively. 
+ + * Will be 'acc_construct_enter_data' or + 'acc_construct_exit_data' when processing variable mappings + specified in OpenACC _declare_ directives; should be + 'acc_construct_declare'. + + * For implicit 'acc_ev_device_init_start', + 'acc_ev_device_init_end', and explicit as well as implicit + 'acc_ev_alloc', 'acc_ev_free', 'acc_ev_enqueue_upload_start', + 'acc_ev_enqueue_upload_end', 'acc_ev_enqueue_download_start', + and 'acc_ev_enqueue_download_end', will be + 'acc_construct_parallel'; should reflect the real parent + construct. + +'acc_event_info.*.implicit' + For 'acc_ev_alloc', 'acc_ev_free', 'acc_ev_enqueue_upload_start', + 'acc_ev_enqueue_upload_end', 'acc_ev_enqueue_download_start', and + 'acc_ev_enqueue_download_end', this currently will be '1' also for + explicit usage. + +'acc_event_info.data_event.var_name' + Always 'NULL'; not yet implemented. + +'acc_event_info.data_event.host_ptr' + For 'acc_ev_alloc', and 'acc_ev_free', this is always 'NULL'. + +'typedef union acc_api_info' + ... as printed in '5.2.3. Third Argument: API-Specific + Information'. This should obviously be 'typedef _struct_ + acc_api_info'. + +'acc_api_info.device_api' + Possibly not yet implemented correctly for + 'acc_ev_compute_construct_start', 'acc_ev_device_init_start', + 'acc_ev_device_init_end': will always be 'acc_device_api_none' for + these event types. For 'acc_ev_enter_data_start', it will be + 'acc_device_api_none' in some cases. + +'acc_api_info.device_type' + Always the same as 'acc_prof_info.device_type'. + +'acc_api_info.vendor' + Always '-1'; not yet implemented. + +'acc_api_info.device_handle' + Always 'NULL'; not yet implemented. + +'acc_api_info.context_handle' + Always 'NULL'; not yet implemented. + +'acc_api_info.async_handle' + Always 'NULL'; not yet implemented. 
+ + Remarks about certain event types: + +'acc_ev_device_init_start', 'acc_ev_device_init_end' + + * When a compute construct triggers implicit + 'acc_ev_device_init_start' and 'acc_ev_device_init_end' + events, they currently aren't _nested within_ the + corresponding 'acc_ev_compute_construct_start' and + 'acc_ev_compute_construct_end', but they're currently observed + _before_ 'acc_ev_compute_construct_start'. It's not clear + what to do: the standard asks us to provide a lot of details to + the 'acc_ev_compute_construct_start' callback, without + (implicitly) initializing a device before? + + * Callbacks for these event types will not be invoked for calls + to the 'acc_set_device_type' and 'acc_set_device_num' + functions. It's not clear if they should be. + +'acc_ev_enter_data_start', 'acc_ev_enter_data_end', 'acc_ev_exit_data_start', 'acc_ev_exit_data_end' + + * Callbacks for these event types will also be invoked for + OpenACC _host_data_ constructs. It's not clear if they should + be. + + * Callbacks for these event types will also be invoked when + processing variable mappings specified in OpenACC _declare_ + directives. It's not clear if they should be. 
+ + Callbacks for the following event types will be invoked, but dispatch +and information provided therein has not yet been thoroughly reviewed: + + * 'acc_ev_alloc' + * 'acc_ev_free' + * 'acc_ev_update_start', 'acc_ev_update_end' + * 'acc_ev_enqueue_upload_start', 'acc_ev_enqueue_upload_end' + * 'acc_ev_enqueue_download_start', 'acc_ev_enqueue_download_end' + + During device initialization, and finalization, respectively, +callbacks for the following event types will not yet be invoked: + + * 'acc_ev_alloc' + * 'acc_ev_free' + + Callbacks for the following event types have not yet been +implemented, so currently won't be invoked: + + * 'acc_ev_device_shutdown_start', 'acc_ev_device_shutdown_end' + * 'acc_ev_runtime_shutdown' + * 'acc_ev_create', 'acc_ev_delete' + * 'acc_ev_wait_start', 'acc_ev_wait_end' + + For the following runtime library functions, not all expected +callbacks will be invoked (mostly concerning implicit device +initialization): + + * 'acc_get_num_devices' + * 'acc_set_device_type' + * 'acc_get_device_type' + * 'acc_set_device_num' + * 'acc_get_device_num' + * 'acc_init' + * 'acc_shutdown' + + Aside from implicit device initialization, for the following runtime +library functions, no callbacks will be invoked for shared-memory +offloading devices (it's not clear if they should be): + + * 'acc_malloc' + * 'acc_free' + * 'acc_copyin', 'acc_present_or_copyin', 'acc_copyin_async' + * 'acc_create', 'acc_present_or_create', 'acc_create_async' + * 'acc_copyout', 'acc_copyout_async', 'acc_copyout_finalize', + 'acc_copyout_finalize_async' + * 'acc_delete', 'acc_delete_async', 'acc_delete_finalize', + 'acc_delete_finalize_async' + * 'acc_update_device', 'acc_update_device_async' + * 'acc_update_self', 'acc_update_self_async' + * 'acc_map_data', 'acc_unmap_data' + * 'acc_memcpy_to_device', 'acc_memcpy_to_device_async' + * 'acc_memcpy_from_device', 'acc_memcpy_from_device_async' + + +File: libgomp.info, Node: The libgomp ABI, Next: Reporting Bugs, Prev: 
OpenACC Profiling Interface, Up: Top + +10 The libgomp ABI +****************** + +The following sections present notes on the external ABI as presented by +libgomp. Only maintainers should need them. + +* Menu: + +* Implementing MASTER construct:: +* Implementing CRITICAL construct:: +* Implementing ATOMIC construct:: +* Implementing FLUSH construct:: +* Implementing BARRIER construct:: +* Implementing THREADPRIVATE construct:: +* Implementing PRIVATE clause:: +* Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses:: +* Implementing REDUCTION clause:: +* Implementing PARALLEL construct:: +* Implementing FOR construct:: +* Implementing ORDERED construct:: +* Implementing SECTIONS construct:: +* Implementing SINGLE construct:: +* Implementing OpenACC's PARALLEL construct:: + + +File: libgomp.info, Node: Implementing MASTER construct, Next: Implementing CRITICAL construct, Up: The libgomp ABI + +10.1 Implementing MASTER construct +================================== + + if (omp_get_thread_num () == 0) + block + + Alternately, we generate two copies of the parallel subfunction and +only include this in the version run by the master thread. Surely this +is not worthwhile though... + + +File: libgomp.info, Node: Implementing CRITICAL construct, Next: Implementing ATOMIC construct, Prev: Implementing MASTER construct, Up: The libgomp ABI + +10.2 Implementing CRITICAL construct +==================================== + +Without a specified name, + + void GOMP_critical_start (void); + void GOMP_critical_end (void); + + so that we don't get COPY relocations from libgomp to the main +application. + + With a specified name, use omp_set_lock and omp_unset_lock with name +being transformed into a variable declared like + + omp_lock_t gomp_critical_user_ __attribute__((common)) + + Ideally the ABI would specify that all zero is a valid unlocked +state, and so we wouldn't need to initialize this at startup. 
+ + +File: libgomp.info, Node: Implementing ATOMIC construct, Next: Implementing FLUSH construct, Prev: Implementing CRITICAL construct, Up: The libgomp ABI + +10.3 Implementing ATOMIC construct +================================== + +The target should implement the '__sync' builtins. + + Failing that we could add + + void GOMP_atomic_enter (void) + void GOMP_atomic_exit (void) + + which reuses the regular lock code, but with yet another lock object +private to the library. + + +File: libgomp.info, Node: Implementing FLUSH construct, Next: Implementing BARRIER construct, Prev: Implementing ATOMIC construct, Up: The libgomp ABI + +10.4 Implementing FLUSH construct +================================= + +Expands to the '__sync_synchronize' builtin. + + +File: libgomp.info, Node: Implementing BARRIER construct, Next: Implementing THREADPRIVATE construct, Prev: Implementing FLUSH construct, Up: The libgomp ABI + +10.5 Implementing BARRIER construct +=================================== + + void GOMP_barrier (void) + + +File: libgomp.info, Node: Implementing THREADPRIVATE construct, Next: Implementing PRIVATE clause, Prev: Implementing BARRIER construct, Up: The libgomp ABI + +10.6 Implementing THREADPRIVATE construct +========================================= + +In _most_ cases we can map this directly to '__thread'. Except that OMP +allows constructors for C++ objects. We can either refuse to support +this (how often is it used?) or we can implement something akin to +.ctors. + + Even more ideally, this ctor feature is handled by extensions to the +main pthreads library. Failing that, we can have a set of entry points +to register ctor functions to be called. 
+ + +File: libgomp.info, Node: Implementing PRIVATE clause, Next: Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses, Prev: Implementing THREADPRIVATE construct, Up: The libgomp ABI + +10.7 Implementing PRIVATE clause +================================ + +In association with a PARALLEL, or within the lexical extent of a +PARALLEL block, the variable becomes a local variable in the parallel +subfunction. + + In association with FOR or SECTIONS blocks, create a new automatic +variable within the current function. This preserves the semantic of +new variable creation. + + +File: libgomp.info, Node: Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses, Next: Implementing REDUCTION clause, Prev: Implementing PRIVATE clause, Up: The libgomp ABI + +10.8 Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses +========================================================================= + +This seems simple enough for PARALLEL blocks. Create a private struct +for communicating between the parent and subfunction. In the parent, +copy in values for scalar and "small" structs; copy in addresses for +others TREE_ADDRESSABLE types. In the subfunction, copy the value into +the local variable. + + It is not clear what to do with bare FOR or SECTION blocks. The only +thing I can figure is that we do something like: + + #pragma omp for firstprivate(x) lastprivate(y) + for (int i = 0; i < n; ++i) + body; + + which becomes + + { + int x = x, y; + + // for stuff + + if (i == n) + y = y; + } + + where the "x=x" and "y=y" assignments actually have different uids +for the two variables, i.e. not something you could write directly in +C. Presumably this only makes sense if the "outer" x and y are global +variables. + + COPYPRIVATE would work the same way, except the structure broadcast +would have to happen via SINGLE machinery instead. 
+ + +File: libgomp.info, Node: Implementing REDUCTION clause, Next: Implementing PARALLEL construct, Prev: Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses, Up: The libgomp ABI + +10.9 Implementing REDUCTION clause +================================== + +The private struct mentioned in the previous section should have a +pointer to an array of the type of the variable, indexed by the thread's +TEAM_ID. The thread stores its final value into the array, and after +the barrier, the master thread iterates over the array to collect the +values. + + +File: libgomp.info, Node: Implementing PARALLEL construct, Next: Implementing FOR construct, Prev: Implementing REDUCTION clause, Up: The libgomp ABI + +10.10 Implementing PARALLEL construct +===================================== + + #pragma omp parallel + { + body; + } + + becomes + + void subfunction (void *data) + { + use data; + body; + } + + setup data; + GOMP_parallel_start (subfunction, &data, num_threads); + subfunction (&data); + GOMP_parallel_end (); + + void GOMP_parallel_start (void (*fn)(void *), void *data, unsigned num_threads) + + The FN argument is the subfunction to be run in parallel. + + The DATA argument is a pointer to a structure used to communicate +data in and out of the subfunction, as discussed above with respect to +FIRSTPRIVATE et al. + + The NUM_THREADS argument is 1 if an IF clause is present and false, +or the value of the NUM_THREADS clause, if present, or 0. + + The function needs to create the appropriate number of threads and/or +launch them from the dock. It needs to create the team structure and +assign team ids. + + void GOMP_parallel_end (void) + + Tears down the team and returns us to the previous +'omp_in_parallel()' state. 
+ + +File: libgomp.info, Node: Implementing FOR construct, Next: Implementing ORDERED construct, Prev: Implementing PARALLEL construct, Up: The libgomp ABI + +10.11 Implementing FOR construct +================================ + + #pragma omp parallel for + for (i = lb; i <= ub; i++) + body; + + becomes + + void subfunction (void *data) + { + long _s0, _e0; + while (GOMP_loop_static_next (&_s0, &_e0)) + { + long _e1 = _e0, i; + for (i = _s0; i < _e1; i++) + body; + } + GOMP_loop_end_nowait (); + } + + GOMP_parallel_loop_static (subfunction, NULL, 0, lb, ub+1, 1, 0); + subfunction (NULL); + GOMP_parallel_end (); + + #pragma omp for schedule(runtime) + for (i = 0; i < n; i++) + body; + + becomes + + { + long i, _s0, _e0; + if (GOMP_loop_runtime_start (0, n, 1, &_s0, &_e0)) + do { + long _e1 = _e0; + for (i = _s0, i < _e0; i++) + body; + } while (GOMP_loop_runtime_next (&_s0, _&e0)); + GOMP_loop_end (); + } + + Note that while it looks like there is trickiness to propagating a +non-constant STEP, there isn't really. We're explicitly allowed to +evaluate it as many times as we want, and any variables involved should +automatically be handled as PRIVATE or SHARED like any other variables. +So the expression should remain evaluable in the subfunction. We can +also pull it into a local variable if we like, but since its supposed to +remain unchanged, we can also not if we like. + + If we have SCHEDULE(STATIC), and no ORDERED, then we ought to be able +to get away with no work-sharing context at all, since we can simply +perform the arithmetic directly in each thread to divide up the +iterations. Which would mean that we wouldn't need to call any of these +routines. + + There are separate routines for handling loops with an ORDERED +clause. Bookkeeping for that is non-trivial... 
+ + +File: libgomp.info, Node: Implementing ORDERED construct, Next: Implementing SECTIONS construct, Prev: Implementing FOR construct, Up: The libgomp ABI + +10.12 Implementing ORDERED construct +==================================== + + void GOMP_ordered_start (void) + void GOMP_ordered_end (void) + + +File: libgomp.info, Node: Implementing SECTIONS construct, Next: Implementing SINGLE construct, Prev: Implementing ORDERED construct, Up: The libgomp ABI + +10.13 Implementing SECTIONS construct +===================================== + +A block as + + #pragma omp sections + { + #pragma omp section + stmt1; + #pragma omp section + stmt2; + #pragma omp section + stmt3; + } + + becomes + + for (i = GOMP_sections_start (3); i != 0; i = GOMP_sections_next ()) + switch (i) + { + case 1: + stmt1; + break; + case 2: + stmt2; + break; + case 3: + stmt3; + break; + } + GOMP_barrier (); + + +File: libgomp.info, Node: Implementing SINGLE construct, Next: Implementing OpenACC's PARALLEL construct, Prev: Implementing SECTIONS construct, Up: The libgomp ABI + +10.14 Implementing SINGLE construct +=================================== + +A block like + + #pragma omp single + { + body; + } + + becomes + + if (GOMP_single_start ()) + body; + GOMP_barrier (); + + while + + #pragma omp single copyprivate(x) + body; + + becomes + + datap = GOMP_single_copy_start (); + if (datap == NULL) + { + body; + data.x = x; + GOMP_single_copy_end (&data); + } + else + x = datap->x; + GOMP_barrier (); + + +File: libgomp.info, Node: Implementing OpenACC's PARALLEL construct, Prev: Implementing SINGLE construct, Up: The libgomp ABI + +10.15 Implementing OpenACC's PARALLEL construct +=============================================== + + void GOACC_parallel () + + +File: libgomp.info, Node: Reporting Bugs, Next: Copying, Prev: The libgomp ABI, Up: Top + +11 Reporting Bugs +***************** + +Bugs in the GNU Offloading and Multi Processing Runtime Library should +be reported via Bugzilla 
(https://gcc.gnu.org/bugzilla/). Please add +"openacc", or "openmp", or both to the keywords field in the bug report, +as appropriate. + + +File: libgomp.info, Node: Copying, Next: GNU Free Documentation License, Prev: Reporting Bugs, Up: Top + +GNU General Public License +************************** + + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + + Everyone is permitted to copy and distribute verbatim copies of this + license document, but changing it is not allowed. + +Preamble +======== + +The GNU General Public License is a free, copyleft license for software +and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program-to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. 
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. 
+ + The precise terms and conditions for copying, distribution and +modification follow. + +TERMS AND CONDITIONS +==================== + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public + License. + + "Copyright" also means copyright-like laws that apply to other + kinds of works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this + License. Each licensee is addressed as "you". "Licensees" and + "recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the + work in a fashion requiring copyright permission, other than the + making of an exact copy. The resulting work is called a "modified + version" of the earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work + based on the Program. + + To "propagate" a work means to do anything with it that, without + permission, would make you directly or secondarily liable for + infringement under applicable copyright law, except executing it on + a computer or modifying a private copy. Propagation includes + copying, distribution (with or without modification), making + available to the public, and in some countries other activities as + well. + + To "convey" a work means any kind of propagation that enables other + parties to make or receive copies. Mere interaction with a user + through a computer network, with no transfer of a copy, is not + conveying. + + An interactive user interface displays "Appropriate Legal Notices" + to the extent that it includes a convenient and prominently visible + feature that (1) displays an appropriate copyright notice, and (2) + tells the user that there is no warranty for the work (except to + the extent that warranties are provided), that licensees may convey + the work under this License, and how to view a copy of this + License. 
If the interface presents a list of user commands or + options, such as a menu, a prominent item in the list meets this + criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work + for making modifications to it. "Object code" means any non-source + form of a work. + + A "Standard Interface" means an interface that either is an + official standard defined by a recognized standards body, or, in + the case of interfaces specified for a particular programming + language, one that is widely used among developers working in that + language. + + The "System Libraries" of an executable work include anything, + other than the work as a whole, that (a) is included in the normal + form of packaging a Major Component, but which is not part of that + Major Component, and (b) serves only to enable use of the work with + that Major Component, or to implement a Standard Interface for + which an implementation is available to the public in source code + form. A "Major Component", in this context, means a major + essential component (kernel, window system, and so on) of the + specific operating system (if any) on which the executable work + runs, or a compiler used to produce the work, or an object code + interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all + the source code needed to generate, install, and (for an executable + work) run the object code and to modify the work, including scripts + to control those activities. However, it does not include the + work's System Libraries, or general-purpose tools or generally + available free programs which are used unmodified in performing + those activities but which are not part of the work. 
For example, + Corresponding Source includes interface definition files associated + with source files for the work, and the source code for shared + libraries and dynamically linked subprograms that the work is + specifically designed to require, such as by intimate data + communication or control flow between those subprograms and other + parts of the work. + + The Corresponding Source need not include anything that users can + regenerate automatically from other parts of the Corresponding + Source. + + The Corresponding Source for a work in source code form is that + same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of + copyright on the Program, and are irrevocable provided the stated + conditions are met. This License explicitly affirms your unlimited + permission to run the unmodified Program. The output from running + a covered work is covered by this License only if the output, given + its content, constitutes a covered work. This License acknowledges + your rights of fair use or other equivalent, as provided by + copyright law. + + You may make, run and propagate covered works that you do not + convey, without conditions so long as your license otherwise + remains in force. You may convey covered works to others for the + sole purpose of having them make modifications exclusively for you, + or provide you with facilities for running those works, provided + that you comply with the terms of this License in conveying all + material for which you do not control copyright. Those thus making + or running the covered works for you must do so exclusively on your + behalf, under your direction and control, on terms that prohibit + them from making any copies of your copyrighted material outside + their relationship with you. + + Conveying under any other circumstances is permitted solely under + the conditions stated below. Sublicensing is not allowed; section + 10 makes it unnecessary. + + 3. 
Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological + measure under any applicable law fulfilling obligations under + article 11 of the WIPO copyright treaty adopted on 20 December + 1996, or similar laws prohibiting or restricting circumvention of + such measures. + + When you convey a covered work, you waive any legal power to forbid + circumvention of technological measures to the extent such + circumvention is effected by exercising rights under this License + with respect to the covered work, and you disclaim any intention to + limit operation or modification of the work as a means of + enforcing, against the work's users, your or third parties' legal + rights to forbid circumvention of technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you + receive it, in any medium, provided that you conspicuously and + appropriately publish on each copy an appropriate copyright notice; + keep intact all notices stating that this License and any + non-permissive terms added in accord with section 7 apply to the + code; keep intact all notices of the absence of any warranty; and + give all recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, + and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to + produce it from the Program, in the form of source code under the + terms of section 4, provided that you also meet all of these + conditions: + + a. The work must carry prominent notices stating that you + modified it, and giving a relevant date. + + b. The work must carry prominent notices stating that it is + released under this License and any conditions added under + section 7. 
This requirement modifies the requirement in + section 4 to "keep intact all notices". + + c. You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable + section 7 additional terms, to the whole of the work, and all + its parts, regardless of how they are packaged. This License + gives no permission to license the work in any other way, but + it does not invalidate such permission if you have separately + received it. + + d. If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has + interactive interfaces that do not display Appropriate Legal + Notices, your work need not make them do so. + + A compilation of a covered work with other separate and independent + works, which are not by their nature extensions of the covered + work, and which are not combined with it such as to form a larger + program, in or on a volume of a storage or distribution medium, is + called an "aggregate" if the compilation and its resulting + copyright are not used to limit the access or legal rights of the + compilation's users beyond what the individual works permit. + Inclusion of a covered work in an aggregate does not cause this + License to apply to the other parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms + of sections 4 and 5, provided that you also convey the + machine-readable Corresponding Source under the terms of this + License, in one of these ways: + + a. Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b. 
Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that + product model, to give anyone who possesses the object code + either (1) a copy of the Corresponding Source for all the + software in the product that is covered by this License, on a + durable physical medium customarily used for software + interchange, for a price no more than your reasonable cost of + physically performing this conveying of source, or (2) access + to copy the Corresponding Source from a network server at no + charge. + + c. Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, + and only if you received the object code with such an offer, + in accord with subsection 6b. + + d. Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to + the Corresponding Source in the same way through the same + place at no further charge. You need not require recipients + to copy the Corresponding Source along with the object code. + If the place to copy the object code is a network server, the + Corresponding Source may be on a different server (operated by + you or a third party) that supports equivalent copying + facilities, provided you maintain clear directions next to the + object code saying where to find the Corresponding Source. + Regardless of what server hosts the Corresponding Source, you + remain obligated to ensure that it is available for as long as + needed to satisfy these requirements. + + e. 
Convey the object code using peer-to-peer transmission, + provided you inform other peers where the object code and + Corresponding Source of the work are being offered to the + general public at no charge under subsection 6d. + + A separable portion of the object code, whose source code is + excluded from the Corresponding Source as a System Library, need + not be included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means + any tangible personal property which is normally used for personal, + family, or household purposes, or (2) anything designed or sold for + incorporation into a dwelling. In determining whether a product is + a consumer product, doubtful cases shall be resolved in favor of + coverage. For a particular product received by a particular user, + "normally used" refers to a typical or common use of that class of + product, regardless of the status of the particular user or of the + way in which the particular user actually uses, or expects or is + expected to use, the product. A product is a consumer product + regardless of whether the product has substantial commercial, + industrial or non-consumer uses, unless such uses represent the + only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, + procedures, authorization keys, or other information required to + install and execute modified versions of a covered work in that + User Product from a modified version of its Corresponding Source. + The information must suffice to ensure that the continued + functioning of the modified object code is in no case prevented or + interfered with solely because modification has been made. 
+ + If you convey an object code work under this section in, or with, + or specifically for use in, a User Product, and the conveying + occurs as part of a transaction in which the right of possession + and use of the User Product is transferred to the recipient in + perpetuity or for a fixed term (regardless of how the transaction + is characterized), the Corresponding Source conveyed under this + section must be accompanied by the Installation Information. But + this requirement does not apply if neither you nor any third party + retains the ability to install modified object code on the User + Product (for example, the work has been installed in ROM). + + The requirement to provide Installation Information does not + include a requirement to continue to provide support service, + warranty, or updates for a work that has been modified or installed + by the recipient, or for the User Product in which it has been + modified or installed. Access to a network may be denied when the + modification itself materially and adversely affects the operation + of the network or violates the rules and protocols for + communication across the network. + + Corresponding Source conveyed, and Installation Information + provided, in accord with this section must be in a format that is + publicly documented (and with an implementation available to the + public in source code form), and must require no special password + or key for unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of + this License by making exceptions from one or more of its + conditions. Additional permissions that are applicable to the + entire Program shall be treated as though they were included in + this License, to the extent that they are valid under applicable + law. 
If additional permissions apply only to part of the Program, + that part may be used separately under those permissions, but the + entire Program remains governed by this License without regard to + the additional permissions. + + When you convey a copy of a covered work, you may at your option + remove any additional permissions from that copy, or from any part + of it. (Additional permissions may be written to require their own + removal in certain cases when you modify the work.) You may place + additional permissions on material, added by you to a covered work, + for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material + you add to a covered work, you may (if authorized by the copyright + holders of that material) supplement the terms of this License with + terms: + + a. Disclaiming warranty or limiting liability differently from + the terms of sections 15 and 16 of this License; or + + b. Requiring preservation of specified reasonable legal notices + or author attributions in that material or in the Appropriate + Legal Notices displayed by works containing it; or + + c. Prohibiting misrepresentation of the origin of that material, + or requiring that modified versions of such material be marked + in reasonable ways as different from the original version; or + + d. Limiting the use for publicity purposes of names of licensors + or authors of the material; or + + e. Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f. Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified + versions of it) with contractual assumptions of liability to + the recipient, for any liability that these contractual + assumptions directly impose on those licensors and authors. 
+ + All other non-permissive additional terms are considered "further + restrictions" within the meaning of section 10. If the Program as + you received it, or any part of it, contains a notice stating that + it is governed by this License along with a term that is a further + restriction, you may remove that term. If a license document + contains a further restriction but permits relicensing or conveying + under this License, you may add to a covered work material governed + by the terms of that license document, provided that the further + restriction does not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you + must place, in the relevant source files, a statement of the + additional terms that apply to those files, or a notice indicating + where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in + the form of a separately written license, or stated as exceptions; + the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly + provided under this License. Any attempt otherwise to propagate or + modify it is void, and will automatically terminate your rights + under this License (including any patent licenses granted under the + third paragraph of section 11). + + However, if you cease all violation of this License, then your + license from a particular copyright holder is reinstated (a) + provisionally, unless and until the copyright holder explicitly and + finally terminates your license, and (b) permanently, if the + copyright holder fails to notify you of the violation by some + reasonable means prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is + reinstated permanently if the copyright holder notifies you of the + violation by some reasonable means, this is the first time you have + received notice of violation of this License (for any work) from + that copyright holder, and you cure the violation prior to 30 days + after your receipt of the notice. + + Termination of your rights under this section does not terminate + the licenses of parties who have received copies or rights from you + under this License. If your rights have been terminated and not + permanently reinstated, you do not qualify to receive new licenses + for the same material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or + run a copy of the Program. Ancillary propagation of a covered work + occurring solely as a consequence of using peer-to-peer + transmission to receive a copy likewise does not require + acceptance. However, nothing other than this License grants you + permission to propagate or modify any covered work. These actions + infringe copyright if you do not accept this License. Therefore, + by modifying or propagating a covered work, you indicate your + acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically + receives a license from the original licensors, to run, modify and + propagate that work, subject to this License. You are not + responsible for enforcing compliance by third parties with this + License. + + An "entity transaction" is a transaction transferring control of an + organization, or substantially all assets of one, or subdividing an + organization, or merging organizations. 
If propagation of a + covered work results from an entity transaction, each party to that + transaction who receives a copy of the work also receives whatever + licenses to the work the party's predecessor in interest had or + could give under the previous paragraph, plus a right to possession + of the Corresponding Source of the work from the predecessor in + interest, if the predecessor has it or can get it with reasonable + efforts. + + You may not impose any further restrictions on the exercise of the + rights granted or affirmed under this License. For example, you + may not impose a license fee, royalty, or other charge for exercise + of rights granted under this License, and you may not initiate + litigation (including a cross-claim or counterclaim in a lawsuit) + alleging that any patent claim is infringed by making, using, + selling, offering for sale, or importing the Program or any portion + of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this + License of the Program or a work on which the Program is based. + The work thus licensed is called the contributor's "contributor + version". + + A contributor's "essential patent claims" are all patent claims + owned or controlled by the contributor, whether already acquired or + hereafter acquired, that would be infringed by some manner, + permitted by this License, of making, using, or selling its + contributor version, but do not include claims that would be + infringed only as a consequence of further modification of the + contributor version. For purposes of this definition, "control" + includes the right to grant patent sublicenses in a manner + consistent with the requirements of this License. + + Each contributor grants you a non-exclusive, worldwide, + royalty-free patent license under the contributor's essential + patent claims, to make, use, sell, offer for sale, import and + otherwise run, modify and propagate the contents of its contributor + version. 
+ + In the following three paragraphs, a "patent license" is any + express agreement or commitment, however denominated, not to + enforce a patent (such as an express permission to practice a + patent or covenant not to sue for patent infringement). To "grant" + such a patent license to a party means to make such an agreement or + commitment not to enforce a patent against the party. + + If you convey a covered work, knowingly relying on a patent + license, and the Corresponding Source of the work is not available + for anyone to copy, free of charge and under the terms of this + License, through a publicly available network server or other + readily accessible means, then you must either (1) cause the + Corresponding Source to be so available, or (2) arrange to deprive + yourself of the benefit of the patent license for this particular + work, or (3) arrange, in a manner consistent with the requirements + of this License, to extend the patent license to downstream + recipients. "Knowingly relying" means you have actual knowledge + that, but for the patent license, your conveying the covered work + in a country, or your recipient's use of the covered work in a + country, would infringe one or more identifiable patents in that + country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or + arrangement, you convey, or propagate by procuring conveyance of, a + covered work, and grant a patent license to some of the parties + receiving the covered work authorizing them to use, propagate, + modify or convey a specific copy of the covered work, then the + patent license you grant is automatically extended to all + recipients of the covered work and works based on it. + + A patent license is "discriminatory" if it does not include within + the scope of its coverage, prohibits the exercise of, or is + conditioned on the non-exercise of one or more of the rights that + are specifically granted under this License. 
You may not convey a + covered work if you are a party to an arrangement with a third + party that is in the business of distributing software, under which + you make payment to the third party based on the extent of your + activity of conveying the work, and under which the third party + grants, to any of the parties who would receive the covered work + from you, a discriminatory patent license (a) in connection with + copies of the covered work conveyed by you (or copies made from + those copies), or (b) primarily for and in connection with specific + products or compilations that contain the covered work, unless you + entered into that arrangement, or that patent license was granted, + prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting + any implied license or other defenses to infringement that may + otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement + or otherwise) that contradict the conditions of this License, they + do not excuse you from the conditions of this License. If you + cannot convey a covered work so as to satisfy simultaneously your + obligations under this License and any other pertinent obligations, + then as a consequence you may not convey it at all. For example, + if you agree to terms that obligate you to collect a royalty for + further conveying from those to whom you convey the Program, the + only way you could satisfy both those terms and this License would + be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have + permission to link or combine any covered work with a work licensed + under version 3 of the GNU Affero General Public License into a + single combined work, and to convey the resulting work. 
The terms + of this License will continue to apply to the part which is the + covered work, but the special requirements of the GNU Affero + General Public License, section 13, concerning interaction through + a network will apply to the combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new + versions of the GNU General Public License from time to time. Such + new versions will be similar in spirit to the present version, but + may differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the + Program specifies that a certain numbered version of the GNU + General Public License "or any later version" applies to it, you + have the option of following the terms and conditions either of + that numbered version or of any later version published by the Free + Software Foundation. If the Program does not specify a version + number of the GNU General Public License, you may choose any + version ever published by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future + versions of the GNU General Public License can be used, that + proxy's public statement of acceptance of a version permanently + authorizes you to choose that version for the Program. + + Later license versions may give you additional or different + permissions. However, no additional obligations are imposed on any + author or copyright holder as a result of your choosing to follow a + later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY + APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE + COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" + WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, + INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
THE ENTIRE + RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. + SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL + NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN + WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES + AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR + DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR + CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE + THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA + BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD + PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER + PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF + THE POSSIBILITY OF SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided + above cannot be given local legal effect according to their terms, + reviewing courts shall apply local law that most closely + approximates an absolute waiver of all civil liability in + connection with the Program, unless a warranty or assumption of + liability accompanies a copy of the Program in return for a fee. + +END OF TERMS AND CONDITIONS +=========================== + +How to Apply These Terms to Your New Programs +============================================= + +If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these +terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. 
+ + ONE LINE TO GIVE THE PROGRAM'S NAME AND A BRIEF IDEA OF WHAT IT DOES. + Copyright (C) YEAR NAME OF AUTHOR + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or (at + your option) any later version. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + + Also add information on how to contact you by electronic and paper +mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + PROGRAM Copyright (C) YEAR NAME OF AUTHOR + This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type 'show c' for details. + + The hypothetical commands 'show w' and 'show c' should show the +appropriate parts of the General Public License. Of course, your +program's commands might be different; for a GUI interface, you would +use an "about box". + + You should also get your employer (if you work as a programmer) or +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. For more information on this, and how to apply and follow +the GNU GPL, see . + + The GNU General Public License does not permit incorporating your +program into proprietary programs. If your program is a subroutine +library, you may consider it more useful to permit linking proprietary +applications with the library. If this is what you want to do, use the +GNU Lesser General Public License instead of this License. But first, +please read . 
+ + +File: libgomp.info, Node: GNU Free Documentation License, Next: Funding, Prev: Copying, Up: Top + +GNU Free Documentation License +****************************** + + Version 1.3, 3 November 2008 + + Copyright (C) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc. + + + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + 0. PREAMBLE + + The purpose of this License is to make a manual, textbook, or other + functional and useful document "free" in the sense of freedom: to + assure everyone the effective freedom to copy and redistribute it, + with or without modifying it, either commercially or + noncommercially. Secondarily, this License preserves for the + author and publisher a way to get credit for their work, while not + being considered responsible for modifications made by others. + + This License is a kind of "copyleft", which means that derivative + works of the document must themselves be free in the same sense. + It complements the GNU General Public License, which is a copyleft + license designed for free software. + + We have designed this License in order to use it for manuals for + free software, because free software needs free documentation: a + free program should come with manuals providing the same freedoms + that the software does. But this License is not limited to + software manuals; it can be used for any textual work, regardless + of subject matter or whether it is published as a printed book. We + recommend this License principally for works whose purpose is + instruction or reference. + + 1. APPLICABILITY AND DEFINITIONS + + This License applies to any manual or other work, in any medium, + that contains a notice placed by the copyright holder saying it can + be distributed under the terms of this License. Such a notice + grants a world-wide, royalty-free license, unlimited in duration, + to use that work under the conditions stated herein. 
The + "Document", below, refers to any such manual or work. Any member + of the public is a licensee, and is addressed as "you". You accept + the license if you copy, modify or distribute the work in a way + requiring permission under copyright law. + + A "Modified Version" of the Document means any work containing the + Document or a portion of it, either copied verbatim, or with + modifications and/or translated into another language. + + A "Secondary Section" is a named appendix or a front-matter section + of the Document that deals exclusively with the relationship of the + publishers or authors of the Document to the Document's overall + subject (or to related matters) and contains nothing that could + fall directly within that overall subject. (Thus, if the Document + is in part a textbook of mathematics, a Secondary Section may not + explain any mathematics.) The relationship could be a matter of + historical connection with the subject or with related matters, or + of legal, commercial, philosophical, ethical or political position + regarding them. + + The "Invariant Sections" are certain Secondary Sections whose + titles are designated, as being those of Invariant Sections, in the + notice that says that the Document is released under this License. + If a section does not fit the above definition of Secondary then it + is not allowed to be designated as Invariant. The Document may + contain zero Invariant Sections. If the Document does not identify + any Invariant Sections then there are none. + + The "Cover Texts" are certain short passages of text that are + listed, as Front-Cover Texts or Back-Cover Texts, in the notice + that says that the Document is released under this License. A + Front-Cover Text may be at most 5 words, and a Back-Cover Text may + be at most 25 words. 
+ + A "Transparent" copy of the Document means a machine-readable copy, + represented in a format whose specification is available to the + general public, that is suitable for revising the document + straightforwardly with generic text editors or (for images composed + of pixels) generic paint programs or (for drawings) some widely + available drawing editor, and that is suitable for input to text + formatters or for automatic translation to a variety of formats + suitable for input to text formatters. A copy made in an otherwise + Transparent file format whose markup, or absence of markup, has + been arranged to thwart or discourage subsequent modification by + readers is not Transparent. An image format is not Transparent if + used for any substantial amount of text. A copy that is not + "Transparent" is called "Opaque". + + Examples of suitable formats for Transparent copies include plain + ASCII without markup, Texinfo input format, LaTeX input format, + SGML or XML using a publicly available DTD, and standard-conforming + simple HTML, PostScript or PDF designed for human modification. + Examples of transparent image formats include PNG, XCF and JPG. + Opaque formats include proprietary formats that can be read and + edited only by proprietary word processors, SGML or XML for which + the DTD and/or processing tools are not generally available, and + the machine-generated HTML, PostScript or PDF produced by some word + processors for output purposes only. + + The "Title Page" means, for a printed book, the title page itself, + plus such following pages as are needed to hold, legibly, the + material this License requires to appear in the title page. For + works in formats which do not have any title page as such, "Title + Page" means the text near the most prominent appearance of the + work's title, preceding the beginning of the body of the text. + + The "publisher" means any person or entity that distributes copies + of the Document to the public. 
+ + A section "Entitled XYZ" means a named subunit of the Document + whose title either is precisely XYZ or contains XYZ in parentheses + following text that translates XYZ in another language. (Here XYZ + stands for a specific section name mentioned below, such as + "Acknowledgements", "Dedications", "Endorsements", or "History".) + To "Preserve the Title" of such a section when you modify the + Document means that it remains a section "Entitled XYZ" according + to this definition. + + The Document may include Warranty Disclaimers next to the notice + which states that this License applies to the Document. These + Warranty Disclaimers are considered to be included by reference in + this License, but only as regards disclaiming warranties: any other + implication that these Warranty Disclaimers may have is void and + has no effect on the meaning of this License. + + 2. VERBATIM COPYING + + You may copy and distribute the Document in any medium, either + commercially or noncommercially, provided that this License, the + copyright notices, and the license notice saying this License + applies to the Document are reproduced in all copies, and that you + add no other conditions whatsoever to those of this License. You + may not use technical measures to obstruct or control the reading + or further copying of the copies you make or distribute. However, + you may accept compensation in exchange for copies. If you + distribute a large enough number of copies you must also follow the + conditions in section 3. + + You may also lend copies, under the same conditions stated above, + and you may publicly display copies. + + 3. 
COPYING IN QUANTITY + + If you publish printed copies (or copies in media that commonly + have printed covers) of the Document, numbering more than 100, and + the Document's license notice requires Cover Texts, you must + enclose the copies in covers that carry, clearly and legibly, all + these Cover Texts: Front-Cover Texts on the front cover, and + Back-Cover Texts on the back cover. Both covers must also clearly + and legibly identify you as the publisher of these copies. The + front cover must present the full title with all words of the title + equally prominent and visible. You may add other material on the + covers in addition. Copying with changes limited to the covers, as + long as they preserve the title of the Document and satisfy these + conditions, can be treated as verbatim copying in other respects. + + If the required texts for either cover are too voluminous to fit + legibly, you should put the first ones listed (as many as fit + reasonably) on the actual cover, and continue the rest onto + adjacent pages. + + If you publish or distribute Opaque copies of the Document + numbering more than 100, you must either include a machine-readable + Transparent copy along with each Opaque copy, or state in or with + each Opaque copy a computer-network location from which the general + network-using public has access to download using public-standard + network protocols a complete Transparent copy of the Document, free + of added material. If you use the latter option, you must take + reasonably prudent steps, when you begin distribution of Opaque + copies in quantity, to ensure that this Transparent copy will + remain thus accessible at the stated location until at least one + year after the last time you distribute an Opaque copy (directly or + through your agents or retailers) of that edition to the public. 
+ + It is requested, but not required, that you contact the authors of + the Document well before redistributing any large number of copies, + to give them a chance to provide you with an updated version of the + Document. + + 4. MODIFICATIONS + + You may copy and distribute a Modified Version of the Document + under the conditions of sections 2 and 3 above, provided that you + release the Modified Version under precisely this License, with the + Modified Version filling the role of the Document, thus licensing + distribution and modification of the Modified Version to whoever + possesses a copy of it. In addition, you must do these things in + the Modified Version: + + A. Use in the Title Page (and on the covers, if any) a title + distinct from that of the Document, and from those of previous + versions (which should, if there were any, be listed in the + History section of the Document). You may use the same title + as a previous version if the original publisher of that + version gives permission. + + B. List on the Title Page, as authors, one or more persons or + entities responsible for authorship of the modifications in + the Modified Version, together with at least five of the + principal authors of the Document (all of its principal + authors, if it has fewer than five), unless they release you + from this requirement. + + C. State on the Title page the name of the publisher of the + Modified Version, as the publisher. + + D. Preserve all the copyright notices of the Document. + + E. Add an appropriate copyright notice for your modifications + adjacent to the other copyright notices. + + F. Include, immediately after the copyright notices, a license + notice giving the public permission to use the Modified + Version under the terms of this License, in the form shown in + the Addendum below. + + G. Preserve in that license notice the full lists of Invariant + Sections and required Cover Texts given in the Document's + license notice. + + H. 
Include an unaltered copy of this License. + + I. Preserve the section Entitled "History", Preserve its Title, + and add to it an item stating at least the title, year, new + authors, and publisher of the Modified Version as given on the + Title Page. If there is no section Entitled "History" in the + Document, create one stating the title, year, authors, and + publisher of the Document as given on its Title Page, then add + an item describing the Modified Version as stated in the + previous sentence. + + J. Preserve the network location, if any, given in the Document + for public access to a Transparent copy of the Document, and + likewise the network locations given in the Document for + previous versions it was based on. These may be placed in the + "History" section. You may omit a network location for a work + that was published at least four years before the Document + itself, or if the original publisher of the version it refers + to gives permission. + + K. For any section Entitled "Acknowledgements" or "Dedications", + Preserve the Title of the section, and preserve in the section + all the substance and tone of each of the contributor + acknowledgements and/or dedications given therein. + + L. Preserve all the Invariant Sections of the Document, unaltered + in their text and in their titles. Section numbers or the + equivalent are not considered part of the section titles. + + M. Delete any section Entitled "Endorsements". Such a section + may not be included in the Modified Version. + + N. Do not retitle any existing section to be Entitled + "Endorsements" or to conflict in title with any Invariant + Section. + + O. Preserve any Warranty Disclaimers. + + If the Modified Version includes new front-matter sections or + appendices that qualify as Secondary Sections and contain no + material copied from the Document, you may at your option designate + some or all of these sections as invariant. 
To do this, add their + titles to the list of Invariant Sections in the Modified Version's + license notice. These titles must be distinct from any other + section titles. + + You may add a section Entitled "Endorsements", provided it contains + nothing but endorsements of your Modified Version by various + parties--for example, statements of peer review or that the text + has been approved by an organization as the authoritative + definition of a standard. + + You may add a passage of up to five words as a Front-Cover Text, + and a passage of up to 25 words as a Back-Cover Text, to the end of + the list of Cover Texts in the Modified Version. Only one passage + of Front-Cover Text and one of Back-Cover Text may be added by (or + through arrangements made by) any one entity. If the Document + already includes a cover text for the same cover, previously added + by you or by arrangement made by the same entity you are acting on + behalf of, you may not add another; but you may replace the old + one, on explicit permission from the previous publisher that added + the old one. + + The author(s) and publisher(s) of the Document do not by this + License give permission to use their names for publicity for or to + assert or imply endorsement of any Modified Version. + + 5. COMBINING DOCUMENTS + + You may combine the Document with other documents released under + this License, under the terms defined in section 4 above for + modified versions, provided that you include in the combination all + of the Invariant Sections of all of the original documents, + unmodified, and list them all as Invariant Sections of your + combined work in its license notice, and that you preserve all + their Warranty Disclaimers. + + The combined work need only contain one copy of this License, and + multiple identical Invariant Sections may be replaced with a single + copy. 
If there are multiple Invariant Sections with the same name + but different contents, make the title of each such section unique + by adding at the end of it, in parentheses, the name of the + original author or publisher of that section if known, or else a + unique number. Make the same adjustment to the section titles in + the list of Invariant Sections in the license notice of the + combined work. + + In the combination, you must combine any sections Entitled + "History" in the various original documents, forming one section + Entitled "History"; likewise combine any sections Entitled + "Acknowledgements", and any sections Entitled "Dedications". You + must delete all sections Entitled "Endorsements." + + 6. COLLECTIONS OF DOCUMENTS + + You may make a collection consisting of the Document and other + documents released under this License, and replace the individual + copies of this License in the various documents with a single copy + that is included in the collection, provided that you follow the + rules of this License for verbatim copying of each of the documents + in all other respects. + + You may extract a single document from such a collection, and + distribute it individually under this License, provided you insert + a copy of this License into the extracted document, and follow this + License in all other respects regarding verbatim copying of that + document. + + 7. AGGREGATION WITH INDEPENDENT WORKS + + A compilation of the Document or its derivatives with other + separate and independent documents or works, in or on a volume of a + storage or distribution medium, is called an "aggregate" if the + copyright resulting from the compilation is not used to limit the + legal rights of the compilation's users beyond what the individual + works permit. When the Document is included in an aggregate, this + License does not apply to the other works in the aggregate which + are not themselves derivative works of the Document. 
+ + If the Cover Text requirement of section 3 is applicable to these + copies of the Document, then if the Document is less than one half + of the entire aggregate, the Document's Cover Texts may be placed + on covers that bracket the Document within the aggregate, or the + electronic equivalent of covers if the Document is in electronic + form. Otherwise they must appear on printed covers that bracket + the whole aggregate. + + 8. TRANSLATION + + Translation is considered a kind of modification, so you may + distribute translations of the Document under the terms of section + 4. Replacing Invariant Sections with translations requires special + permission from their copyright holders, but you may include + translations of some or all Invariant Sections in addition to the + original versions of these Invariant Sections. You may include a + translation of this License, and all the license notices in the + Document, and any Warranty Disclaimers, provided that you also + include the original English version of this License and the + original versions of those notices and disclaimers. In case of a + disagreement between the translation and the original version of + this License or a notice or disclaimer, the original version will + prevail. + + If a section in the Document is Entitled "Acknowledgements", + "Dedications", or "History", the requirement (section 4) to + Preserve its Title (section 1) will typically require changing the + actual title. + + 9. TERMINATION + + You may not copy, modify, sublicense, or distribute the Document + except as expressly provided under this License. Any attempt + otherwise to copy, modify, sublicense, or distribute it is void, + and will automatically terminate your rights under this License. 
+ + However, if you cease all violation of this License, then your + license from a particular copyright holder is reinstated (a) + provisionally, unless and until the copyright holder explicitly and + finally terminates your license, and (b) permanently, if the + copyright holder fails to notify you of the violation by some + reasonable means prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is + reinstated permanently if the copyright holder notifies you of the + violation by some reasonable means, this is the first time you have + received notice of violation of this License (for any work) from + that copyright holder, and you cure the violation prior to 30 days + after your receipt of the notice. + + Termination of your rights under this section does not terminate + the licenses of parties who have received copies or rights from you + under this License. If your rights have been terminated and not + permanently reinstated, receipt of a copy of some or all of the + same material does not give you any rights to use it. + + 10. FUTURE REVISIONS OF THIS LICENSE + + The Free Software Foundation may publish new, revised versions of + the GNU Free Documentation License from time to time. Such new + versions will be similar in spirit to the present version, but may + differ in detail to address new problems or concerns. See + . + + Each version of the License is given a distinguishing version + number. If the Document specifies that a particular numbered + version of this License "or any later version" applies to it, you + have the option of following the terms and conditions either of + that specified version or of any later version that has been + published (not as a draft) by the Free Software Foundation. If the + Document does not specify a version number of this License, you may + choose any version ever published (not as a draft) by the Free + Software Foundation. 
If the Document specifies that a proxy can + decide which future versions of this License can be used, that + proxy's public statement of acceptance of a version permanently + authorizes you to choose that version for the Document. + + 11. RELICENSING + + "Massive Multiauthor Collaboration Site" (or "MMC Site") means any + World Wide Web server that publishes copyrightable works and also + provides prominent facilities for anybody to edit those works. A + public wiki that anybody can edit is an example of such a server. + A "Massive Multiauthor Collaboration" (or "MMC") contained in the + site means any set of copyrightable works thus published on the MMC + site. + + "CC-BY-SA" means the Creative Commons Attribution-Share Alike 3.0 + license published by Creative Commons Corporation, a not-for-profit + corporation with a principal place of business in San Francisco, + California, as well as future copyleft versions of that license + published by that same organization. + + "Incorporate" means to publish or republish a Document, in whole or + in part, as part of another Document. + + An MMC is "eligible for relicensing" if it is licensed under this + License, and if all works that were first published under this + License somewhere other than this MMC, and subsequently + incorporated in whole or in part into the MMC, (1) had no cover + texts or invariant sections, and (2) were thus incorporated prior + to November 1, 2008. + + The operator of an MMC Site may republish an MMC contained in the + site under CC-BY-SA on the same site at any time before August 1, + 2009, provided the MMC is eligible for relicensing. + +ADDENDUM: How to use this License for your documents +==================================================== + +To use this License in a document you have written, include a copy of +the License in the document and put the following copyright and license +notices just after the title page: + + Copyright (C) YEAR YOUR NAME. 
+ Permission is granted to copy, distribute and/or modify this document + under the terms of the GNU Free Documentation License, Version 1.3 + or any later version published by the Free Software Foundation; + with no Invariant Sections, no Front-Cover Texts, and no Back-Cover + Texts. A copy of the license is included in the section entitled ``GNU + Free Documentation License''. + + If you have Invariant Sections, Front-Cover Texts and Back-Cover +Texts, replace the "with...Texts." line with this: + + with the Invariant Sections being LIST THEIR TITLES, with + the Front-Cover Texts being LIST, and with the Back-Cover Texts + being LIST. + + If you have Invariant Sections without Cover Texts, or some other +combination of the three, merge those two alternatives to suit the +situation. + + If your document contains nontrivial examples of program code, we +recommend releasing these examples in parallel under your choice of free +software license, such as the GNU General Public License, to permit +their use in free software. + + +File: libgomp.info, Node: Funding, Next: Library Index, Prev: GNU Free Documentation License, Up: Top + +Funding Free Software +********************* + +If you want to have more free software a few years from now, it makes +sense for you to help encourage people to contribute funds for its +development. The most effective approach known is to encourage +commercial redistributors to donate. + + Users of free software systems can boost the pace of development by +encouraging for-a-fee distributors to donate part of their selling price +to free software developers--the Free Software Foundation, and others. + + The way to convince distributors to do this is to demand it and +expect it from them. So when you compare distributors, judge them +partly by how much they give to free software development. Show +distributors they must compete to be the one who gives the most. 
+ + To make this approach work, you must insist on numbers that you can +compare, such as, "We will donate ten dollars to the Frobnitz project +for each disk sold." Don't be satisfied with a vague promise, such as +"A portion of the profits are donated," since it doesn't give a basis +for comparison. + + Even a precise fraction "of the profits from this disk" is not very +meaningful, since creative accounting and unrelated business decisions +can greatly alter what fraction of the sales price counts as profit. If +the price you pay is $50, ten percent of the profit is probably less +than a dollar; it might be a few cents, or nothing at all. + + Some redistributors do development work themselves. This is useful +too; but to keep everyone honest, you need to inquire how much they do, +and what kind. Some kinds of development make much more long-term +difference than others. For example, maintaining a separate version of +a program contributes very little; maintaining the standard version of a +program for the whole community contributes much. Easy new ports +contribute little, since someone else would surely do them; difficult +ports such as adding a new CPU to the GNU Compiler Collection contribute +more; major new features or packages contribute the most. + + By establishing the idea that supporting further development is "the +proper thing to do" when distributing free software for a fee, we can +assure a steady flow of resources into making more free software. + + Copyright (C) 1994 Free Software Foundation, Inc. + Verbatim copying and redistribution of this section is permitted + without royalty; alteration is not permitted. + + +File: libgomp.info, Node: Library Index, Prev: Funding, Up: Top + +Library Index +************* + +[index] +* Menu: + +* acc_get_property: acc_get_property. (line 6) +* acc_get_property_string: acc_get_property. (line 6) +* Environment Variable: OMP_CANCELLATION. (line 6) +* Environment Variable <1>: OMP_DISPLAY_ENV. 
(line 6) +* Environment Variable <2>: OMP_DEFAULT_DEVICE. (line 6) +* Environment Variable <3>: OMP_DYNAMIC. (line 6) +* Environment Variable <4>: OMP_MAX_ACTIVE_LEVELS. (line 6) +* Environment Variable <5>: OMP_MAX_TASK_PRIORITY. (line 6) +* Environment Variable <6>: OMP_NESTED. (line 6) +* Environment Variable <7>: OMP_NUM_THREADS. (line 6) +* Environment Variable <8>: OMP_PROC_BIND. (line 6) +* Environment Variable <9>: OMP_PLACES. (line 6) +* Environment Variable <10>: OMP_STACKSIZE. (line 6) +* Environment Variable <11>: OMP_SCHEDULE. (line 6) +* Environment Variable <12>: OMP_TARGET_OFFLOAD. (line 6) +* Environment Variable <13>: OMP_THREAD_LIMIT. (line 6) +* Environment Variable <14>: OMP_WAIT_POLICY. (line 6) +* Environment Variable <15>: GOMP_CPU_AFFINITY. (line 6) +* Environment Variable <16>: GOMP_DEBUG. (line 6) +* Environment Variable <17>: GOMP_STACKSIZE. (line 6) +* Environment Variable <18>: GOMP_SPINCOUNT. (line 6) +* Environment Variable <19>: GOMP_RTEMS_THREAD_POOLS. + (line 6) +* FDL, GNU Free Documentation License: GNU Free Documentation License. + (line 6) +* Implementation specific setting: OMP_NESTED. (line 6) +* Implementation specific setting <1>: OMP_NUM_THREADS. (line 6) +* Implementation specific setting <2>: OMP_SCHEDULE. (line 6) +* Implementation specific setting <3>: OMP_TARGET_OFFLOAD. (line 6) +* Implementation specific setting <4>: GOMP_STACKSIZE. (line 6) +* Implementation specific setting <5>: GOMP_SPINCOUNT. (line 6) +* Implementation specific setting <6>: GOMP_RTEMS_THREAD_POOLS. + (line 6) +* Introduction: Top. 
(line 6) + + + +Tag Table: +Node: Top2083 +Node: Enabling OpenMP4645 +Node: Runtime Library Routines5421 +Node: omp_get_active_level8741 +Node: omp_get_ancestor_thread_num9441 +Node: omp_get_cancellation10371 +Node: omp_get_default_device11185 +Node: omp_get_dynamic11861 +Node: omp_get_initial_device12745 +Node: omp_get_level13489 +Node: omp_get_max_active_levels14116 +Node: omp_get_max_task_priority14837 +Node: omp_get_max_threads15457 +Node: omp_get_nested16216 +Node: omp_get_num_devices17824 +Node: omp_get_num_procs18345 +Node: omp_get_num_teams18884 +Node: omp_get_num_threads19400 +Node: omp_get_proc_bind20489 +Node: omp_get_schedule21410 +Node: omp_get_supported_active_levels22379 +Node: omp_get_team_num23165 +Node: omp_get_team_size23679 +Node: omp_get_thread_limit24639 +Node: omp_get_thread_num25258 +Node: omp_in_parallel26129 +Node: omp_in_final26778 +Node: omp_is_initial_device27452 +Node: omp_set_default_device28145 +Node: omp_set_dynamic28936 +Node: omp_set_max_active_levels29822 +Node: omp_set_nested30744 +Node: omp_set_num_threads31941 +Node: omp_set_schedule32809 +Node: omp_init_lock33890 +Node: omp_set_lock34543 +Node: omp_test_lock35398 +Node: omp_unset_lock36374 +Node: omp_destroy_lock37305 +Node: omp_init_nest_lock37982 +Node: omp_set_nest_lock38717 +Node: omp_test_nest_lock39632 +Node: omp_unset_nest_lock40659 +Node: omp_destroy_nest_lock41674 +Node: omp_get_wtick42425 +Node: omp_get_wtime43017 +Node: omp_fulfill_event43819 +Node: Environment Variables44840 +Node: OMP_CANCELLATION46467 +Node: OMP_DISPLAY_ENV47000 +Node: OMP_DEFAULT_DEVICE47703 +Node: OMP_DYNAMIC48483 +Node: OMP_MAX_ACTIVE_LEVELS49079 +Node: OMP_MAX_TASK_PRIORITY50006 +Node: OMP_NESTED50664 +Node: OMP_NUM_THREADS51693 +Node: OMP_PROC_BIND52495 +Node: OMP_PLACES53806 +Node: OMP_STACKSIZE55983 +Node: OMP_SCHEDULE56807 +Node: OMP_TARGET_OFFLOAD57507 +Node: OMP_THREAD_LIMIT58463 +Node: OMP_WAIT_POLICY59069 +Node: GOMP_CPU_AFFINITY59761 +Node: GOMP_DEBUG61491 +Node: GOMP_STACKSIZE61998 
+Node: GOMP_SPINCOUNT62829 +Node: GOMP_RTEMS_THREAD_POOLS64033 +Node: Enabling OpenACC66211 +Node: OpenACC Runtime Library Routines67112 +Node: acc_get_num_devices71393 +Node: acc_set_device_type72119 +Node: acc_get_device_type72883 +Node: acc_set_device_num73896 +Node: acc_get_device_num74713 +Node: acc_get_property75512 +Node: acc_async_test77735 +Node: acc_async_test_all78723 +Node: acc_wait79623 +Node: acc_wait_all80486 +Node: acc_wait_all_async81247 +Node: acc_wait_async81999 +Node: acc_init82707 +Node: acc_shutdown83352 +Node: acc_on_device84019 +Node: acc_malloc85023 +Node: acc_free85522 +Node: acc_copyin85949 +Node: acc_present_or_copyin87536 +Node: acc_create89314 +Node: acc_present_or_create90946 +Node: acc_copyout92732 +Node: acc_delete95036 +Node: acc_update_device97283 +Node: acc_update_self98857 +Node: acc_map_data100447 +Node: acc_unmap_data101132 +Node: acc_deviceptr101653 +Node: acc_hostptr102223 +Node: acc_is_present102787 +Node: acc_memcpy_to_device104314 +Node: acc_memcpy_from_device104977 +Node: acc_attach105644 +Node: acc_detach106291 +Node: acc_get_current_cuda_device107070 +Node: acc_get_current_cuda_context107655 +Node: acc_get_cuda_stream108255 +Node: acc_set_cuda_stream108846 +Node: acc_prof_register109517 +Node: acc_prof_unregister110076 +Node: acc_prof_lookup110643 +Node: acc_register_library111164 +Node: OpenACC Environment Variables111730 +Node: ACC_DEVICE_TYPE112302 +Node: ACC_DEVICE_NUM112538 +Node: ACC_PROFLIB112792 +Node: GCC_ACC_NOTIFY113123 +Node: CUDA Streams Usage113343 +Ref: CUDA Streams Usage-Footnote-1115244 +Node: OpenACC Library Interoperability115353 +Ref: OpenACC Library Interoperability-Footnote-1121721 +Ref: OpenACC Library Interoperability-Footnote-2121973 +Node: OpenACC Profiling Interface122181 +Node: The libgomp ABI132205 +Node: Implementing MASTER construct133058 +Node: Implementing CRITICAL construct133474 +Node: Implementing ATOMIC construct134215 +Node: Implementing FLUSH construct134698 +Node: Implementing 
BARRIER construct134971 +Node: Implementing THREADPRIVATE construct135242 +Node: Implementing PRIVATE clause135897 +Node: Implementing FIRSTPRIVATE LASTPRIVATE COPYIN and COPYPRIVATE clauses136480 +Node: Implementing REDUCTION clause137806 +Node: Implementing PARALLEL construct138365 +Node: Implementing FOR construct139624 +Node: Implementing ORDERED construct141624 +Node: Implementing SECTIONS construct141932 +Node: Implementing SINGLE construct142700 +Node: Implementing OpenACC's PARALLEL construct143414 +Node: Reporting Bugs143674 +Node: Copying144037 +Node: GNU Free Documentation License181583 +Node: Funding206706 +Node: Library Index209232 + +End Tag Table + + +Local Variables: +coding: utf-8 +End: diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74ade160d9a151454377334f8be32ee878422388 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/quantization_config_pb2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/quantization_config_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38e4462ac53d2675da805bcc83fb0e4f1f5a9875 Binary files /dev/null and 
b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/quantization_config_pb2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/quantization_options_pb2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/quantization_options_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e4b3e94637e33c0d912a99e0c4992aa72fc6ccb Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/stablehlo/__pycache__/quantization_options_pb2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py new file mode 100644 index 0000000000000000000000000000000000000000..af472e76cbe1ec2031d967c0d975e2e6122a35e1 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_algorithm.py @@ -0,0 +1,393 @@ +# Copyright 2023 The TensorFlow Authors. 
All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Defines CalibrationAlgorithm for calculating min and max values calculated by calibration method.""" +import abc +import itertools +import logging + +import numpy as np + +from tensorflow.compiler.mlir.quantization.tensorflow import quantization_options_pb2 as quant_opts_pb2 +from tensorflow.compiler.mlir.quantization.tensorflow.calibrator import calibration_statistics_pb2 as calib_stats_pb2 + + +_CalibrationMethod = quant_opts_pb2.CalibrationOptions.CalibrationMethod +_REGISTRY = {} + + +def _implements(calib_method: _CalibrationMethod): + def decorator(cls): + assert calib_method not in _REGISTRY + _REGISTRY[calib_method] = cls + return cls + + return decorator + + +class _CalibrationAlgorithmBase(abc.ABC): + """Abstract base class for calibration algorithm.""" + + def __init__( + self, + statistics: calib_stats_pb2.CalibrationStatistics, + calib_opts: quant_opts_pb2.CalibrationOptions, + ): + self._statistics = statistics + self._calib_opts = calib_opts + + @abc.abstractmethod + def get_min_max_value(self) -> tuple[float, float]: + pass + + +class _HistogramCalibrationAlgorithmBase(_CalibrationAlgorithmBase): + """Base class for histogram calibrators.""" + + def __init__( + self, + statistics: calib_stats_pb2.CalibrationStatistics, + calib_opts: quant_opts_pb2.CalibrationOptions, + ): + """Builds histogram using 
statistics.histogram_statistics. + + lower_bound hist_mid + v v + |=========|=========|=========|=========|=========| + bin width + + Args: + statistics: Collected calibration statistics. + calib_opts: Calibration options used for calculating min and max. + """ + super().__init__(statistics, calib_opts) + hist_stats = statistics.histogram_statistics + self._bin_width = hist_stats.bin_width + self._lower_bound = hist_stats.lower_bound + self._hist_freq = np.array(hist_stats.hist_freq) + self._num_bins = len(self._hist_freq) + self._num_bits = 8 + # i-th bin has a range [bins[i], bins[i + 1]). + # bins[i] = lower_bound + i * bin_width + # bins[i + 1] = lower_bound + (i + 1) * bin_width + # So hist_mids[i] = (lower_bound + bin_width / 2) + bin_width * i + first_mid = self._lower_bound + self._bin_width / 2 + last_mid = first_mid + (self._num_bins - 1) * self._bin_width + self._hist_mids = np.linspace(first_mid, last_mid, self._num_bins) + + def _get_dequantized_hist_mids_after_quantize( + self, quant_min: float, quant_max: float + ) -> np.ndarray: + """Quantizes and dequantizes hist_mids using quant_min and quant_max. + + Quantization converts the range of numbers from [quant_min, quant_max] to + [0, 2^num_bits - 1]. Values less than quant_min are converted to 0, and + values greater than quant_max are converted to 2^num_bits - 1. + + The histogram represents the distribution of the data, and our goal is to + find the quant_min and quant_max that best describe this distribution. To do + this, we quantize hist_mids using quant_min and quant_max and dequantize + them again. Then the difference between hist_mids and dequantized hist_mids + equates to quantization error when using quant_min and quant_max. + + + Args: + quant_min: The minimum real value that can be represented by a quantized + value. + quant_max: The maximum real value that can be represented by a quantized + value. 
+ + Returns: + dequantized hist_mids after quantizing by quant_min and quant_max + """ + maxbound = 2**self._num_bits - 1 + minbound = 0 + scale = (quant_max - quant_min) / maxbound + zero_point = -quant_min / scale + + # Limit the range of zero_point and scale in case (quant_max - quant_min) + # is unusually small. + if abs(zero_point) > 9e9: + zero_point = 9e9 + if abs(scale) < 1e-9: + scale = 1e-9 + + zero_point = round(zero_point) + quantized_hist_mids = np.clip( + np.round(self._hist_mids / scale) + zero_point, minbound, maxbound + ) + dequantized_hist_mids = scale * (quantized_hist_mids - zero_point) + return dequantized_hist_mids + + def _get_weighted_mean_squared_error( + self, quant_min, quant_max + ) -> tuple[float, float, float]: + """Gets mean squared error between hist_mids and dequantized hist_mids. + + Quantization converts the range of numbers from [quant_min, quant_max] to + [0, 2^num_bits - 1]. Values less than quant_min are converted to 0, and + values greater than quant_max are converted to 2^num_bits - 1. + + Args: + quant_min: The minimum real value that can be represented by a quantized + value. + quant_max: The maximum real value that can be represented by a quantized + value. + + Returns: + (error, quant_min, quant_max): Tuple of weighted mean squared error. + error = (hist_mids - dequantized_hist_mids)**2 * hist_freq + """ + dequantized_hist_mids = self._get_dequantized_hist_mids_after_quantize( + quant_min, quant_max + ) + squared_error = (self._hist_mids - dequantized_hist_mids) ** 2 + weighted_error = np.sum(squared_error * self._hist_freq) + return (weighted_error, quant_min, quant_max) + + def _get_min_max_value_by_expanding_range( + self, start_idx: int + ) -> tuple[float, float]: + """Starting from start_idx, expand left and right alternately to find the min value of mse loss. + + Args: + start_idx: Index to start quantization. + + Returns: + (min_value, max_value): Min and max calculated. 
+ """ + # Tuple of (mse_error, quant_min, quant_max). + mse_min = (float('inf'), float('inf'), float('inf')) + left, right = start_idx, start_idx + + # If this value is true, it moves left, otherwise it moves right. + move_left = True + while not (left == 0 and right == self._num_bins - 1): + # Decrease left if right can't be moved or move_left is true. + if (move_left and left > 0) or (right == self._num_bins - 1): + left = max(left - 1, 0) + # Else increase right. + else: + right = min(right + 1, self._num_bins - 1) + # Toogle the move_left. + move_left = not move_left + quant_min, quant_max = self._hist_mids[left], self._hist_mids[right] + mse_tuple = self._get_weighted_mean_squared_error(quant_min, quant_max) + mse_min = min(mse_tuple, mse_min) + # Extract (quant_min, quant_max) from (mse_error, quant_min, quant_max). + min_value, max_value = mse_min[1], mse_min[2] + return min_value, max_value + + +@_implements(_CalibrationMethod.CALIBRATION_METHOD_MIN_MAX) +class _MinMax(_CalibrationAlgorithmBase): + """MinMaxCalibrationAlgorithm for calculating min and max values of calibration result. + + MinMax calibration calculates the global min and global max values. + + global min = min of given sample inputs + global max = max of given sample inputs + """ + + def get_min_max_value(self) -> tuple[float, float]: + """Calculates the global min and max values. + + Returns: + (min_value, max_value): Min and max calculated using MinMax + """ + return ( + self._statistics.min_max_statistics.global_min, + self._statistics.min_max_statistics.global_max, + ) + + +@_implements(_CalibrationMethod.CALIBRATION_METHOD_AVERAGE_MIN_MAX) +class _AverageMinMax(_CalibrationAlgorithmBase): + """AverageMinMaxCalibrationAlgorithm for calculating min and max values of calibration result. + + AverageMinMax calibration calculates the average of min and max values. 
+ average of min = sum of min values / number of samples + average of max = sum of max values / number of samples + """ + + def get_min_max_value(self) -> tuple[float, float]: + """Calculates the average of min and max values. + + Returns: + (min_value, max_value): Min and max calculated using AverageMinMax + + Raises: + ValueError: num_samples is 0. + """ + average_min_max_statistics = self._statistics.average_min_max_statistics + # num_samples is guaranteed to be larger than 0 because + # get_statistics_from_calibrator throws an exception if num_samples == 0. + num_samples = average_min_max_statistics.num_samples + if num_samples == 0: + raise ValueError( + 'num_samples must not be 0 when calibration method is' + f' AverageMinMax: {self._calib_opts}' + ) + min_value, max_value = ( + average_min_max_statistics.min_sum / num_samples, + average_min_max_statistics.max_sum / num_samples, + ) + + return min_value, max_value + + +@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_PERCENTILE) +class _HistogramPercentile(_HistogramCalibrationAlgorithmBase): + """HistogramPercentile for calculating min and max values of calibration result.""" + + def get_min_max_value(self) -> tuple[float, float]: + """Calculates min and max from statistics using calibration options. + + A "percentile" is a statistical concept that represents the value below + which a given percentage of data falls in a dataset. It involves sorting the + data from smallest to largest and then finding the value at a specified + percentage position. For example, the 0.01 percentile represents the value + in a given data set that corresponds to the lowest 0.01% of the data. + + HistogramPercentile calibration uses min_percentile and max_percentile to + find min and max. + + min_percentile and max_percentile must be in range [0, 100]. + min_percentile is 0.001 by default. + max_percentile is 99.999 by default. 
+ + Returns: + (min_value, max_value): Min and max calculated using HistogramPercentile + """ + total_freq = sum(self._hist_freq) + # hist_freq_cumsum is dividing cumulative sum of hist_freq by total_freq + # hist_freq_cumsum's value is in range [0, 1] by its definition + hist_freq_cumsum = np.cumsum(self._hist_freq) / total_freq + + # min_percentile and max_percentile are converted from [0, 100] to [0, 1]. + min_quantile, max_quantile = ( + self._calib_opts.calibration_parameters.min_percentile / 100.0, + self._calib_opts.calibration_parameters.max_percentile / 100.0, + ) + + # Get index of min/max quantile. + min_quantile_idx, max_quantile_idx = ( + np.searchsorted(hist_freq_cumsum, min_quantile, side='right'), + np.searchsorted(hist_freq_cumsum, max_quantile, side='left'), + ) + + # Get value of min/max quantile index. + min_value, max_value = ( + self._hist_mids[min_quantile_idx], + self._hist_mids[max_quantile_idx], + ) + + return min_value, max_value + + +@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE) +class _HistogramMseBruteforce(_HistogramCalibrationAlgorithmBase): + """HistogramMseBruteforce for calculating min and max values of calibration result.""" + + def get_min_max_value(self) -> tuple[float, float]: + """Finds the optimal quant_min and quant_max by testing all possible cases. + + It guarantees optimal quant_min and quant_max for the representative + dataset, but not for the test dataset. + + Returns: + (min_value, max_value): Min and max calculated using + HistogramMseBruteforce. + """ + if self._num_bins > 512: + logging.warning( + 'num_bins=%d is too large. The HISTOGRAM_MSE_BRUTEFORCE method tests' + ' all histogram mid value pairs, so it may take a long time.', + self._num_bins, + ) + # Tuple of (mse_error, quant_min, quant_max). + mse_min = (float('inf'), float('inf'), float('inf')) + + # Calculate the error for all hist_mid pairs. 
+ for left, right in itertools.combinations(range(self._num_bins), 2): + quant_min, quant_max = self._hist_mids[left], self._hist_mids[right] + mse_tuple = self._get_weighted_mean_squared_error(quant_min, quant_max) + mse_min = min(mse_tuple, mse_min) + min_value, max_value = mse_min[1], mse_min[2] + + return min_value, max_value + + +@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY) +class _HistogramMseMaxFrequency(_HistogramCalibrationAlgorithmBase): + """HistogramMseMaxFrequency for calculating min and max values of calibration result.""" + + def get_min_max_value(self) -> tuple[float, float]: + """Finds min and max starting from the index of the max frequency. + + The HistogramMseMaxFrequency method starts from the bin with the highest + frequency and expands the range to both sides. This performs well when data + is well spread on both sides of the max frequency. + + Returns: + (min_value, max_value): Min and max calculated using method to expand the + range based on max frequency. + """ + # Find the index of max frequency. + freq_max_idx = np.argmax(self._hist_freq) + return self._get_min_max_value_by_expanding_range(freq_max_idx) + + +@_implements(_CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC) +class _HistogramMseSymmetric(_HistogramCalibrationAlgorithmBase): + """HistogramMseSymmetric for calculating min and max values of calibration result.""" + + def get_min_max_value(self) -> tuple[float, float]: + """Finds min and max starting from the center index. + + The HistogramMseSymmetric method starts from the center bin and expands the + range to both sides. This works better when the data is well-centered. + + Returns: + (min_value, max_value): Min and max calculated using the method starting + from center and expanding. + """ + + # This function is currently only called in this method, but will be used in + # other methods in the future. 
+ return self._get_min_max_value_by_expanding_range(self._num_bins // 2) + + +def get_min_max_value( + statistics: calib_stats_pb2.CalibrationStatistics, + calib_opts: quant_opts_pb2.CalibrationOptions, +) -> tuple[float, float]: + """Calculates min and max from statistics using calibration options. + + Args: + statistics: Collected calibration statistics. + calib_opts: Calibration options used for calculating min and max. + + Returns: + (min_value, max_value): Min and max calculated using calib_opts. + + Raises: + ValueError: Unsupported calibration method is given. + """ + calib_method = calib_opts.calibration_method + if calib_method not in _REGISTRY: + raise ValueError(f'Unsupported calibration method: {calib_method}') + + calibration_algorithm = _REGISTRY[calib_method](statistics, calib_opts) + return calibration_algorithm.get_min_max_value() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_pb2.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..df5929d1bab49045b0ae00aa3f43ea9be0fce5f9 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics_pb2.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nXtensorflow/compiler/mlir/quantization/tensorflow/calibrator/calibration_statistics.proto\x12\x15tensorflow.calibrator\"\x9c\x04\n\x15\x43\x61librationStatistics\x12Y\n\x12min_max_statistics\x18\x01 \x01(\x0b\x32=.tensorflow.calibrator.CalibrationStatistics.MinMaxStatistics\x12h\n\x1a\x61verage_min_max_statistics\x18\x02 \x01(\x0b\x32\x44.tensorflow.calibrator.CalibrationStatistics.AverageMinMaxStatistics\x12^\n\x14histogram_statistics\x18\x03 \x01(\x0b\x32@.tensorflow.calibrator.CalibrationStatistics.HistogramStatistics\x1a:\n\x10MinMaxStatistics\x12\x12\n\nglobal_min\x18\x01 \x01(\x02\x12\x12\n\nglobal_max\x18\x02 \x01(\x02\x1aP\n\x17\x41verageMinMaxStatistics\x12\x0f\n\x07min_sum\x18\x01 \x01(\x02\x12\x0f\n\x07max_sum\x18\x02 \x01(\x02\x12\x13\n\x0bnum_samples\x18\x03 \x01(\x05\x1aP\n\x13HistogramStatistics\x12\x11\n\tbin_width\x18\x01 \x01(\x02\x12\x13\n\x0blower_bound\x18\x02 \x01(\x02\x12\x11\n\thist_freq\x18\x03 \x03(\x03\x42\x03\xf8\x01\x01\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.quantization.tensorflow.calibrator.calibration_statistics_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\370\001\001' + _CALIBRATIONSTATISTICS._serialized_start=116 + _CALIBRATIONSTATISTICS._serialized_end=656 + 
_CALIBRATIONSTATISTICS_MINMAXSTATISTICS._serialized_start=434 + _CALIBRATIONSTATISTICS_MINMAXSTATISTICS._serialized_end=492 + _CALIBRATIONSTATISTICS_AVERAGEMINMAXSTATISTICS._serialized_start=494 + _CALIBRATIONSTATISTICS_AVERAGEMINMAXSTATISTICS._serialized_end=574 + _CALIBRATIONSTATISTICS_HISTOGRAMSTATISTICS._serialized_start=576 + _CALIBRATIONSTATISTICS_HISTOGRAMSTATISTICS._serialized_end=656 +# @@protoc_insertion_point(module_scope) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/pywrap_calibration.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/pywrap_calibration.pyi new file mode 100644 index 0000000000000000000000000000000000000000..5d859fee947364a62933c3befe6e958e656df73f --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/calibrator/pywrap_calibration.pyi @@ -0,0 +1,32 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +from tensorflow.compiler.mlir.quantization.tensorflow.calibrator import calibration_statistics_pb2 + +# LINT.IfChange(clear_calibrator) +def clear_calibrator() -> None: ... + +# LINT.ThenChange() + +# LINT.IfChange(clear_data_from_calibrator) +def clear_data_from_calibrator(id: bytes) -> None: ... 
+ +# LINT.ThenChange() + +# LINT.IfChange(get_statistics_from_calibrator) +def get_statistics_from_calibrator( + id: bytes, +) -> calibration_statistics_pb2.CalibrationStatistics: ... + +# LINT.ThenChange() diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/exported_model_pb2.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/exported_model_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..45d3f9c0b26523fa356814d7ca5c35c77bcf71f8 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/exported_model_pb2.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: tensorflow/compiler/mlir/quantization/tensorflow/exported_model.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2 +from tensorflow.core.protobuf import meta_graph_pb2 as tensorflow_dot_core_dot_protobuf_dot_meta__graph__pb2 +from tensorflow.core.protobuf import saver_pb2 as tensorflow_dot_core_dot_protobuf_dot_saver__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nEtensorflow/compiler/mlir/quantization/tensorflow/exported_model.proto\x12\x17tensorflow.quantization\x1a%tensorflow/core/framework/graph.proto\x1a)tensorflow/core/protobuf/meta_graph.proto\x1a$tensorflow/core/protobuf/saver.proto\"\xbe\x03\n\rExportedModel\x12\'\n\tgraph_def\x18\x01 \x01(\x0b\x32\x14.tensorflow.GraphDef\x12\x16\n\x0einit_node_name\x18\x02 
\x01(\t\x12\x16\n\x0e\x63heckpoint_dir\x18\x05 \x01(\t\x12U\n\x10\x66unction_aliases\x18\x06 \x03(\x0b\x32;.tensorflow.quantization.ExportedModel.FunctionAliasesEntry\x12\x31\n\x0f\x61sset_file_defs\x18\x08 \x03(\x0b\x32\x18.tensorflow.AssetFileDef\x12\'\n\tsaver_def\x18\n \x01(\x0b\x32\x14.tensorflow.SaverDef\x1a\x36\n\x14\x46unctionAliasesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x07\x10\x08J\x04\x08\t\x10\nR\x15variable_shared_namesR\x11restore_node_nameR\x0esave_node_nameR\x17\x66ile_prefix_tensor_nameB\x03\xf8\x01\x01\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.quantization.tensorflow.exported_model_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\370\001\001' + _EXPORTEDMODEL_FUNCTIONALIASESENTRY._options = None + _EXPORTEDMODEL_FUNCTIONALIASESENTRY._serialized_options = b'8\001' + _EXPORTEDMODEL._serialized_start=219 + _EXPORTEDMODEL._serialized_end=665 + _EXPORTEDMODEL_FUNCTIONALIASESENTRY._serialized_start=504 + _EXPORTEDMODEL_FUNCTIONALIASESENTRY._serialized_end=558 +# @@protoc_insertion_point(module_scope) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/quantization_options_pb2.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/quantization_options_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..d72e1a7b3e2d8908c43dcf0a25676aad1451981b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/mlir/quantization/tensorflow/quantization_options_pb2.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorflow.core.framework import tensor_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nKtensorflow/compiler/mlir/quantization/tensorflow/quantization_options.proto\x12\x17tensorflow.quantization\x1a&tensorflow/core/framework/tensor.proto\"\xed\x02\n\x12QuantizationMethod\x12O\n\rpreset_method\x18\x04 \x01(\x0e\x32\x38.tensorflow.quantization.QuantizationMethod.PresetMethod\x12X\n\x1cquantization_component_specs\x18\x03 \x03(\x0b\x32\x32.tensorflow.quantization.QuantizationComponentSpec\"\xa5\x01\n\x0cPresetMethod\x12\x16\n\x12METHOD_UNSPECIFIED\x10\x00\x12\x16\n\x12METHOD_NO_QUANTIZE\x10\x01\x12\x1c\n\x18METHOD_STATIC_RANGE_INT8\x10\x02\x12\x1d\n\x19METHOD_DYNAMIC_RANGE_INT8\x10\x03\x12(\n$METHOD_STATIC_RANGE_WEIGHT_ONLY_INT8\x10\x04J\x04\x08\x01\x10\x03\"\xbe\x03\n\x19QuantizationComponentSpec\x12h\n\x16quantization_component\x18\x01 \x01(\x0e\x32H.tensorflow.quantization.QuantizationComponentSpec.QuantizationComponent\x12R\n\x0btensor_type\x18\x02 
\x01(\x0e\x32=.tensorflow.quantization.QuantizationComponentSpec.TensorType\"v\n\x15QuantizationComponent\x12\x19\n\x15\x43OMPONENT_UNSPECIFIED\x10\x00\x12\x18\n\x14\x43OMPONENT_ACTIVATION\x10\x01\x12\x14\n\x10\x43OMPONENT_WEIGHT\x10\x02\x12\x12\n\x0e\x43OMPONENT_BIAS\x10\x03\"k\n\nTensorType\x12\x1a\n\x16TENSORTYPE_UNSPECIFIED\x10\x00\x12\x14\n\x10TENSORTYPE_INT_4\x10\x01\x12\x14\n\x10TENSORTYPE_INT_8\x10\x02\x12\x15\n\x11TENSORTYPE_INT_32\x10\x03\"\x87\x02\n\x18UnitWiseQuantizationSpec\x12P\n\x04unit\x18\x05 \x03(\x0b\x32\x42.tensorflow.quantization.UnitWiseQuantizationSpec.QuantizationUnit\x12H\n\x13quantization_method\x18\x06 \x01(\x0b\x32+.tensorflow.quantization.QuantizationMethod\x1aI\n\x10QuantizationUnit\x12\x0f\n\x07op_type\x18\x01 \x01(\t\x12\x11\n\tnode_name\x18\x02 \x01(\t\x12\x11\n\tfunc_name\x18\x03 \x01(\tJ\x04\x08\x01\x10\x05\"\xf9\x04\n\x12\x43\x61librationOptions\x12Y\n\x12\x63\x61libration_method\x18\x01 \x01(\x0e\x32=.tensorflow.quantization.CalibrationOptions.CalibrationMethod\x12\x61\n\x16\x63\x61libration_parameters\x18\x02 \x01(\x0b\x32\x41.tensorflow.quantization.CalibrationOptions.CalibrationParameters\x1a\x61\n\x15\x43\x61librationParameters\x12\x18\n\x10initial_num_bins\x18\x01 \x01(\x05\x12\x16\n\x0emin_percentile\x18\x02 \x01(\x02\x12\x16\n\x0emax_percentile\x18\x03 \x01(\x02\"\xc1\x02\n\x11\x43\x61librationMethod\x12\"\n\x1e\x43\x41LIBRATION_METHOD_UNSPECIFIED\x10\x00\x12\x1e\n\x1a\x43\x41LIBRATION_METHOD_MIN_MAX\x10\x01\x12&\n\"CALIBRATION_METHOD_AVERAGE_MIN_MAX\x10\x02\x12+\n\'CALIBRATION_METHOD_HISTOGRAM_PERCENTILE\x10\x03\x12/\n+CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE\x10\x04\x12\x32\n.CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY\x10\x05\x12.\n*CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC\x10\x06\"\xd4\x01\n\x18RepresentativeDataSample\x12\x65\n\x13tensor_proto_inputs\x18\x02 \x03(\x0b\x32H.tensorflow.quantization.RepresentativeDataSample.TensorProtoInputsEntry\x1aQ\n\x16TensorProtoInputsEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.tensorflow.TensorProto:\x02\x38\x01\"I\n\x19RepresentativeDatasetFile\x12\x1c\n\x12tfrecord_file_path\x18\x01 \x01(\tH\x00\x42\x0e\n\x0c\x64\x61taset_file\"\xad\x02\n\x0f\x44\x65\x62uggerOptions\x12L\n\rdebugger_type\x18\x01 \x01(\x0e\x32\x35.tensorflow.quantization.DebuggerOptions.DebuggerType\x12#\n\x1bunquantized_dump_model_path\x18\x02 \x01(\t\x12\x14\n\x0clog_dir_path\x18\x03 \x01(\t\"\x90\x01\n\x0c\x44\x65\x62uggerType\x12\x1d\n\x19\x44\x45\x42UGGER_TYPE_UNSPECIFIED\x10\x00\x12\x1d\n\x19\x44\x45\x42UGGER_TYPE_WHOLE_MODEL\x10\x01\x12\x1f\n\x1b\x44\x45\x42UGGER_TYPE_INT_PER_LAYER\x10\x02\x12!\n\x1d\x44\x45\x42UGGER_TYPE_FLOAT_PER_LAYER\x10\x03\"\xa5\x07\n\x13QuantizationOptions\x12H\n\x13quantization_method\x18\x01 \x01(\x0b\x32+.tensorflow.quantization.QuantizationMethod\x12.\n\x06op_set\x18\x02 \x01(\x0e\x32\x1e.tensorflow.quantization.OpSet\x12W\n\x1cunit_wise_quantization_specs\x18\x11 \x03(\x0b\x32\x31.tensorflow.quantization.UnitWiseQuantizationSpec\x12\x0c\n\x04tags\x18\x05 \x03(\t\x12\x16\n\x0esignature_keys\x18\x06 \x03(\t\x12i\n\x17representative_datasets\x18\x07 \x03(\x0b\x32H.tensorflow.quantization.QuantizationOptions.RepresentativeDatasetsEntry\x12$\n\x1cmin_num_elements_for_weights\x18\x08 \x01(\x03\x12!\n\x14\x66reeze_all_variables\x18\t \x01(\x08H\x00\x88\x01\x01\x12\'\n\x1f\x65nable_per_channel_quantization\x18\n \x01(\x08\x12 \n\x18\x65nable_two_input_tensors\x18\x0b \x01(\x08\x12-\n%experimental_enable_tpu_model_support\x18\x0c \x01(\x08\x12!\n\x19\x65nable_legacy_weight_only\x18\r \x01(\x08\x12$\n\x1c\x66orce_graph_mode_calibration\x18\x0e \x01(\x08\x12H\n\x13\x63\x61libration_options\x18\x0f \x01(\x0b\x32+.tensorflow.quantization.CalibrationOptions\x12\x42\n\x10\x64\x65\x62ugger_options\x18\x10 \x01(\x0b\x32(.tensorflow.quantization.DebuggerOptions\x1aq\n\x1bRepresentativeDatasetsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x41\n\x05value\x18\x02 
\x01(\x0b\x32\x32.tensorflow.quantization.RepresentativeDatasetFile:\x02\x38\x01\x42\x17\n\x15_freeze_all_variablesJ\x04\x08\x03\x10\x04*V\n\x05OpSet\x12\x16\n\x12OP_SET_UNSPECIFIED\x10\x00\x12\x06\n\x02TF\x10\x01\x12\x07\n\x03XLA\x10\x02\x12\x15\n\x11UNIFORM_QUANTIZED\x10\x03\x12\r\n\tSTABLEHLO\x10\x04\x42\x03\xf8\x01\x01\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.mlir.quantization.tensorflow.quantization_options_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\370\001\001' + _REPRESENTATIVEDATASAMPLE_TENSORPROTOINPUTSENTRY._options = None + _REPRESENTATIVEDATASAMPLE_TENSORPROTOINPUTSENTRY._serialized_options = b'8\001' + _QUANTIZATIONOPTIONS_REPRESENTATIVEDATASETSENTRY._options = None + _QUANTIZATIONOPTIONS_REPRESENTATIVEDATASETSENTRY._serialized_options = b'8\001' + _OPSET._serialized_start=3393 + _OPSET._serialized_end=3479 + _QUANTIZATIONMETHOD._serialized_start=145 + _QUANTIZATIONMETHOD._serialized_end=510 + _QUANTIZATIONMETHOD_PRESETMETHOD._serialized_start=339 + _QUANTIZATIONMETHOD_PRESETMETHOD._serialized_end=504 + _QUANTIZATIONCOMPONENTSPEC._serialized_start=513 + _QUANTIZATIONCOMPONENTSPEC._serialized_end=959 + _QUANTIZATIONCOMPONENTSPEC_QUANTIZATIONCOMPONENT._serialized_start=732 + _QUANTIZATIONCOMPONENTSPEC_QUANTIZATIONCOMPONENT._serialized_end=850 + _QUANTIZATIONCOMPONENTSPEC_TENSORTYPE._serialized_start=852 + _QUANTIZATIONCOMPONENTSPEC_TENSORTYPE._serialized_end=959 + _UNITWISEQUANTIZATIONSPEC._serialized_start=962 + _UNITWISEQUANTIZATIONSPEC._serialized_end=1225 + _UNITWISEQUANTIZATIONSPEC_QUANTIZATIONUNIT._serialized_start=1146 + _UNITWISEQUANTIZATIONSPEC_QUANTIZATIONUNIT._serialized_end=1219 + _CALIBRATIONOPTIONS._serialized_start=1228 + _CALIBRATIONOPTIONS._serialized_end=1861 + _CALIBRATIONOPTIONS_CALIBRATIONPARAMETERS._serialized_start=1440 + 
_CALIBRATIONOPTIONS_CALIBRATIONPARAMETERS._serialized_end=1537 + _CALIBRATIONOPTIONS_CALIBRATIONMETHOD._serialized_start=1540 + _CALIBRATIONOPTIONS_CALIBRATIONMETHOD._serialized_end=1861 + _REPRESENTATIVEDATASAMPLE._serialized_start=1864 + _REPRESENTATIVEDATASAMPLE._serialized_end=2076 + _REPRESENTATIVEDATASAMPLE_TENSORPROTOINPUTSENTRY._serialized_start=1995 + _REPRESENTATIVEDATASAMPLE_TENSORPROTOINPUTSENTRY._serialized_end=2076 + _REPRESENTATIVEDATASETFILE._serialized_start=2078 + _REPRESENTATIVEDATASETFILE._serialized_end=2151 + _DEBUGGEROPTIONS._serialized_start=2154 + _DEBUGGEROPTIONS._serialized_end=2455 + _DEBUGGEROPTIONS_DEBUGGERTYPE._serialized_start=2311 + _DEBUGGEROPTIONS_DEBUGGERTYPE._serialized_end=2455 + _QUANTIZATIONOPTIONS._serialized_start=2458 + _QUANTIZATIONOPTIONS._serialized_end=3391 + _QUANTIZATIONOPTIONS_REPRESENTATIVEDATASETSENTRY._serialized_start=3247 + _QUANTIZATIONOPTIONS_REPRESENTATIVEDATASETSENTRY._serialized_end=3360 +# @@protoc_insertion_point(module_scope) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2tensorrt/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2tensorrt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2tensorrt/_pywrap_py_utils.pyi b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2tensorrt/_pywrap_py_utils.pyi new file mode 100644 index 0000000000000000000000000000000000000000..1ef7abbd7d14b6cc491ad6b7f07dc371ae019141 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2tensorrt/_pywrap_py_utils.pyi @@ -0,0 +1,19 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +def get_linked_tensorrt_version() -> tuple[int,int,int]: ... +def get_loaded_tensorrt_version() -> tuple[int,int,int]: ... +def get_registered_op_converters() -> list[str]: ... +def is_tensorrt_enabled() -> bool: ... diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f0dd74f8e3100de1d28bd788b12717fd2f57ce5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/__pycache__/tf2xla_pb2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/__pycache__/tf2xla_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e14b3defed4d26de841393003abfad33e87fb8fb Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/__pycache__/tf2xla_pb2.cpython-310.pyc differ diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21087b297300f9656e38ed9bd1080961d42715ae Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/gen_xla_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/gen_xla_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..7558fcccdf5f814c41184ff1c47e66876e9ae10b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/ops/gen_xla_ops.py @@ -0,0 +1,4855 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. 
+""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +TV_XlaAllReduce_T = TypeVar("TV_XlaAllReduce_T", _atypes.BFloat16, _atypes.Float32, _atypes.Half, _atypes.Int32, _atypes.UInt32) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_all_reduce') +def xla_all_reduce(input: Annotated[Any, TV_XlaAllReduce_T], group_assignment: Annotated[Any, _atypes.Int32], reduce_op: str, mode: str, name=None) -> Annotated[Any, TV_XlaAllReduce_T]: + r"""Wraps the XLA AllReduce operator + + documented at https://www.tensorflow.org/xla/operation_semantics#allreduce. + + Args: + input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `int32`, `uint32`. + Array or a non-empty tuple of arrays to reduce across replicas. + group_assignment: A `Tensor` of type `int32`. + Groups between which the reductions are performed. + reduce_op: A `string` from: `"Min", "Max", "Mul", "Add", "Mean"`. + Reduction computation. + mode: A `string` from: `"CrossReplica", "CrossReplicaAndPartition"`. + group mode. + CrossReplica: group_assignment contains replica_id. Each group contains the + replicas for the current partition. 
+ CrossReplicaAndPartition: group_assignment contains replica_id. Each group + contains the replicas for all partitions. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaAllReduce", name, input, group_assignment, "reduce_op", + reduce_op, "mode", mode) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_all_reduce( + (input, group_assignment, reduce_op, mode, name,), None) + if _result is not NotImplemented: + return _result + return xla_all_reduce_eager_fallback( + input, group_assignment, reduce_op=reduce_op, mode=mode, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_all_reduce, (), dict(input=input, + group_assignment=group_assignment, + reduce_op=reduce_op, mode=mode, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_all_reduce( + (input, group_assignment, reduce_op, mode, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ reduce_op = _execute.make_str(reduce_op, "reduce_op") + mode = _execute.make_str(mode, "mode") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaAllReduce", input=input, group_assignment=group_assignment, + reduce_op=reduce_op, mode=mode, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_all_reduce, (), dict(input=input, + group_assignment=group_assignment, + reduce_op=reduce_op, mode=mode, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "reduce_op", + _op.get_attr("reduce_op"), "mode", _op.get_attr("mode")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaAllReduce", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaAllReduce = tf_export("raw_ops.XlaAllReduce")(_ops.to_raw_op(xla_all_reduce)) +_dispatcher_for_xla_all_reduce = xla_all_reduce._tf_type_based_dispatcher.Dispatch + + +def xla_all_reduce_eager_fallback(input: Annotated[Any, TV_XlaAllReduce_T], group_assignment: Annotated[Any, _atypes.Int32], reduce_op: str, mode: str, name, ctx) -> Annotated[Any, TV_XlaAllReduce_T]: + reduce_op = _execute.make_str(reduce_op, "reduce_op") + mode = _execute.make_str(mode, "mode") + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.int32, _dtypes.uint32, ]) + group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32) + _inputs_flat = [input, group_assignment] + _attrs = ("T", _attr_T, "reduce_op", reduce_op, "mode", mode) + _result = _execute.execute(b"XlaAllReduce", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaAllReduce", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_XlaBroadcastHelperOutput = collections.namedtuple( + 
"XlaBroadcastHelper", + ["lhs_output", "rhs_output"]) + + +TV_XlaBroadcastHelper_T = TypeVar("TV_XlaBroadcastHelper_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_XlaBroadcastHelper_Tindices = TypeVar("TV_XlaBroadcastHelper_Tindices", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_broadcast_helper') +def xla_broadcast_helper(lhs: Annotated[Any, TV_XlaBroadcastHelper_T], rhs: Annotated[Any, TV_XlaBroadcastHelper_T], broadcast_dims: Annotated[Any, TV_XlaBroadcastHelper_Tindices], name=None): + r"""Helper operator for performing XLA-style broadcasts + + Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to + whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules + for binary operators. + + Args: + lhs: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + the LHS input tensor + rhs: A `Tensor`. Must have the same type as `lhs`. the RHS input tensor + broadcast_dims: A `Tensor`. Must be one of the following types: `int32`, `int64`. + an XLA-style broadcast dimension specification + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (lhs_output, rhs_output). + + lhs_output: A `Tensor`. Has the same type as `lhs`. the broadcasted LHS tensor + rhs_output: A `Tensor`. Has the same type as `lhs`. 
the broadcasted RHS tensor + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaBroadcastHelper", name, lhs, rhs, broadcast_dims) + _result = _XlaBroadcastHelperOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_broadcast_helper( + (lhs, rhs, broadcast_dims, name,), None) + if _result is not NotImplemented: + return _result + return xla_broadcast_helper_eager_fallback( + lhs, rhs, broadcast_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_broadcast_helper, (), dict(lhs=lhs, rhs=rhs, + broadcast_dims=broadcast_dims, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_broadcast_helper( + (lhs, rhs, broadcast_dims, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaBroadcastHelper", lhs=lhs, rhs=rhs, broadcast_dims=broadcast_dims, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_broadcast_helper, (), dict(lhs=lhs, rhs=rhs, + broadcast_dims=broadcast_dims, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaBroadcastHelper", _inputs_flat, _attrs, _result) + _result = _XlaBroadcastHelperOutput._make(_result) + return _result + +XlaBroadcastHelper = tf_export("raw_ops.XlaBroadcastHelper")(_ops.to_raw_op(xla_broadcast_helper)) +_dispatcher_for_xla_broadcast_helper = xla_broadcast_helper._tf_type_based_dispatcher.Dispatch + + +def xla_broadcast_helper_eager_fallback(lhs: Annotated[Any, TV_XlaBroadcastHelper_T], rhs: Annotated[Any, TV_XlaBroadcastHelper_T], broadcast_dims: Annotated[Any, TV_XlaBroadcastHelper_Tindices], name, ctx): + _attr_T, _inputs_T = _execute.args_to_matching_eager([lhs, rhs], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lhs, rhs) = _inputs_T + _attr_Tindices, (broadcast_dims,) = _execute.args_to_matching_eager([broadcast_dims], ctx, [_dtypes.int32, _dtypes.int64, ]) + _inputs_flat = [lhs, rhs, broadcast_dims] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) + _result = _execute.execute(b"XlaBroadcastHelper", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaBroadcastHelper", _inputs_flat, _attrs, 
_result) + _result = _XlaBroadcastHelperOutput._make(_result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_call_module') +def xla_call_module(args, version: int, module: str, Sout, Tout, dim_args_spec=[], platforms=[], function_list=[], has_token_input_output:bool=False, disabled_checks=[], name=None): + r"""Invokes a StableHLO module. + + This op is used with JAX native serialization in a TensorFlow context with + stability guarantees. + + Args: + args: A list of `Tensor` objects. + A list of `Tensor` with possibly different types to be passed as arguments + to the `module`. These are the actual arguments and do not include the + platform argument (see `platforms`) nor the dimension arguments (see + `dim_args_spec`). + version: An `int`. + Tracks changes the semantics of the op, to support backwards + compatibility. Minimum supported version is 2. From + version 2, the op carries a StableHLO text or bytecode `module`. From + version 3, the op also supports the `platforms` attribute. From version 4, + the op carries a StableHLO module with compatibility guarantees. From version + 5, XLACallModule can include `stablehlo.custom_call` op to execute tf + functions. From version 6 the op supports the `disabled_checks` attribute. + See more versioning details at https://github.com/search?q=repo%3Atensorflow%2Ftensorflow+path%3Axla_call_module+%22int+VERSION_MAXIMUM_SUPPORTED%22&type=code. + module: A `string`. + A serialized computation, a text or bytecode representation of + an mlir.Module. The return type must be a tuple if and only if the `Sout` is + a list with 0 or more than 1 elements. The length of `Tout` and + `Sout` must match. This op always returns a tuple of results, even if the + module returns a single result. + Sout: A list of shapes (each a `tf.TensorShape` or list of `ints`). + List of output tensor shapes. + Tout: A list of `tf.DTypes`. List of output tensor data types. 
+ dim_args_spec: An optional list of `strings`. Defaults to `[]`. + this attribute is not supported anymore. + platforms: An optional list of `strings`. Defaults to `[]`. + the list of platforms supported by `module`. The list can contain + the strings "CPU", "CUDA", "ROCM", or "TPU". It is an error to compile + this op for a platform that does not appear in the list. This check can be + disabled using `disabled_checks`. If the list contains more than + one platform, then the `module` takes one additional 0-dimensional + integer-tensor parameter in the first position, encoding the index in + `platforms` of the current compilation platform. This parameter has value 0 + if the plaform is not among `platforms` and the check has been disabled. + The list can be empty in old versions (earlier than 6) to denote that no + platform checking must be performed at loading time. + function_list: An optional list of functions decorated with @Defun. Defaults to `[]`. + This list contains the TensorFlow FunctionDefs that are used by + the XLACallModule. If the XLACallModule contains `stablehlo.custom_call` + operations, they can call TensorFlow graph functions outside of the + XLACallModule. This `function_list` attribute registers the dependency of the + XLACallModule on those functions. This attribute was added in version 5. + has_token_input_output: An optional `bool`. Defaults to `False`. + If true, the embedded StableHLO module's main function + must take a `!stablehlo.token` as its first argument and returns a token as + its first result. This can be used in conjunction with the TF2XLA's side + effect mechanism in order to model side effects. This is used only in versions + prior to version 9. After that, the number and position of tokens among + the arguments and results are obtained from the main function type. This + allows us to support more than one token and not necessarily at the start. + disabled_checks: An optional list of `strings`. Defaults to `[]`. 
+ A list of strings describing the safety checks that were + disabled at serialization time. This attribute was added in version 6. + For more details see + https://github.com/search?q=repo%3Agoogle%2Fjax+path%3Ajax_export+%22class+DisabledSafetyCheck%22&type=code. + This list, supplemented with a comma-separate list of directives specified + using the flag --tf_xla_call_module_disabled_checks, + is used at module loading time to skip the corresponding checks. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `Tout`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaCallModule", name, args, "version", version, "module", + module, "Sout", Sout, "Tout", Tout, "dim_args_spec", dim_args_spec, + "platforms", platforms, "function_list", function_list, + "has_token_input_output", has_token_input_output, "disabled_checks", + disabled_checks) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_call_module( + (args, version, module, Sout, Tout, dim_args_spec, platforms, + function_list, has_token_input_output, disabled_checks, name,), None) + if _result is not NotImplemented: + return _result + return xla_call_module_eager_fallback( + args, version=version, module=module, Sout=Sout, Tout=Tout, + dim_args_spec=dim_args_spec, platforms=platforms, + function_list=function_list, + has_token_input_output=has_token_input_output, + disabled_checks=disabled_checks, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_call_module, (), dict(args=args, version=version, + module=module, Sout=Sout, Tout=Tout, + dim_args_spec=dim_args_spec, + platforms=platforms, + function_list=function_list, + has_token_input_output=has_token_input_output, + disabled_checks=disabled_checks, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_call_module( + (args, version, module, Sout, Tout, dim_args_spec, platforms, + function_list, has_token_input_output, disabled_checks, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + version = _execute.make_int(version, "version") + module = _execute.make_str(module, "module") + if not isinstance(Sout, (list, tuple)): + raise TypeError( + "Expected list for 'Sout' argument to " + "'xla_call_module' Op, not %r." % Sout) + Sout = [_execute.make_shape(_s, "Sout") for _s in Sout] + if not isinstance(Tout, (list, tuple)): + raise TypeError( + "Expected list for 'Tout' argument to " + "'xla_call_module' Op, not %r." % Tout) + Tout = [_execute.make_type(_t, "Tout") for _t in Tout] + if dim_args_spec is None: + dim_args_spec = [] + if not isinstance(dim_args_spec, (list, tuple)): + raise TypeError( + "Expected list for 'dim_args_spec' argument to " + "'xla_call_module' Op, not %r." % dim_args_spec) + dim_args_spec = [_execute.make_str(_s, "dim_args_spec") for _s in dim_args_spec] + if platforms is None: + platforms = [] + if not isinstance(platforms, (list, tuple)): + raise TypeError( + "Expected list for 'platforms' argument to " + "'xla_call_module' Op, not %r." % platforms) + platforms = [_execute.make_str(_s, "platforms") for _s in platforms] + if function_list is None: + function_list = [] + if not isinstance(function_list, (list, tuple)): + raise TypeError( + "Expected list for 'function_list' argument to " + "'xla_call_module' Op, not %r." 
% function_list) + if has_token_input_output is None: + has_token_input_output = False + has_token_input_output = _execute.make_bool(has_token_input_output, "has_token_input_output") + if disabled_checks is None: + disabled_checks = [] + if not isinstance(disabled_checks, (list, tuple)): + raise TypeError( + "Expected list for 'disabled_checks' argument to " + "'xla_call_module' Op, not %r." % disabled_checks) + disabled_checks = [_execute.make_str(_s, "disabled_checks") for _s in disabled_checks] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaCallModule", args=args, version=version, module=module, Sout=Sout, + Tout=Tout, dim_args_spec=dim_args_spec, + platforms=platforms, function_list=function_list, + has_token_input_output=has_token_input_output, + disabled_checks=disabled_checks, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_call_module, (), dict(args=args, version=version, module=module, + Sout=Sout, Tout=Tout, + dim_args_spec=dim_args_spec, + platforms=platforms, + function_list=function_list, + has_token_input_output=has_token_input_output, + disabled_checks=disabled_checks, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("version", _op._get_attr_int("version"), "module", + _op.get_attr("module"), "Sout", _op.get_attr("Sout"), "Tout", + _op.get_attr("Tout"), "Tin", _op.get_attr("Tin"), + "dim_args_spec", _op.get_attr("dim_args_spec"), "platforms", + _op.get_attr("platforms"), "function_list", + _op.get_attr("function_list"), "has_token_input_output", + _op._get_attr_bool("has_token_input_output"), "disabled_checks", + _op.get_attr("disabled_checks")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaCallModule", _inputs_flat, _attrs, _result) + return _result + +XlaCallModule = 
tf_export("raw_ops.XlaCallModule")(_ops.to_raw_op(xla_call_module)) +_dispatcher_for_xla_call_module = xla_call_module._tf_type_based_dispatcher.Dispatch + + +def xla_call_module_eager_fallback(args, version: int, module: str, Sout, Tout, dim_args_spec, platforms, function_list, has_token_input_output: bool, disabled_checks, name, ctx): + version = _execute.make_int(version, "version") + module = _execute.make_str(module, "module") + if not isinstance(Sout, (list, tuple)): + raise TypeError( + "Expected list for 'Sout' argument to " + "'xla_call_module' Op, not %r." % Sout) + Sout = [_execute.make_shape(_s, "Sout") for _s in Sout] + if not isinstance(Tout, (list, tuple)): + raise TypeError( + "Expected list for 'Tout' argument to " + "'xla_call_module' Op, not %r." % Tout) + Tout = [_execute.make_type(_t, "Tout") for _t in Tout] + if dim_args_spec is None: + dim_args_spec = [] + if not isinstance(dim_args_spec, (list, tuple)): + raise TypeError( + "Expected list for 'dim_args_spec' argument to " + "'xla_call_module' Op, not %r." % dim_args_spec) + dim_args_spec = [_execute.make_str(_s, "dim_args_spec") for _s in dim_args_spec] + if platforms is None: + platforms = [] + if not isinstance(platforms, (list, tuple)): + raise TypeError( + "Expected list for 'platforms' argument to " + "'xla_call_module' Op, not %r." % platforms) + platforms = [_execute.make_str(_s, "platforms") for _s in platforms] + if function_list is None: + function_list = [] + if not isinstance(function_list, (list, tuple)): + raise TypeError( + "Expected list for 'function_list' argument to " + "'xla_call_module' Op, not %r." 
% function_list) + if has_token_input_output is None: + has_token_input_output = False + has_token_input_output = _execute.make_bool(has_token_input_output, "has_token_input_output") + if disabled_checks is None: + disabled_checks = [] + if not isinstance(disabled_checks, (list, tuple)): + raise TypeError( + "Expected list for 'disabled_checks' argument to " + "'xla_call_module' Op, not %r." % disabled_checks) + disabled_checks = [_execute.make_str(_s, "disabled_checks") for _s in disabled_checks] + _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, ctx) + _inputs_flat = list(args) + _attrs = ("version", version, "module", module, "Sout", Sout, "Tout", Tout, + "Tin", _attr_Tin, "dim_args_spec", dim_args_spec, "platforms", platforms, + "function_list", function_list, "has_token_input_output", + has_token_input_output, "disabled_checks", disabled_checks) + _result = _execute.execute(b"XlaCallModule", len(Tout), inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaCallModule", _inputs_flat, _attrs, _result) + return _result + + +TV_XlaConv_T = TypeVar("TV_XlaConv_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_XlaConv_Tindices = TypeVar("TV_XlaConv_Tindices", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_conv') +def xla_conv(lhs: Annotated[Any, TV_XlaConv_T], rhs: Annotated[Any, TV_XlaConv_T], window_strides: Annotated[Any, TV_XlaConv_Tindices], padding: Annotated[Any, TV_XlaConv_Tindices], lhs_dilation: Annotated[Any, TV_XlaConv_Tindices], rhs_dilation: Annotated[Any, TV_XlaConv_Tindices], feature_group_count: Annotated[Any, 
TV_XlaConv_Tindices], dimension_numbers: str, precision_config: str, name=None) -> Annotated[Any, TV_XlaConv_T]: + r"""Wraps the XLA ConvGeneralDilated operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + . + + Args: + lhs: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + the input tensor + rhs: A `Tensor`. Must have the same type as `lhs`. the kernel tensor + window_strides: A `Tensor`. Must be one of the following types: `int32`, `int64`. + the inter-window strides + padding: A `Tensor`. Must have the same type as `window_strides`. + the padding to apply at the start and end of each input dimensions + lhs_dilation: A `Tensor`. Must have the same type as `window_strides`. + dilation to apply between input elements + rhs_dilation: A `Tensor`. Must have the same type as `window_strides`. + dilation to apply between kernel elements + feature_group_count: A `Tensor`. Must have the same type as `window_strides`. + number of feature groups for grouped convolution. + dimension_numbers: A `string`. + a serialized xla::ConvolutionDimensionNumbers proto. + precision_config: A `string`. a serialized xla::PrecisionConfig proto. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `lhs`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaConv", name, lhs, rhs, window_strides, padding, + lhs_dilation, rhs_dilation, feature_group_count, "dimension_numbers", + dimension_numbers, "precision_config", precision_config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_conv( + (lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, + feature_group_count, dimension_numbers, precision_config, name,), + None) + if _result is not NotImplemented: + return _result + return xla_conv_eager_fallback( + lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, + feature_group_count, dimension_numbers=dimension_numbers, + precision_config=precision_config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_conv, (), dict(lhs=lhs, rhs=rhs, + window_strides=window_strides, padding=padding, + lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers, + precision_config=precision_config, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_conv( + (lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, + feature_group_count, dimension_numbers, precision_config, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + precision_config = _execute.make_str(precision_config, "precision_config") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaConv", lhs=lhs, rhs=rhs, window_strides=window_strides, + padding=padding, lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers, + precision_config=precision_config, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_conv, (), dict(lhs=lhs, rhs=rhs, window_strides=window_strides, + padding=padding, lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers, + precision_config=precision_config, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "dimension_numbers", + _op.get_attr("dimension_numbers"), "precision_config", + _op.get_attr("precision_config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaConv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaConv = tf_export("raw_ops.XlaConv")(_ops.to_raw_op(xla_conv)) +_dispatcher_for_xla_conv = xla_conv._tf_type_based_dispatcher.Dispatch + + +def xla_conv_eager_fallback(lhs: Annotated[Any, TV_XlaConv_T], rhs: Annotated[Any, TV_XlaConv_T], window_strides: Annotated[Any, TV_XlaConv_Tindices], padding: Annotated[Any, TV_XlaConv_Tindices], lhs_dilation: Annotated[Any, TV_XlaConv_Tindices], rhs_dilation: Annotated[Any, TV_XlaConv_Tindices], feature_group_count: Annotated[Any, TV_XlaConv_Tindices], dimension_numbers: str, precision_config: str, name, ctx) -> Annotated[Any, TV_XlaConv_T]: + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + 
precision_config = _execute.make_str(precision_config, "precision_config") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lhs, rhs], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lhs, rhs) = _inputs_T + _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count], ctx, [_dtypes.int32, _dtypes.int64, ]) + (window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count) = _inputs_Tindices + _inputs_flat = [lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "dimension_numbers", + dimension_numbers, "precision_config", precision_config) + _result = _execute.execute(b"XlaConv", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaConv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaConvV2_LhsT = TypeVar("TV_XlaConvV2_LhsT", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_XlaConvV2_RhsT = TypeVar("TV_XlaConvV2_RhsT", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_XlaConvV2_Tindices = 
TypeVar("TV_XlaConvV2_Tindices", _atypes.Int32, _atypes.Int64) +TV_XlaConvV2_preferred_element_type = TypeVar("TV_XlaConvV2_preferred_element_type", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_conv_v2') +def xla_conv_v2(lhs: Annotated[Any, TV_XlaConvV2_LhsT], rhs: Annotated[Any, TV_XlaConvV2_RhsT], window_strides: Annotated[Any, TV_XlaConvV2_Tindices], padding: Annotated[Any, TV_XlaConvV2_Tindices], lhs_dilation: Annotated[Any, TV_XlaConvV2_Tindices], rhs_dilation: Annotated[Any, TV_XlaConvV2_Tindices], feature_group_count: Annotated[Any, TV_XlaConvV2_Tindices], dimension_numbers: str, precision_config: str, preferred_element_type: TV_XlaConvV2_preferred_element_type, batch_group_count:int=1, name=None) -> Annotated[Any, TV_XlaConvV2_preferred_element_type]: + r"""Wraps the XLA ConvGeneralDilated operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + . + + Args: + lhs: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + input tensor + rhs: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + kernel tensor + window_strides: A `Tensor`. Must be one of the following types: `int32`, `int64`. + inter-window strides + padding: A `Tensor`. Must have the same type as `window_strides`. 
+ padding to apply at the start and end of each input dimensions + lhs_dilation: A `Tensor`. Must have the same type as `window_strides`. + dilation to apply between input elements + rhs_dilation: A `Tensor`. Must have the same type as `window_strides`. + dilation to apply between kernel elements + feature_group_count: A `Tensor`. Must have the same type as `window_strides`. + number of feature groups for grouped convolution. + dimension_numbers: A `string`. + serialized xla::ConvolutionDimensionNumbers proto. + precision_config: A `string`. serialized xla::PrecisionConfig proto. + preferred_element_type: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.qint16, tf.quint16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`. + type of the tensor. + batch_group_count: An optional `int`. Defaults to `1`. + number of batch groups or grouped filters. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `preferred_element_type`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaConvV2", name, lhs, rhs, window_strides, padding, + lhs_dilation, rhs_dilation, feature_group_count, "dimension_numbers", + dimension_numbers, "precision_config", precision_config, + "preferred_element_type", preferred_element_type, "batch_group_count", + batch_group_count) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_conv_v2( + (lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, + feature_group_count, dimension_numbers, precision_config, + preferred_element_type, batch_group_count, name,), None) + if _result is not NotImplemented: + return _result + return xla_conv_v2_eager_fallback( + lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, + feature_group_count, dimension_numbers=dimension_numbers, + precision_config=precision_config, + preferred_element_type=preferred_element_type, + batch_group_count=batch_group_count, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_conv_v2, (), dict(lhs=lhs, rhs=rhs, + window_strides=window_strides, + padding=padding, lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers, + precision_config=precision_config, + preferred_element_type=preferred_element_type, + batch_group_count=batch_group_count, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_conv_v2( + (lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, + feature_group_count, dimension_numbers, precision_config, + preferred_element_type, batch_group_count, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + precision_config = _execute.make_str(precision_config, "precision_config") + preferred_element_type = _execute.make_type(preferred_element_type, "preferred_element_type") + if batch_group_count is None: + batch_group_count = 1 + batch_group_count = _execute.make_int(batch_group_count, "batch_group_count") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaConvV2", lhs=lhs, rhs=rhs, window_strides=window_strides, + padding=padding, lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers, + precision_config=precision_config, + preferred_element_type=preferred_element_type, + batch_group_count=batch_group_count, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_conv_v2, (), dict(lhs=lhs, rhs=rhs, + window_strides=window_strides, + padding=padding, lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers, + precision_config=precision_config, + 
preferred_element_type=preferred_element_type, + batch_group_count=batch_group_count, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("LhsT", _op._get_attr_type("LhsT"), "RhsT", + _op._get_attr_type("RhsT"), "Tindices", + _op._get_attr_type("Tindices"), "dimension_numbers", + _op.get_attr("dimension_numbers"), "precision_config", + _op.get_attr("precision_config"), "preferred_element_type", + _op._get_attr_type("preferred_element_type"), + "batch_group_count", _op._get_attr_int("batch_group_count")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaConvV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaConvV2 = tf_export("raw_ops.XlaConvV2")(_ops.to_raw_op(xla_conv_v2)) +_dispatcher_for_xla_conv_v2 = xla_conv_v2._tf_type_based_dispatcher.Dispatch + + +def xla_conv_v2_eager_fallback(lhs: Annotated[Any, TV_XlaConvV2_LhsT], rhs: Annotated[Any, TV_XlaConvV2_RhsT], window_strides: Annotated[Any, TV_XlaConvV2_Tindices], padding: Annotated[Any, TV_XlaConvV2_Tindices], lhs_dilation: Annotated[Any, TV_XlaConvV2_Tindices], rhs_dilation: Annotated[Any, TV_XlaConvV2_Tindices], feature_group_count: Annotated[Any, TV_XlaConvV2_Tindices], dimension_numbers: str, precision_config: str, preferred_element_type: TV_XlaConvV2_preferred_element_type, batch_group_count: int, name, ctx) -> Annotated[Any, TV_XlaConvV2_preferred_element_type]: + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + precision_config = _execute.make_str(precision_config, "precision_config") + preferred_element_type = _execute.make_type(preferred_element_type, "preferred_element_type") + if batch_group_count is None: + batch_group_count = 1 + batch_group_count = _execute.make_int(batch_group_count, "batch_group_count") + _attr_LhsT, (lhs,) = _execute.args_to_matching_eager([lhs], ctx, [_dtypes.float32, _dtypes.float64, 
_dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_RhsT, (rhs,) = _execute.args_to_matching_eager([rhs], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count], ctx, [_dtypes.int32, _dtypes.int64, ]) + (window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count) = _inputs_Tindices + _inputs_flat = [lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation, feature_group_count] + _attrs = ("LhsT", _attr_LhsT, "RhsT", _attr_RhsT, "Tindices", + _attr_Tindices, "dimension_numbers", dimension_numbers, "precision_config", + precision_config, "preferred_element_type", preferred_element_type, + "batch_group_count", batch_group_count) + _result = _execute.execute(b"XlaConvV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaConvV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaCustomCall_dtype = TypeVar("TV_XlaCustomCall_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, 
_atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_custom_call') +def xla_custom_call(args, target_name: str, backend_config: str, dtype: TV_XlaCustomCall_dtype, shape, name=None) -> Annotated[Any, TV_XlaCustomCall_dtype]: + r"""Wraps the XLA CustomCall operator + + documented at https://www.tensorflow.org/xla/operation_semantics#customcall. + + Args: + args: A list of `Tensor` objects. + A list of `Tensor` with possibly different types. + target_name: A `string`. + Name of the function. A call instruction will be emitted which + targets this symbol name. + backend_config: A `string`. + String, used to encode serialized metadata to the backend. + dtype: A `tf.DType`. Output tensor data type. + shape: A `tf.TensorShape` or list of `ints`. Output tensor shape. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaCustomCall", name, args, "target_name", target_name, + "backend_config", backend_config, "dtype", dtype, "shape", shape) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_custom_call( + (args, target_name, backend_config, dtype, shape, name,), None) + if _result is not NotImplemented: + return _result + return xla_custom_call_eager_fallback( + args, target_name=target_name, backend_config=backend_config, + dtype=dtype, shape=shape, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_custom_call, (), dict(args=args, target_name=target_name, + backend_config=backend_config, + dtype=dtype, shape=shape, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_custom_call( + (args, target_name, backend_config, dtype, shape, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + target_name = _execute.make_str(target_name, "target_name") + backend_config = _execute.make_str(backend_config, "backend_config") + dtype = _execute.make_type(dtype, "dtype") + shape = _execute.make_shape(shape, "shape") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaCustomCall", args=args, target_name=target_name, + backend_config=backend_config, dtype=dtype, + shape=shape, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_custom_call, (), dict(args=args, target_name=target_name, + backend_config=backend_config, + dtype=dtype, shape=shape, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("target_name", _op.get_attr("target_name"), "backend_config", + _op.get_attr("backend_config"), "T", _op.get_attr("T"), "dtype", + _op._get_attr_type("dtype"), "shape", _op.get_attr("shape")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaCustomCall", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaCustomCall = tf_export("raw_ops.XlaCustomCall")(_ops.to_raw_op(xla_custom_call)) +_dispatcher_for_xla_custom_call = xla_custom_call._tf_type_based_dispatcher.Dispatch + + +def xla_custom_call_eager_fallback(args, target_name: str, backend_config: str, dtype: TV_XlaCustomCall_dtype, shape, name, ctx) -> Annotated[Any, TV_XlaCustomCall_dtype]: + target_name = 
_execute.make_str(target_name, "target_name") + backend_config = _execute.make_str(backend_config, "backend_config") + dtype = _execute.make_type(dtype, "dtype") + shape = _execute.make_shape(shape, "shape") + _attr_T, args = _execute.convert_to_mixed_eager_tensors(args, ctx) + _inputs_flat = list(args) + _attrs = ("target_name", target_name, "backend_config", backend_config, "T", + _attr_T, "dtype", dtype, "shape", shape) + _result = _execute.execute(b"XlaCustomCall", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaCustomCall", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_custom_call_v2') +def xla_custom_call_v2(operands, call_target_name: str, backend_config: str, has_side_effect: bool, result_dtypes, result_shapes, name=None): + r"""Emits an HLO `CustomCall` operation with multiple outputs. + + As opposed to `XlaCustomCall`, this operation supports multiple outputs. + + See `CustomCall` specification at + https://tensorflow.org/xla/operation_semantics#customcall, + and `mhlo.custom_call` specification at + https://tensorflow.org/mlir/hlo_ops#mhlocustom_call_mlirmhlocustomcallop. + + Args: + operands: A list of `Tensor` objects. + A sequence of tensors with possibly different types. + call_target_name: A `string`. + Name of the user function. The function signature must conform + to version 3 of the API, see `API_VERSION_STATUS_RETURNING_UNIFIED`. All + operands and results assumed to be in the default layout. + backend_config: A `string`. + A string that encodes a metadata for the backend. + has_side_effect: A `bool`. + Indicates whether the custom call has side effects. + result_dtypes: A list of `tf.DTypes`. Types of all results. + result_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`). + Shapes of all results. 
+ name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `result_dtypes`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaCustomCallV2", name, operands, "call_target_name", + call_target_name, "backend_config", backend_config, "has_side_effect", + has_side_effect, "result_dtypes", result_dtypes, "result_shapes", + result_shapes) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_custom_call_v2( + (operands, call_target_name, backend_config, has_side_effect, + result_dtypes, result_shapes, name,), None) + if _result is not NotImplemented: + return _result + return xla_custom_call_v2_eager_fallback( + operands, call_target_name=call_target_name, + backend_config=backend_config, has_side_effect=has_side_effect, + result_dtypes=result_dtypes, result_shapes=result_shapes, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_custom_call_v2, (), dict(operands=operands, + call_target_name=call_target_name, + backend_config=backend_config, + has_side_effect=has_side_effect, + result_dtypes=result_dtypes, + result_shapes=result_shapes, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_custom_call_v2( + (operands, call_target_name, backend_config, has_side_effect, + result_dtypes, result_shapes, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ call_target_name = _execute.make_str(call_target_name, "call_target_name") + backend_config = _execute.make_str(backend_config, "backend_config") + has_side_effect = _execute.make_bool(has_side_effect, "has_side_effect") + if not isinstance(result_dtypes, (list, tuple)): + raise TypeError( + "Expected list for 'result_dtypes' argument to " + "'xla_custom_call_v2' Op, not %r." % result_dtypes) + result_dtypes = [_execute.make_type(_t, "result_dtypes") for _t in result_dtypes] + if not isinstance(result_shapes, (list, tuple)): + raise TypeError( + "Expected list for 'result_shapes' argument to " + "'xla_custom_call_v2' Op, not %r." % result_shapes) + result_shapes = [_execute.make_shape(_s, "result_shapes") for _s in result_shapes] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaCustomCallV2", operands=operands, + call_target_name=call_target_name, + backend_config=backend_config, + has_side_effect=has_side_effect, + result_dtypes=result_dtypes, + result_shapes=result_shapes, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_custom_call_v2, (), dict(operands=operands, + call_target_name=call_target_name, + backend_config=backend_config, + has_side_effect=has_side_effect, + result_dtypes=result_dtypes, + result_shapes=result_shapes, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("call_target_name", _op.get_attr("call_target_name"), + "backend_config", _op.get_attr("backend_config"), + "has_side_effect", _op._get_attr_bool("has_side_effect"), + "operand_dtypes", _op.get_attr("operand_dtypes"), + "result_dtypes", _op.get_attr("result_dtypes"), "result_shapes", + _op.get_attr("result_shapes")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaCustomCallV2", _inputs_flat, _attrs, _result) + return _result + +XlaCustomCallV2 = 
tf_export("raw_ops.XlaCustomCallV2")(_ops.to_raw_op(xla_custom_call_v2))
# Capture the type-based dispatch entry point used by the public wrapper.
_dispatcher_for_xla_custom_call_v2 = xla_custom_call_v2._tf_type_based_dispatcher.Dispatch


def xla_custom_call_v2_eager_fallback(operands, call_target_name: str, backend_config: str, has_side_effect: bool, result_dtypes, result_shapes, name, ctx):
  """Eager-execution fallback for the XlaCustomCallV2 op.

  Normalizes the attribute arguments, converts `operands` to eager tensors
  (recording their possibly mixed dtypes), and executes the op directly via
  `_execute.execute` instead of building a graph node.

  Args:
    operands: Sequence of tensors with possibly different dtypes.
    call_target_name: A `string`; name of the custom-call target.
    backend_config: A `string`; opaque backend metadata.
    has_side_effect: A `bool`; whether the custom call has side effects.
    result_dtypes: List/tuple of `tf.DType`s, one per result.
    result_shapes: List/tuple of shapes (`tf.TensorShape` or list of ints).
    name: Optional op name.
    ctx: Eager context to execute in.

  Returns:
    A list of tensors, one per entry in `result_dtypes`.

  Raises:
    TypeError: If `result_dtypes` or `result_shapes` is not a list or tuple.
  """
  # Canonicalize scalar attributes (each helper validates its input type).
  call_target_name = _execute.make_str(call_target_name, "call_target_name")
  backend_config = _execute.make_str(backend_config, "backend_config")
  has_side_effect = _execute.make_bool(has_side_effect, "has_side_effect")
  if not isinstance(result_dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'result_dtypes' argument to "
        "'xla_custom_call_v2' Op, not %r." % result_dtypes)
  result_dtypes = [_execute.make_type(_t, "result_dtypes") for _t in result_dtypes]
  if not isinstance(result_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'result_shapes' argument to "
        "'xla_custom_call_v2' Op, not %r." % result_shapes)
  result_shapes = [_execute.make_shape(_s, "result_shapes") for _s in result_shapes]
  # Operands may have heterogeneous dtypes; they are recorded in an attr.
  _attr_operand_dtypes, operands = _execute.convert_to_mixed_eager_tensors(operands, ctx)
  _inputs_flat = list(operands)
  _attrs = ("call_target_name", call_target_name, "backend_config",
  backend_config, "has_side_effect", has_side_effect, "operand_dtypes",
  _attr_operand_dtypes, "result_dtypes", result_dtypes, "result_shapes",
  result_shapes)
  # The op produces one output per declared result dtype.
  _result = _execute.execute(b"XlaCustomCallV2", len(result_dtypes),
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaCustomCallV2", _inputs_flat, _attrs, _result)
  return _result


@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_dequantize')
def xla_dequantize(input: Annotated[Any, _atypes.UInt32], min_range: float, max_range: float, mode: str, transpose_output: bool, name=None) -> Annotated[Any, _atypes.BFloat16]:
  r"""Takes the packed
uint32 input and unpacks the input to uint8 to do + + Dequantization on device. + + Args: + input: A `Tensor` of type `uint32`. + Input tensors whose types is uint32, shape is [d0, ..., dn]. + min_range: A `float`. + The minimum scalar value possibly produced for the input. + max_range: A `float`. + The maximum scalar value possibly produced for the input. + mode: A `string`. + String to determine the dequantize mode in {"MIN_COMBINED", "MIN_FIRST", "SCALED"}. + transpose_output: A `bool`. + Boolean to determine if output is transposed. transpose_output + is faster when input is large and rank of input is higher than 1. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `bfloat16`. + Output tensors whose types is bfloat16. If transpose_output is true, + output shape is [dn * 4, dn-1, ..., d1, d0]. If transpose_output + is false, output shape is [d0,..., dn * 4]. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaDequantize", name, input, "min_range", min_range, + "max_range", max_range, "mode", mode, "transpose_output", + transpose_output) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_dequantize( + (input, min_range, max_range, mode, transpose_output, name,), None) + if _result is not NotImplemented: + return _result + return xla_dequantize_eager_fallback( + input, min_range=min_range, max_range=max_range, mode=mode, + transpose_output=transpose_output, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_dequantize, (), dict(input=input, min_range=min_range, + max_range=max_range, mode=mode, + transpose_output=transpose_output, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_dequantize( + (input, min_range, max_range, mode, transpose_output, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + min_range = _execute.make_float(min_range, "min_range") + max_range = _execute.make_float(max_range, "max_range") + mode = _execute.make_str(mode, "mode") + transpose_output = _execute.make_bool(transpose_output, "transpose_output") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaDequantize", input=input, min_range=min_range, + max_range=max_range, mode=mode, + transpose_output=transpose_output, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_dequantize, (), dict(input=input, min_range=min_range, + max_range=max_range, mode=mode, + transpose_output=transpose_output, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("min_range", _op.get_attr("min_range"), "max_range", + _op.get_attr("max_range"), "mode", _op.get_attr("mode"), + "transpose_output", _op._get_attr_bool("transpose_output")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaDequantize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaDequantize = tf_export("raw_ops.XlaDequantize")(_ops.to_raw_op(xla_dequantize)) +_dispatcher_for_xla_dequantize = xla_dequantize._tf_type_based_dispatcher.Dispatch + + +def xla_dequantize_eager_fallback(input: Annotated[Any, _atypes.UInt32], min_range: float, max_range: float, mode: str, transpose_output: bool, name, ctx) -> Annotated[Any, 
_atypes.BFloat16]: + min_range = _execute.make_float(min_range, "min_range") + max_range = _execute.make_float(max_range, "max_range") + mode = _execute.make_str(mode, "mode") + transpose_output = _execute.make_bool(transpose_output, "transpose_output") + input = _ops.convert_to_tensor(input, _dtypes.uint32) + _inputs_flat = [input] + _attrs = ("min_range", min_range, "max_range", max_range, "mode", mode, + "transpose_output", transpose_output) + _result = _execute.execute(b"XlaDequantize", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaDequantize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaDot_T = TypeVar("TV_XlaDot_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_dot') +def xla_dot(lhs: Annotated[Any, TV_XlaDot_T], rhs: Annotated[Any, TV_XlaDot_T], dimension_numbers: str, precision_config: str, name=None) -> Annotated[Any, TV_XlaDot_T]: + r"""Wraps the XLA DotGeneral operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral + . + + Args: + lhs: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + the LHS tensor + rhs: A `Tensor`. Must have the same type as `lhs`. the RHS tensor + dimension_numbers: A `string`. + a serialized xla::DotDimensionNumbers proto. + precision_config: A `string`. a serialized xla::PrecisionConfig proto. 
+ name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `lhs`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaDot", name, lhs, rhs, "dimension_numbers", + dimension_numbers, "precision_config", precision_config) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_dot( + (lhs, rhs, dimension_numbers, precision_config, name,), None) + if _result is not NotImplemented: + return _result + return xla_dot_eager_fallback( + lhs, rhs, dimension_numbers=dimension_numbers, + precision_config=precision_config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_dot, (), dict(lhs=lhs, rhs=rhs, + dimension_numbers=dimension_numbers, + precision_config=precision_config, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_dot( + (lhs, rhs, dimension_numbers, precision_config, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + precision_config = _execute.make_str(precision_config, "precision_config") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaDot", lhs=lhs, rhs=rhs, dimension_numbers=dimension_numbers, + precision_config=precision_config, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_dot, (), dict(lhs=lhs, rhs=rhs, + dimension_numbers=dimension_numbers, + precision_config=precision_config, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "dimension_numbers", + _op.get_attr("dimension_numbers"), "precision_config", + _op.get_attr("precision_config")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaDot", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaDot = tf_export("raw_ops.XlaDot")(_ops.to_raw_op(xla_dot)) +_dispatcher_for_xla_dot = xla_dot._tf_type_based_dispatcher.Dispatch + + +def xla_dot_eager_fallback(lhs: Annotated[Any, TV_XlaDot_T], rhs: Annotated[Any, TV_XlaDot_T], dimension_numbers: str, precision_config: str, name, ctx) -> Annotated[Any, TV_XlaDot_T]: + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + precision_config = _execute.make_str(precision_config, "precision_config") + _attr_T, _inputs_T = _execute.args_to_matching_eager([lhs, rhs], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (lhs, rhs) = _inputs_T + _inputs_flat = [lhs, rhs] + _attrs = ("T", _attr_T, "dimension_numbers", dimension_numbers, + "precision_config", precision_config) + 
_result = _execute.execute(b"XlaDot", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaDot", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaDotV2_LhsT = TypeVar("TV_XlaDotV2_LhsT", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_XlaDotV2_RhsT = TypeVar("TV_XlaDotV2_RhsT", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_XlaDotV2_preferred_element_type = TypeVar("TV_XlaDotV2_preferred_element_type", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_dot_v2') +def xla_dot_v2(lhs: Annotated[Any, TV_XlaDotV2_LhsT], rhs: Annotated[Any, TV_XlaDotV2_RhsT], dimension_numbers: str, precision_config: str, preferred_element_type: TV_XlaDotV2_preferred_element_type, name=None) -> Annotated[Any, TV_XlaDotV2_preferred_element_type]: + r"""Wraps the XLA DotGeneral operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral + . + + Args: + lhs: A `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + the LHS tensor + rhs: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + the RHS tensor + dimension_numbers: A `string`. + a serialized xla::DotDimensionNumbers proto. + precision_config: A `string`. a serialized xla::PrecisionConfig proto. + preferred_element_type: A `tf.DType` from: `tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8, tf.complex64, tf.int64, tf.qint8, tf.quint8, tf.qint32, tf.bfloat16, tf.qint16, tf.quint16, tf.uint16, tf.complex128, tf.half, tf.uint32, tf.uint64`. + The type of the tensor. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `preferred_element_type`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaDotV2", name, lhs, rhs, "dimension_numbers", + dimension_numbers, "precision_config", precision_config, + "preferred_element_type", preferred_element_type) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_dot_v2( + (lhs, rhs, dimension_numbers, precision_config, + preferred_element_type, name,), None) + if _result is not NotImplemented: + return _result + return xla_dot_v2_eager_fallback( + lhs, rhs, dimension_numbers=dimension_numbers, + precision_config=precision_config, + preferred_element_type=preferred_element_type, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_dot_v2, (), dict(lhs=lhs, rhs=rhs, + dimension_numbers=dimension_numbers, + precision_config=precision_config, + preferred_element_type=preferred_element_type, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_dot_v2( + (lhs, rhs, dimension_numbers, precision_config, + preferred_element_type, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + precision_config = _execute.make_str(precision_config, "precision_config") + preferred_element_type = _execute.make_type(preferred_element_type, "preferred_element_type") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaDotV2", lhs=lhs, rhs=rhs, dimension_numbers=dimension_numbers, + precision_config=precision_config, + preferred_element_type=preferred_element_type, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_dot_v2, (), dict(lhs=lhs, rhs=rhs, + dimension_numbers=dimension_numbers, + precision_config=precision_config, + preferred_element_type=preferred_element_type, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("LhsT", _op._get_attr_type("LhsT"), "RhsT", + _op._get_attr_type("RhsT"), "dimension_numbers", + _op.get_attr("dimension_numbers"), "precision_config", + _op.get_attr("precision_config"), "preferred_element_type", + _op._get_attr_type("preferred_element_type")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaDotV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaDotV2 = tf_export("raw_ops.XlaDotV2")(_ops.to_raw_op(xla_dot_v2)) +_dispatcher_for_xla_dot_v2 = 
xla_dot_v2._tf_type_based_dispatcher.Dispatch + + +def xla_dot_v2_eager_fallback(lhs: Annotated[Any, TV_XlaDotV2_LhsT], rhs: Annotated[Any, TV_XlaDotV2_RhsT], dimension_numbers: str, precision_config: str, preferred_element_type: TV_XlaDotV2_preferred_element_type, name, ctx) -> Annotated[Any, TV_XlaDotV2_preferred_element_type]: + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + precision_config = _execute.make_str(precision_config, "precision_config") + preferred_element_type = _execute.make_type(preferred_element_type, "preferred_element_type") + _attr_LhsT, (lhs,) = _execute.args_to_matching_eager([lhs], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_RhsT, (rhs,) = _execute.args_to_matching_eager([rhs], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _inputs_flat = [lhs, rhs] + _attrs = ("LhsT", _attr_LhsT, "RhsT", _attr_RhsT, "dimension_numbers", + dimension_numbers, "precision_config", precision_config, + "preferred_element_type", preferred_element_type) + _result = _execute.execute(b"XlaDotV2", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaDotV2", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaDynamicSlice_T = TypeVar("TV_XlaDynamicSlice_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, 
_atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_XlaDynamicSlice_Tindices = TypeVar("TV_XlaDynamicSlice_Tindices", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_dynamic_slice') +def xla_dynamic_slice(input: Annotated[Any, TV_XlaDynamicSlice_T], start_indices: Annotated[Any, TV_XlaDynamicSlice_Tindices], size_indices: Annotated[Any, TV_XlaDynamicSlice_Tindices], name=None) -> Annotated[Any, TV_XlaDynamicSlice_T]: + r"""Wraps the XLA DynamicSlice operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice + . + + DynamicSlice extracts a sub-array from the input array at dynamic + start_indices. The size of the slice in each dimension is passed in + size_indices, which specify the end point of exclusive slice intervals in each + dimension -- [start, start + size). The shape of start_indices must have rank 1, + with dimension size equal to the rank of operand. + + Args: + input: A `Tensor`. A `Tensor` of type T. + start_indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + List of N integers containing the slice size for each + dimension. Each value must be strictly greater than zero, and start + size + must be less than or equal to the size of the dimension to avoid + implementation defined behavior. + size_indices: A `Tensor`. Must have the same type as `start_indices`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaDynamicSlice", name, input, start_indices, size_indices) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_dynamic_slice( + (input, start_indices, size_indices, name,), None) + if _result is not NotImplemented: + return _result + return xla_dynamic_slice_eager_fallback( + input, start_indices, size_indices, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_dynamic_slice, (), dict(input=input, + start_indices=start_indices, + size_indices=size_indices, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_dynamic_slice( + (input, start_indices, size_indices, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
  # Graph-mode path (continued): build the op via the op-def library; on
  # signature errors, give registered fallback dispatchers a chance before
  # re-raising.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaDynamicSlice", input=input, start_indices=start_indices,
                           size_indices=size_indices, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_dynamic_slice, (), dict(input=input,
                                      start_indices=start_indices,
                                      size_indices=size_indices, name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Reconstruct the attrs from the created op for gradient recording.
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaDynamicSlice", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the raw op under tf.raw_ops and capture the type-based dispatcher.
XlaDynamicSlice = tf_export("raw_ops.XlaDynamicSlice")(_ops.to_raw_op(xla_dynamic_slice))
_dispatcher_for_xla_dynamic_slice = xla_dynamic_slice._tf_type_based_dispatcher.Dispatch


def xla_dynamic_slice_eager_fallback(input: Annotated[Any, TV_XlaDynamicSlice_T], start_indices: Annotated[Any, TV_XlaDynamicSlice_Tindices], size_indices: Annotated[Any, TV_XlaDynamicSlice_Tindices], name, ctx) -> Annotated[Any, TV_XlaDynamicSlice_T]:
  """Eager-execution fallback for XlaDynamicSlice: coerces inputs to eager
  tensors and runs the op directly via `_execute.execute`."""
  # `input` may be any registered dtype (empty allow-list).
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [])
  # start_indices and size_indices must share one integer dtype.
  _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([start_indices, size_indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  (start_indices, size_indices) = _inputs_Tindices
  _inputs_flat = [input, start_indices, size_indices]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"XlaDynamicSlice", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaDynamicSlice", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result


# Allowed dtypes for XlaDynamicUpdateSlice inputs (continued on next line).
TV_XlaDynamicUpdateSlice_T = TypeVar("TV_XlaDynamicUpdateSlice_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32,
_atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_XlaDynamicUpdateSlice_Tindices = TypeVar("TV_XlaDynamicUpdateSlice_Tindices", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_dynamic_update_slice') +def xla_dynamic_update_slice(input: Annotated[Any, TV_XlaDynamicUpdateSlice_T], update: Annotated[Any, TV_XlaDynamicUpdateSlice_T], indices: Annotated[Any, TV_XlaDynamicUpdateSlice_Tindices], name=None) -> Annotated[Any, TV_XlaDynamicUpdateSlice_T]: + r"""Wraps the XLA DynamicUpdateSlice operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice + . + + XlaDynamicUpdateSlice generates a result which is the value of the `input` + operand, with a slice update overwritten at `indices`. The shape of `update` + determines the shape of the sub-array of the result which is updated. The shape + of indices must be rank == 1, with dimension size equal to the rank of `input`. + + Handling of out-of-bounds slice indices is implementation-defined. + + Args: + input: A `Tensor`. A `Tensor` of type T. + update: A `Tensor`. Must have the same type as `input`. + A `Tensor` of type T. Same rank as `input`. + indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + A vector of indices into `input`. Must have length equal to the rank of + `input`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. A `Tensor` of type T. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaDynamicUpdateSlice", name, input, update, indices) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_dynamic_update_slice( + (input, update, indices, name,), None) + if _result is not NotImplemented: + return _result + return xla_dynamic_update_slice_eager_fallback( + input, update, indices, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_dynamic_update_slice, (), dict(input=input, update=update, + indices=indices, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_dynamic_update_slice( + (input, update, indices, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
  # Graph-mode path (continued): build the op via the op-def library; on
  # signature errors, give registered fallback dispatchers a chance before
  # re-raising.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaDynamicUpdateSlice", input=input, update=update, indices=indices,
                                 name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_dynamic_update_slice, (), dict(input=input, update=update,
                                             indices=indices, name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Reconstruct the attrs from the created op for gradient recording.
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaDynamicUpdateSlice", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the raw op under tf.raw_ops and capture the type-based dispatcher.
XlaDynamicUpdateSlice = tf_export("raw_ops.XlaDynamicUpdateSlice")(_ops.to_raw_op(xla_dynamic_update_slice))
_dispatcher_for_xla_dynamic_update_slice = xla_dynamic_update_slice._tf_type_based_dispatcher.Dispatch


def xla_dynamic_update_slice_eager_fallback(input: Annotated[Any, TV_XlaDynamicUpdateSlice_T], update: Annotated[Any, TV_XlaDynamicUpdateSlice_T], indices: Annotated[Any, TV_XlaDynamicUpdateSlice_Tindices], name, ctx) -> Annotated[Any, TV_XlaDynamicUpdateSlice_T]:
  """Eager-execution fallback for XlaDynamicUpdateSlice: coerces inputs to
  eager tensors and runs the op directly via `_execute.execute`."""
  # `input` and `update` must share one dtype (empty allow-list = any).
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, update], ctx, [])
  (input, update) = _inputs_T
  _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  _inputs_flat = [input, update, indices]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"XlaDynamicUpdateSlice", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaDynamicUpdateSlice", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result


# Allowed dtypes for XlaEinsum operands.
TV_XlaEinsum_T = TypeVar("TV_XlaEinsum_T", _atypes.BFloat16, _atypes.Complex64, _atypes.Float32)

@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_einsum')
def xla_einsum(a: Annotated[Any, TV_XlaEinsum_T], b: Annotated[Any, TV_XlaEinsum_T], equation: str, name=None) -> Annotated[Any, TV_XlaEinsum_T]:
  r"""An op which supports basic einsum op with 2 inputs and 1 output.

  This op has better TPU performance since it doesn't have explicitly reshape and
  transpose operations as tf.einsum does.

  Args:
    a: A `Tensor`. Must be one of the following types: `complex64`, `bfloat16`, `float32`.
    b: A `Tensor`. Must have the same type as `a`.
    equation: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Fast path: execute directly through the C API when in eager mode.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "XlaEinsum", name, a, b, "equation", equation)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    # Fast path declined: try type-based dispatch, then the eager fallback.
    try:
      _result = _dispatcher_for_xla_einsum(
          (a, b, equation, name,), None)
      if _result is not NotImplemented:
        return _result
      return xla_einsum_eager_fallback(
          a, b, equation=equation, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      _result = _dispatch.dispatch(
            xla_einsum, (), dict(a=a, b=b, equation=equation, name=name)
        )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    # Graph mode: still consult the type-based dispatcher first.
    _result = _dispatcher_for_xla_einsum(
        (a, b, equation, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  equation = _execute.make_str(equation, "equation")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaEinsum", a=a, b=b, equation=equation, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_einsum, (), dict(a=a, b=b, equation=equation, name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Reconstruct the attrs from the created op for gradient recording.
    _attrs = ("equation", _op.get_attr("equation"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaEinsum", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result

# Expose the raw op under tf.raw_ops and capture the type-based dispatcher.
XlaEinsum = tf_export("raw_ops.XlaEinsum")(_ops.to_raw_op(xla_einsum))
_dispatcher_for_xla_einsum = xla_einsum._tf_type_based_dispatcher.Dispatch


def xla_einsum_eager_fallback(a: Annotated[Any, TV_XlaEinsum_T], b: Annotated[Any, TV_XlaEinsum_T], equation: str, name, ctx) -> Annotated[Any, TV_XlaEinsum_T]:
  """Eager-execution fallback for XlaEinsum: coerces `a` and `b` to a common
  allowed dtype and runs the op directly via `_execute.execute`."""
  equation = _execute.make_str(equation, "equation")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a, b], ctx, [_dtypes.complex64, _dtypes.bfloat16, _dtypes.float32, ])
  (a, b) = _inputs_T
  _inputs_flat = [a, b]
  _attrs = ("equation", equation, "T", _attr_T)
  _result = _execute.execute(b"XlaEinsum", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaEinsum", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result


# Allowed dtypes for XlaGather operand and index inputs.
TV_XlaGather_T = TypeVar("TV_XlaGather_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
TV_XlaGather_Tindices = TypeVar("TV_XlaGather_Tindices", _atypes.Int32, _atypes.Int64)

+@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_gather') +def xla_gather(operand: Annotated[Any, TV_XlaGather_T], start_indices: Annotated[Any, TV_XlaGather_Tindices], slice_sizes: Annotated[Any, TV_XlaGather_Tindices], dimension_numbers: str, indices_are_sorted: bool, name=None) -> Annotated[Any, TV_XlaGather_T]: + r"""Wraps the XLA Gather operator documented at + + https://www.tensorflow.org/xla/operation_semantics#gather + + Args: + operand: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`. + The array we're gathering from. + start_indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. + Array containing the starting indices of the slices we gather. + slice_sizes: A `Tensor`. Must have the same type as `start_indices`. + slice_sizes[i] is the bounds for the slice on dimension i. + dimension_numbers: A `string`. + A serialized xla::GatherDimensionNumbers proto. + indices_are_sorted: A `bool`. + Boolean indicating if the indices are sorted. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `operand`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaGather", name, operand, start_indices, slice_sizes, + "dimension_numbers", dimension_numbers, "indices_are_sorted", + indices_are_sorted) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_gather( + (operand, start_indices, slice_sizes, dimension_numbers, + indices_are_sorted, name,), None) + if _result is not NotImplemented: + return _result + return xla_gather_eager_fallback( + operand, start_indices, slice_sizes, + dimension_numbers=dimension_numbers, + indices_are_sorted=indices_are_sorted, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_gather, (), dict(operand=operand, start_indices=start_indices, + slice_sizes=slice_sizes, + dimension_numbers=dimension_numbers, + indices_are_sorted=indices_are_sorted, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_gather( + (operand, start_indices, slice_sizes, dimension_numbers, + indices_are_sorted, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + indices_are_sorted = _execute.make_bool(indices_are_sorted, "indices_are_sorted") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaGather", operand=operand, start_indices=start_indices, + slice_sizes=slice_sizes, + dimension_numbers=dimension_numbers, + indices_are_sorted=indices_are_sorted, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_gather, (), dict(operand=operand, start_indices=start_indices, + slice_sizes=slice_sizes, + dimension_numbers=dimension_numbers, + indices_are_sorted=indices_are_sorted, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dimension_numbers", _op.get_attr("dimension_numbers"), + "indices_are_sorted", _op._get_attr_bool("indices_are_sorted"), + "T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaGather", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaGather = tf_export("raw_ops.XlaGather")(_ops.to_raw_op(xla_gather)) +_dispatcher_for_xla_gather = xla_gather._tf_type_based_dispatcher.Dispatch + + +def xla_gather_eager_fallback(operand: Annotated[Any, TV_XlaGather_T], start_indices: Annotated[Any, TV_XlaGather_Tindices], slice_sizes: Annotated[Any, TV_XlaGather_Tindices], dimension_numbers: str, indices_are_sorted: bool, name, ctx) -> Annotated[Any, TV_XlaGather_T]: + dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers") + indices_are_sorted = _execute.make_bool(indices_are_sorted, "indices_are_sorted") + _attr_T, (operand,) = _execute.args_to_matching_eager([operand], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, 
_dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.bool, ]) + _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([start_indices, slice_sizes], ctx, [_dtypes.int32, _dtypes.int64, ]) + (start_indices, slice_sizes) = _inputs_Tindices + _inputs_flat = [operand, start_indices, slice_sizes] + _attrs = ("dimension_numbers", dimension_numbers, "indices_are_sorted", + indices_are_sorted, "T", _attr_T, "Tindices", _attr_Tindices) + _result = _execute.execute(b"XlaGather", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaGather", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaIf_Tcond = TypeVar("TV_XlaIf_Tcond", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_if') +def xla_if(cond: Annotated[Any, TV_XlaIf_Tcond], inputs, then_branch, else_branch, Tout, name=None): + r"""output = cond ? then_branch(inputs) : else_branch(inputs). + + Args: + cond: A `Tensor`. A boolean scalar. + inputs: A list of `Tensor` objects. A list of input tensors. + then_branch: A function decorated with @Defun. + A function takes 'inputs' and returns a list of tensors, + whose types are the same as what else_branch returns. + else_branch: A function decorated with @Defun. + A function takes 'inputs' and returns a list of tensors. 
+ whose types are the same as what then_branch returns. + Tout: A list of `tf.DTypes`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `Tout`. + A list of tensors returned by either then_branch(inputs) or + else_branch(inputs). The input shapes of the then_branch and + else_branch must match. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaIf", name, cond, inputs, "then_branch", then_branch, + "else_branch", else_branch, "Tout", Tout) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_if( + (cond, inputs, then_branch, else_branch, Tout, name,), None) + if _result is not NotImplemented: + return _result + return xla_if_eager_fallback( + cond, inputs, then_branch=then_branch, else_branch=else_branch, + Tout=Tout, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_if, (), dict(cond=cond, inputs=inputs, + then_branch=then_branch, else_branch=else_branch, + Tout=Tout, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_if( + (cond, inputs, then_branch, else_branch, Tout, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + if not isinstance(Tout, (list, tuple)): + raise TypeError( + "Expected list for 'Tout' argument to " + "'xla_if' Op, not %r." 
% Tout) + Tout = [_execute.make_type(_t, "Tout") for _t in Tout] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaIf", cond=cond, inputs=inputs, then_branch=then_branch, + else_branch=else_branch, Tout=Tout, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_if, (), dict(cond=cond, inputs=inputs, then_branch=then_branch, + else_branch=else_branch, Tout=Tout, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("Tcond", _op._get_attr_type("Tcond"), "then_branch", + _op.get_attr("then_branch"), "else_branch", + _op.get_attr("else_branch"), "Tin", _op.get_attr("Tin"), "Tout", + _op.get_attr("Tout")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaIf", _inputs_flat, _attrs, _result) + return _result + +XlaIf = tf_export("raw_ops.XlaIf")(_ops.to_raw_op(xla_if)) +_dispatcher_for_xla_if = xla_if._tf_type_based_dispatcher.Dispatch + + +def xla_if_eager_fallback(cond: Annotated[Any, TV_XlaIf_Tcond], inputs, then_branch, else_branch, Tout, name, ctx): + if not isinstance(Tout, (list, tuple)): + raise TypeError( + "Expected list for 'Tout' argument to " + "'xla_if' Op, not %r." 
% Tout) + Tout = [_execute.make_type(_t, "Tout") for _t in Tout] + _attr_Tcond, (cond,) = _execute.args_to_matching_eager([cond], ctx, []) + _attr_Tin, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx) + _inputs_flat = [cond] + list(inputs) + _attrs = ("Tcond", _attr_Tcond, "then_branch", then_branch, "else_branch", + else_branch, "Tin", _attr_Tin, "Tout", Tout) + _result = _execute.execute(b"XlaIf", len(Tout), inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaIf", _inputs_flat, _attrs, _result) + return _result + +_XlaKeyValueSortOutput = collections.namedtuple( + "XlaKeyValueSort", + ["sorted_keys", "sorted_values"]) + + +TV_XlaKeyValueSort_K = TypeVar("TV_XlaKeyValueSort_K", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) +TV_XlaKeyValueSort_V = TypeVar("TV_XlaKeyValueSort_V", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_key_value_sort') +def xla_key_value_sort(keys: Annotated[Any, TV_XlaKeyValueSort_K], values: Annotated[Any, TV_XlaKeyValueSort_V], name=None): + r"""Wraps the XLA Sort operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#sort + . + + Sorts a tensor. Currently only sorts in ascending order are supported. + + Args: + keys: A `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`. + A `Tensor` of type K. + values: A `Tensor`. A `Tensor` of type V. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (sorted_keys, sorted_values). + + sorted_keys: A `Tensor`. Has the same type as `keys`. A `Tensor` of type K. + sorted_values: A `Tensor`. Has the same type as `values`. A `Tensor` of type V. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaKeyValueSort", name, keys, values) + _result = _XlaKeyValueSortOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_key_value_sort( + (keys, values, name,), None) + if _result is not NotImplemented: + return _result + return xla_key_value_sort_eager_fallback( + keys, values, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_key_value_sort, (), dict(keys=keys, values=values, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_key_value_sort( + (keys, values, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaKeyValueSort", keys=keys, values=values, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_key_value_sort, (), dict(keys=keys, values=values, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("K", _op._get_attr_type("K"), "V", _op._get_attr_type("V")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaKeyValueSort", _inputs_flat, _attrs, _result) + _result = _XlaKeyValueSortOutput._make(_result) + return _result + +XlaKeyValueSort = tf_export("raw_ops.XlaKeyValueSort")(_ops.to_raw_op(xla_key_value_sort)) +_dispatcher_for_xla_key_value_sort = xla_key_value_sort._tf_type_based_dispatcher.Dispatch + + +def xla_key_value_sort_eager_fallback(keys: Annotated[Any, TV_XlaKeyValueSort_K], values: Annotated[Any, TV_XlaKeyValueSort_V], name, ctx): + _attr_K, (keys,) = _execute.args_to_matching_eager([keys], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _attr_V, (values,) = _execute.args_to_matching_eager([values], ctx, []) + _inputs_flat = [keys, values] + _attrs = ("K", _attr_K, "V", _attr_V) + _result = _execute.execute(b"XlaKeyValueSort", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaKeyValueSort", _inputs_flat, _attrs, _result) + _result = _XlaKeyValueSortOutput._make(_result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_optimization_barrier') +def xla_optimization_barrier(input, name=None): + r"""Wraps the XLA OptimizationBarrier operator. + + Documented at https://www.tensorflow.org/xla/operation_semantics#optimizationbarrier. 
+ + Args: + input: A list of `Tensor` objects. A Tuple of Arrays of any type. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaOptimizationBarrier", name, input) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_optimization_barrier( + (input, name,), None) + if _result is not NotImplemented: + return _result + return xla_optimization_barrier_eager_fallback( + input, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_optimization_barrier, (), dict(input=input, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_optimization_barrier( + (input, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaOptimizationBarrier", input=input, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_optimization_barrier, (), dict(input=input, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op.get_attr("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaOptimizationBarrier", _inputs_flat, _attrs, _result) + return _result + +XlaOptimizationBarrier = tf_export("raw_ops.XlaOptimizationBarrier")(_ops.to_raw_op(xla_optimization_barrier)) +_dispatcher_for_xla_optimization_barrier = xla_optimization_barrier._tf_type_based_dispatcher.Dispatch + + +def xla_optimization_barrier_eager_fallback(input, name, ctx): + _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, ctx) + _inputs_flat = list(input) + _attrs = ("T", _attr_T) + _result = _execute.execute(b"XlaOptimizationBarrier", len(input), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaOptimizationBarrier", _inputs_flat, _attrs, _result) + return _result + + +TV_XlaPad_T = TypeVar("TV_XlaPad_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) +TV_XlaPad_Tindices = TypeVar("TV_XlaPad_Tindices", _atypes.Int32, _atypes.Int64) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_pad') +def xla_pad(input: Annotated[Any, TV_XlaPad_T], padding_value: 
Annotated[Any, TV_XlaPad_T], padding_low: Annotated[Any, TV_XlaPad_Tindices], padding_high: Annotated[Any, TV_XlaPad_Tindices], padding_interior: Annotated[Any, TV_XlaPad_Tindices], name=None) -> Annotated[Any, TV_XlaPad_T]: + r"""Wraps the XLA Pad operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#pad + . + + Args: + input: A `Tensor`. A `Tensor` of type T. + padding_value: A `Tensor`. Must have the same type as `input`. + A scalar `Tensor` of type T. + padding_low: A `Tensor`. Must be one of the following types: `int32`, `int64`. + the padding to apply at the start of each input dimensions. Must + be a compile-time constant 1D tensor of length equal to rank of input. + padding_high: A `Tensor`. Must have the same type as `padding_low`. + the padding to apply at the end of each input dimension. Must + be a compile-time constant 1D tensor of length equal to rank of input. + padding_interior: A `Tensor`. Must have the same type as `padding_low`. + the padding to apply between each input element. Must + be a compile-time constant 1D tensor of length equal to rank of input, + containing only non-negative values. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. A `Tensor` of type T. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaPad", name, input, padding_value, padding_low, padding_high, + padding_interior) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_pad( + (input, padding_value, padding_low, padding_high, padding_interior, + name,), None) + if _result is not NotImplemented: + return _result + return xla_pad_eager_fallback( + input, padding_value, padding_low, padding_high, padding_interior, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_pad, (), dict(input=input, padding_value=padding_value, + padding_low=padding_low, + padding_high=padding_high, + padding_interior=padding_interior, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_pad( + (input, padding_value, padding_low, padding_high, padding_interior, + name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaPad", input=input, padding_value=padding_value, + padding_low=padding_low, padding_high=padding_high, + padding_interior=padding_interior, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_pad, (), dict(input=input, padding_value=padding_value, + padding_low=padding_low, + padding_high=padding_high, + padding_interior=padding_interior, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaPad", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaPad = tf_export("raw_ops.XlaPad")(_ops.to_raw_op(xla_pad)) +_dispatcher_for_xla_pad = xla_pad._tf_type_based_dispatcher.Dispatch + + +def xla_pad_eager_fallback(input: Annotated[Any, TV_XlaPad_T], padding_value: Annotated[Any, TV_XlaPad_T], padding_low: Annotated[Any, TV_XlaPad_Tindices], padding_high: Annotated[Any, TV_XlaPad_Tindices], padding_interior: Annotated[Any, TV_XlaPad_Tindices], name, ctx) -> Annotated[Any, TV_XlaPad_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([input, padding_value], ctx, []) + (input, padding_value) = _inputs_T + _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([padding_low, padding_high, padding_interior], ctx, [_dtypes.int32, _dtypes.int64, ]) + (padding_low, padding_high, padding_interior) = _inputs_Tindices + _inputs_flat = [input, padding_value, padding_low, padding_high, padding_interior] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices) + _result = _execute.execute(b"XlaPad", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaPad", _inputs_flat, _attrs, _result) + _result, = _result + return 
_result + + +TV_XlaRecv_dtype = TypeVar("TV_XlaRecv_dtype", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_recv') +def xla_recv(dtype: TV_XlaRecv_dtype, tensor_name: str, shape, name=None) -> Annotated[Any, TV_XlaRecv_dtype]: + r"""Receives the named tensor from another XLA computation. Wraps the XLA Recv + + operator documented at + https://www.tensorflow.org/performance/xla/operation_semantics#recv . + + Args: + dtype: A `tf.DType`. The type of the tensor. + tensor_name: A `string`. A string key that identifies the channel. + shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `dtype`. The tensor to receive. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaRecv", name, "dtype", dtype, "tensor_name", tensor_name, + "shape", shape) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_recv( + (dtype, tensor_name, shape, name,), None) + if _result is not NotImplemented: + return _result + return xla_recv_eager_fallback( + dtype=dtype, tensor_name=tensor_name, shape=shape, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_recv, (), dict(dtype=dtype, tensor_name=tensor_name, + shape=shape, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_recv( + (dtype, tensor_name, shape, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + dtype = _execute.make_type(dtype, "dtype") + tensor_name = _execute.make_str(tensor_name, "tensor_name") + shape = _execute.make_shape(shape, "shape") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaRecv", dtype=dtype, tensor_name=tensor_name, shape=shape, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_recv, (), dict(dtype=dtype, tensor_name=tensor_name, + shape=shape, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("dtype", _op._get_attr_type("dtype"), "tensor_name", + _op.get_attr("tensor_name"), "shape", _op.get_attr("shape")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaRecv", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaRecv = tf_export("raw_ops.XlaRecv")(_ops.to_raw_op(xla_recv)) +_dispatcher_for_xla_recv = xla_recv._tf_type_based_dispatcher.Dispatch + + +def xla_recv_eager_fallback(dtype: TV_XlaRecv_dtype, tensor_name: str, shape, name, ctx) -> Annotated[Any, TV_XlaRecv_dtype]: + dtype = _execute.make_type(dtype, "dtype") + tensor_name = _execute.make_str(tensor_name, "tensor_name") + shape = _execute.make_shape(shape, "shape") + _inputs_flat = [] + _attrs = ("dtype", dtype, "tensor_name", tensor_name, "shape", shape) + _result = _execute.execute(b"XlaRecv", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaRecv", _inputs_flat, _attrs, 
_result)
  _result, = _result
  return _result


# NOTE(review): this file is machine generated (TensorFlow op-wrapper
# generator).  Do not hand-edit the logic: every wrapper below follows the
# same generated template — (1) eager fast path via TFE_Py_FastPathExecute,
# (2) type-based dispatcher, (3) eager fallback, (4) graph-mode node
# construction with fallback-dispatch on TypeError/ValueError.

TV_XlaReduce_T = TypeVar("TV_XlaReduce_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)

@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_reduce')
def xla_reduce(input: Annotated[Any, TV_XlaReduce_T], init_value: Annotated[Any, TV_XlaReduce_T], dimensions_to_reduce, reducer, name=None) -> Annotated[Any, TV_XlaReduce_T]:
  r"""Wraps the XLA Reduce operator, documented at

   https://www.tensorflow.org/performance/xla/operation_semantics#reduce .

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`.
      the input tensor
    init_value: A `Tensor`. Must have the same type as `input`.
      a scalar representing the initial value for the reduction
    dimensions_to_reduce: A list of `ints`.
      dimension numbers over which to reduce
    reducer: A function decorated with @Defun. a reducer function to apply
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager fast path: execute the op directly through the C API.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "XlaReduce", name, input, init_value, "dimensions_to_reduce",
        dimensions_to_reduce, "reducer", reducer)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; fall through to the slow path below.
      pass
    try:
      # Type-based API dispatch (e.g. for extension types); NotImplemented
      # means no dispatcher claimed the call.
      _result = _dispatcher_for_xla_reduce(
          (input, init_value, dimensions_to_reduce, reducer, name,), None)
      if _result is not NotImplemented:
        return _result
      return xla_reduce_eager_fallback(
          input, init_value, dimensions_to_reduce=dimensions_to_reduce,
          reducer=reducer, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Fallback dispatch list (legacy dispatch mechanism).
      _result = _dispatch.dispatch(
            xla_reduce, (), dict(input=input, init_value=init_value,
                                 dimensions_to_reduce=dimensions_to_reduce,
                                 reducer=reducer, name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_xla_reduce(
        (input, init_value, dimensions_to_reduce, reducer, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  if not isinstance(dimensions_to_reduce, (list, tuple)):
    raise TypeError(
        "Expected list for 'dimensions_to_reduce' argument to "
        "'xla_reduce' Op, not %r." % dimensions_to_reduce)
  dimensions_to_reduce = [_execute.make_int(_i, "dimensions_to_reduce") for _i in dimensions_to_reduce]
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaReduce", input=input, init_value=init_value,
                     dimensions_to_reduce=dimensions_to_reduce,
                     reducer=reducer, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_reduce, (), dict(input=input, init_value=init_value,
                               dimensions_to_reduce=dimensions_to_reduce,
                               reducer=reducer, name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "dimensions_to_reduce",
              _op.get_attr("dimensions_to_reduce"), "reducer",
              _op.get_attr("reducer"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaReduce", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

XlaReduce = tf_export("raw_ops.XlaReduce")(_ops.to_raw_op(xla_reduce))
_dispatcher_for_xla_reduce = xla_reduce._tf_type_based_dispatcher.Dispatch


# Slow-path eager execution for XlaReduce (used when the fast path raises
# _FallbackException): resolves the T attr from the inputs, then executes.
def xla_reduce_eager_fallback(input: Annotated[Any, TV_XlaReduce_T], init_value: Annotated[Any, TV_XlaReduce_T], dimensions_to_reduce, reducer, name, ctx) -> Annotated[Any, TV_XlaReduce_T]:
  if not isinstance(dimensions_to_reduce, (list, tuple)):
    raise TypeError(
        "Expected list for 'dimensions_to_reduce' argument to "
        "'xla_reduce' Op, not %r." % dimensions_to_reduce)
  dimensions_to_reduce = [_execute.make_int(_i, "dimensions_to_reduce") for _i in dimensions_to_reduce]
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, init_value], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.bool, ])
  (input, init_value) = _inputs_T
  _inputs_flat = [input, init_value]
  _attrs = ("T", _attr_T, "dimensions_to_reduce", dimensions_to_reduce,
  "reducer", reducer)
  _result = _execute.execute(b"XlaReduce", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaReduce", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


TV_XlaReducePrecision_T = TypeVar("TV_XlaReducePrecision_T", _atypes.BFloat16, _atypes.Float32, _atypes.Float64, _atypes.Half)

# Graph-and-eager dispatch wrapper for the XlaReducePrecision op.
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_reduce_precision')
def xla_reduce_precision(operand: Annotated[Any, TV_XlaReducePrecision_T], exponent_bits: int, mantissa_bits: int, name=None) -> Annotated[Any, TV_XlaReducePrecision_T]:
  r"""Wraps the XLA ReducePrecision operator

  documented at https://www.tensorflow.org/xla/operation_semantics#reduceprecision.

  Args:
    operand: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
      array of floating-point type.
    exponent_bits: An `int`. number of exponent bits in lower-precision format
    mantissa_bits: An `int`. number of mantissa bits in lower-precision format
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `operand`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "XlaReducePrecision", name, operand, "exponent_bits",
        exponent_bits, "mantissa_bits", mantissa_bits)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      _result = _dispatcher_for_xla_reduce_precision(
          (operand, exponent_bits, mantissa_bits, name,), None)
      if _result is not NotImplemented:
        return _result
      return xla_reduce_precision_eager_fallback(
          operand, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      _result = _dispatch.dispatch(
            xla_reduce_precision, (), dict(operand=operand,
                                           exponent_bits=exponent_bits,
                                           mantissa_bits=mantissa_bits,
                                           name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_xla_reduce_precision(
        (operand, exponent_bits, mantissa_bits, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  exponent_bits = _execute.make_int(exponent_bits, "exponent_bits")
  mantissa_bits = _execute.make_int(mantissa_bits, "mantissa_bits")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaReducePrecision", operand=operand, exponent_bits=exponent_bits,
                              mantissa_bits=mantissa_bits, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_reduce_precision, (), dict(operand=operand,
                                         exponent_bits=exponent_bits,
                                         mantissa_bits=mantissa_bits,
                                         name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "exponent_bits",
              _op._get_attr_int("exponent_bits"), "mantissa_bits",
              _op._get_attr_int("mantissa_bits"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaReducePrecision", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

XlaReducePrecision = tf_export("raw_ops.XlaReducePrecision")(_ops.to_raw_op(xla_reduce_precision))
_dispatcher_for_xla_reduce_precision = xla_reduce_precision._tf_type_based_dispatcher.Dispatch


# Slow-path eager execution for XlaReducePrecision.
def xla_reduce_precision_eager_fallback(operand: Annotated[Any, TV_XlaReducePrecision_T], exponent_bits: int, mantissa_bits: int, name, ctx) -> Annotated[Any, TV_XlaReducePrecision_T]:
  exponent_bits = _execute.make_int(exponent_bits, "exponent_bits")
  mantissa_bits = _execute.make_int(mantissa_bits, "mantissa_bits")
  _attr_T, (operand,) = _execute.args_to_matching_eager([operand], ctx, [_dtypes.bfloat16, _dtypes.half, _dtypes.float32, _dtypes.float64, ])
  _inputs_flat = [operand]
  _attrs = ("T", _attr_T, "exponent_bits", exponent_bits, "mantissa_bits",
  mantissa_bits)
  _result = _execute.execute(b"XlaReducePrecision", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaReducePrecision", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


TV_XlaReduceScatter_T = TypeVar("TV_XlaReduceScatter_T", _atypes.BFloat16, _atypes.Float32, _atypes.Half, _atypes.Int32, _atypes.UInt32)

# Graph-and-eager dispatch wrapper for the XlaReduceScatter op.
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_reduce_scatter')
def xla_reduce_scatter(input: Annotated[Any, TV_XlaReduceScatter_T], group_assignment: Annotated[Any, _atypes.Int32], scatter_dimension: Annotated[Any, _atypes.Int32], reduce_op: str, name=None) -> Annotated[Any, TV_XlaReduceScatter_T]:
  r"""Wraps the XLA ReduceScatter operator

  documented at https://www.tensorflow.org/xla/operation_semantics#reducescatter.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `bfloat16`, `float32`, `int32`, `uint32`.
      Array or a non-empty tuple of arrays to reduce across replicas.
    group_assignment: A `Tensor` of type `int32`.
      Groups between which the reductions are performed.
    scatter_dimension: A `Tensor` of type `int32`. Dimension to scatter.
    reduce_op: A `string` from: `"Min", "Max", "Mul", "Add", "Mean"`.
      Reduction computation.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "XlaReduceScatter", name, input, group_assignment,
        scatter_dimension, "reduce_op", reduce_op)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      _result = _dispatcher_for_xla_reduce_scatter(
          (input, group_assignment, scatter_dimension, reduce_op, name,), None)
      if _result is not NotImplemented:
        return _result
      return xla_reduce_scatter_eager_fallback(
          input, group_assignment, scatter_dimension, reduce_op=reduce_op,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      _result = _dispatch.dispatch(
            xla_reduce_scatter, (), dict(input=input,
                                         group_assignment=group_assignment,
                                         scatter_dimension=scatter_dimension,
                                         reduce_op=reduce_op, name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_xla_reduce_scatter(
        (input, group_assignment, scatter_dimension, reduce_op, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  reduce_op = _execute.make_str(reduce_op, "reduce_op")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaReduceScatter", input=input, group_assignment=group_assignment,
                            scatter_dimension=scatter_dimension,
                            reduce_op=reduce_op, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_reduce_scatter, (), dict(input=input,
                                       group_assignment=group_assignment,
                                       scatter_dimension=scatter_dimension,
                                       reduce_op=reduce_op, name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "reduce_op",
              _op.get_attr("reduce_op"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaReduceScatter", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

XlaReduceScatter = tf_export("raw_ops.XlaReduceScatter")(_ops.to_raw_op(xla_reduce_scatter))
_dispatcher_for_xla_reduce_scatter = xla_reduce_scatter._tf_type_based_dispatcher.Dispatch


# Slow-path eager execution for XlaReduceScatter.
def xla_reduce_scatter_eager_fallback(input: Annotated[Any, TV_XlaReduceScatter_T], group_assignment: Annotated[Any, _atypes.Int32], scatter_dimension: Annotated[Any, _atypes.Int32], reduce_op: str, name, ctx) -> Annotated[Any, TV_XlaReduceScatter_T]:
  reduce_op = _execute.make_str(reduce_op, "reduce_op")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [_dtypes.half, _dtypes.bfloat16, _dtypes.float32, _dtypes.int32, _dtypes.uint32, ])
  group_assignment = _ops.convert_to_tensor(group_assignment, _dtypes.int32)
  scatter_dimension = _ops.convert_to_tensor(scatter_dimension, _dtypes.int32)
  _inputs_flat = [input, group_assignment, scatter_dimension]
  _attrs = ("T", _attr_T, "reduce_op", reduce_op)
  _result = _execute.execute(b"XlaReduceScatter", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaReduceScatter", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


TV_XlaReduceWindow_T = TypeVar("TV_XlaReduceWindow_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
TV_XlaReduceWindow_Tindices = TypeVar("TV_XlaReduceWindow_Tindices", _atypes.Int32, _atypes.Int64)

# Graph-and-eager dispatch wrapper for the XlaReduceWindow op.
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_reduce_window')
def xla_reduce_window(input: Annotated[Any, TV_XlaReduceWindow_T], init_value: Annotated[Any, TV_XlaReduceWindow_T], window_dimensions: Annotated[Any, TV_XlaReduceWindow_Tindices], window_strides: Annotated[Any, TV_XlaReduceWindow_Tindices], base_dilations: Annotated[Any, TV_XlaReduceWindow_Tindices], window_dilations: Annotated[Any, TV_XlaReduceWindow_Tindices], padding: Annotated[Any, TV_XlaReduceWindow_Tindices], computation, name=None) -> Annotated[Any, TV_XlaReduceWindow_T]:
  r"""Wraps the XLA ReduceWindow operator, documented at

   https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`.
      the input tensor
    init_value: A `Tensor`. Must have the same type as `input`.
      a scalar representing the initial value for the reduction
    window_dimensions: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      the shape of the window
    window_strides: A `Tensor`. Must have the same type as `window_dimensions`.
      the inter-window strides
    base_dilations: A `Tensor`. Must have the same type as `window_dimensions`.
    window_dilations: A `Tensor`. Must have the same type as `window_dimensions`.
    padding: A `Tensor`. Must have the same type as `window_dimensions`.
      the padding to apply at the start and end of each input dimensions
    computation: A function decorated with @Defun. a reducer function to apply
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "XlaReduceWindow", name, input, init_value, window_dimensions,
        window_strides, base_dilations, window_dilations, padding,
        "computation", computation)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      _result = _dispatcher_for_xla_reduce_window(
          (input, init_value, window_dimensions, window_strides,
          base_dilations, window_dilations, padding, computation, name,), None)
      if _result is not NotImplemented:
        return _result
      return xla_reduce_window_eager_fallback(
          input, init_value, window_dimensions, window_strides,
          base_dilations, window_dilations, padding, computation=computation,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      _result = _dispatch.dispatch(
            xla_reduce_window, (), dict(input=input, init_value=init_value,
                                        window_dimensions=window_dimensions,
                                        window_strides=window_strides,
                                        base_dilations=base_dilations,
                                        window_dilations=window_dilations,
                                        padding=padding,
                                        computation=computation, name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_xla_reduce_window(
        (input, init_value, window_dimensions, window_strides, base_dilations,
        window_dilations, padding, computation, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaReduceWindow", input=input, init_value=init_value,
                           window_dimensions=window_dimensions,
                           window_strides=window_strides,
                           base_dilations=base_dilations,
                           window_dilations=window_dilations, padding=padding,
                           computation=computation, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_reduce_window, (), dict(input=input, init_value=init_value,
                                      window_dimensions=window_dimensions,
                                      window_strides=window_strides,
                                      base_dilations=base_dilations,
                                      window_dilations=window_dilations,
                                      padding=padding,
                                      computation=computation, name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "computation",
              _op.get_attr("computation"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaReduceWindow", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

XlaReduceWindow = tf_export("raw_ops.XlaReduceWindow")(_ops.to_raw_op(xla_reduce_window))
_dispatcher_for_xla_reduce_window = xla_reduce_window._tf_type_based_dispatcher.Dispatch


# Slow-path eager execution for XlaReduceWindow: resolves T from the data
# inputs and Tindices from the five shape/stride/padding inputs.
def xla_reduce_window_eager_fallback(input: Annotated[Any, TV_XlaReduceWindow_T], init_value: Annotated[Any, TV_XlaReduceWindow_T], window_dimensions: Annotated[Any, TV_XlaReduceWindow_Tindices], window_strides: Annotated[Any, TV_XlaReduceWindow_Tindices], base_dilations: Annotated[Any, TV_XlaReduceWindow_Tindices], window_dilations: Annotated[Any, TV_XlaReduceWindow_Tindices], padding: Annotated[Any, TV_XlaReduceWindow_Tindices], computation, name, ctx) -> Annotated[Any, TV_XlaReduceWindow_T]:
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, init_value], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.bool, ])
  (input, init_value) = _inputs_T
  _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([window_dimensions, window_strides, base_dilations, window_dilations, padding], ctx, [_dtypes.int32, _dtypes.int64, ])
  (window_dimensions, window_strides, base_dilations, window_dilations, padding) = _inputs_Tindices
  _inputs_flat = [input, init_value, window_dimensions, window_strides, base_dilations, window_dilations, padding]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "computation",
  computation)
  _result = _execute.execute(b"XlaReduceWindow", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaReduceWindow", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


TV_XlaRemoveDynamicDimensionSize_T = TypeVar("TV_XlaRemoveDynamicDimensionSize_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant)

# Graph-and-eager dispatch wrapper for the XlaRemoveDynamicDimensionSize op.
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_remove_dynamic_dimension_size')
def xla_remove_dynamic_dimension_size(input: Annotated[Any, TV_XlaRemoveDynamicDimensionSize_T], dim_index: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, TV_XlaRemoveDynamicDimensionSize_T]:
  r"""Inverse of XlaSetDynamicDimensionSize.

  Make an xla bounded dynamic dimension into a static dimension. The bound of the
  size of dimension `dim_index` becomes the static dimension size.

  Args:
    input: A `Tensor`.
    dim_index: A `Tensor` of type `int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "XlaRemoveDynamicDimensionSize", name, input, dim_index)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      _result = _dispatcher_for_xla_remove_dynamic_dimension_size(
          (input, dim_index, name,), None)
      if _result is not NotImplemented:
        return _result
      return xla_remove_dynamic_dimension_size_eager_fallback(
          input, dim_index, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      _result = _dispatch.dispatch(
            xla_remove_dynamic_dimension_size, (), dict(input=input,
                                                        dim_index=dim_index,
                                                        name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_xla_remove_dynamic_dimension_size(
        (input, dim_index, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaRemoveDynamicDimensionSize", input=input, dim_index=dim_index,
                                         name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_remove_dynamic_dimension_size, (), dict(input=input,
                                                      dim_index=dim_index,
                                                      name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaRemoveDynamicDimensionSize", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

XlaRemoveDynamicDimensionSize = tf_export("raw_ops.XlaRemoveDynamicDimensionSize")(_ops.to_raw_op(xla_remove_dynamic_dimension_size))
_dispatcher_for_xla_remove_dynamic_dimension_size = xla_remove_dynamic_dimension_size._tf_type_based_dispatcher.Dispatch


# Slow-path eager execution for XlaRemoveDynamicDimensionSize.  The empty
# allowed-types list means T is unconstrained here (any matching dtype).
def xla_remove_dynamic_dimension_size_eager_fallback(input: Annotated[Any, TV_XlaRemoveDynamicDimensionSize_T], dim_index: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, TV_XlaRemoveDynamicDimensionSize_T]:
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, [])
  dim_index = _ops.convert_to_tensor(dim_index, _dtypes.int32)
  _inputs_flat = [input, dim_index]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"XlaRemoveDynamicDimensionSize", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaRemoveDynamicDimensionSize", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


# Graph-and-eager dispatch wrapper for the nullary XlaReplicaId op.
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_replica_id')
def xla_replica_id(name=None) -> Annotated[Any, _atypes.Int32]:
  r"""Replica ID.

  Args:
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "XlaReplicaId", name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      _result = _dispatcher_for_xla_replica_id(
          (name,), None)
      if _result is not NotImplemented:
        return _result
      return xla_replica_id_eager_fallback(
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      _result = _dispatch.dispatch(
            xla_replica_id, (), dict(name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_xla_replica_id(
        (name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaReplicaId", name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_replica_id, (), dict(name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaReplicaId", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

XlaReplicaId = tf_export("raw_ops.XlaReplicaId")(_ops.to_raw_op(xla_replica_id))
_dispatcher_for_xla_replica_id = xla_replica_id._tf_type_based_dispatcher.Dispatch


# Slow-path eager execution for XlaReplicaId (no inputs, no attrs).
def xla_replica_id_eager_fallback(name, ctx) -> Annotated[Any, _atypes.Int32]:
  _inputs_flat = []
  _attrs = None
  _result = _execute.execute(b"XlaReplicaId", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaReplicaId", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Structured (named) result type for the two-output XlaRngBitGenerator op.
_XlaRngBitGeneratorOutput = collections.namedtuple(
    "XlaRngBitGenerator",
    ["output_key", "output"])


TV_XlaRngBitGenerator_dtype = TypeVar("TV_XlaRngBitGenerator_dtype", _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
TV_XlaRngBitGenerator_Tshape = TypeVar("TV_XlaRngBitGenerator_Tshape", _atypes.Int32, _atypes.Int64)

# Graph-and-eager dispatch wrapper for the XlaRngBitGenerator op.
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_rng_bit_generator')
def xla_rng_bit_generator(algorithm: Annotated[Any, _atypes.Int32], initial_state: Annotated[Any, _atypes.UInt64], shape: Annotated[Any, TV_XlaRngBitGenerator_Tshape], dtype:TV_XlaRngBitGenerator_dtype=_dtypes.uint64, name=None):
  r"""Stateless PRNG bit generator.

  Wraps the XLA RngBitGenerator operator, documented at
   https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator.

  Args:
    algorithm: A `Tensor` of type `int32`. The PRNG algorithm to use, one of
      tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}.
    initial_state: A `Tensor` of type `uint64`.
      Initial state for the PRNG algorithm. For THREEFRY, it should be
      a u64[2] and for PHILOX a u64[3].
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The output shape of the generated data.
    dtype: An optional `tf.DType` from: `tf.uint8, tf.int8, tf.int32, tf.int64, tf.uint32, tf.uint64`. Defaults to `tf.uint64`.
      The type of the tensor.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_key, output).

    output_key: A `Tensor` of type `uint64`.
    output: A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "XlaRngBitGenerator", name, algorithm, initial_state, shape,
        "dtype", dtype)
      _result = _XlaRngBitGeneratorOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      _result = _dispatcher_for_xla_rng_bit_generator(
          (algorithm, initial_state, shape, dtype, name,), None)
      if _result is not NotImplemented:
        return _result
      return xla_rng_bit_generator_eager_fallback(
          algorithm, initial_state, shape, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      _result = _dispatch.dispatch(
            xla_rng_bit_generator, (), dict(algorithm=algorithm,
                                            initial_state=initial_state,
                                            shape=shape, dtype=dtype,
                                            name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_xla_rng_bit_generator(
        (algorithm, initial_state, shape, dtype, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  if dtype is None:
    dtype = _dtypes.uint64
  dtype = _execute.make_type(dtype, "dtype")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaRngBitGenerator", algorithm=algorithm,
                              initial_state=initial_state, shape=shape,
                              dtype=dtype, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_rng_bit_generator, (), dict(algorithm=algorithm,
                                          initial_state=initial_state,
                                          shape=shape, dtype=dtype, name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"), "Tshape",
              _op._get_attr_type("Tshape"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaRngBitGenerator", _inputs_flat, _attrs, _result)
  _result = _XlaRngBitGeneratorOutput._make(_result)
  return _result

XlaRngBitGenerator = tf_export("raw_ops.XlaRngBitGenerator")(_ops.to_raw_op(xla_rng_bit_generator))
_dispatcher_for_xla_rng_bit_generator = xla_rng_bit_generator._tf_type_based_dispatcher.Dispatch


# Slow-path eager execution for XlaRngBitGenerator (2 outputs: key, data).
def xla_rng_bit_generator_eager_fallback(algorithm: Annotated[Any, _atypes.Int32], initial_state: Annotated[Any, _atypes.UInt64], shape: Annotated[Any, TV_XlaRngBitGenerator_Tshape], dtype: TV_XlaRngBitGenerator_dtype, name, ctx):
  if dtype is None:
    dtype = _dtypes.uint64
  dtype = _execute.make_type(dtype, "dtype")
  _attr_Tshape, (shape,) = _execute.args_to_matching_eager([shape], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int32)
  algorithm = _ops.convert_to_tensor(algorithm, _dtypes.int32)
  initial_state = _ops.convert_to_tensor(initial_state, _dtypes.uint64)
  _inputs_flat = [algorithm, initial_state, shape]
  _attrs = ("dtype", dtype, "Tshape", _attr_Tshape)
  _result = _execute.execute(b"XlaRngBitGenerator", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaRngBitGenerator", _inputs_flat, _attrs, _result)
  _result = _XlaRngBitGeneratorOutput._make(_result)
  return _result


TV_XlaScatter_T = TypeVar("TV_XlaScatter_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
TV_XlaScatter_Tindices = TypeVar("TV_XlaScatter_Tindices", _atypes.Int32, _atypes.Int64)

# Graph-and-eager dispatch wrapper for the XlaScatter op.
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_scatter')
def xla_scatter(operand: Annotated[Any, TV_XlaScatter_T], scatter_indices: Annotated[Any, TV_XlaScatter_Tindices], updates: Annotated[Any, TV_XlaScatter_T], update_computation, dimension_numbers: str, indices_are_sorted: bool, name=None) -> Annotated[Any, TV_XlaScatter_T]:
  r"""Wraps the XLA Scatter operator documented at

    https://www.tensorflow.org/xla/operation_semantics#scatter.

  Args:
    operand: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`.
      Array to be scattered into.
    scatter_indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Array containing the starting indices of the slices that must
      be scattered to.
    updates: A `Tensor`. Must have the same type as `operand`.
      Array containing the values that must be used for scattering.
    update_computation: A function decorated with @Defun.
      Computation to be used for combining the existing values in
      the input array and the updates during scatter.
    dimension_numbers: A `string`.
      A serialized xla::ScatterDimensionNumbers proto.
    indices_are_sorted: A `bool`.
      Boolean indicating if the indices are sorted.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `operand`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "XlaScatter", name, operand, scatter_indices, updates,
        "update_computation", update_computation, "dimension_numbers",
        dimension_numbers, "indices_are_sorted", indices_are_sorted)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      _result = _dispatcher_for_xla_scatter(
          (operand, scatter_indices, updates, update_computation,
          dimension_numbers, indices_are_sorted, name,), None)
      if _result is not NotImplemented:
        return _result
      return xla_scatter_eager_fallback(
          operand, scatter_indices, updates,
          update_computation=update_computation,
          dimension_numbers=dimension_numbers,
          indices_are_sorted=indices_are_sorted, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      _result = _dispatch.dispatch(
            xla_scatter, (), dict(operand=operand,
                                  scatter_indices=scatter_indices,
                                  updates=updates,
                                  update_computation=update_computation,
                                  dimension_numbers=dimension_numbers,
                                  indices_are_sorted=indices_are_sorted,
                                  name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_xla_scatter(
        (operand, scatter_indices, updates, update_computation,
        dimension_numbers, indices_are_sorted, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers")
  indices_are_sorted = _execute.make_bool(indices_are_sorted, "indices_are_sorted")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "XlaScatter", operand=operand, scatter_indices=scatter_indices,
                      updates=updates, update_computation=update_computation,
                      dimension_numbers=dimension_numbers,
                      indices_are_sorted=indices_are_sorted, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          xla_scatter, (), dict(operand=operand,
                                scatter_indices=scatter_indices,
                                updates=updates,
                                update_computation=update_computation,
                                dimension_numbers=dimension_numbers,
                                indices_are_sorted=indices_are_sorted,
                                name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("update_computation", _op.get_attr("update_computation"),
              "dimension_numbers", _op.get_attr("dimension_numbers"),
              "indices_are_sorted", _op._get_attr_bool("indices_are_sorted"),
              "T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "XlaScatter", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

XlaScatter = tf_export("raw_ops.XlaScatter")(_ops.to_raw_op(xla_scatter))
_dispatcher_for_xla_scatter = xla_scatter._tf_type_based_dispatcher.Dispatch


# Slow-path eager execution for XlaScatter: T comes from operand/updates,
# Tindices from scatter_indices.
def xla_scatter_eager_fallback(operand: Annotated[Any, TV_XlaScatter_T], scatter_indices: Annotated[Any, TV_XlaScatter_Tindices], updates: Annotated[Any, TV_XlaScatter_T], update_computation, dimension_numbers: str, indices_are_sorted: bool, name, ctx) -> Annotated[Any, TV_XlaScatter_T]:
  dimension_numbers = _execute.make_str(dimension_numbers, "dimension_numbers")
  indices_are_sorted = _execute.make_bool(indices_are_sorted, "indices_are_sorted")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([operand, updates], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.bool, ])
  (operand, updates) = _inputs_T
  _attr_Tindices, (scatter_indices,) = _execute.args_to_matching_eager([scatter_indices], ctx, [_dtypes.int32, _dtypes.int64, ])
  _inputs_flat = [operand, scatter_indices, updates]
  _attrs = ("update_computation", update_computation, "dimension_numbers",
  dimension_numbers, "indices_are_sorted", indices_are_sorted, "T", _attr_T,
  "Tindices", _attr_Tindices)
  _result = _execute.execute(b"XlaScatter", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "XlaScatter", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


TV_XlaSelectAndScatter_T = TypeVar("TV_XlaSelectAndScatter_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8)
TV_XlaSelectAndScatter_Tindices = TypeVar("TV_XlaSelectAndScatter_Tindices", _atypes.Int32, _atypes.Int64)

@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('xla_select_and_scatter')
def xla_select_and_scatter(operand: Annotated[Any, TV_XlaSelectAndScatter_T], window_dimensions: Annotated[Any, TV_XlaSelectAndScatter_Tindices], window_strides: Annotated[Any, TV_XlaSelectAndScatter_Tindices], padding: Annotated[Any, TV_XlaSelectAndScatter_Tindices], source: Annotated[Any, TV_XlaSelectAndScatter_T], init_value: Annotated[Any, TV_XlaSelectAndScatter_T], select, scatter, name=None) -> 
Annotated[Any, TV_XlaSelectAndScatter_T]: + r"""Wraps the XLA SelectAndScatter operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#selectandscatter + . + + Args: + operand: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + the input tensor + window_dimensions: A `Tensor`. Must be one of the following types: `int32`, `int64`. + the shape of the window + window_strides: A `Tensor`. Must have the same type as `window_dimensions`. + the inter-window strides + padding: A `Tensor`. Must have the same type as `window_dimensions`. + the padding to apply at the start and end of each input dimensions + source: A `Tensor`. Must have the same type as `operand`. + a tensor of values to scatter + init_value: A `Tensor`. Must have the same type as `operand`. + a scalar representing the initial value for the output tensor + select: A function decorated with @Defun. a selection function to apply + scatter: A function decorated with @Defun. a scatter function to apply + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `operand`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSelectAndScatter", name, operand, window_dimensions, + window_strides, padding, source, init_value, "select", select, + "scatter", scatter) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_select_and_scatter( + (operand, window_dimensions, window_strides, padding, source, + init_value, select, scatter, name,), None) + if _result is not NotImplemented: + return _result + return xla_select_and_scatter_eager_fallback( + operand, window_dimensions, window_strides, padding, source, + init_value, select=select, scatter=scatter, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_select_and_scatter, (), dict(operand=operand, + window_dimensions=window_dimensions, + window_strides=window_strides, + padding=padding, source=source, + init_value=init_value, + select=select, scatter=scatter, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_select_and_scatter( + (operand, window_dimensions, window_strides, padding, source, + init_value, select, scatter, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSelectAndScatter", operand=operand, + window_dimensions=window_dimensions, + window_strides=window_strides, padding=padding, + source=source, init_value=init_value, + select=select, scatter=scatter, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_select_and_scatter, (), dict(operand=operand, + window_dimensions=window_dimensions, + window_strides=window_strides, + padding=padding, source=source, + init_value=init_value, + select=select, scatter=scatter, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "Tindices", + _op._get_attr_type("Tindices"), "select", + _op.get_attr("select"), "scatter", _op.get_attr("scatter")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaSelectAndScatter", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaSelectAndScatter = tf_export("raw_ops.XlaSelectAndScatter")(_ops.to_raw_op(xla_select_and_scatter)) +_dispatcher_for_xla_select_and_scatter = xla_select_and_scatter._tf_type_based_dispatcher.Dispatch + + +def xla_select_and_scatter_eager_fallback(operand: Annotated[Any, TV_XlaSelectAndScatter_T], window_dimensions: Annotated[Any, TV_XlaSelectAndScatter_Tindices], window_strides: Annotated[Any, TV_XlaSelectAndScatter_Tindices], padding: Annotated[Any, TV_XlaSelectAndScatter_Tindices], source: Annotated[Any, TV_XlaSelectAndScatter_T], init_value: Annotated[Any, TV_XlaSelectAndScatter_T], select, scatter, name, ctx) -> Annotated[Any, TV_XlaSelectAndScatter_T]: + _attr_T, _inputs_T = _execute.args_to_matching_eager([operand, source, init_value], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, 
_dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + (operand, source, init_value) = _inputs_T + _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([window_dimensions, window_strides, padding], ctx, [_dtypes.int32, _dtypes.int64, ]) + (window_dimensions, window_strides, padding) = _inputs_Tindices + _inputs_flat = [operand, window_dimensions, window_strides, padding, source, init_value] + _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "select", select, + "scatter", scatter) + _result = _execute.execute(b"XlaSelectAndScatter", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaSelectAndScatter", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_XlaSelfAdjointEigOutput = collections.namedtuple( + "XlaSelfAdjointEig", + ["w", "v"]) + + +TV_XlaSelfAdjointEig_T = TypeVar("TV_XlaSelfAdjointEig_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_self_adjoint_eig') +def xla_self_adjoint_eig(a: Annotated[Any, TV_XlaSelfAdjointEig_T], lower: bool, max_iter: int, epsilon: float, name=None): + r"""Computes the eigen decomposition of a batch of self-adjoint matrices + + (Note: Only real inputs are supported). + + Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in + tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], for + i=0...N-1. + + Args: + a: A `Tensor`. 
Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + the input tensor. + lower: A `bool`. + a boolean specifies whether the calculation is done with the lower + triangular part or the upper triangular part. + max_iter: An `int`. + maximum number of sweep update, i.e., the whole lower triangular + part or upper triangular part based on parameter lower. Heuristically, it has + been argued that approximately logN sweeps are needed in practice (Ref: Golub & + van Loan "Matrix Computation"). + epsilon: A `float`. the tolerance ratio. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (w, v). + + w: A `Tensor`. Has the same type as `a`. The eigenvalues in ascending order, each repeated according to its + multiplicity. + v: A `Tensor`. Has the same type as `a`. The column v[..., :, i] is the normalized eigenvector corresponding to the + eigenvalue w[..., i]. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSelfAdjointEig", name, a, "lower", lower, "max_iter", + max_iter, "epsilon", epsilon) + _result = _XlaSelfAdjointEigOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_self_adjoint_eig( + (a, lower, max_iter, epsilon, name,), None) + if _result is not NotImplemented: + return _result + return xla_self_adjoint_eig_eager_fallback( + a, lower=lower, max_iter=max_iter, epsilon=epsilon, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_self_adjoint_eig, (), dict(a=a, lower=lower, + max_iter=max_iter, epsilon=epsilon, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_self_adjoint_eig( + (a, lower, max_iter, epsilon, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + lower = _execute.make_bool(lower, "lower") + max_iter = _execute.make_int(max_iter, "max_iter") + epsilon = _execute.make_float(epsilon, "epsilon") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSelfAdjointEig", a=a, lower=lower, max_iter=max_iter, + epsilon=epsilon, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_self_adjoint_eig, (), dict(a=a, lower=lower, max_iter=max_iter, + epsilon=epsilon, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("lower", _op._get_attr_bool("lower"), "max_iter", + _op._get_attr_int("max_iter"), "epsilon", + _op.get_attr("epsilon"), "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaSelfAdjointEig", _inputs_flat, _attrs, _result) + _result = _XlaSelfAdjointEigOutput._make(_result) + return _result + +XlaSelfAdjointEig = tf_export("raw_ops.XlaSelfAdjointEig")(_ops.to_raw_op(xla_self_adjoint_eig)) +_dispatcher_for_xla_self_adjoint_eig = xla_self_adjoint_eig._tf_type_based_dispatcher.Dispatch + + +def xla_self_adjoint_eig_eager_fallback(a: Annotated[Any, TV_XlaSelfAdjointEig_T], lower: bool, max_iter: int, epsilon: float, name, ctx): + lower = _execute.make_bool(lower, "lower") + max_iter = _execute.make_int(max_iter, "max_iter") + epsilon = _execute.make_float(epsilon, "epsilon") + _attr_T, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.float32, _dtypes.float64, 
_dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _inputs_flat = [a] + _attrs = ("lower", lower, "max_iter", max_iter, "epsilon", epsilon, "T", + _attr_T) + _result = _execute.execute(b"XlaSelfAdjointEig", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaSelfAdjointEig", _inputs_flat, _attrs, _result) + _result = _XlaSelfAdjointEigOutput._make(_result) + return _result + + +TV_XlaSend_T = TypeVar("TV_XlaSend_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_send') +def xla_send(tensor: Annotated[Any, TV_XlaSend_T], tensor_name: str, name=None): + r"""Sends the named tensor to another XLA computation. Wraps the XLA Send operator + + documented at + https://www.tensorflow.org/performance/xla/operation_semantics#send . + + Args: + tensor: A `Tensor`. The tensor to send. + tensor_name: A `string`. A string key that identifies the channel. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSend", name, tensor, "tensor_name", tensor_name) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_send( + (tensor, tensor_name, name,), None) + if _result is not NotImplemented: + return _result + return xla_send_eager_fallback( + tensor, tensor_name=tensor_name, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_send, (), dict(tensor=tensor, tensor_name=tensor_name, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_send( + (tensor, tensor_name, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ tensor_name = _execute.make_str(tensor_name, "tensor_name") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSend", tensor=tensor, tensor_name=tensor_name, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_send, (), dict(tensor=tensor, tensor_name=tensor_name, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + return _op +XlaSend = tf_export("raw_ops.XlaSend")(_ops.to_raw_op(xla_send)) +_dispatcher_for_xla_send = xla_send._tf_type_based_dispatcher.Dispatch + + +def xla_send_eager_fallback(tensor: Annotated[Any, TV_XlaSend_T], tensor_name: str, name, ctx): + tensor_name = _execute.make_str(tensor_name, "tensor_name") + _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], ctx, []) + _inputs_flat = [tensor] + _attrs = ("T", _attr_T, "tensor_name", tensor_name) + _result = _execute.execute(b"XlaSend", 0, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + _result = None + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_set_bound') +def xla_set_bound(input: Annotated[Any, _atypes.Int32], bound: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, _atypes.Int32]: + r"""Set a bound for the given input value as a hint to Xla compiler, + + returns the same value. + + Args: + input: A `Tensor` of type `int32`. + bound: A `Tensor` of type `int32`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `int32`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSetBound", name, input, bound) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_set_bound( + (input, bound, name,), None) + if _result is not NotImplemented: + return _result + return xla_set_bound_eager_fallback( + input, bound, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_set_bound, (), dict(input=input, bound=bound, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_set_bound( + (input, bound, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSetBound", input=input, bound=bound, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_set_bound, (), dict(input=input, bound=bound, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaSetBound", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaSetBound = tf_export("raw_ops.XlaSetBound")(_ops.to_raw_op(xla_set_bound)) +_dispatcher_for_xla_set_bound = xla_set_bound._tf_type_based_dispatcher.Dispatch + + +def xla_set_bound_eager_fallback(input: Annotated[Any, _atypes.Int32], bound: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, _atypes.Int32]: + input = _ops.convert_to_tensor(input, _dtypes.int32) + bound = _ops.convert_to_tensor(bound, _dtypes.int32) + _inputs_flat = [input, bound] + _attrs = None + _result = _execute.execute(b"XlaSetBound", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaSetBound", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaSetDynamicDimensionSize_T = TypeVar("TV_XlaSetDynamicDimensionSize_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_set_dynamic_dimension_size') +def 
xla_set_dynamic_dimension_size(input: Annotated[Any, TV_XlaSetDynamicDimensionSize_T], dim_index: Annotated[Any, _atypes.Int32], size: Annotated[Any, _atypes.Int32], name=None) -> Annotated[Any, TV_XlaSetDynamicDimensionSize_T]: + r"""Make a static dimension into a xla bounded dynamic dimension. + + The current static dimension size will become the bound and the second + operand becomes the dynamic size of the dimension. + + Args: + input: A `Tensor`. + dim_index: A `Tensor` of type `int32`. + size: A `Tensor` of type `int32`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSetDynamicDimensionSize", name, input, dim_index, size) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_set_dynamic_dimension_size( + (input, dim_index, size, name,), None) + if _result is not NotImplemented: + return _result + return xla_set_dynamic_dimension_size_eager_fallback( + input, dim_index, size, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_set_dynamic_dimension_size, (), dict(input=input, + dim_index=dim_index, + size=size, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_set_dynamic_dimension_size( + (input, dim_index, size, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSetDynamicDimensionSize", input=input, dim_index=dim_index, + size=size, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_set_dynamic_dimension_size, (), dict(input=input, + dim_index=dim_index, + size=size, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaSetDynamicDimensionSize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaSetDynamicDimensionSize = tf_export("raw_ops.XlaSetDynamicDimensionSize")(_ops.to_raw_op(xla_set_dynamic_dimension_size)) +_dispatcher_for_xla_set_dynamic_dimension_size = xla_set_dynamic_dimension_size._tf_type_based_dispatcher.Dispatch + + +def xla_set_dynamic_dimension_size_eager_fallback(input: Annotated[Any, TV_XlaSetDynamicDimensionSize_T], dim_index: Annotated[Any, _atypes.Int32], size: Annotated[Any, _atypes.Int32], name, ctx) -> Annotated[Any, TV_XlaSetDynamicDimensionSize_T]: + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, []) + dim_index = _ops.convert_to_tensor(dim_index, _dtypes.int32) + size = _ops.convert_to_tensor(size, _dtypes.int32) + _inputs_flat = [input, dim_index, size] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"XlaSetDynamicDimensionSize", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaSetDynamicDimensionSize", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaSharding_T = TypeVar("TV_XlaSharding_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, 
_atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_sharding') +def xla_sharding(input: Annotated[Any, TV_XlaSharding_T], sharding:str="", unspecified_dims=[], name=None) -> Annotated[Any, TV_XlaSharding_T]: + r"""An op which shards the input based on the given sharding attribute. It can + + selectively annotate a subset of tensor dimensions by skipping unspecified_dims, + and the sharding annotation should be replicated in those dims. + + Args: + input: A `Tensor`. + sharding: An optional `string`. Defaults to `""`. + unspecified_dims: An optional list of `ints`. Defaults to `[]`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSharding", name, input, "sharding", sharding, + "unspecified_dims", unspecified_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_sharding( + (input, sharding, unspecified_dims, name,), None) + if _result is not NotImplemented: + return _result + return xla_sharding_eager_fallback( + input, sharding=sharding, unspecified_dims=unspecified_dims, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. 
+ except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_sharding, (), dict(input=input, sharding=sharding, + unspecified_dims=unspecified_dims, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_sharding( + (input, sharding, unspecified_dims, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + if sharding is None: + sharding = "" + sharding = _execute.make_str(sharding, "sharding") + if unspecified_dims is None: + unspecified_dims = [] + if not isinstance(unspecified_dims, (list, tuple)): + raise TypeError( + "Expected list for 'unspecified_dims' argument to " + "'xla_sharding' Op, not %r." % unspecified_dims) + unspecified_dims = [_execute.make_int(_i, "unspecified_dims") for _i in unspecified_dims] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSharding", input=input, sharding=sharding, + unspecified_dims=unspecified_dims, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_sharding, (), dict(input=input, sharding=sharding, + unspecified_dims=unspecified_dims, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "sharding", + _op.get_attr("sharding"), "unspecified_dims", + _op.get_attr("unspecified_dims")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaSharding", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaSharding = tf_export("raw_ops.XlaSharding")(_ops.to_raw_op(xla_sharding)) +_dispatcher_for_xla_sharding = xla_sharding._tf_type_based_dispatcher.Dispatch + + +def xla_sharding_eager_fallback(input: Annotated[Any, TV_XlaSharding_T], sharding: str, unspecified_dims, name, ctx) -> Annotated[Any, TV_XlaSharding_T]: + if sharding is None: + sharding = "" + 
sharding = _execute.make_str(sharding, "sharding") + if unspecified_dims is None: + unspecified_dims = [] + if not isinstance(unspecified_dims, (list, tuple)): + raise TypeError( + "Expected list for 'unspecified_dims' argument to " + "'xla_sharding' Op, not %r." % unspecified_dims) + unspecified_dims = [_execute.make_int(_i, "unspecified_dims") for _i in unspecified_dims] + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, []) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "sharding", sharding, "unspecified_dims", + unspecified_dims) + _result = _execute.execute(b"XlaSharding", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaSharding", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaSort_T = TypeVar("TV_XlaSort_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_sort') +def xla_sort(input: Annotated[Any, TV_XlaSort_T], name=None) -> Annotated[Any, TV_XlaSort_T]: + r"""Wraps the XLA Sort operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#sort + . + + Sorts a tensor. Currently only sorts in ascending order are supported. + + Args: + input: A `Tensor`. A `Tensor` of type T. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. A `Tensor` of type T. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSort", name, input) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_sort( + (input, name,), None) + if _result is not NotImplemented: + return _result + return xla_sort_eager_fallback( + input, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_sort, (), dict(input=input, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_sort( + (input, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSort", input=input, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_sort, (), dict(input=input, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaSort", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaSort = tf_export("raw_ops.XlaSort")(_ops.to_raw_op(xla_sort)) +_dispatcher_for_xla_sort = xla_sort._tf_type_based_dispatcher.Dispatch + + +def xla_sort_eager_fallback(input: Annotated[Any, TV_XlaSort_T], name, ctx) -> Annotated[Any, TV_XlaSort_T]: + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, []) + _inputs_flat = [input] + _attrs = ("T", _attr_T) + _result = _execute.execute(b"XlaSort", 1, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if 
_execute.must_record_gradient(): + _execute.record_gradient( + "XlaSort", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaSpmdFullToShardShape_T = TypeVar("TV_XlaSpmdFullToShardShape_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_spmd_full_to_shard_shape') +def xla_spmd_full_to_shard_shape(input: Annotated[Any, TV_XlaSpmdFullToShardShape_T], manual_sharding: str, dim:int=-1, unspecified_dims=[], name=None) -> Annotated[Any, TV_XlaSpmdFullToShardShape_T]: + r"""An op used by XLA SPMD partitioner to switch from automatic partitioning to + + manual partitioning. It annotates the input (full-shape, to be automatically + partitioned) with the same sharding used by manual partitioning, and outputs a + shard-shaped tensor to be consumed by later manually-partitioned ops. If the + shape is not evenly partitionable, the padding region will be masked with 0s. + The conversion can happen partially in subgroups, by specifying the dim + attribute, where only that dim will be converted. + + Args: + input: A `Tensor`. + manual_sharding: A `string`. + dim: An optional `int`. Defaults to `-1`. + unspecified_dims: An optional list of `ints`. Defaults to `[]`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSpmdFullToShardShape", name, input, "manual_sharding", + manual_sharding, "dim", dim, "unspecified_dims", unspecified_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_spmd_full_to_shard_shape( + (input, manual_sharding, dim, unspecified_dims, name,), None) + if _result is not NotImplemented: + return _result + return xla_spmd_full_to_shard_shape_eager_fallback( + input, manual_sharding=manual_sharding, dim=dim, + unspecified_dims=unspecified_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_spmd_full_to_shard_shape, (), dict(input=input, + manual_sharding=manual_sharding, + dim=dim, + unspecified_dims=unspecified_dims, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_spmd_full_to_shard_shape( + (input, manual_sharding, dim, unspecified_dims, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + manual_sharding = _execute.make_str(manual_sharding, "manual_sharding") + if dim is None: + dim = -1 + dim = _execute.make_int(dim, "dim") + if unspecified_dims is None: + unspecified_dims = [] + if not isinstance(unspecified_dims, (list, tuple)): + raise TypeError( + "Expected list for 'unspecified_dims' argument to " + "'xla_spmd_full_to_shard_shape' Op, not %r." 
% unspecified_dims) + unspecified_dims = [_execute.make_int(_i, "unspecified_dims") for _i in unspecified_dims] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSpmdFullToShardShape", input=input, + manual_sharding=manual_sharding, dim=dim, + unspecified_dims=unspecified_dims, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_spmd_full_to_shard_shape, (), dict(input=input, + manual_sharding=manual_sharding, + dim=dim, + unspecified_dims=unspecified_dims, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "manual_sharding", + _op.get_attr("manual_sharding"), "dim", + _op._get_attr_int("dim"), "unspecified_dims", + _op.get_attr("unspecified_dims")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaSpmdFullToShardShape", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaSpmdFullToShardShape = tf_export("raw_ops.XlaSpmdFullToShardShape")(_ops.to_raw_op(xla_spmd_full_to_shard_shape)) +_dispatcher_for_xla_spmd_full_to_shard_shape = xla_spmd_full_to_shard_shape._tf_type_based_dispatcher.Dispatch + + +def xla_spmd_full_to_shard_shape_eager_fallback(input: Annotated[Any, TV_XlaSpmdFullToShardShape_T], manual_sharding: str, dim: int, unspecified_dims, name, ctx) -> Annotated[Any, TV_XlaSpmdFullToShardShape_T]: + manual_sharding = _execute.make_str(manual_sharding, "manual_sharding") + if dim is None: + dim = -1 + dim = _execute.make_int(dim, "dim") + if unspecified_dims is None: + unspecified_dims = [] + if not isinstance(unspecified_dims, (list, tuple)): + raise TypeError( + "Expected list for 'unspecified_dims' argument to " + "'xla_spmd_full_to_shard_shape' Op, not %r." 
% unspecified_dims) + unspecified_dims = [_execute.make_int(_i, "unspecified_dims") for _i in unspecified_dims] + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, []) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "manual_sharding", manual_sharding, "dim", dim, + "unspecified_dims", unspecified_dims) + _result = _execute.execute(b"XlaSpmdFullToShardShape", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaSpmdFullToShardShape", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +TV_XlaSpmdShardToFullShape_T = TypeVar("TV_XlaSpmdShardToFullShape_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, _atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, _atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, _atypes.Variant) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_spmd_shard_to_full_shape') +def xla_spmd_shard_to_full_shape(input: Annotated[Any, TV_XlaSpmdShardToFullShape_T], manual_sharding: str, full_shape, dim:int=-1, unspecified_dims=[], name=None) -> Annotated[Any, TV_XlaSpmdShardToFullShape_T]: + r"""An op used by XLA SPMD partitioner to switch from manual partitioning to + + automatic partitioning. It converts the shard-shaped, manually partitioned input + into full-shaped tensor to be partitioned automatically with the same sharding + used by manual partitioning. The conversion can happen partially in subgroups, + by specifying the dim attribute, where only that dim will be converted. + + Args: + input: A `Tensor`. + manual_sharding: A `string`. + full_shape: A `tf.TensorShape` or list of `ints`. 
+ dim: An optional `int`. Defaults to `-1`. + unspecified_dims: An optional list of `ints`. Defaults to `[]`. + name: A name for the operation (optional). + + Returns: + A `Tensor`. Has the same type as `input`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSpmdShardToFullShape", name, input, "manual_sharding", + manual_sharding, "full_shape", full_shape, "dim", dim, + "unspecified_dims", unspecified_dims) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_spmd_shard_to_full_shape( + (input, manual_sharding, full_shape, dim, unspecified_dims, name,), + None) + if _result is not NotImplemented: + return _result + return xla_spmd_shard_to_full_shape_eager_fallback( + input, manual_sharding=manual_sharding, full_shape=full_shape, + dim=dim, unspecified_dims=unspecified_dims, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_spmd_shard_to_full_shape, (), dict(input=input, + manual_sharding=manual_sharding, + full_shape=full_shape, + dim=dim, + unspecified_dims=unspecified_dims, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_spmd_shard_to_full_shape( + (input, manual_sharding, full_shape, dim, unspecified_dims, name,), + None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ manual_sharding = _execute.make_str(manual_sharding, "manual_sharding") + full_shape = _execute.make_shape(full_shape, "full_shape") + if dim is None: + dim = -1 + dim = _execute.make_int(dim, "dim") + if unspecified_dims is None: + unspecified_dims = [] + if not isinstance(unspecified_dims, (list, tuple)): + raise TypeError( + "Expected list for 'unspecified_dims' argument to " + "'xla_spmd_shard_to_full_shape' Op, not %r." % unspecified_dims) + unspecified_dims = [_execute.make_int(_i, "unspecified_dims") for _i in unspecified_dims] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSpmdShardToFullShape", input=input, + manual_sharding=manual_sharding, + full_shape=full_shape, dim=dim, + unspecified_dims=unspecified_dims, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_spmd_shard_to_full_shape, (), dict(input=input, + manual_sharding=manual_sharding, + full_shape=full_shape, + dim=dim, + unspecified_dims=unspecified_dims, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op._get_attr_type("T"), "manual_sharding", + _op.get_attr("manual_sharding"), "full_shape", + _op.get_attr("full_shape"), "dim", _op._get_attr_int("dim"), + "unspecified_dims", _op.get_attr("unspecified_dims")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaSpmdShardToFullShape", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +XlaSpmdShardToFullShape = tf_export("raw_ops.XlaSpmdShardToFullShape")(_ops.to_raw_op(xla_spmd_shard_to_full_shape)) +_dispatcher_for_xla_spmd_shard_to_full_shape = xla_spmd_shard_to_full_shape._tf_type_based_dispatcher.Dispatch + + +def xla_spmd_shard_to_full_shape_eager_fallback(input: Annotated[Any, TV_XlaSpmdShardToFullShape_T], manual_sharding: str, full_shape, dim: int, unspecified_dims, name, ctx) -> Annotated[Any, 
TV_XlaSpmdShardToFullShape_T]: + manual_sharding = _execute.make_str(manual_sharding, "manual_sharding") + full_shape = _execute.make_shape(full_shape, "full_shape") + if dim is None: + dim = -1 + dim = _execute.make_int(dim, "dim") + if unspecified_dims is None: + unspecified_dims = [] + if not isinstance(unspecified_dims, (list, tuple)): + raise TypeError( + "Expected list for 'unspecified_dims' argument to " + "'xla_spmd_shard_to_full_shape' Op, not %r." % unspecified_dims) + unspecified_dims = [_execute.make_int(_i, "unspecified_dims") for _i in unspecified_dims] + _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx, []) + _inputs_flat = [input] + _attrs = ("T", _attr_T, "manual_sharding", manual_sharding, "full_shape", + full_shape, "dim", dim, "unspecified_dims", unspecified_dims) + _result = _execute.execute(b"XlaSpmdShardToFullShape", 1, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaSpmdShardToFullShape", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +_XlaSvdOutput = collections.namedtuple( + "XlaSvd", + ["s", "u", "v"]) + + +TV_XlaSvd_T = TypeVar("TV_XlaSvd_T", _atypes.BFloat16, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_svd') +def xla_svd(a: Annotated[Any, TV_XlaSvd_T], max_iter: int, epsilon: float, precision_config: str, name=None): + r"""Computes the eigen decomposition of a batch of self-adjoint matrices + + (Note: Only real inputs are supported). 
+ + Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in + tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]). + + Args: + a: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`. + the input tensor. + max_iter: An `int`. + maximum number of sweep update, i.e., the whole lower triangular + part or upper triangular part based on parameter lower. Heuristically, it has + been argued that approximately log(min (M, N)) sweeps are needed in practice + (Ref: Golub & van Loan "Matrix Computation"). + epsilon: A `float`. the tolerance ratio. + precision_config: A `string`. a serialized xla::PrecisionConfig proto. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (s, u, v). + + s: A `Tensor`. Has the same type as `a`. Singular values. The values are sorted in reverse order of magnitude, so + s[..., 0] is the largest value, s[..., 1] is the second largest, etc. + u: A `Tensor`. Has the same type as `a`. Left singular vectors. + v: A `Tensor`. Has the same type as `a`. Right singular vectors. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaSvd", name, a, "max_iter", max_iter, "epsilon", epsilon, + "precision_config", precision_config) + _result = _XlaSvdOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_svd( + (a, max_iter, epsilon, precision_config, name,), None) + if _result is not NotImplemented: + return _result + return xla_svd_eager_fallback( + a, max_iter=max_iter, epsilon=epsilon, + precision_config=precision_config, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_svd, (), dict(a=a, max_iter=max_iter, epsilon=epsilon, + precision_config=precision_config, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_svd( + (a, max_iter, epsilon, precision_config, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ max_iter = _execute.make_int(max_iter, "max_iter") + epsilon = _execute.make_float(epsilon, "epsilon") + precision_config = _execute.make_str(precision_config, "precision_config") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaSvd", a=a, max_iter=max_iter, epsilon=epsilon, + precision_config=precision_config, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_svd, (), dict(a=a, max_iter=max_iter, epsilon=epsilon, + precision_config=precision_config, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("max_iter", _op._get_attr_int("max_iter"), "epsilon", + _op.get_attr("epsilon"), "precision_config", + _op.get_attr("precision_config"), "T", _op._get_attr_type("T")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaSvd", _inputs_flat, _attrs, _result) + _result = _XlaSvdOutput._make(_result) + return _result + +XlaSvd = tf_export("raw_ops.XlaSvd")(_ops.to_raw_op(xla_svd)) +_dispatcher_for_xla_svd = xla_svd._tf_type_based_dispatcher.Dispatch + + +def xla_svd_eager_fallback(a: Annotated[Any, TV_XlaSvd_T], max_iter: int, epsilon: float, precision_config: str, name, ctx): + max_iter = _execute.make_int(max_iter, "max_iter") + epsilon = _execute.make_float(epsilon, "epsilon") + precision_config = _execute.make_str(precision_config, "precision_config") + _attr_T, (a,) = _execute.args_to_matching_eager([a], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ]) + _inputs_flat = [a] + _attrs = ("max_iter", max_iter, "epsilon", epsilon, "precision_config", + precision_config, "T", _attr_T) + _result = _execute.execute(b"XlaSvd", 3, 
inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaSvd", _inputs_flat, _attrs, _result) + _result = _XlaSvdOutput._make(_result) + return _result + + +TV_XlaVariadicReduce_T = TypeVar("TV_XlaVariadicReduce_T", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, _atypes.Complex64, _atypes.Float32, _atypes.Float64, _atypes.Half, _atypes.Int16, _atypes.Int32, _atypes.Int64, _atypes.Int8, _atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, _atypes.QUInt8, _atypes.UInt16, _atypes.UInt32, _atypes.UInt64, _atypes.UInt8) + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_variadic_reduce') +def xla_variadic_reduce(input: Annotated[List[Any], TV_XlaVariadicReduce_T], init_value: Annotated[List[Any], TV_XlaVariadicReduce_T], dimensions_to_reduce, reducer, name=None): + r"""Wraps the variadic XLA Reduce operator. + + Semantics are documented at + https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce. + + This version is limited to operands of the same dtype. + XlaVariadicReduceV2 is a version that supports heterogeneous operands. + + Args: + input: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `bool`. + the input tensor(s) + init_value: A list with the same length as `input` of `Tensor` objects with the same type as `input`. + scalar initial value(s) for the reduction + dimensions_to_reduce: A list of `ints`. + dimension numbers over which to reduce + reducer: A function decorated with @Defun. a reducer function to apply + name: A name for the operation (optional). + + Returns: + A list with the same length as `input` of `Tensor` objects with the same type as `input`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaVariadicReduce", name, input, init_value, + "dimensions_to_reduce", dimensions_to_reduce, "reducer", reducer) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_variadic_reduce( + (input, init_value, dimensions_to_reduce, reducer, name,), None) + if _result is not NotImplemented: + return _result + return xla_variadic_reduce_eager_fallback( + input, init_value, dimensions_to_reduce=dimensions_to_reduce, + reducer=reducer, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_variadic_reduce, (), dict(input=input, init_value=init_value, + dimensions_to_reduce=dimensions_to_reduce, + reducer=reducer, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_variadic_reduce( + (input, init_value, dimensions_to_reduce, reducer, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + if not isinstance(input, (list, tuple)): + raise TypeError( + "Expected list for 'input' argument to " + "'xla_variadic_reduce' Op, not %r." % input) + _attr_N = len(input) + if not isinstance(init_value, (list, tuple)): + raise TypeError( + "Expected list for 'init_value' argument to " + "'xla_variadic_reduce' Op, not %r." % init_value) + if len(init_value) != _attr_N: + raise ValueError( + "List argument 'init_value' to 'xla_variadic_reduce' Op with length %d " + "must match length %d of argument 'input'." 
% + (len(init_value), _attr_N)) + if not isinstance(dimensions_to_reduce, (list, tuple)): + raise TypeError( + "Expected list for 'dimensions_to_reduce' argument to " + "'xla_variadic_reduce' Op, not %r." % dimensions_to_reduce) + dimensions_to_reduce = [_execute.make_int(_i, "dimensions_to_reduce") for _i in dimensions_to_reduce] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaVariadicReduce", input=input, init_value=init_value, + dimensions_to_reduce=dimensions_to_reduce, + reducer=reducer, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_variadic_reduce, (), dict(input=input, init_value=init_value, + dimensions_to_reduce=dimensions_to_reduce, + reducer=reducer, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), + "dimensions_to_reduce", _op.get_attr("dimensions_to_reduce"), + "reducer", _op.get_attr("reducer")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaVariadicReduce", _inputs_flat, _attrs, _result) + return _result + +XlaVariadicReduce = tf_export("raw_ops.XlaVariadicReduce")(_ops.to_raw_op(xla_variadic_reduce)) +_dispatcher_for_xla_variadic_reduce = xla_variadic_reduce._tf_type_based_dispatcher.Dispatch + + +def xla_variadic_reduce_eager_fallback(input: Annotated[List[Any], TV_XlaVariadicReduce_T], init_value: Annotated[List[Any], TV_XlaVariadicReduce_T], dimensions_to_reduce, reducer, name, ctx): + if not isinstance(input, (list, tuple)): + raise TypeError( + "Expected list for 'input' argument to " + "'xla_variadic_reduce' Op, not %r." % input) + _attr_N = len(input) + if not isinstance(init_value, (list, tuple)): + raise TypeError( + "Expected list for 'init_value' argument to " + "'xla_variadic_reduce' Op, not %r." 
% init_value) + if len(init_value) != _attr_N: + raise ValueError( + "List argument 'init_value' to 'xla_variadic_reduce' Op with length %d " + "must match length %d of argument 'input'." % + (len(init_value), _attr_N)) + if not isinstance(dimensions_to_reduce, (list, tuple)): + raise TypeError( + "Expected list for 'dimensions_to_reduce' argument to " + "'xla_variadic_reduce' Op, not %r." % dimensions_to_reduce) + dimensions_to_reduce = [_execute.make_int(_i, "dimensions_to_reduce") for _i in dimensions_to_reduce] + _attr_T, _inputs_T = _execute.args_to_matching_eager(list(input) + list(init_value), ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, _dtypes.bool, ]) + _inputs_T = [_inputs_T[:_attr_N]] + _inputs_T[_attr_N:] + _inputs_T = _inputs_T[:1] + [_inputs_T[1:]] + (input, init_value) = _inputs_T + _inputs_flat = list(input) + list(init_value) + _attrs = ("N", _attr_N, "T", _attr_T, "dimensions_to_reduce", + dimensions_to_reduce, "reducer", reducer) + _result = _execute.execute(b"XlaVariadicReduce", _attr_N, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaVariadicReduce", _inputs_flat, _attrs, _result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_variadic_reduce_v2') +def xla_variadic_reduce_v2(inputs, init_values, dimensions_to_reduce, reducer, name=None): + r"""Wraps the variadic XLA Reduce operator. + + Semantics are documented at + https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce. + + This is an expanded version of XlaVariadicReduce, with support for + operands of different dtypes, and improved shape inference. 
+ + Args: + inputs: A list of `Tensor` objects. the input tensor(s) + init_values: A list of `Tensor` objects. Must have the same type as `inputs`. + scalar initial value(s) for the reduction + dimensions_to_reduce: A list of `ints`. + dimension numbers over which to reduce + reducer: A function decorated with @Defun. a reducer function to apply + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects. Has the same type as `inputs`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaVariadicReduceV2", name, inputs, init_values, + "dimensions_to_reduce", dimensions_to_reduce, "reducer", reducer) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_variadic_reduce_v2( + (inputs, init_values, dimensions_to_reduce, reducer, name,), None) + if _result is not NotImplemented: + return _result + return xla_variadic_reduce_v2_eager_fallback( + inputs, init_values, dimensions_to_reduce=dimensions_to_reduce, + reducer=reducer, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_variadic_reduce_v2, (), dict(inputs=inputs, + init_values=init_values, + dimensions_to_reduce=dimensions_to_reduce, + reducer=reducer, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_variadic_reduce_v2( + (inputs, init_values, dimensions_to_reduce, reducer, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ if not isinstance(dimensions_to_reduce, (list, tuple)): + raise TypeError( + "Expected list for 'dimensions_to_reduce' argument to " + "'xla_variadic_reduce_v2' Op, not %r." % dimensions_to_reduce) + dimensions_to_reduce = [_execute.make_int(_i, "dimensions_to_reduce") for _i in dimensions_to_reduce] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaVariadicReduceV2", inputs=inputs, init_values=init_values, + dimensions_to_reduce=dimensions_to_reduce, + reducer=reducer, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_variadic_reduce_v2, (), dict(inputs=inputs, + init_values=init_values, + dimensions_to_reduce=dimensions_to_reduce, + reducer=reducer, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op.get_attr("T"), "dimensions_to_reduce", + _op.get_attr("dimensions_to_reduce"), "reducer", + _op.get_attr("reducer")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaVariadicReduceV2", _inputs_flat, _attrs, _result) + return _result + +XlaVariadicReduceV2 = tf_export("raw_ops.XlaVariadicReduceV2")(_ops.to_raw_op(xla_variadic_reduce_v2)) +_dispatcher_for_xla_variadic_reduce_v2 = xla_variadic_reduce_v2._tf_type_based_dispatcher.Dispatch + + +def xla_variadic_reduce_v2_eager_fallback(inputs, init_values, dimensions_to_reduce, reducer, name, ctx): + if not isinstance(dimensions_to_reduce, (list, tuple)): + raise TypeError( + "Expected list for 'dimensions_to_reduce' argument to " + "'xla_variadic_reduce_v2' Op, not %r." 
% dimensions_to_reduce) + dimensions_to_reduce = [_execute.make_int(_i, "dimensions_to_reduce") for _i in dimensions_to_reduce] + _attr_T, (inputs, init_values) = _execute.args_to_mixed_eager_tensors((inputs, init_values), ctx) + _inputs_flat = list(inputs) + list(init_values) + _attrs = ("T", _attr_T, "dimensions_to_reduce", dimensions_to_reduce, + "reducer", reducer) + _result = _execute.execute(b"XlaVariadicReduceV2", len(inputs), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaVariadicReduceV2", _inputs_flat, _attrs, _result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_variadic_sort') +def xla_variadic_sort(inputs, dimension: Annotated[Any, _atypes.Int32], comparator, is_stable: bool, name=None): + r"""Wraps the XLA Sort operator, documented at + + https://www.tensorflow.org/performance/xla/operation_semantics#sort + . + + Sorts one or more tensors, with support for custom comparator, dimension, and + is_stable attributes. + + Args: + inputs: A list of `Tensor` objects. + A list of `Tensor` of identical shape but possibly different types. + dimension: A `Tensor` of type `int32`. + The dimension along which to sort. Must be a compile-time constant. + comparator: A function decorated with @Defun. + A comparator function to apply to 2*N scalars and returning a + boolean. N is the number of sort inputs. If you want to sort in ascending + order then the comparator should perform a less-than comparison. + is_stable: A `bool`. Whether to use stable sort. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects. Has the same type as `inputs`. + A list of `Tensor` of same shape and types as the `input`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaVariadicSort", name, inputs, dimension, "comparator", + comparator, "is_stable", is_stable) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_variadic_sort( + (inputs, dimension, comparator, is_stable, name,), None) + if _result is not NotImplemented: + return _result + return xla_variadic_sort_eager_fallback( + inputs, dimension, comparator=comparator, is_stable=is_stable, + name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_variadic_sort, (), dict(inputs=inputs, dimension=dimension, + comparator=comparator, + is_stable=is_stable, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_variadic_sort( + (inputs, dimension, comparator, is_stable, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ is_stable = _execute.make_bool(is_stable, "is_stable") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaVariadicSort", inputs=inputs, dimension=dimension, + comparator=comparator, is_stable=is_stable, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_variadic_sort, (), dict(inputs=inputs, dimension=dimension, + comparator=comparator, + is_stable=is_stable, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("T", _op.get_attr("T"), "comparator", + _op.get_attr("comparator"), "is_stable", + _op._get_attr_bool("is_stable")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaVariadicSort", _inputs_flat, _attrs, _result) + return _result + +XlaVariadicSort = tf_export("raw_ops.XlaVariadicSort")(_ops.to_raw_op(xla_variadic_sort)) +_dispatcher_for_xla_variadic_sort = xla_variadic_sort._tf_type_based_dispatcher.Dispatch + + +def xla_variadic_sort_eager_fallback(inputs, dimension: Annotated[Any, _atypes.Int32], comparator, is_stable: bool, name, ctx): + is_stable = _execute.make_bool(is_stable, "is_stable") + _attr_T, inputs = _execute.convert_to_mixed_eager_tensors(inputs, ctx) + dimension = _ops.convert_to_tensor(dimension, _dtypes.int32) + _inputs_flat = list(inputs) + [dimension] + _attrs = ("T", _attr_T, "comparator", comparator, "is_stable", is_stable) + _result = _execute.execute(b"XlaVariadicSort", len(inputs), + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaVariadicSort", _inputs_flat, _attrs, _result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('xla_while') +def xla_while(input, cond, body, name=None): + r"""output = input; While (Cond(output)) { output = Body(output) } + + Args: + input: A list of `Tensor` objects. 
+ A list of input tensors whose types are T. + cond: A function decorated with @Defun. + A function takes 'input' and returns a tensor. If the tensor is + a scalar of non-boolean, the scalar is converted to a boolean + according to the following rule: if the scalar is a numerical + value, non-zero means True and zero means False; if the scalar is + a string, non-empty means True and empty means False. If the + tensor is not a scalar, non-emptiness means True and False + otherwise. + body: A function decorated with @Defun. + A function that takes a list of tensors and returns another + list of tensors. Both lists have the same types as specified by T. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects. Has the same type as `input`. + A list of output tensors whose types are T. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "XlaWhile", name, input, "cond", cond, "body", body) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_xla_while( + (input, cond, body, name,), None) + if _result is not NotImplemented: + return _result + return xla_while_eager_fallback( + input, cond=cond, body=body, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_while, (), dict(input=input, cond=cond, body=body, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_xla_while( + (input, cond, body, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "XlaWhile", input=input, cond=cond, body=body, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + xla_while, (), dict(input=input, cond=cond, body=body, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("T", _op.get_attr("T"), "cond", _op.get_attr("cond"), "body", + _op.get_attr("body")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "XlaWhile", _inputs_flat, _attrs, _result) + return _result + +XlaWhile = tf_export("raw_ops.XlaWhile")(_ops.to_raw_op(xla_while)) +_dispatcher_for_xla_while = xla_while._tf_type_based_dispatcher.Dispatch + + +def xla_while_eager_fallback(input, cond, body, name, ctx): + _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, ctx) + _inputs_flat = list(input) + _attrs = ("T", _attr_T, "cond", cond, "body", body) + _result = _execute.execute(b"XlaWhile", len(input), inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "XlaWhile", _inputs_flat, _attrs, _result) + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d4ade38e8f3e0470fb5bc6afbbafc60b3c6b49e Binary files /dev/null and 
b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/__pycache__/xla.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/__pycache__/xla.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a64952f4d07b518aef7ee7f5d78693f1e5105a3 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/__pycache__/xla.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/xla.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/xla.py new file mode 100644 index 0000000000000000000000000000000000000000..38d7ae6f7983af0aa2b4b9399af02a8d213416fb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/python/xla.py @@ -0,0 +1,726 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Experimental library that exposes XLA operations directly in TensorFlow. + +It is sometimes useful to be able to build HLO programs directly from +TensorFlow. This file provides Tensorflow operators that mirror the semantics of +HLO operators as closely as possible. 
+ +Note: Most of the operators defined in this module are used by the jax2tf +converter (see go/jax2tf for details) and are used in SavedModel produced +by jax2tf. Hence, we need to maintain backwards compatibility for these +operators. Please reach out to the JAX team if you want to make changes. +""" + +from tensorflow.compiler.tf2xla.ops import gen_xla_ops +from tensorflow.compiler.xla import xla_data_pb2 +from tensorflow.core.framework import attr_value_pb2 +from tensorflow.python.framework import constant_op +from tensorflow.python.framework import dtypes +from tensorflow.python.framework import ops +from tensorflow.python.ops import array_ops +from tensorflow.python.ops import bitwise_ops +from tensorflow.python.ops import gen_math_ops +from tensorflow.python.ops import gen_random_ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import random_ops +from tensorflow.python.ops import random_ops_util +from tensorflow.python.ops import special_math_ops +from tensorflow.python.ops.numpy_ops import np_utils + +# TODO(phawkins): provide wrappers for all XLA operators. Currently the missing +# ops include: +# infeed/outfeed (available via tf.contrib.tpu) +# collectives, e.g., cross-replica-sum (available via tf.contrib.tpu) +# conditional +# gather/scatter +# collapse + +# This file reuses builtin names (following XLA's names, so we can call things +# like xla.max), so we capture the builtin versions here. +# pylint: disable=redefined-builtin +_max = max +_min = min +_slice = slice # pylint: disable=invalid-name + +constant = constant_op.constant + +# Unary operators. + +# For most arithmetic operators there is a TensorFlow operator +# that exactly corresponds to each XLA operator. Rather than defining +# XLA-specific variants, we reuse the corresponding TensorFlow operator. 
+# TODO(phawkins): It would be even better to have TensorFlow operators that 1:1 +# wrap every HLO operator, because that would allow us to be confident that the +# semantics match. + + +def _unary_op(fn): + """Wrapper that restricts `fn` to have the correct signature.""" + + def unary_op_wrapper(x, name=None): + return fn(x, name=name) + + return unary_op_wrapper + + +abs = _unary_op(math_ops.abs) +# TODO(phawkins): implement clz. +conj = _unary_op(math_ops.conj) +cos = _unary_op(math_ops.cos) +ceil = _unary_op(math_ops.ceil) +digamma = _unary_op(math_ops.digamma) +erf = _unary_op(math_ops.erf) +erfc = _unary_op(math_ops.erfc) +erfinv = _unary_op(math_ops.erfinv) +ndtri = _unary_op(math_ops.ndtri) +exp = _unary_op(math_ops.exp) +expm1 = _unary_op(math_ops.expm1) +floor = _unary_op(math_ops.floor) +imag = _unary_op(math_ops.imag) +is_finite = _unary_op(math_ops.is_finite) +lgamma = _unary_op(math_ops.lgamma) +log = _unary_op(math_ops.log) +log1p = _unary_op(math_ops.log1p) +logical_not = _unary_op(math_ops.logical_not) +neg = _unary_op(math_ops.neg) +real = _unary_op(math_ops.real) +# TODO(phawkins): unlike xla::Round, this rounds to even instead of zero for +# numbers halfway between two integers. +round = _unary_op(math_ops.round) +sin = _unary_op(math_ops.sin) +sign = _unary_op(math_ops.sign) +tan = _unary_op(math_ops.tan) +tanh = _unary_op(math_ops.tanh) + +# Bessel +bessel_i0e = _unary_op(special_math_ops.bessel_i0e) +bessel_i1e = _unary_op(special_math_ops.bessel_i1e) + +# Binary operators + +# The main difference between TensorFlow and XLA binary ops is the broadcasting +# semantics. TensorFlow uses Numpy-style broadcasting semantics, whereas XLA +# requires an explicit specification of which dimensions to broadcast if the +# arguments have different ranks. 
+ + +def _broadcasting_binary_op(fn): + """Wraps a binary Tensorflow operator and performs XLA-style broadcasting.""" + + def broadcasting_binary_op_wrapper(x, y, broadcast_dims=None, name=None): + """Inner wrapper function.""" + broadcast_dims = broadcast_dims or [] + broadcast_dims = ops.convert_to_tensor(broadcast_dims, dtypes.int64) + # Rather than relying on having static shape information in the TensorFlow + # graph, we use an XlaBroadcastHelper op that can compute the correct shapes + # at JIT compilation time. + x, y = gen_xla_ops.xla_broadcast_helper(x, y, broadcast_dims) + return fn(x, y, name=name) + + return broadcasting_binary_op_wrapper + + +# Map from TF signed types to TF unsigned types. +_SIGNED_TO_UNSIGNED_TABLE = { + dtypes.int8: dtypes.uint8, + dtypes.int16: dtypes.uint16, + dtypes.int32: dtypes.uint32, + dtypes.int64: dtypes.uint64, +} + +# Map from TF unsigned types to TF signed types. +_UNSIGNED_TO_SIGNED_TABLE = { + dtypes.uint8: dtypes.int8, + dtypes.uint16: dtypes.int16, + dtypes.uint32: dtypes.int32, + dtypes.uint64: dtypes.int64, +} + + +def _shift_right_logical_helper(x, y, name=None): + """Performs an integer right logical shift irrespective of input type.""" + assert y.dtype == x.dtype + dtype = x.dtype + signed = dtype in _SIGNED_TO_UNSIGNED_TABLE + if signed: + unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[dtype] + x = math_ops.cast(x, unsigned_dtype) + y = math_ops.cast(y, unsigned_dtype) + output = bitwise_ops.right_shift(x, y, name=name) + if signed: + output = math_ops.cast(output, dtype) + return output + + +def _shift_right_arithmetic_helper(x, y, name=None): + """Performs an integer right arithmetic shift irrespective of input type.""" + assert y.dtype == x.dtype + dtype = x.dtype + unsigned = dtype in _UNSIGNED_TO_SIGNED_TABLE + if unsigned: + signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[dtype] + x = math_ops.cast(x, signed_dtype) + y = math_ops.cast(y, signed_dtype) + output = bitwise_ops.right_shift(x, y, name=name) + if 
unsigned: + output = math_ops.cast(output, dtype) + return output + + +add = _broadcasting_binary_op(math_ops.add) +sub = _broadcasting_binary_op(math_ops.sub) +mul = _broadcasting_binary_op(math_ops.mul) +div = _broadcasting_binary_op(math_ops.div) +rem = _broadcasting_binary_op(gen_math_ops.mod) +max = _broadcasting_binary_op(math_ops.maximum) +min = _broadcasting_binary_op(math_ops.minimum) +atan2 = _broadcasting_binary_op(math_ops.atan2) +complex = _broadcasting_binary_op(math_ops.complex) +logical_and = _broadcasting_binary_op(math_ops.logical_and) +logical_or = _broadcasting_binary_op(math_ops.logical_or) +logical_xor = _broadcasting_binary_op(math_ops.logical_xor) +eq = _broadcasting_binary_op(math_ops.equal) +ne = _broadcasting_binary_op(math_ops.not_equal) +ge = _broadcasting_binary_op(math_ops.greater_equal) +gt = _broadcasting_binary_op(math_ops.greater) +le = _broadcasting_binary_op(math_ops.less_equal) +lt = _broadcasting_binary_op(math_ops.less) +pow = _broadcasting_binary_op(math_ops.pow) +shift_left = _broadcasting_binary_op(bitwise_ops.left_shift) +shift_right_logical = _broadcasting_binary_op(_shift_right_logical_helper) +shift_right_arithmetic = _broadcasting_binary_op(_shift_right_arithmetic_helper) + +igamma = _broadcasting_binary_op(math_ops.igamma) +igamma_grad_a = _broadcasting_binary_op(gen_math_ops.igamma_grad_a) +random_gamma_grad = _broadcasting_binary_op(gen_random_ops.random_gamma_grad) +igammac = _broadcasting_binary_op(math_ops.igammac) +polygamma = _broadcasting_binary_op(math_ops.polygamma) +zeta = _broadcasting_binary_op(math_ops.zeta) + + +def _binary_op(fn): + """Wrapper that restricts `fn` to have the correct signature.""" + + def binary_op_wrapper(x, y, name=None): + return fn(x, y, name=name) + + return binary_op_wrapper + + +transpose = _binary_op(array_ops.transpose) +rev = _binary_op(array_ops.reverse) + +bitcast_convert_type = array_ops.bitcast + + +def broadcast(x, dims, name=None): + x = ops.convert_to_tensor(x) + shape 
= array_ops.concat( + [constant_op.constant(dims), array_ops.shape(x)], axis=0 + ) + return array_ops.broadcast_to(x, shape, name=name) + + +def clamp(a, x, b, name=None): + return min(max(a, x, name=name), b, name=name) + + +concatenate = array_ops.concat + + +def conv( + lhs, + rhs, + window_strides, + padding, + lhs_dilation, + rhs_dilation, + dimension_numbers, + feature_group_count=1, + precision_config=None, + preferred_element_type=None, + name=None, + use_v2=False, + batch_group_count=1, +): + """Wraps the XLA ConvGeneralDilated operator. + + ConvGeneralDilated is the most general form of XLA convolution and is + documented at + https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + + Args: + lhs: the input tensor + rhs: the kernel tensor + window_strides: the inter-window strides + padding: the padding to apply at the start and end of each input dimensions + lhs_dilation: dilation to apply between input elements + rhs_dilation: dilation to apply between kernel elements + dimension_numbers: a `ConvolutionDimensionNumbers` proto. + feature_group_count: number of feature groups for grouped convolution. + precision_config: a `xla.PrecisionConfig` proto. + preferred_element_type: the result `dtype`. + name: an optional name for the operator. + use_v2: an optional request to use the XlaConvV2 op even if not necessary. + batch_group_count: number of batch groups or grouped filters. + + Returns: + A tensor representing the output of the convolution. 
+ """ + precision_config_proto = "" + if precision_config: + precision_config_proto = precision_config.SerializeToString() + needs_v2 = ( + preferred_element_type + or (lhs.dtype != rhs.dtype) + or batch_group_count > 1 + ) + if preferred_element_type is None: + preferred_element_type = np_utils.result_type(lhs.dtype, rhs.dtype) + if needs_v2 or use_v2: + return gen_xla_ops.xla_conv_v2( + lhs, + rhs, + window_strides=window_strides, + padding=padding, + lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, + feature_group_count=feature_group_count, + batch_group_count=batch_group_count, + dimension_numbers=dimension_numbers.SerializeToString(), + precision_config=precision_config_proto, + preferred_element_type=preferred_element_type, + name=name, + ) + return gen_xla_ops.xla_conv( + lhs, + rhs, + window_strides=window_strides, + padding=padding, + lhs_dilation=lhs_dilation, + rhs_dilation=rhs_dilation, + feature_group_count=feature_group_count, + dimension_numbers=dimension_numbers.SerializeToString(), + precision_config=precision_config_proto, + name=name, + ) + + +convert_element_type = math_ops.cast + + +def dot(lhs, rhs, name=None): + return math_ops.tensordot(lhs, rhs, axes=1, name=name) + + +DotDimensionNumbers = xla_data_pb2.DotDimensionNumbers +PrecisionConfig = xla_data_pb2.PrecisionConfig + + +def dot_general( + lhs, + rhs, + dimension_numbers, + precision_config=None, + preferred_element_type=None, + name=None, + use_v2=False, +): + precision_config_proto = "" + if precision_config: + precision_config_proto = precision_config.SerializeToString() + needs_v2 = preferred_element_type or (lhs.dtype != rhs.dtype) + if preferred_element_type is None: + preferred_element_type = np_utils.result_type(lhs.dtype, rhs.dtype) + if needs_v2 or use_v2: + return gen_xla_ops.xla_dot_v2( + lhs, + rhs, + dimension_numbers=dimension_numbers.SerializeToString(), + precision_config=precision_config_proto, + preferred_element_type=preferred_element_type, + name=name, + ) + 
return gen_xla_ops.xla_dot( + lhs, + rhs, + dimension_numbers=dimension_numbers.SerializeToString(), + precision_config=precision_config_proto, + name=name, + ) + + +def self_adjoint_eig(a, lower, max_iter, epsilon): + return gen_xla_ops.xla_self_adjoint_eig(a, lower, max_iter, epsilon) + + +def svd(a, max_iter, epsilon, precision_config=None): + precision_config_proto = "" + if precision_config: + precision_config_proto = precision_config.SerializeToString() + return gen_xla_ops.xla_svd(a, max_iter, epsilon, precision_config_proto) + + +dynamic_slice = gen_xla_ops.xla_dynamic_slice +dynamic_update_slice = gen_xla_ops.xla_dynamic_update_slice +einsum = gen_xla_ops.xla_einsum + +# TODO(phawkins): generalize tf.pad to support interior padding, and then remove +# the XLA-specific pad operator. +pad = gen_xla_ops.xla_pad + + +def random_normal(mu, sigma, dims, name=None): + mu = ops.convert_to_tensor(mu) + return random_ops.random_normal( + dims, mean=mu, stddev=sigma, dtype=mu.dtype, name=name + ) + + +def random_uniform(minval, maxval, dims, name=None): + minval = ops.convert_to_tensor(minval) + return random_ops.random_uniform( + dims, minval, maxval, dtype=minval.dtype, name=name + ) + + +def rng_bit_generator(algorithm, initial_state, shape, dtype): + """Stateless PRNG bit generator. + + Wraps the XLA RngBitGenerator operator, documented at + https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator. + + Args: + algorithm: The PRNG algorithm to use, one of tf.random.Algorithm.{PHILOX, + THREEFRY, AUTO_SELECT}. + initial_state: Initial state for the PRNG algorithm. For THREEFRY, it should + be a u64[2] and for PHILOX a u64[3]. + shape: The output shape of the generated data. + dtype: The type of the tensor. + + Returns: + a tuple with a new state and generated data of the given shape. 
+ """ + alg_int = random_ops_util.convert_alg_to_int(algorithm) + return gen_xla_ops.xla_rng_bit_generator( + alg_int, initial_state, shape, dtype=dtype + ) + + +recv = gen_xla_ops.xla_recv +reduce = gen_xla_ops.xla_reduce +variadic_reduce = gen_xla_ops.xla_variadic_reduce_v2 + +ops.no_gradient("XlaVariadicReduce") + + +def reduce_window( + operand, + init, + reducer, + window_dimensions, + window_strides=None, + base_dilations=None, + window_dilations=None, + padding=None, + name=None, +): + """Wraps the XLA ReduceWindow operator. + + ReduceWindow is documented at + https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow . + + Args: + operand: the input tensor + init: a scalar tensor representing the initial value for the reduction + reducer: a reduction function that combines a pair of scalars. + window_dimensions: shape of the window, as a list of integers + window_strides: inter-window strides, as a list of integers. Optional; if + omitted, defaults to strides of 1. + padding: padding to apply to 'operand'. List of (low, high) pairs of + integers that specify the padding to apply before and after each + dimension. Optional; if omitted, defaults to no padding. + name: the operator name, or None. + + Returns: + A tensor that represents the output of the reduce_window operator. 
+ """ + window_strides = window_strides or [1] * len(window_dimensions) + base_dilations = base_dilations or [1] * len(window_dimensions) + window_dilations = window_dilations or [1] * len(window_dimensions) + padding = padding or [(0, 0)] * len(window_dimensions) + return gen_xla_ops.xla_reduce_window( + input=operand, + init_value=init, + window_dimensions=window_dimensions, + window_strides=window_strides, + base_dilations=base_dilations, + window_dilations=window_dilations, + padding=padding, + computation=reducer, + name=name, + ) + + +replica_id = gen_xla_ops.xla_replica_id + +# Set a static bound for the given input value as a hint to Xla compiler, +# returns the same value. +# Usage: +# def f(t, p): +# p = xla.set_bound(p, 3) # Tells xla the constraint that p <= 3. +# return t[:p] # xla knows the bound of the slice is 3. +set_bound = gen_xla_ops.xla_set_bound + +# Make a static dimension into a xla bounded dynamic dimension. The current +# static dimension size will become the bound and the second operand becomes the +# dynamic size of the dimension. +# +# This should mostly be used for testing. +# +# def f(): +# array = tf.convert_to_tensor([[1, 2, 3, 4, 5]]) +# # Tells xla the valid size of the array is 3. +# dim = 0 +# p = xla_set_dynamic_dimension_size(array, dim, 3) +# assert(reduce_sum(p) == 6) # xla knows only the first 3 elements are valid. +set_dynamic_dimension_size = gen_xla_ops.xla_set_dynamic_dimension_size + +# Inverse of xla_set_dynamic_dimension_size. Make an xla bounded dynamic +# dimension into a static dimension. The bound of the size of dimension +# `dim_index` becomes the static dimension size. 
+remove_dynamic_dimension_size = gen_xla_ops.xla_remove_dynamic_dimension_size + + +def reshape(x, new_sizes, dimensions=None, name=None): + if dimensions is not None: + x = array_ops.transpose(x, dimensions) + x = array_ops.reshape(x, new_sizes, name=name) + return x + + +def select(condition, x, y, name=None): + return array_ops.where(condition, x, y, name) + + +select_and_scatter = gen_xla_ops.xla_select_and_scatter +send = gen_xla_ops.xla_send + + +def slice(x, start_dims, limit_dims, strides): + spec = [ + _slice(start, limit, stride) + for (start, limit, stride) in zip(start_dims, limit_dims, strides) + ] + return x[tuple(spec)] + + +sharding = gen_xla_ops.xla_sharding + + +@ops.RegisterGradient("XlaSharding") +def _sharding_grad(op, grad): + """Gradient for XlaSharding op.""" + sharding_attr = op.get_attr("sharding") + grad_sharding = gen_xla_ops.xla_sharding( + grad, + sharding=sharding_attr, + unspecified_dims=op.get_attr("unspecified_dims"), + ) + # pylint: disable=protected-access + grad_sharding.op._set_attr( + "_XlaSharding", attr_value_pb2.AttrValue(s=sharding_attr) + ) + return [grad_sharding] + + +spmd_full_to_shard_shape = gen_xla_ops.xla_spmd_full_to_shard_shape +spmd_shard_to_full_shape = gen_xla_ops.xla_spmd_shard_to_full_shape + + +@ops.RegisterGradient("XlaSpmdFullToShardShape") +def _spmd_full_to_shard_shape_grad(op, grad): + s2f = gen_xla_ops.xla_spmd_shard_to_full_shape( + grad, + manual_sharding=op.get_attr("manual_sharding"), + full_shape=op.inputs[0].shape.as_list(), + dim=op.get_attr("dim"), + unspecified_dims=op.get_attr("unspecified_dims"), + ) + return [s2f] + + +@ops.RegisterGradient("XlaSpmdShardToFullShape") +def _spmd_shard_to_full_shape_grad(op, grad): + f2s = gen_xla_ops.xla_spmd_full_to_shard_shape( + grad, + manual_sharding=op.get_attr("manual_sharding"), + dim=op.get_attr("dim"), + unspecified_dims=op.get_attr("unspecified_dims"), + ) + return [f2s] + + +sort = gen_xla_ops.xla_sort +key_value_sort = 
gen_xla_ops.xla_key_value_sort +variadic_sort = gen_xla_ops.xla_variadic_sort +while_loop = gen_xla_ops.xla_while +dequantize = gen_xla_ops.xla_dequantize +custom_call = gen_xla_ops.xla_custom_call + + +def custom_call_v2( + call_target_name, + operands, + result_specs, + backend_config=None, + has_side_effect=None, + name=None, +): + """Emits an HLO `CustomCall` operation with multiple outputs. + + See `CustomCall` specification at + https://tensorflow.org/xla/operation_semantics#customcall, + and `mhlo.custom_call` specification at + https://tensorflow.org/mlir/hlo_ops#mhlocustom_call_mlirmhlocustomcallop. + + Args: + call_target_name: Name of the user function. The function signature must + conform to version 3 of the API, see + `API_VERSION_STATUS_RETURNING_UNIFIED`. All operands and results assumed + to be in the default layout. + operands: A sequence of tensors with possibly different types. + result_specs: A sequence of tensor specs for all results. + backend_config: A string that encodes a metadata for the backend. Empty + string by default. + has_side_effect: Indicates whether the custom call has side effects. `False` + by default. + name: Optional name of the operation. + + Returns: + A tuple of output tensors. + """ + return gen_xla_ops.xla_custom_call_v2( + operands=operands, + call_target_name=call_target_name, + backend_config="" if backend_config is None else backend_config, + has_side_effect=False if has_side_effect is None else has_side_effect, + result_dtypes=tuple(spec.dtype for spec in result_specs), + result_shapes=tuple(spec.shape for spec in result_specs), + name=name, + ) + + +# pylint: disable=g-doc-args +# pylint: disable=g-doc-return-or-yield +def call_module( + args, + *, + version=4, + module, + Tout, + Sout, + platforms=(), + function_list=(), + has_token_input_output=False, + disabled_checks=(), +): + """See documentation for the XlaCallModule op. 
+ + https://github.com/search?q=repo%3Atensorflow%2Ftensorflow+path%3Axla_ops.cc+xlacallmodule&type=code + """ + res = gen_xla_ops.xla_call_module( + args, + version=version, + module=module, + dim_args_spec=(), + Tout=Tout, + Sout=Sout, + platforms=platforms, + function_list=function_list, + has_token_input_output=has_token_input_output, + disabled_checks=disabled_checks, + ) + # Since XLACallModule op is stateful, zero return function will return the TF + # op under tf.function. It creates trouble for downstream codes. + # Here we force it return empty tuple to work around it. + # TODO(johnqiangzhang): Figure out a better way to handle control dependency. + if isinstance(res, ops.Operation): + res = () + return res + + +def call_module_maximum_supported_version(): + """Maximum version of XlaCallModule op supported. + + See versioning details documentation for the XlaCallModule op at: + https://github.com/search?q=repo%3Atensorflow%2Ftensorflow+path%3Axla_call_module+%22int+VERSION_MAXIMUM_SUPPORTED%22&type=code + """ + return 9 + +# pylint: enable=g-doc-args +# pylint: enable=g-doc-return-or-yield + + +def call_module_disable_check_platform(): + # For use with xla_call_module.disabled_checks. 
+ return "platform" + + +def gather( + operand, + start_indices, + dimension_numbers, + slice_sizes, + indices_are_sorted=False, + name=None, +): + return gen_xla_ops.xla_gather( + operand, + start_indices, + slice_sizes=slice_sizes, + dimension_numbers=dimension_numbers.SerializeToString(), + indices_are_sorted=indices_are_sorted, + name=name, + ) + + +def scatter( + operand, + scatter_indices, + updates, + update_computation, + dimension_numbers, + indices_are_sorted=False, + name=None, +): + return gen_xla_ops.xla_scatter( + operand, + scatter_indices, + updates, + update_computation=update_computation, + dimension_numbers=dimension_numbers.SerializeToString(), + indices_are_sorted=indices_are_sorted, + name=name, + ) + + +def optimization_barrier(*args): + return gen_xla_ops.xla_optimization_barrier(args) + + +def reduce_precision(operand, exponent_bits, mantissa_bits): + return gen_xla_ops.xla_reduce_precision(operand, exponent_bits, mantissa_bits) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/tf2xla_pb2.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/tf2xla_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..5be7d74bd5adc6a11330569cf0a6e5f4c0b3a5af --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/tf2xla/tf2xla_pb2.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorflow/compiler/tf2xla/tf2xla.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2 +from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'tensorflow/compiler/tf2xla/tf2xla.proto\x12\x11tensorflow.tf2xla\x1a,tensorflow/core/framework/tensor_shape.proto\x1a%tensorflow/core/framework/types.proto\"3\n\x08TensorId\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x14\n\x0coutput_index\x18\x02 \x01(\x03\"\x8e\x01\n\x04\x46\x65\x65\x64\x12\'\n\x02id\x18\x01 \x01(\x0b\x32\x1b.tensorflow.tf2xla.TensorId\x12+\n\x05shape\x18\x02 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\"\n\x04type\x18\x04 \x01(\x0e\x32\x14.tensorflow.DataType\"\x8f\x01\n\x05\x46\x65tch\x12\'\n\x02id\x18\x01 \x01(\x0b\x32\x1b.tensorflow.tf2xla.TensorId\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05shape\x18\x03 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\"\n\x04type\x18\x04 \x01(\x0e\x32\x14.tensorflow.DataType\"\x8e\x01\n\x08Variable\x12\x11\n\tnode_name\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12+\n\x05shape\x18\x03 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\"\n\x04type\x18\x04 \x01(\x0e\x32\x14.tensorflow.DataType\x12\x10\n\x08readonly\x18\x05 \x01(\x08\"\x87\x01\n\x06\x43onfig\x12%\n\x04\x66\x65\x65\x64\x18\x01 \x03(\x0b\x32\x17.tensorflow.tf2xla.Feed\x12\'\n\x05\x66\x65tch\x18\x02 \x03(\x0b\x32\x18.tensorflow.tf2xla.Fetch\x12-\n\x08variable\x18\x03 
\x03(\x0b\x32\x1b.tensorflow.tf2xla.VariableB*\n\x15org.tensorflow.tf2xlaB\x0cTf2XlaProtosP\x01\xf8\x01\x01\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.compiler.tf2xla.tf2xla_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\025org.tensorflow.tf2xlaB\014Tf2XlaProtosP\001\370\001\001' + _TENSORID._serialized_start=147 + _TENSORID._serialized_end=198 + _FEED._serialized_start=201 + _FEED._serialized_end=343 + _FETCH._serialized_start=346 + _FETCH._serialized_end=489 + _VARIABLE._serialized_start=492 + _VARIABLE._serialized_end=634 + _CONFIG._serialized_start=637 + _CONFIG._serialized_end=772 +# @@protoc_insertion_point(module_scope) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4371c682a072ec22b7156af86887ef5c642d2464 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/__pycache__/xla_data_pb2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/__pycache__/xla_data_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..049100c0d6c95bdb5738ae68390eceb742861d08 Binary files /dev/null and 
b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/__pycache__/xla_data_pb2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23d5312bcf8e5de4ae76025b5d2152898693232c Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/__pycache__/hlo_pb2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/__pycache__/hlo_pb2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5f78e509896badcb7d5484efa8bcc7753ef9310 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/__pycache__/hlo_pb2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/hlo_pb2.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/hlo_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..56fbab59735c1079d63ce622562b62a13e47de69 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/service/hlo_pb2.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xla/service/hlo.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 +from tensorflow.compiler.xla import xla_data_pb2 as xla_dot_xla__data__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15xla/service/hlo.proto\x12\x03xla\x1a\x19google/protobuf/any.proto\x1a\x12xla/xla_data.proto\"\xc5\x16\n\x13HloInstructionProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06opcode\x18\x02 \x01(\t\x12\x1e\n\x05shape\x18\x03 \x01(\x0b\x32\x0f.xla.ShapeProto\x12!\n\x08metadata\x18\x07 \x01(\x0b\x32\x0f.xla.OpMetadata\x12\"\n\x07literal\x18\x08 \x01(\x0b\x32\x11.xla.LiteralProto\x12\x18\n\x10parameter_number\x18\t \x01(\x03\x12\x13\n\x0b\x66usion_kind\x18\x0b \x01(\t\x12\x13\n\x0btuple_index\x18\r \x01(\x03\x12\x12\n\ndimensions\x18\x0e \x03(\x03\x12\x1b\n\x06window\x18\x0f \x01(\x0b\x32\x0b.xla.Window\x12G\n\x1d\x63onvolution_dimension_numbers\x18\x10 \x01(\x0b\x32 .xla.ConvolutionDimensionNumbers\x12\x1b\n\x13\x66\x65\x61ture_group_count\x18\x32 \x01(\x03\x12\x19\n\x11\x62\x61tch_group_count\x18: \x01(\x03\x12\x42\n\x10slice_dimensions\x18\x11 \x03(\x0b\x32(.xla.HloInstructionProto.SliceDimensions\x12\x15\n\rexponent_bits\x18\x12 \x01(\x05\x12\x15\n\rmantissa_bits\x18\x13 \x01(\x05\x12\x1b\n\x13\x64ynamic_slice_sizes\x18\x14 \x03(\x03\x12*\n\x0epadding_config\x18\x15 \x01(\x0b\x32\x12.xla.PaddingConfig\x12\x16\n\x0eoutfeed_config\x18\x16 \x01(\x0c\x12-\n\x0c\x64istribution\x18\x17 \x01(\x0e\x32\x17.xla.RandomDistribution\x12\x0f\n\x07\x65psilon\x18\x18 \x01(\x02\x12\x15\n\rfeature_index\x18\x19 \x01(\x03\x12\x12\n\nchannel_id\x18\x1a 
\x01(\x03\x12\x15\n\rinfeed_config\x18\x1b \x01(\x0c\x12\x1a\n\x12\x63ustom_call_target\x18\x1c \x01(\t\x12&\n\routfeed_shape\x18\x1d \x01(\x0b\x32\x0f.xla.ShapeProto\x12\x37\n\x15\x64ot_dimension_numbers\x18\x1e \x01(\x0b\x32\x18.xla.DotDimensionNumbers\x12\x1e\n\x08\x66\x66t_type\x18\x1f \x01(\x0e\x32\x0c.xla.FftType\x12\x12\n\nfft_length\x18 \x03(\x03\x12\x1c\n\x14\x63omparison_direction\x18? \x01(\t\x12=\n\x18gather_dimension_numbers\x18! \x01(\x0b\x32\x1b.xla.GatherDimensionNumbers\x12\x1a\n\x12gather_slice_sizes\x18\" \x03(\x03\x12\n\n\x02id\x18# \x01(\x03\x12\x13\n\x0boperand_ids\x18$ \x03(\x03\x12\x1f\n\x17\x63ontrol_predecessor_ids\x18% \x03(\x03\x12\x1e\n\x16\x63\x61lled_computation_ids\x18& \x03(\x03\x12!\n\x08sharding\x18( \x01(\x0b\x32\x0f.xla.OpSharding\x12\x16\n\x0e\x62\x61\x63kend_config\x18+ \x01(\x0c\x12)\n\x0ereplica_groups\x18\x31 \x03(\x0b\x32\x11.xla.ReplicaGroup\x12\x19\n\rall_reduce_id\x18- \x01(\x03\x42\x02\x18\x01\x12\x1d\n\x15use_global_device_ids\x18G \x01(\x08\x12\x18\n\x10is_host_transfer\x18/ \x01(\x08\x12\x11\n\tis_stable\x18< \x01(\x08\x12?\n\x19scatter_dimension_numbers\x18\x30 \x01(\x0b\x32\x1c.xla.ScatterDimensionNumbers\x12.\n\x10precision_config\x18\x33 \x01(\x0b\x32\x14.xla.PrecisionConfig\x12.\n\x13source_target_pairs\x18\x34 \x03(\x0b\x32\x11.xla.SourceTarget\x12.\n\x15\x64omain_entry_sharding\x18\x36 \x01(\x0b\x32\x0f.xla.OpSharding\x12-\n\x14\x64omain_exit_sharding\x18\x37 \x01(\x0b\x32\x0f.xla.OpSharding\x12\x18\n\x10\x63onstrain_layout\x18\x38 \x01(\x08\x12\x33\n\x1aoperand_shapes_with_layout\x18\x39 \x03(\x0b\x32\x0f.xla.ShapeProto\x12=\n\x18triangular_solve_options\x18; \x01(\x0b\x32\x1b.xla.TriangularSolveOptions\x12.\n\x10\x63holesky_options\x18> \x01(\x0b\x32\x14.xla.CholeskyOptions\x12\x38\n\x15parameter_replication\x18= \x01(\x0b\x32\x19.xla.ParameterReplication\x12#\n\x1b\x63ustom_call_has_side_effect\x18\x41 \x01(\x08\x12;\n\x17output_operand_aliasing\x18J 
\x03(\x0b\x32\x1a.xla.OutputOperandAliasing\x12\x35\n\x14\x63ustom_call_schedule\x18L \x01(\x0e\x32\x17.xla.CustomCallSchedule\x12\r\n\x05\x64\x65lta\x18\x42 \x01(\x03\x12\x1a\n\x12indices_are_sorted\x18\x43 \x01(\x08\x12\x34\n\x13\x66rontend_attributes\x18\x44 \x01(\x0b\x32\x17.xla.FrontendAttributes\x12\x16\n\x0eunique_indices\x18\x45 \x01(\x08\x12+\n\rrng_algorithm\x18\x46 \x01(\x0e\x32\x14.xla.RandomAlgorithm\x12\x17\n\x0f\x63omparison_type\x18H \x01(\t\x12%\n\x19is_cross_program_prefetch\x18I \x01(\x08\x42\x02\x18\x01\x12&\n\x1c\x63ross_program_prefetch_index\x18P \x01(\x05H\x00\x12&\n\x0cpadding_type\x18K \x01(\x0e\x32\x10.xla.PaddingType\x12:\n\x17\x63ustom_call_api_version\x18M \x01(\x0e\x32\x19.xla.CustomCallApiVersion\x12\x1e\n\x16\x61sync_execution_thread\x18O \x01(\t\x12\t\n\x01k\x18Q \x01(\x03\x12\x0f\n\x07largest\x18U \x01(\x08\x12*\n\x0estatistics_viz\x18R \x01(\x0b\x32\x12.xla.StatisticsViz\x12-\n\x0c\x64ot_sparsity\x18V \x01(\x0b\x32\x17.xla.SparsityDescriptor\x1a?\n\x0fSliceDimensions\x12\r\n\x05start\x18\x01 \x01(\x03\x12\r\n\x05limit\x18\x02 \x01(\x03\x12\x0e\n\x06stride\x18\x03 \x01(\x03\x42\'\n%optional_cross_program_prefetch_indexJ\x04\x08\n\x10\x0bJ\x04\x08\x0c\x10\rJ\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07J\x04\x08,\x10-J\x04\x08\x35\x10\x36J\x04\x08.\x10/J\x04\x08)\x10*J\x04\x08*\x10+J\x04\x08@\x10\x41J\x04\x08N\x10OJ\x04\x08S\x10TJ\x04\x08T\x10UR\x0eparameter_nameR\x1e\x66used_instructions_computationR\roperand_namesR\x19\x63ontrol_predecessor_namesR\x18\x63\x61lled_computation_namesR\x11replica_group_idsR\x12\x63ustom_call_opaqueR\x12\x61ll_reduce_barrier\"\xe9\x01\n\x13HloComputationProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\x0cinstructions\x18\x02 \x03(\x0b\x32\x18.xla.HloInstructionProto\x12-\n\rprogram_shape\x18\x04 \x01(\x0b\x32\x16.xla.ProgramShapeProto\x12\n\n\x02id\x18\x05 \x01(\x03\x12\x0f\n\x07root_id\x18\x06 \x01(\x03\x12\x1d\n\x15is_fusion_computation\x18\x07 
\x01(\x08\x12\x18\n\x10\x65xecution_thread\x18\x08 \x01(\tJ\x04\x08\x03\x10\x04R\troot_name\"\xd8\x01\n\x10HloScheduleProto\x12\x37\n\tsequences\x18\x01 \x03(\x0b\x32$.xla.HloScheduleProto.SequencesEntry\x1a.\n\x13InstructionSequence\x12\x17\n\x0finstruction_ids\x18\x01 \x03(\x03\x1a[\n\x0eSequencesEntry\x12\x0b\n\x03key\x18\x01 \x01(\x03\x12\x38\n\x05value\x18\x02 \x01(\x0b\x32).xla.HloScheduleProto.InstructionSequence:\x02\x38\x01\"\xdb\x01\n\x18HloInputOutputAliasProto\x12>\n\x07\x65ntries\x18\x01 \x03(\x0b\x32-.xla.HloInputOutputAliasProto.AliasEntryProto\x1a\x7f\n\x0f\x41liasEntryProto\x12\x1a\n\x12output_shape_index\x18\x01 \x03(\x03\x12\x18\n\x10parameter_number\x18\x02 \x01(\x03\x12\x1d\n\x15parameter_shape_index\x18\x03 \x03(\x03\x12\x17\n\x04kind\x18\x04 \x01(\x0e\x32\t.xla.Kind\"\xa8\x01\n\x13HloBufferDonorProto\x12?\n\x07\x65ntries\x18\x01 \x03(\x0b\x32..xla.HloBufferDonorProto.BufferDonorEntryProto\x1aP\n\x15\x42ufferDonorEntryProto\x12\x18\n\x10parameter_number\x18\x01 \x01(\x03\x12\x1d\n\x15parameter_shape_index\x18\x02 \x03(\x03\"H\n\x14\x43rossProgramPrefetch\x12\x11\n\tparameter\x18\x01 \x01(\x03\x12\r\n\x05index\x18\x02 \x03(\x03\x12\x0e\n\x06offset\x18\x03 \x01(\x03\"\xdd\x02\n\x14StackFrameIndexProto\x12\x12\n\nfile_names\x18\x01 \x03(\t\x12\x16\n\x0e\x66unction_names\x18\x02 \x03(\t\x12>\n\x0e\x66ile_locations\x18\x03 \x03(\x0b\x32&.xla.StackFrameIndexProto.FileLocation\x12:\n\x0cstack_frames\x18\x04 \x03(\x0b\x32$.xla.StackFrameIndexProto.StackFrame\x1a\\\n\x0c\x46ileLocation\x12\x14\n\x0c\x66ile_name_id\x18\x01 \x01(\x05\x12\x18\n\x10\x66unction_name_id\x18\x02 \x01(\x05\x12\x0c\n\x04line\x18\x03 \x01(\x05\x12\x0e\n\x06\x63olumn\x18\x04 \x01(\x05\x1a?\n\nStackFrame\x12\x18\n\x10\x66ile_location_id\x18\x01 \x01(\x05\x12\x17\n\x0fparent_frame_id\x18\x02 \x01(\x05\"\xce\x08\n\x0eHloModuleProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1e\n\x16\x65ntry_computation_name\x18\x02 \x01(\t\x12\x1c\n\x14\x65ntry_computation_id\x18\x06 
\x01(\x03\x12.\n\x0c\x63omputations\x18\x03 \x03(\x0b\x32\x18.xla.HloComputationProto\x12\x32\n\x12host_program_shape\x18\x04 \x01(\x0b\x32\x16.xla.ProgramShapeProto\x12\n\n\x02id\x18\x05 \x01(\x03\x12\'\n\x08schedule\x18\x07 \x01(\x0b\x32\x15.xla.HloScheduleProto\x12\x39\n\x12input_output_alias\x18\x08 \x01(\x0b\x32\x1d.xla.HloInputOutputAliasProto\x12.\n\x0c\x62uffer_donor\x18\x12 \x01(\x0b\x32\x18.xla.HloBufferDonorProto\x12;\n\x18\x63ross_program_prefetches\x18\n \x03(\x0b\x32\x19.xla.CrossProgramPrefetch\x12\x12\n\nis_dynamic\x18\x0b \x01(\x08\x12-\n\x14spmd_output_sharding\x18\x0c \x01(\x0b\x32\x0f.xla.OpSharding\x12\x32\n\x19spmd_parameters_shardings\x18\x0e \x03(\x0b\x32\x0f.xla.OpSharding\x12\"\n\x1ause_auto_spmd_partitioning\x18\x10 \x01(\x08\x12\x35\n\x0cprofile_info\x18\r \x03(\x0b\x32\x1f.xla.HloModuleProto.ProfileInfo\x12\x35\n\x11\x64\x65vice_assignment\x18\x0f \x01(\x0b\x32\x1a.xla.DeviceAssignmentProto\x12\x34\n\x11stack_frame_index\x18\x11 \x01(\x0b\x32\x19.xla.StackFrameIndexProto\x12\x34\n\x13\x66rontend_attributes\x18\x13 \x01(\x0b\x32\x17.xla.FrontendAttributes\x1a\xd1\x01\n\x0bProfileInfo\x12\x35\n\x0cprofile_type\x18\x01 \x01(\x0e\x32\x1f.xla.HloModuleProto.ProfileType\x12\x18\n\x10relative_speedup\x18\x02 \x01(\x01\x12*\n\x0eprofile_source\x18\x03 \x01(\x0e\x32\x12.xla.ProfileSource\x12\x30\n\x11\x63ompilation_event\x18\x04 \x01(\x0e\x32\x15.xla.CompilationEvent\x12\x13\n\x0b\x66ingerprint\x18\x05 \x01(\t\"E\n\x0bProfileType\x12\x0b\n\x07INVALID\x10\x00\x12\x08\n\x04\x46LAG\x10\x01\x12\n\n\x06\x46USION\x10\x02\x12\n\n\x06LAYOUT\x10\x03\x12\x07\n\x03\x44OT\x10\x04J\x04\x08\t\x10\nR\x19\x64ynamic_parameter_binding\"\xd0\x01\n\x12LogicalBufferProto\x12\n\n\x02id\x18\x01 \x01(\x03\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x34\n\ndefined_at\x18\x03 \x01(\x0b\x32 .xla.LogicalBufferProto.Location\x12\r\n\x05\x63olor\x18\x04 \x01(\x03\x1a[\n\x08Location\x12\x1c\n\x10instruction_name\x18\x02 \x01(\tB\x02\x18\x01\x12\x16\n\x0einstruction_id\x18\x04 
\x01(\x03\x12\x13\n\x0bshape_index\x18\x03 \x03(\x03J\x04\x08\x01\x10\x02\"\xf8\x02\n\x15\x42ufferAllocationProto\x12\r\n\x05index\x18\x01 \x01(\x03\x12\x0c\n\x04size\x18\x02 \x01(\x03\x12\x17\n\x0fis_thread_local\x18\x03 \x01(\x08\x12\x10\n\x08is_tuple\x18\x0b \x01(\x08\x12&\n\x1eis_entry_computation_parameter\x18\x05 \x01(\x08\x12\x13\n\x0bis_constant\x18\x0c \x01(\x08\x12\x18\n\x10parameter_number\x18\x06 \x01(\x03\x12\x1d\n\x15parameter_shape_index\x18\n \x03(\x03\x12\x16\n\x0emaybe_live_out\x18\x07 \x01(\x08\x12\r\n\x05\x63olor\x18\x08 \x01(\x03\x12\x35\n\x08\x61ssigned\x18\t \x03(\x0b\x32#.xla.BufferAllocationProto.Assigned\x1a\x43\n\x08\x41ssigned\x12\x19\n\x11logical_buffer_id\x18\x01 \x01(\x03\x12\x0e\n\x06offset\x18\x02 \x01(\x03\x12\x0c\n\x04size\x18\x03 \x01(\x03\"\xd6\x02\n\x12HeapSimulatorTrace\x12-\n\x06\x65vents\x18\x01 \x03(\x0b\x32\x1d.xla.HeapSimulatorTrace.Event\x12\x1f\n\x17whole_module_simulation\x18\x02 \x01(\x08\x12\x1f\n\x17\x62uffer_allocation_index\x18\x03 \x01(\x03\x1a\xce\x01\n\x05\x45vent\x12\x30\n\x04kind\x18\x01 \x01(\x0e\x32\".xla.HeapSimulatorTrace.Event.Kind\x12\x11\n\tbuffer_id\x18\x02 \x01(\x03\x12\x18\n\x10\x63omputation_name\x18\x03 \x01(\t\x12\x18\n\x10instruction_name\x18\x04 \x01(\t\x12\x1f\n\x17share_with_canonical_id\x18\x05 \x01(\x03\"+\n\x04Kind\x12\t\n\x05\x41LLOC\x10\x00\x12\x08\n\x04\x46REE\x10\x01\x12\x0e\n\nSHARE_WITH\x10\x02\"M\n\x13HloModuleGroupProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12(\n\x0bhlo_modules\x18\x02 \x03(\x0b\x32\x13.xla.HloModuleProto\"\xd6\x02\n\x15\x42ufferAssignmentProto\x12\x30\n\x0flogical_buffers\x18\x01 \x03(\x0b\x32\x17.xla.LogicalBufferProto\x12>\n\x0e\x62uffer_aliases\x18\x02 \x03(\x0b\x32&.xla.BufferAssignmentProto.BufferAlias\x12\x36\n\x12\x62uffer_allocations\x18\x03 \x03(\x0b\x32\x1a.xla.BufferAllocationProto\x12\x36\n\x15heap_simulator_traces\x18\x04 \x03(\x0b\x32\x17.xla.HeapSimulatorTrace\x1a[\n\x0b\x42ufferAlias\x12\x18\n\x10source_buffer_id\x18\x01 
\x01(\x03\x12\x32\n\x08location\x18\x02 \x01(\x0b\x32 .xla.LogicalBufferProto.Location\"~\n\x08HloProto\x12\'\n\nhlo_module\x18\x01 \x01(\x0b\x32\x13.xla.HloModuleProto\x12\x35\n\x11\x62uffer_assignment\x18\x03 \x01(\x0b\x32\x1a.xla.BufferAssignmentProtoJ\x04\x08\x02\x10\x03R\x0chlo_ordering\"\x8e\x01\n\x0bHloSnapshot\x12\x1a\n\x03hlo\x18\x01 \x01(\x0b\x32\r.xla.HloProto\x12$\n\targuments\x18\x02 \x03(\x0b\x32\x11.xla.LiteralProto\x12!\n\x06result\x18\x03 \x01(\x0b\x32\x11.xla.LiteralProto\x12\x1a\n\x12\x65xecution_platform\x18\x04 \x01(\t\"\xb9\x01\n\x16HloModuleMetadataProto\x12\x1b\n\x13\x63\x61nonical_module_id\x18\x01 \x01(\x03\x12\x19\n\x11module_group_name\x18\x02 \x01(\t\x12\x1a\n\x12original_module_id\x18\x03 \x01(\x03\x12\x1e\n\x16partitioned_module_ids\x18\x04 \x03(\x03\x12+\n\rpass_metadata\x18\x05 \x03(\x0b\x32\x14.xla.HloPassMetadata\"\x99\x02\n\x0fHloPassMetadata\x12\x0f\n\x07pass_id\x18\x01 \x01(\x03\x12\x11\n\tpass_name\x18\x02 \x01(\t\x12\x15\n\rpipeline_name\x18\x03 \x01(\t\x12\x16\n\x0e\x64ump_filenames\x18\x04 \x03(\t\x12\x16\n\x0emodule_changed\x18\x05 \x01(\x08\x12\x11\n\tmodule_id\x18\x06 \x01(\x03\x12\x1f\n\x17module_group_module_ids\x18\x07 \x03(\x03\x12\x1c\n\x14start_timestamp_usec\x18\x08 \x01(\x03\x12\x1a\n\x12\x65nd_timestamp_usec\x18\t \x01(\x03\x12-\n\x0f\x63ustom_metadata\x18\n \x01(\x0b\x32\x14.google.protobuf.Any\"q\n\x19XlaRuntimeExecutableProto\x12-\n\x10hlo_module_proto\x18\x01 \x01(\x0b\x32\x13.xla.HloModuleProto\x12\x10\n\x08obj_file\x18\x03 \x01(\x0c\x12\x13\n\x0bmlir_module\x18\x04 \x01(\t*S\n\x12\x43ustomCallSchedule\x12\x11\n\rSCHEDULE_NONE\x10\x00\x12\x13\n\x0fSCHEDULE_LATEST\x10\x01\x12\x15\n\x11SCHEDULE_EARLIEST\x10\x02*\xb4\x01\n\x14\x43ustomCallApiVersion\x12\x1b\n\x17\x41PI_VERSION_UNSPECIFIED\x10\x00\x12\x18\n\x14\x41PI_VERSION_ORIGINAL\x10\x01\x12 
\n\x1c\x41PI_VERSION_STATUS_RETURNING\x10\x02\x12(\n$API_VERSION_STATUS_RETURNING_UNIFIED\x10\x03\x12\x19\n\x15\x41PI_VERSION_TYPED_FFI\x10\x04*:\n\x04Kind\x12\x13\n\x0fUNDEFINED_ALIAS\x10\x00\x12\r\n\tMAY_ALIAS\x10\x01\x12\x0e\n\nMUST_ALIAS\x10\x02\x42\x03\xf8\x01\x01\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'xla.service.hlo_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\370\001\001' + _HLOINSTRUCTIONPROTO.fields_by_name['all_reduce_id']._options = None + _HLOINSTRUCTIONPROTO.fields_by_name['all_reduce_id']._serialized_options = b'\030\001' + _HLOINSTRUCTIONPROTO.fields_by_name['is_cross_program_prefetch']._options = None + _HLOINSTRUCTIONPROTO.fields_by_name['is_cross_program_prefetch']._serialized_options = b'\030\001' + _HLOSCHEDULEPROTO_SEQUENCESENTRY._options = None + _HLOSCHEDULEPROTO_SEQUENCESENTRY._serialized_options = b'8\001' + _LOGICALBUFFERPROTO_LOCATION.fields_by_name['instruction_name']._options = None + _LOGICALBUFFERPROTO_LOCATION.fields_by_name['instruction_name']._serialized_options = b'\030\001' + _CUSTOMCALLSCHEDULE._serialized_start=7563 + _CUSTOMCALLSCHEDULE._serialized_end=7646 + _CUSTOMCALLAPIVERSION._serialized_start=7649 + _CUSTOMCALLAPIVERSION._serialized_end=7829 + _KIND._serialized_start=7831 + _KIND._serialized_end=7889 + _HLOINSTRUCTIONPROTO._serialized_start=78 + _HLOINSTRUCTIONPROTO._serialized_end=2963 + _HLOINSTRUCTIONPROTO_SLICEDIMENSIONS._serialized_start=2600 + _HLOINSTRUCTIONPROTO_SLICEDIMENSIONS._serialized_end=2663 + _HLOCOMPUTATIONPROTO._serialized_start=2966 + _HLOCOMPUTATIONPROTO._serialized_end=3199 + _HLOSCHEDULEPROTO._serialized_start=3202 + _HLOSCHEDULEPROTO._serialized_end=3418 + _HLOSCHEDULEPROTO_INSTRUCTIONSEQUENCE._serialized_start=3279 + _HLOSCHEDULEPROTO_INSTRUCTIONSEQUENCE._serialized_end=3325 + 
_HLOSCHEDULEPROTO_SEQUENCESENTRY._serialized_start=3327 + _HLOSCHEDULEPROTO_SEQUENCESENTRY._serialized_end=3418 + _HLOINPUTOUTPUTALIASPROTO._serialized_start=3421 + _HLOINPUTOUTPUTALIASPROTO._serialized_end=3640 + _HLOINPUTOUTPUTALIASPROTO_ALIASENTRYPROTO._serialized_start=3513 + _HLOINPUTOUTPUTALIASPROTO_ALIASENTRYPROTO._serialized_end=3640 + _HLOBUFFERDONORPROTO._serialized_start=3643 + _HLOBUFFERDONORPROTO._serialized_end=3811 + _HLOBUFFERDONORPROTO_BUFFERDONORENTRYPROTO._serialized_start=3731 + _HLOBUFFERDONORPROTO_BUFFERDONORENTRYPROTO._serialized_end=3811 + _CROSSPROGRAMPREFETCH._serialized_start=3813 + _CROSSPROGRAMPREFETCH._serialized_end=3885 + _STACKFRAMEINDEXPROTO._serialized_start=3888 + _STACKFRAMEINDEXPROTO._serialized_end=4237 + _STACKFRAMEINDEXPROTO_FILELOCATION._serialized_start=4080 + _STACKFRAMEINDEXPROTO_FILELOCATION._serialized_end=4172 + _STACKFRAMEINDEXPROTO_STACKFRAME._serialized_start=4174 + _STACKFRAMEINDEXPROTO_STACKFRAME._serialized_end=4237 + _HLOMODULEPROTO._serialized_start=4240 + _HLOMODULEPROTO._serialized_end=5342 + _HLOMODULEPROTO_PROFILEINFO._serialized_start=5029 + _HLOMODULEPROTO_PROFILEINFO._serialized_end=5238 + _HLOMODULEPROTO_PROFILETYPE._serialized_start=5240 + _HLOMODULEPROTO_PROFILETYPE._serialized_end=5309 + _LOGICALBUFFERPROTO._serialized_start=5345 + _LOGICALBUFFERPROTO._serialized_end=5553 + _LOGICALBUFFERPROTO_LOCATION._serialized_start=5462 + _LOGICALBUFFERPROTO_LOCATION._serialized_end=5553 + _BUFFERALLOCATIONPROTO._serialized_start=5556 + _BUFFERALLOCATIONPROTO._serialized_end=5932 + _BUFFERALLOCATIONPROTO_ASSIGNED._serialized_start=5865 + _BUFFERALLOCATIONPROTO_ASSIGNED._serialized_end=5932 + _HEAPSIMULATORTRACE._serialized_start=5935 + _HEAPSIMULATORTRACE._serialized_end=6277 + _HEAPSIMULATORTRACE_EVENT._serialized_start=6071 + _HEAPSIMULATORTRACE_EVENT._serialized_end=6277 + _HEAPSIMULATORTRACE_EVENT_KIND._serialized_start=6234 + _HEAPSIMULATORTRACE_EVENT_KIND._serialized_end=6277 + 
_HLOMODULEGROUPPROTO._serialized_start=6279 + _HLOMODULEGROUPPROTO._serialized_end=6356 + _BUFFERASSIGNMENTPROTO._serialized_start=6359 + _BUFFERASSIGNMENTPROTO._serialized_end=6701 + _BUFFERASSIGNMENTPROTO_BUFFERALIAS._serialized_start=6610 + _BUFFERASSIGNMENTPROTO_BUFFERALIAS._serialized_end=6701 + _HLOPROTO._serialized_start=6703 + _HLOPROTO._serialized_end=6829 + _HLOSNAPSHOT._serialized_start=6832 + _HLOSNAPSHOT._serialized_end=6974 + _HLOMODULEMETADATAPROTO._serialized_start=6977 + _HLOMODULEMETADATAPROTO._serialized_end=7162 + _HLOPASSMETADATA._serialized_start=7165 + _HLOPASSMETADATA._serialized_end=7446 + _XLARUNTIMEEXECUTABLEPROTO._serialized_start=7448 + _XLARUNTIMEEXECUTABLEPROTO._serialized_end=7561 +# @@protoc_insertion_point(module_scope) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/xla_data_pb2.py b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/xla_data_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..9eafb5fb04f4b3cbae8b2c91b4cf9c577aa62c08 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/compiler/xla/xla_data_pb2.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: xla/xla_data.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12xla/xla_data.proto\x12\x03xla\"\xb7\x01\n\rPaddingConfig\x12=\n\ndimensions\x18\x01 \x03(\x0b\x32).xla.PaddingConfig.PaddingConfigDimension\x1ag\n\x16PaddingConfigDimension\x12\x18\n\x10\x65\x64ge_padding_low\x18\x01 \x01(\x03\x12\x19\n\x11\x65\x64ge_padding_high\x18\x02 \x01(\x03\x12\x18\n\x10interior_padding\x18\x03 \x01(\x03\"\x1f\n\tTileProto\x12\x12\n\ndimensions\x18\x01 \x03(\x03\"\x8c\x04\n\x0bLayoutProto\x12*\n\x0f\x64im_level_types\x18\t \x03(\x0e\x32\x11.xla.DimLevelType\x12\x12\n\ndim_unique\x18\r \x03(\x08\x12\x13\n\x0b\x64im_ordered\x18\x0e \x03(\x08\x12\x16\n\x0eminor_to_major\x18\x01 \x03(\x03\x12\x1d\n\x05tiles\x18\x06 \x03(\x0b\x32\x0e.xla.TileProto\x12*\n\"tail_padding_alignment_in_elements\x18\x10 \x01(\x03\x12\x1c\n\x14\x65lement_size_in_bits\x18\x07 \x01(\x03\x12\x14\n\x0cmemory_space\x18\x08 \x01(\x03\x12\x30\n\x14index_primitive_type\x18\x0b \x01(\x0e\x32\x12.xla.PrimitiveType\x12\x32\n\x16pointer_primitive_type\x18\x0c \x01(\x0e\x32\x12.xla.PrimitiveType\x12\'\n\x0ephysical_shape\x18\n \x01(\x0b\x32\x0f.xla.ShapeProto\x12+\n#dynamic_shape_metadata_prefix_bytes\x18\x0f \x01(\x03J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06R\x11padded_dimensionsR\rpadding_valueR\x06\x66ormatR\x13max_sparse_elements\"\xbd\x01\n\nShapeProto\x12(\n\x0c\x65lement_type\x18\x02 \x01(\x0e\x32\x12.xla.PrimitiveType\x12\x12\n\ndimensions\x18\x03 \x03(\x03\x12%\n\x0ctuple_shapes\x18\x04 \x03(\x0b\x32\x0f.xla.ShapeProto\x12 \n\x06layout\x18\x05 
\x01(\x0b\x32\x10.xla.LayoutProto\x12\x1c\n\x14is_dynamic_dimension\x18\x06 \x03(\x08J\x04\x08\x01\x10\x02R\x04rank\"r\n\x11ProgramShapeProto\x12#\n\nparameters\x18\x01 \x03(\x0b\x32\x0f.xla.ShapeProto\x12\x1f\n\x06result\x18\x02 \x01(\x0b\x32\x0f.xla.ShapeProto\x12\x17\n\x0fparameter_names\x18\x03 \x03(\t\"D\n\x10\x43omputationStats\x12\x12\n\nflop_count\x18\x01 \x01(\x01\x12\x1c\n\x14transcendental_count\x18\x02 \x01(\x01\"\xcb\x04\n\nOpMetadata\x12\x0f\n\x07op_type\x18\x01 \x01(\t\x12\x0f\n\x07op_name\x18\x02 \x01(\t\x12\x13\n\x0bsource_file\x18\x03 \x01(\t\x12\x13\n\x0bsource_line\x18\x04 \x01(\x05\x12*\n\x0cprofile_type\x18\x05 \x03(\x0e\x32\x10.xla.ProfileTypeB\x02\x18\x01\x12\x18\n\x10\x63reation_pass_id\x18\x06 \x01(\x03\x12 \n\x18logical_creation_pass_id\x18\x07 \x01(\x03\x12\'\n\x1fsize_of_generated_code_in_bytes\x18\x08 \x01(\x03\x12+\n#size_of_memory_working_set_in_bytes\x18\t \x01(\x03\x12\x31\n\x0cprofile_info\x18\n \x01(\x0b\x32\x1b.xla.OpMetadata.ProfileInfo\x12\x19\n\x11\x64\x65\x64uplicated_name\x18\x0c \x01(\t\x12\x17\n\x0fpreserve_layout\x18\r \x01(\x08\x12\x16\n\x0estack_frame_id\x18\x0f \x01(\x05\x1a\xad\x01\n\x0bProfileInfo\x12&\n\x0cprofile_type\x18\x01 \x03(\x0e\x32\x10.xla.ProfileType\x12\x18\n\x10relative_speedup\x18\x02 \x01(\x01\x12*\n\x0eprofile_source\x18\x03 \x01(\x0e\x32\x12.xla.ProfileSource\x12\x30\n\x11\x63ompilation_event\x18\x04 \x01(\x0e\x32\x15.xla.CompilationEventJ\x04\x08\x0e\x10\x0f\"\xe3\x01\n\x10\x45xecutionProfile\x12\x1d\n\x15\x63ompilation_cache_hit\x18\x01 \x01(\x08\x12\x17\n\x0f\x63ompile_time_ms\x18\x02 \x01(\x03\x12\x1b\n\x13\x63ompute_cycle_count\x18\x03 \x01(\x03\x12\x17\n\x0f\x63ompute_time_ns\x18\x04 \x01(\x03\x12$\n\x1c\x63ompute_and_transfer_time_ns\x18\x05 \x01(\x03\x12 \n\x18\x65xecutable_size_in_bytes\x18\x06 \x01(\x03\x12\x19\n\x11profile_cache_hit\x18\x07 \x01(\x08\"!\n\x0f\x45xecutionHandle\x12\x0e\n\x06handle\x18\x01 \x01(\x03\"\"\n\x10GlobalDataHandle\x12\x0e\n\x06handle\x18\x01 
\x01(\x03\"4\n\x0c\x44\x65viceHandle\x12\x0e\n\x06handle\x18\x01 \x01(\x03\x12\x14\n\x0c\x64\x65vice_count\x18\x02 \x01(\x03\"\xb4\x01\n\rChannelHandle\x12\x0e\n\x06handle\x18\x01 \x01(\x03\x12,\n\x04type\x18\x02 \x01(\x0e\x32\x1e.xla.ChannelHandle.ChannelType\"e\n\x0b\x43hannelType\x12\x18\n\x14\x43HANNEL_TYPE_INVALID\x10\x00\x12\x14\n\x10\x44\x45VICE_TO_DEVICE\x10\x01\x12\x12\n\x0e\x44\x45VICE_TO_HOST\x10\x02\x12\x12\n\x0eHOST_TO_DEVICE\x10\x03\"\xc5\x01\n\x15\x44\x65viceAssignmentProto\x12\x15\n\rreplica_count\x18\x01 \x01(\x05\x12\x19\n\x11\x63omputation_count\x18\x02 \x01(\x05\x12I\n\x13\x63omputation_devices\x18\x03 \x03(\x0b\x32,.xla.DeviceAssignmentProto.ComputationDevice\x1a/\n\x11\x43omputationDevice\x12\x1a\n\x12replica_device_ids\x18\x01 \x03(\x05\"\xc4\x03\n\x0cLiteralProto\x12\x1e\n\x05shape\x18\x01 \x01(\x0b\x32\x0f.xla.ShapeProto\x12\r\n\x05preds\x18\x02 \x03(\x08\x12\x0b\n\x03s4s\x18\x15 \x01(\x0c\x12\x0b\n\x03u4s\x18\x16 \x01(\x0c\x12\x0b\n\x03s8s\x18\x0f \x01(\x0c\x12\x0b\n\x03u8s\x18\x03 \x01(\x0c\x12\x0c\n\x04s32s\x18\x04 \x03(\x05\x12\x0c\n\x04s64s\x18\x05 \x03(\x03\x12\x0c\n\x04u32s\x18\x06 \x03(\r\x12\x0c\n\x04u64s\x18\x07 \x03(\x04\x12\x0c\n\x04\x66\x33\x32s\x18\x08 \x03(\x02\x12\x0c\n\x04\x66\x36\x34s\x18\t \x03(\x01\x12\x0c\n\x04\x63\x36\x34s\x18\x0c \x03(\x02\x12\r\n\x05\x63\x31\x32\x38s\x18\x12 \x03(\x01\x12)\n\x0etuple_literals\x18\n \x03(\x0b\x32\x11.xla.LiteralProto\x12\x0c\n\x04\x66\x31\x36s\x18\x0b \x01(\x0c\x12\r\n\x05\x62\x66\x31\x36s\x18\r \x01(\x0c\x12\x0c\n\x04u16s\x18\x10 \x01(\x0c\x12\x0c\n\x04s16s\x18\x11 \x01(\x0c\x12\x0f\n\x07\x66\x38\x65\x35m2s\x18\x13 \x01(\x0c\x12\x11\n\tf8e4m3fns\x18\x14 \x01(\x0c\x12\x16\n\x0e\x66\x38\x65\x34m3b11fnuzs\x18\x17 \x01(\x0c\x12\x13\n\x0b\x66\x38\x65\x35m2fnuzs\x18\x18 \x01(\x0c\x12\x13\n\x0b\x66\x38\x65\x34m3fnuzs\x18\x19 \x01(\x0c\x12\x16\n\x0esparse_indices\x18\x0e \x03(\x03\"\xa3\x01\n\x0fWindowDimension\x12\x0c\n\x04size\x18\x01 \x01(\x03\x12\x0e\n\x06stride\x18\x02 
\x01(\x03\x12\x13\n\x0bpadding_low\x18\x03 \x01(\x03\x12\x14\n\x0cpadding_high\x18\x04 \x01(\x03\x12\x17\n\x0fwindow_dilation\x18\x05 \x01(\x03\x12\x15\n\rbase_dilation\x18\x06 \x01(\x03\x12\x17\n\x0fwindow_reversal\x18\x07 \x01(\x08\"2\n\x06Window\x12(\n\ndimensions\x18\x01 \x03(\x0b\x32\x14.xla.WindowDimension\"~\n\x16GatherDimensionNumbers\x12\x13\n\x0boffset_dims\x18\x01 \x03(\x03\x12\x1c\n\x14\x63ollapsed_slice_dims\x18\x02 \x03(\x03\x12\x17\n\x0fstart_index_map\x18\x03 \x03(\x03\x12\x18\n\x10index_vector_dim\x18\x04 \x01(\x03\"\x93\x01\n\x17ScatterDimensionNumbers\x12\x1a\n\x12update_window_dims\x18\x01 \x03(\x03\x12\x1c\n\x14inserted_window_dims\x18\x02 \x03(\x03\x12$\n\x1cscatter_dims_to_operand_dims\x18\x03 \x03(\x03\x12\x18\n\x10index_vector_dim\x18\x04 \x01(\x03\"\xd8\x02\n\x1b\x43onvolutionDimensionNumbers\x12\x1d\n\x15input_batch_dimension\x18\x07 \x01(\x03\x12\x1f\n\x17input_feature_dimension\x18\x08 \x01(\x03\x12 \n\x18input_spatial_dimensions\x18\x0b \x03(\x03\x12&\n\x1ekernel_input_feature_dimension\x18\x03 \x01(\x03\x12\'\n\x1fkernel_output_feature_dimension\x18\x04 \x01(\x03\x12!\n\x19kernel_spatial_dimensions\x18\x06 \x03(\x03\x12\x1e\n\x16output_batch_dimension\x18\t \x01(\x03\x12 \n\x18output_feature_dimension\x18\n \x01(\x03\x12!\n\x19output_spatial_dimensions\x18\x0c \x03(\x03\"\x99\x01\n\x13\x44otDimensionNumbers\x12\"\n\x1alhs_contracting_dimensions\x18\x01 \x03(\x03\x12\"\n\x1arhs_contracting_dimensions\x18\x02 \x03(\x03\x12\x1c\n\x14lhs_batch_dimensions\x18\x03 \x03(\x03\x12\x1c\n\x14rhs_batch_dimensions\x18\x04 \x03(\x03\"m\n\x12SparsityDescriptor\x12\x1f\n\x04type\x18\x01 \x01(\x0e\x32\x11.xla.SparsityType\x12\r\n\x05index\x18\x02 \x01(\x05\x12\x11\n\tdimension\x18\x03 \x01(\x05\x12\t\n\x01n\x18\x04 \x01(\x05\x12\t\n\x01m\x18\x05 \x01(\x05\"\xdf\x01\n\x16TriangularSolveOptions\x12\x11\n\tleft_side\x18\x01 \x01(\x08\x12\r\n\x05lower\x18\x02 \x01(\x08\x12\x15\n\runit_diagonal\x18\x03 \x01(\x08\x12:\n\x0btranspose_a\x18\x04 
\x01(\x0e\x32%.xla.TriangularSolveOptions.Transpose\"P\n\tTranspose\x12\x15\n\x11TRANSPOSE_INVALID\x10\x00\x12\x10\n\x0cNO_TRANSPOSE\x10\x01\x12\r\n\tTRANSPOSE\x10\x02\x12\x0b\n\x07\x41\x44JOINT\x10\x03\" \n\x0f\x43holeskyOptions\x12\r\n\x05lower\x18\x01 \x01(\x08\"!\n\x0bSortOptions\x12\x12\n\ndescending\x18\x01 \x01(\x08\"o\n\x12\x46rontendAttributes\x12-\n\x03map\x18\x01 \x03(\x0b\x32 .xla.FrontendAttributes.MapEntry\x1a*\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"0\n\tStatistic\x12\x11\n\tstat_name\x18\x01 \x01(\t\x12\x10\n\x08stat_val\x18\x02 \x01(\x01\"T\n\rStatisticsViz\x12\x1f\n\x17stat_index_to_visualize\x18\x01 \x01(\x03\x12\"\n\nstatistics\x18\x02 \x03(\x0b\x32\x0e.xla.Statistic\"\xd3\x04\n\nOpSharding\x12\"\n\x04type\x18\x01 \x01(\x0e\x32\x14.xla.OpSharding.Type\x12#\n\ntile_shape\x18\x02 \x01(\x0b\x32\x0f.xla.ShapeProto\x12\"\n\x1atile_assignment_dimensions\x18\x03 \x03(\x03\x12\x1f\n\x17tile_assignment_devices\x18\x04 \x03(\x03\x12(\n\x0ftuple_shardings\x18\x05 \x03(\x0b\x32\x0f.xla.OpSharding\x12\"\n\x1areplicate_on_last_tile_dim\x18\x06 \x01(\x08\x12!\n\x08metadata\x18\x07 \x03(\x0b\x32\x0f.xla.OpMetadata\x12,\n\x0elast_tile_dims\x18\x08 \x03(\x0e\x32\x14.xla.OpSharding.Type\x12\x19\n\x11iota_reshape_dims\x18\t \x03(\x03\x12\x1b\n\x13iota_transpose_perm\x18\n \x03(\x05\x12\x16\n\x0eis_shard_group\x18\x0b \x01(\x08\x12\x16\n\x0eshard_group_id\x18\x0c \x01(\x03\x12\x38\n\x10shard_group_type\x18\r \x01(\x0e\x32\x1e.xla.OpSharding.ShardGroupType\"R\n\x04Type\x12\x0e\n\nREPLICATED\x10\x00\x12\x0b\n\x07MAXIMAL\x10\x01\x12\t\n\x05TUPLE\x10\x02\x12\t\n\x05OTHER\x10\x03\x12\n\n\x06MANUAL\x10\x04\x12\x0b\n\x07UNKNOWN\x10\x05\"\"\n\x0eShardGroupType\x12\x06\n\x02\x41S\x10\x00\x12\x08\n\x04LIKE\x10\x01\"#\n\x0cReplicaGroup\x12\x13\n\x0breplica_ids\x18\x01 \x03(\x03\".\n\x0cSourceTarget\x12\x0e\n\x06source\x18\x01 \x01(\x03\x12\x0e\n\x06target\x18\x02 
\x01(\x03\"\x90\x01\n\x0fPrecisionConfig\x12\x39\n\x11operand_precision\x18\x01 \x03(\x0e\x32\x1e.xla.PrecisionConfig.Precision\"B\n\tPrecision\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x08\n\x04HIGH\x10\x01\x12\x0b\n\x07HIGHEST\x10\x02\x12\x11\n\rPACKED_NIBBLE\x10\x03\":\n\x14ParameterReplication\x12\"\n\x1areplicated_at_leaf_buffers\x18\x01 \x03(\x08\"{\n\x16WhileLoopBackendConfig\x12\x44\n\x10known_trip_count\x18\x01 \x01(\x0b\x32*.xla.WhileLoopBackendConfig.KnownTripCount\x1a\x1b\n\x0eKnownTripCount\x12\t\n\x01n\x18\x01 \x01(\x03\"g\n\x15OutputOperandAliasing\x12\x1a\n\x12output_shape_index\x18\x01 \x03(\x03\x12\x15\n\roperand_index\x18\x02 \x01(\x03\x12\x1b\n\x13operand_shape_index\x18\x03 \x03(\x03*\xb7\x02\n\rPrimitiveType\x12\x1a\n\x16PRIMITIVE_TYPE_INVALID\x10\x00\x12\x08\n\x04PRED\x10\x01\x12\x06\n\x02S4\x10\x15\x12\x06\n\x02S8\x10\x02\x12\x07\n\x03S16\x10\x03\x12\x07\n\x03S32\x10\x04\x12\x07\n\x03S64\x10\x05\x12\x06\n\x02U4\x10\x16\x12\x06\n\x02U8\x10\x06\x12\x07\n\x03U16\x10\x07\x12\x07\n\x03U32\x10\x08\x12\x07\n\x03U64\x10\t\x12\x07\n\x03\x46\x31\x36\x10\n\x12\x07\n\x03\x46\x33\x32\x10\x0b\x12\x08\n\x04\x42\x46\x31\x36\x10\x10\x12\x07\n\x03\x46\x36\x34\x10\x0c\x12\n\n\x06\x46\x38\x45\x35M2\x10\x13\x12\x0c\n\x08\x46\x38\x45\x34M3FN\x10\x14\x12\x11\n\rF8E4M3B11FNUZ\x10\x17\x12\x0e\n\nF8E5M2FNUZ\x10\x18\x12\x0e\n\nF8E4M3FNUZ\x10\x19\x12\x07\n\x03\x43\x36\x34\x10\x0f\x12\x08\n\x04\x43\x31\x32\x38\x10\x12\x12\t\n\x05TUPLE\x10\r\x12\x0f\n\x0bOPAQUE_TYPE\x10\x0e\x12\t\n\x05TOKEN\x10\x11*^\n\x0c\x44imLevelType\x12\r\n\tDIM_DENSE\x10\x00\x12\x12\n\x0e\x44IM_COMPRESSED\x10\x01\x12\x11\n\rDIM_SINGLETON\x10\x02\x12\x18\n\x14\x44IM_LOOSE_COMPRESSED\x10\x03*=\n\x0bProfileType\x12\x0b\n\x07INVALID\x10\x00\x12\n\n\x06WINDOW\x10\x01\x12\x08\n\x04\x46LAG\x10\x02\x12\x0b\n\x07INTEGER\x10\x03*j\n\rProfileSource\x12!\n\x1dPROFILE_SOURCE_UNKNOWN_SOURCE\x10\x00\x12\x1b\n\x17PROFILE_SOURCE_EMBEDDED\x10\x01\x12\x19\n\x15PROFILE_SOURCE_REMOTE\x10\x02*\x85\x01\n\x10\x43ompilat
ionEvent\x12#\n\x1f\x43OMPILATION_EVENT_UNKNOWN_EVENT\x10\x00\x12\'\n#COMPILATION_EVENT_FIRST_COMPILATION\x10\x01\x12#\n\x1f\x43OMPILATION_EVENT_RECOMPILATION\x10\x02*G\n\x0bPaddingType\x12\x13\n\x0fPADDING_INVALID\x10\x00\x12\x11\n\rPADDING_VALID\x10\x01\x12\x10\n\x0cPADDING_SAME\x10\x02*1\n\x07\x46\x66tType\x12\x07\n\x03\x46\x46T\x10\x00\x12\x08\n\x04IFFT\x10\x01\x12\x08\n\x04RFFT\x10\x02\x12\t\n\x05IRFFT\x10\x03*A\n\x0cSparsityType\x12\x14\n\x10SPARSITY_INVALID\x10\x00\x12\x1b\n\x17SPARSITY_STRUCTURED_N_M\x10\x01*F\n\x12RandomDistribution\x12\x0f\n\x0bRNG_INVALID\x10\x00\x12\x0f\n\x0bRNG_UNIFORM\x10\x01\x12\x0e\n\nRNG_NORMAL\x10\x02*E\n\x0fRandomAlgorithm\x12\x0f\n\x0bRNG_DEFAULT\x10\x00\x12\x11\n\rRNG_THREE_FRY\x10\x01\x12\x0e\n\nRNG_PHILOX\x10\x02\x42\x03\xf8\x01\x01\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'xla.xla_data_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\370\001\001' + _OPMETADATA.fields_by_name['profile_type']._options = None + _OPMETADATA.fields_by_name['profile_type']._serialized_options = b'\030\001' + _FRONTENDATTRIBUTES_MAPENTRY._options = None + _FRONTENDATTRIBUTES_MAPENTRY._serialized_options = b'8\001' + _PRIMITIVETYPE._serialized_start=5709 + _PRIMITIVETYPE._serialized_end=6020 + _DIMLEVELTYPE._serialized_start=6022 + _DIMLEVELTYPE._serialized_end=6116 + _PROFILETYPE._serialized_start=6118 + _PROFILETYPE._serialized_end=6179 + _PROFILESOURCE._serialized_start=6181 + _PROFILESOURCE._serialized_end=6287 + _COMPILATIONEVENT._serialized_start=6290 + _COMPILATIONEVENT._serialized_end=6423 + _PADDINGTYPE._serialized_start=6425 + _PADDINGTYPE._serialized_end=6496 + _FFTTYPE._serialized_start=6498 + _FFTTYPE._serialized_end=6547 + _SPARSITYTYPE._serialized_start=6549 + _SPARSITYTYPE._serialized_end=6614 + _RANDOMDISTRIBUTION._serialized_start=6616 + 
_RANDOMDISTRIBUTION._serialized_end=6686 + _RANDOMALGORITHM._serialized_start=6688 + _RANDOMALGORITHM._serialized_end=6757 + _PADDINGCONFIG._serialized_start=28 + _PADDINGCONFIG._serialized_end=211 + _PADDINGCONFIG_PADDINGCONFIGDIMENSION._serialized_start=108 + _PADDINGCONFIG_PADDINGCONFIGDIMENSION._serialized_end=211 + _TILEPROTO._serialized_start=213 + _TILEPROTO._serialized_end=244 + _LAYOUTPROTO._serialized_start=247 + _LAYOUTPROTO._serialized_end=771 + _SHAPEPROTO._serialized_start=774 + _SHAPEPROTO._serialized_end=963 + _PROGRAMSHAPEPROTO._serialized_start=965 + _PROGRAMSHAPEPROTO._serialized_end=1079 + _COMPUTATIONSTATS._serialized_start=1081 + _COMPUTATIONSTATS._serialized_end=1149 + _OPMETADATA._serialized_start=1152 + _OPMETADATA._serialized_end=1739 + _OPMETADATA_PROFILEINFO._serialized_start=1560 + _OPMETADATA_PROFILEINFO._serialized_end=1733 + _EXECUTIONPROFILE._serialized_start=1742 + _EXECUTIONPROFILE._serialized_end=1969 + _EXECUTIONHANDLE._serialized_start=1971 + _EXECUTIONHANDLE._serialized_end=2004 + _GLOBALDATAHANDLE._serialized_start=2006 + _GLOBALDATAHANDLE._serialized_end=2040 + _DEVICEHANDLE._serialized_start=2042 + _DEVICEHANDLE._serialized_end=2094 + _CHANNELHANDLE._serialized_start=2097 + _CHANNELHANDLE._serialized_end=2277 + _CHANNELHANDLE_CHANNELTYPE._serialized_start=2176 + _CHANNELHANDLE_CHANNELTYPE._serialized_end=2277 + _DEVICEASSIGNMENTPROTO._serialized_start=2280 + _DEVICEASSIGNMENTPROTO._serialized_end=2477 + _DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE._serialized_start=2430 + _DEVICEASSIGNMENTPROTO_COMPUTATIONDEVICE._serialized_end=2477 + _LITERALPROTO._serialized_start=2480 + _LITERALPROTO._serialized_end=2932 + _WINDOWDIMENSION._serialized_start=2935 + _WINDOWDIMENSION._serialized_end=3098 + _WINDOW._serialized_start=3100 + _WINDOW._serialized_end=3150 + _GATHERDIMENSIONNUMBERS._serialized_start=3152 + _GATHERDIMENSIONNUMBERS._serialized_end=3278 + _SCATTERDIMENSIONNUMBERS._serialized_start=3281 + 
_SCATTERDIMENSIONNUMBERS._serialized_end=3428 + _CONVOLUTIONDIMENSIONNUMBERS._serialized_start=3431 + _CONVOLUTIONDIMENSIONNUMBERS._serialized_end=3775 + _DOTDIMENSIONNUMBERS._serialized_start=3778 + _DOTDIMENSIONNUMBERS._serialized_end=3931 + _SPARSITYDESCRIPTOR._serialized_start=3933 + _SPARSITYDESCRIPTOR._serialized_end=4042 + _TRIANGULARSOLVEOPTIONS._serialized_start=4045 + _TRIANGULARSOLVEOPTIONS._serialized_end=4268 + _TRIANGULARSOLVEOPTIONS_TRANSPOSE._serialized_start=4188 + _TRIANGULARSOLVEOPTIONS_TRANSPOSE._serialized_end=4268 + _CHOLESKYOPTIONS._serialized_start=4270 + _CHOLESKYOPTIONS._serialized_end=4302 + _SORTOPTIONS._serialized_start=4304 + _SORTOPTIONS._serialized_end=4337 + _FRONTENDATTRIBUTES._serialized_start=4339 + _FRONTENDATTRIBUTES._serialized_end=4450 + _FRONTENDATTRIBUTES_MAPENTRY._serialized_start=4408 + _FRONTENDATTRIBUTES_MAPENTRY._serialized_end=4450 + _STATISTIC._serialized_start=4452 + _STATISTIC._serialized_end=4500 + _STATISTICSVIZ._serialized_start=4502 + _STATISTICSVIZ._serialized_end=4586 + _OPSHARDING._serialized_start=4589 + _OPSHARDING._serialized_end=5184 + _OPSHARDING_TYPE._serialized_start=5066 + _OPSHARDING_TYPE._serialized_end=5148 + _OPSHARDING_SHARDGROUPTYPE._serialized_start=5150 + _OPSHARDING_SHARDGROUPTYPE._serialized_end=5184 + _REPLICAGROUP._serialized_start=5186 + _REPLICAGROUP._serialized_end=5221 + _SOURCETARGET._serialized_start=5223 + _SOURCETARGET._serialized_end=5269 + _PRECISIONCONFIG._serialized_start=5272 + _PRECISIONCONFIG._serialized_end=5416 + _PRECISIONCONFIG_PRECISION._serialized_start=5350 + _PRECISIONCONFIG_PRECISION._serialized_end=5416 + _PARAMETERREPLICATION._serialized_start=5418 + _PARAMETERREPLICATION._serialized_end=5476 + _WHILELOOPBACKENDCONFIG._serialized_start=5478 + _WHILELOOPBACKENDCONFIG._serialized_end=5601 + _WHILELOOPBACKENDCONFIG_KNOWNTRIPCOUNT._serialized_start=5574 + _WHILELOOPBACKENDCONFIG_KNOWNTRIPCOUNT._serialized_end=5601 + _OUTPUTOPERANDALIASING._serialized_start=5603 + 
_OUTPUTOPERANDALIASING._serialized_end=5706 +# @@protoc_insertion_point(module_scope) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..678bde323a947130491402174c29aeff1a07e130 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec1036db679eaf9fd28ced049cdece487c5d2573 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2616c1abd6bdb7603f09eb70a4456256bfa6090 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab76857c0b61a0435f55383bf5709b6653f9676f Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/gen_rpc_ops.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/gen_rpc_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..476782457e9f30428d11acfeccb0c11ddf578d9c Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/__pycache__/gen_rpc_ops.cpython-310.pyc differ diff --git 
a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/gen_rpc_ops.py b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/gen_rpc_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..beb3f4ce5316f83453d1276033c621931c98b4bd --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/kernels/gen_rpc_ops.py @@ -0,0 +1,763 @@ +"""Python wrappers around TensorFlow ops. + +This file is MACHINE GENERATED! Do not edit. +""" + +import collections + +from tensorflow.python import pywrap_tfe as pywrap_tfe +from tensorflow.python.eager import context as _context +from tensorflow.python.eager import core as _core +from tensorflow.python.eager import execute as _execute +from tensorflow.python.framework import dtypes as _dtypes +from tensorflow.security.fuzzing.py import annotation_types as _atypes + +from tensorflow.python.framework import op_def_registry as _op_def_registry +from tensorflow.python.framework import ops as _ops +from tensorflow.python.framework import op_def_library as _op_def_library +from tensorflow.python.util.deprecation import deprecated_endpoints +from tensorflow.python.util import dispatch as _dispatch +from tensorflow.python.util.tf_export import tf_export + +from typing import TypeVar, List, Any +from typing_extensions import Annotated + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('delete_rpc_future_resource') +def delete_rpc_future_resource(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name=None): + r"""TODO: add doc. + + Args: + handle: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "DeleteRpcFutureResource", name, handle, deleter) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_delete_rpc_future_resource( + (handle, deleter, name,), None) + if _result is not NotImplemented: + return _result + return delete_rpc_future_resource_eager_fallback( + handle, deleter, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + delete_rpc_future_resource, (), dict(handle=handle, + deleter=deleter, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_delete_rpc_future_resource( + (handle, deleter, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "DeleteRpcFutureResource", handle=handle, deleter=deleter, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + delete_rpc_future_resource, (), dict(handle=handle, deleter=deleter, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + return _op +DeleteRpcFutureResource = tf_export("raw_ops.DeleteRpcFutureResource")(_ops.to_raw_op(delete_rpc_future_resource)) +_dispatcher_for_delete_rpc_future_resource = delete_rpc_future_resource._tf_type_based_dispatcher.Dispatch + + +def delete_rpc_future_resource_eager_fallback(handle: Annotated[Any, _atypes.Resource], deleter: Annotated[Any, _atypes.Variant], name, ctx): + handle = _ops.convert_to_tensor(handle, _dtypes.resource) + deleter = _ops.convert_to_tensor(deleter, _dtypes.variant) + _inputs_flat = [handle, deleter] + _attrs = None + _result = _execute.execute(b"DeleteRpcFutureResource", 0, + inputs=_inputs_flat, attrs=_attrs, ctx=ctx, + name=name) + _result = None + return _result + +_RpcCallOutput = collections.namedtuple( + "RpcCall", + ["future", "deleter"]) + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_call') +def rpc_call(client: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], args, timeout_in_ms: Annotated[Any, _atypes.Int64], name=None): + r"""TODO: add doc. + + Args: + client: A `Tensor` of type `resource`. + method_name: A `Tensor` of type `string`. + args: A list of `Tensor` objects. + timeout_in_ms: A `Tensor` of type `int64`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (future, deleter). + + future: A `Tensor` of type `resource`. + deleter: A `Tensor` of type `variant`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcCall", name, client, method_name, args, timeout_in_ms) + _result = _RpcCallOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_call( + (client, method_name, args, timeout_in_ms, name,), None) + if _result is not NotImplemented: + return _result + return rpc_call_eager_fallback( + client, method_name, args, timeout_in_ms, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_call, (), dict(client=client, method_name=method_name, + args=args, timeout_in_ms=timeout_in_ms, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_call( + (client, method_name, args, timeout_in_ms, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcCall", client=client, method_name=method_name, args=args, + timeout_in_ms=timeout_in_ms, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_call, (), dict(client=client, method_name=method_name, + args=args, timeout_in_ms=timeout_in_ms, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("Tin", _op.get_attr("Tin")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RpcCall", _inputs_flat, _attrs, _result) + _result = _RpcCallOutput._make(_result) + return _result + +RpcCall = tf_export("raw_ops.RpcCall")(_ops.to_raw_op(rpc_call)) +_dispatcher_for_rpc_call = rpc_call._tf_type_based_dispatcher.Dispatch + + +def rpc_call_eager_fallback(client: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], args, timeout_in_ms: Annotated[Any, _atypes.Int64], name, ctx): + _attr_Tin, args = _execute.convert_to_mixed_eager_tensors(args, ctx) + client = _ops.convert_to_tensor(client, _dtypes.resource) + method_name = _ops.convert_to_tensor(method_name, _dtypes.string) + timeout_in_ms = _ops.convert_to_tensor(timeout_in_ms, _dtypes.int64) + _inputs_flat = [client, method_name] + list(args) + [timeout_in_ms] + _attrs = ("Tin", _attr_Tin) + _result = _execute.execute(b"RpcCall", 2, inputs=_inputs_flat, attrs=_attrs, + ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RpcCall", _inputs_flat, _attrs, _result) + _result = _RpcCallOutput._make(_result) + return _result + +_RpcCheckStatusOutput = collections.namedtuple( + "RpcCheckStatus", + ["error_code", "error"]) + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_check_status') +def rpc_check_status(status_or: Annotated[Any, _atypes.Resource], name=None): + r"""TODO: add doc. 
+ + Args: + status_or: A `Tensor` of type `resource`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (error_code, error). + + error_code: A `Tensor` of type `int64`. + error: A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcCheckStatus", name, status_or) + _result = _RpcCheckStatusOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_check_status( + (status_or, name,), None) + if _result is not NotImplemented: + return _result + return rpc_check_status_eager_fallback( + status_or, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_check_status, (), dict(status_or=status_or, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_check_status( + (status_or, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcCheckStatus", status_or=status_or, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_check_status, (), dict(status_or=status_or, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "RpcCheckStatus", _inputs_flat, _attrs, _result) + _result = _RpcCheckStatusOutput._make(_result) + return _result + +RpcCheckStatus = tf_export("raw_ops.RpcCheckStatus")(_ops.to_raw_op(rpc_check_status)) +_dispatcher_for_rpc_check_status = rpc_check_status._tf_type_based_dispatcher.Dispatch + + +def rpc_check_status_eager_fallback(status_or: Annotated[Any, _atypes.Resource], name, ctx): + status_or = _ops.convert_to_tensor(status_or, _dtypes.resource) + _inputs_flat = [status_or] + _attrs = None + _result = _execute.execute(b"RpcCheckStatus", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RpcCheckStatus", _inputs_flat, _attrs, _result) + _result = _RpcCheckStatusOutput._make(_result) + return _result + +_RpcClientOutput = collections.namedtuple( + "RpcClient", + ["client", "method_specs"]) + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_client') +def rpc_client(server_address: Annotated[Any, _atypes.String], timeout_in_ms: Annotated[Any, _atypes.Int64], shared_name:str="", list_registered_methods:bool=False, name=None): + r"""TODO: add doc. + + Args: + server_address: A `Tensor` of type `string`. + timeout_in_ms: A `Tensor` of type `int64`. + shared_name: An optional `string`. Defaults to `""`. + list_registered_methods: An optional `bool`. Defaults to `False`. + name: A name for the operation (optional). + + Returns: + A tuple of `Tensor` objects (client, method_specs). 
+ + client: A `Tensor` of type `resource`. + method_specs: A `Tensor` of type `string`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcClient", name, server_address, timeout_in_ms, "shared_name", + shared_name, "list_registered_methods", list_registered_methods) + _result = _RpcClientOutput._make(_result) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_client( + (server_address, timeout_in_ms, shared_name, + list_registered_methods, name,), None) + if _result is not NotImplemented: + return _result + return rpc_client_eager_fallback( + server_address, timeout_in_ms, shared_name=shared_name, + list_registered_methods=list_registered_methods, name=name, + ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_client, (), dict(server_address=server_address, + timeout_in_ms=timeout_in_ms, + shared_name=shared_name, + list_registered_methods=list_registered_methods, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_client( + (server_address, timeout_in_ms, shared_name, list_registered_methods, + name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + if list_registered_methods is None: + list_registered_methods = False + list_registered_methods = _execute.make_bool(list_registered_methods, "list_registered_methods") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcClient", server_address=server_address, + timeout_in_ms=timeout_in_ms, shared_name=shared_name, + list_registered_methods=list_registered_methods, + name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_client, (), dict(server_address=server_address, + timeout_in_ms=timeout_in_ms, + shared_name=shared_name, + list_registered_methods=list_registered_methods, + name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = ("shared_name", _op.get_attr("shared_name"), + "list_registered_methods", + _op._get_attr_bool("list_registered_methods")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RpcClient", _inputs_flat, _attrs, _result) + _result = _RpcClientOutput._make(_result) + return _result + +RpcClient = tf_export("raw_ops.RpcClient")(_ops.to_raw_op(rpc_client)) +_dispatcher_for_rpc_client = rpc_client._tf_type_based_dispatcher.Dispatch + + +def rpc_client_eager_fallback(server_address: Annotated[Any, _atypes.String], timeout_in_ms: Annotated[Any, _atypes.Int64], shared_name: str, list_registered_methods: bool, name, ctx): + if shared_name is None: + shared_name = "" + shared_name = _execute.make_str(shared_name, "shared_name") + if list_registered_methods is None: + list_registered_methods = False + list_registered_methods = _execute.make_bool(list_registered_methods, "list_registered_methods") + server_address = _ops.convert_to_tensor(server_address, _dtypes.string) + timeout_in_ms = _ops.convert_to_tensor(timeout_in_ms, _dtypes.int64) + _inputs_flat = [server_address, 
timeout_in_ms] + _attrs = ("shared_name", shared_name, "list_registered_methods", + list_registered_methods) + _result = _execute.execute(b"RpcClient", 2, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RpcClient", _inputs_flat, _attrs, _result) + _result = _RpcClientOutput._make(_result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_get_value') +def rpc_get_value(status_or: Annotated[Any, _atypes.Resource], Tout, name=None): + r"""TODO: add doc. + + Args: + status_or: A `Tensor` of type `resource`. + Tout: A list of `tf.DTypes`. + name: A name for the operation (optional). + + Returns: + A list of `Tensor` objects of type `Tout`. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcGetValue", name, status_or, "Tout", Tout) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_get_value( + (status_or, Tout, name,), None) + if _result is not NotImplemented: + return _result + return rpc_get_value_eager_fallback( + status_or, Tout=Tout, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_get_value, (), dict(status_or=status_or, Tout=Tout, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_get_value( + (status_or, Tout, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. + if not isinstance(Tout, (list, tuple)): + raise TypeError( + "Expected list for 'Tout' argument to " + "'rpc_get_value' Op, not %r." 
% Tout) + Tout = [_execute.make_type(_t, "Tout") for _t in Tout] + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcGetValue", status_or=status_or, Tout=Tout, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_get_value, (), dict(status_or=status_or, Tout=Tout, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if not _result: + return _op + if _execute.must_record_gradient(): + _attrs = ("Tout", _op.get_attr("Tout")) + _inputs_flat = _op.inputs + _execute.record_gradient( + "RpcGetValue", _inputs_flat, _attrs, _result) + return _result + +RpcGetValue = tf_export("raw_ops.RpcGetValue")(_ops.to_raw_op(rpc_get_value)) +_dispatcher_for_rpc_get_value = rpc_get_value._tf_type_based_dispatcher.Dispatch + + +def rpc_get_value_eager_fallback(status_or: Annotated[Any, _atypes.Resource], Tout, name, ctx): + if not isinstance(Tout, (list, tuple)): + raise TypeError( + "Expected list for 'Tout' argument to " + "'rpc_get_value' Op, not %r." % Tout) + Tout = [_execute.make_type(_t, "Tout") for _t in Tout] + status_or = _ops.convert_to_tensor(status_or, _dtypes.resource) + _inputs_flat = [status_or] + _attrs = ("Tout", Tout) + _result = _execute.execute(b"RpcGetValue", len(Tout), inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RpcGetValue", _inputs_flat, _attrs, _result) + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_server') +def rpc_server(server_address: Annotated[Any, _atypes.String], name=None) -> Annotated[Any, _atypes.Resource]: + r"""TODO: add doc. + + Args: + server_address: A `Tensor` of type `string`. + name: A name for the operation (optional). + + Returns: + A `Tensor` of type `resource`. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcServer", name, server_address) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_server( + (server_address, name,), None) + if _result is not NotImplemented: + return _result + return rpc_server_eager_fallback( + server_address, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server, (), dict(server_address=server_address, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_server( + (server_address, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcServer", server_address=server_address, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server, (), dict(server_address=server_address, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + _result = _outputs[:] + if _execute.must_record_gradient(): + _attrs = () + _inputs_flat = _op.inputs + _execute.record_gradient( + "RpcServer", _inputs_flat, _attrs, _result) + _result, = _result + return _result + +RpcServer = tf_export("raw_ops.RpcServer")(_ops.to_raw_op(rpc_server)) +_dispatcher_for_rpc_server = rpc_server._tf_type_based_dispatcher.Dispatch + + +def rpc_server_eager_fallback(server_address: Annotated[Any, _atypes.String], name, ctx) -> Annotated[Any, _atypes.Resource]: + server_address = _ops.convert_to_tensor(server_address, _dtypes.string) + _inputs_flat = [server_address] + _attrs = None + _result = _execute.execute(b"RpcServer", 1, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + if _execute.must_record_gradient(): + _execute.record_gradient( + "RpcServer", _inputs_flat, _attrs, _result) + _result, = _result + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_server_register') +def rpc_server_register(server: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], captured_inputs, f, output_specs: str, input_specs:str="", name=None): + r"""TODO: add doc. + + Args: + server: A `Tensor` of type `resource`. + method_name: A `Tensor` of type `string`. + captured_inputs: A list of `Tensor` objects. + f: A function decorated with @Defun. + output_specs: A `string`. + input_specs: An optional `string`. Defaults to `""`. + name: A name for the operation (optional). + + Returns: + The created Operation. 
+ """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcServerRegister", name, server, method_name, captured_inputs, + "f", f, "input_specs", input_specs, "output_specs", output_specs) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_server_register( + (server, method_name, captured_inputs, f, output_specs, input_specs, + name,), None) + if _result is not NotImplemented: + return _result + return rpc_server_register_eager_fallback( + server, method_name, captured_inputs, f=f, input_specs=input_specs, + output_specs=output_specs, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server_register, (), dict(server=server, + method_name=method_name, + captured_inputs=captured_inputs, + f=f, output_specs=output_specs, + input_specs=input_specs, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_server_register( + (server, method_name, captured_inputs, f, output_specs, input_specs, + name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ output_specs = _execute.make_str(output_specs, "output_specs") + if input_specs is None: + input_specs = "" + input_specs = _execute.make_str(input_specs, "input_specs") + try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcServerRegister", server=server, method_name=method_name, + captured_inputs=captured_inputs, f=f, + output_specs=output_specs, + input_specs=input_specs, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server_register, (), dict(server=server, + method_name=method_name, + captured_inputs=captured_inputs, f=f, + output_specs=output_specs, + input_specs=input_specs, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + return _op +RpcServerRegister = tf_export("raw_ops.RpcServerRegister")(_ops.to_raw_op(rpc_server_register)) +_dispatcher_for_rpc_server_register = rpc_server_register._tf_type_based_dispatcher.Dispatch + + +def rpc_server_register_eager_fallback(server: Annotated[Any, _atypes.Resource], method_name: Annotated[Any, _atypes.String], captured_inputs, f, output_specs: str, input_specs: str, name, ctx): + output_specs = _execute.make_str(output_specs, "output_specs") + if input_specs is None: + input_specs = "" + input_specs = _execute.make_str(input_specs, "input_specs") + _attr_Tin, captured_inputs = _execute.convert_to_mixed_eager_tensors(captured_inputs, ctx) + server = _ops.convert_to_tensor(server, _dtypes.resource) + method_name = _ops.convert_to_tensor(method_name, _dtypes.string) + _inputs_flat = [server, method_name] + list(captured_inputs) + _attrs = ("Tin", _attr_Tin, "f", f, "input_specs", input_specs, + "output_specs", output_specs) + _result = _execute.execute(b"RpcServerRegister", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + + +@_dispatch.add_fallback_dispatch_list +@_dispatch.add_type_based_api_dispatcher +@tf_export('rpc_server_start') +def rpc_server_start(server: 
Annotated[Any, _atypes.Resource], name=None): + r"""TODO: add doc. + + Args: + server: A `Tensor` of type `resource`. + name: A name for the operation (optional). + + Returns: + The created Operation. + """ + _ctx = _context._context or _context.context() + tld = _ctx._thread_local_data + if tld.is_eager: + try: + _result = pywrap_tfe.TFE_Py_FastPathExecute( + _ctx, "RpcServerStart", name, server) + return _result + except _core._NotOkStatusException as e: + _ops.raise_from_not_ok_status(e, name) + except _core._FallbackException: + pass + try: + _result = _dispatcher_for_rpc_server_start( + (server, name,), None) + if _result is not NotImplemented: + return _result + return rpc_server_start_eager_fallback( + server, name=name, ctx=_ctx) + except _core._SymbolicException: + pass # Add nodes to the TensorFlow graph. + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server_start, (), dict(server=server, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + else: + _result = _dispatcher_for_rpc_server_start( + (server, name,), None) + if _result is not NotImplemented: + return _result + # Add nodes to the TensorFlow graph. 
+ try: + _, _, _op, _outputs = _op_def_library._apply_op_helper( + "RpcServerStart", server=server, name=name) + except (TypeError, ValueError): + _result = _dispatch.dispatch( + rpc_server_start, (), dict(server=server, name=name) + ) + if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED: + return _result + raise + return _op +RpcServerStart = tf_export("raw_ops.RpcServerStart")(_ops.to_raw_op(rpc_server_start)) +_dispatcher_for_rpc_server_start = rpc_server_start._tf_type_based_dispatcher.Dispatch + + +def rpc_server_start_eager_fallback(server: Annotated[Any, _atypes.Resource], name, ctx): + server = _ops.convert_to_tensor(server, _dtypes.resource) + _inputs_flat = [server] + _attrs = None + _result = _execute.execute(b"RpcServerStart", 0, inputs=_inputs_flat, + attrs=_attrs, ctx=ctx, name=name) + _result = None + return _result + diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b6af83c1afbda2a5657df23eff0b1414fa62c79 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..3d5d5967466b81e0ad78984767c190844ca330ed Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2_grpc.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2_grpc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e538c4499fcaad315f5c8e269b605031e3081883 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/__pycache__/tf_rpc_service_pb2_grpc.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2.py b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..39fa8850a62a590e1f1fd5d521d99462ec88a6ce --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: tensorflow/distribute/experimental/rpc/proto/tf_rpc_service.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from tensorflow.core.framework import tensor_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__pb2 +from tensorflow.core.protobuf import struct_pb2 as tensorflow_dot_core_dot_protobuf_dot_struct__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\nAtensorflow/distribute/experimental/rpc/proto/tf_rpc_service.proto\x12\x0etensorflow.rpc\x1a&tensorflow/core/framework/tensor.proto\x1a%tensorflow/core/protobuf/struct.proto\"M\n\x0b\x43\x61llRequest\x12\x0e\n\x06method\x18\x01 \x01(\t\x12.\n\rinput_tensors\x18\x02 \x03(\x0b\x32\x17.tensorflow.TensorProto\"?\n\x0c\x43\x61llResponse\x12/\n\x0eoutput_tensors\x18\x01 \x03(\x0b\x32\x17.tensorflow.TensorProto\"\r\n\x0bListRequest\"\x87\x01\n\x10RegisteredMethod\x12\x0e\n\x06method\x18\x01 \x01(\t\x12\x30\n\x0binput_specs\x18\x02 \x01(\x0b\x32\x1b.tensorflow.StructuredValue\x12\x31\n\x0coutput_specs\x18\x03 \x01(\x0b\x32\x1b.tensorflow.StructuredValue\"L\n\x0cListResponse\x12<\n\x12registered_methods\x18\x01 \x03(\x0b\x32 .tensorflow.rpc.RegisteredMethod2\x96\x01\n\nRpcService\x12\x43\n\x04\x43\x61ll\x12\x1b.tensorflow.rpc.CallRequest\x1a\x1c.tensorflow.rpc.CallResponse\"\x00\x12\x43\n\x04List\x12\x1b.tensorflow.rpc.ListRequest\x1a\x1c.tensorflow.rpc.ListResponse\"\x00\x62\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'tensorflow.distribute.experimental.rpc.proto.tf_rpc_service_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + 
_CALLREQUEST._serialized_start=164 + _CALLREQUEST._serialized_end=241 + _CALLRESPONSE._serialized_start=243 + _CALLRESPONSE._serialized_end=306 + _LISTREQUEST._serialized_start=308 + _LISTREQUEST._serialized_end=321 + _REGISTEREDMETHOD._serialized_start=324 + _REGISTEREDMETHOD._serialized_end=459 + _LISTRESPONSE._serialized_start=461 + _LISTRESPONSE._serialized_end=537 + _RPCSERVICE._serialized_start=540 + _RPCSERVICE._serialized_end=690 +# @@protoc_insertion_point(module_scope) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2_grpc.py b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2_grpc.py new file mode 100644 index 0000000000000000000000000000000000000000..fe854a3a079e616b21c1effaedfbfa409857573d --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/distribute/experimental/rpc/proto/tf_rpc_service_pb2_grpc.py @@ -0,0 +1,63 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + +from tensorflow.distribute.experimental.rpc.proto import tf_rpc_service_pb2 as tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2 + + +class RpcServiceStub(object): + # missing associated documentation comment in .proto file + pass + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.Call = channel.unary_unary( + '/tensorflow.rpc.RpcService/Call', + request_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallRequest.SerializeToString, + response_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallResponse.FromString, + ) + self.List = channel.unary_unary( + '/tensorflow.rpc.RpcService/List', + request_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListRequest.SerializeToString, + response_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListResponse.FromString, + ) + + +class RpcServiceServicer(object): + # missing associated documentation comment in .proto file + pass + + def Call(self, request, context): + """RPC for invoking a registered function on remote server. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def List(self, request, context): + """RPC for listing available methods in a server. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RpcServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Call': grpc.unary_unary_rpc_method_handler( + servicer.Call, + request_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallRequest.FromString, + response_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.CallResponse.SerializeToString, + ), + 'List': grpc.unary_unary_rpc_method_handler( + servicer.List, + request_deserializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListRequest.FromString, + response_serializer=tensorflow_dot_distribute_dot_experimental_dot_rpc_dot_proto_dot_tf__rpc__service__pb2.ListResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'tensorflow.rpc.RpcService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ced4129d8fe2dde869478e28678775b17a3588ff Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/autocast_variable.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/autocast_variable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2650483b212410e87a1c51b98a0ac2b12e3bcb94 Binary 
files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/autocast_variable.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/device_compatibility_check.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/device_compatibility_check.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c6fafaae31a593d6fa5b01f3e14cca33c3ca4e5 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/device_compatibility_check.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/loss_scale.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/loss_scale.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f4a2199e9b5c610a4c60b0d8bdd79e0a0d6e29c3 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/loss_scale.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/test_util.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/test_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25832a6c4ba4fc03909bc97f325686dbf7baa8f4 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/keras/mixed_precision/__pycache__/test_util.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__init__.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..65d2db48c3dbf144d209d32b58e28c9a8e9afedb --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__init__.py @@ -0,0 +1,21 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Public TensorFlow type definitions. + +For details, see +https://github.com/tensorflow/community/blob/master/rfcs/20200211-tf-types.md. +""" + +# Note: this module should contain **type definitions only**. 
diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/__init__.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39c6086516a4edf3a95ae73ece01ee839f6825f8 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/__init__.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/core.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/core.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45c96517242817225dcb44166b5dde58f1848397 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/core.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/data.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..440b50eec63f36857497b654d7639d6557827911 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/data.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/distribute.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/distribute.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..666e1edba336819c24d3d8d9a81a993dbde73904 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/distribute.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/doc_typealias.cpython-310.pyc 
b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/doc_typealias.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c85e16a00fefe2a6d3de7e047f79a549dea11a6 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/doc_typealias.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/internal.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/internal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9be28d5ebd024e6470134bf8075503cf9a8de8a4 Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/internal.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/trace.cpython-310.pyc b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/trace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7476e9cef50ab5930028c4feefd93afb32ea3cd Binary files /dev/null and b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/__pycache__/trace.cpython-310.pyc differ diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/core.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/core.py new file mode 100644 index 0000000000000000000000000000000000000000..534211fd9d29ba1a8bd6ac75659cbabb00532746 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/core.py @@ -0,0 +1,415 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Core TensorFlow types.""" + +import abc +import inspect +import sys +import textwrap +from typing import Union + +import numpy as np + +from tensorflow.python.types import doc_typealias + + +from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import, g-bad-import-order +from tensorflow.python.util.tf_export import tf_export + +# pylint:disable=g-import-not-at-top +if sys.version_info >= (3, 8): + from typing import Protocol + from typing import runtime_checkable +else: + from typing_extensions import Protocol + from typing_extensions import runtime_checkable +# pylint:enable=g-import-not-at-top + +# TODO(mdan): Consider adding ABC once the dependence on isinstance is reduced. +# TODO(mdan): Add type annotations. + + +# TODO(b/178822082): Revisit this API when tf.types gets more resource. +@tf_export("__internal__.types.Tensor", v1=[]) +class Tensor(object): + """The base class of all dense Tensor objects. + + A dense tensor has a static data type (dtype), and may have a static rank and + shape. Tensor objects are immutable. Mutable objects may be backed by a Tensor + which holds the unique handle that identifies the mutable object. + """ + + @property + def dtype(self): + pass + + @property + def shape(self): + pass + + +# `ops.EagerTensor` subclasses `Symbol` by way of subclassing `tensor.Tensor`; +# care should be taken when performing `isinstance` checks on `Value`, e.g.: +# +# ``` +# if isinstance(core.Symbol) and not isinstance(core.Value): +# ... 
+# ``` +class Symbol(Tensor): + """Symbolic "graph" Tensor. + + These objects represent the output of an op definition and do not carry a + value. + """ + pass + + +class Value(Tensor): + """Tensor that can be associated with a value (aka "eager tensor"). + + These objects represent the (usually future) output of executing an op + immediately. + """ + + def numpy(self): + pass + + +@tf_export("types.experimental.FunctionType") +class FunctionType(inspect.Signature, metaclass=abc.ABCMeta): + """Represents the type of a TensorFlow callable. + + FunctionType inherits from inspect.Signature which canonically represents the + structure (and optionally type) information of input parameters and output of + a Python function. Additionally, it integrates with the tf.function type + system (`tf.types.experimental.TraceType`) to provide a holistic + representation of the the I/O contract of the callable. It is used for: + - Canonicalization and type-checking of Python input arguments + - Type-based dispatch to concrete functions + - Packing/unpacking structured python values to Tensors + - Generation of structured placeholder values for tracing + """ + + # The signature of this method changes in Py3.10 so we override to enforce it. + @classmethod + def from_callable(cls, obj, *, follow_wrapped=True): + return super().from_callable(obj, follow_wrapped=follow_wrapped) + + +@tf_export("types.experimental.Callable", v1=[]) +class Callable(metaclass=abc.ABCMeta): + """Base class for TF callables like those created by tf.function. + + Note: Callables are conceptually very similar to `tf.Operation`: a + `tf.Operation` is a kind of callable. + """ + + @property + @abc.abstractmethod + def function_type(self) -> FunctionType: + """Returns a FunctionType describing this callable.""" + + def __call__(self, *args, **kwargs): + """Executes this callable. + + This behaves like a regular op - in eager mode, it immediately starts + execution, returning results. 
In graph mode, it creates ops which return + symbolic TensorFlow values (like `tf.Tensor`, `tf.data.Dataset`, + etc.). For example, `tf.function` callables typically generate a + `tf.raw_ops.PartitionedCall` op, but not always - the + exact operations being generated are an internal implementation detail. + + Args: + *args: positional argument for this call + **kwargs: keyword arguments for this call + Returns: + The execution results. + """ + + +@tf_export("types.experimental.AtomicFunction", v1=[]) +class AtomicFunction(Callable): + """Base class for graph functions. + + An `AtomicFunction` encapsulates a single graph function definition. + + `AtomicFunction` can be called directly only if no captures are needed + according to the `FunctionType`. If captures are present, please use + `call_with_captures` instead. + + `AtomicFunction` does not support gradients. Please use the parent + `ConcreteFunction` if you need gradient support. + """ + + def call_with_captures(self, args, kwargs, captures): + """Calls this AtomicFunction with captures as defined by its FunctionType. + + Args: + args: Tuple containing positional arguments + kwargs: Dict containing keyword arguments + captures: Tuple of tensors supplying captured tensor values. + + Returns: + A structured output value based on the inputs. + """ + + +@tf_export("types.experimental.ConcreteFunction", v1=[]) +class ConcreteFunction(Callable, metaclass=abc.ABCMeta): + """Base class for differentiable graph functions. + + A `ConcreteFunction` encapsulates the original graph function definition with + support for differentiability under `tf.GradientTape` contexts. In the + process, it may generate new graph functions (using the original) to + efficiently perform forwards and backwards passes. 
+ """ + + @property + @abc.abstractmethod + def inference_fn(self) -> AtomicFunction: + """Returns the original `AtomicFunction` owned by this ConcreteFunction.""" + + +# TODO(fmuham): Remove the export as GenericFunction in future release. +@tf_export( + "types.experimental.PolymorphicFunction", + "types.experimental.GenericFunction", # Deprecated + v1=[], +) +class PolymorphicFunction(Callable, metaclass=abc.ABCMeta): + """Base class for polymorphic graph functions. + + Graph functions are Python callable objects that dispatch calls to a + TensorFlow graph. Polymorphic graph functions can be backed by multiple TF + graphs, and automatically select the appropriate specialization based on the + type of input they were called with. They may also create specializations on + the fly if necessary, for example by tracing. + + Also see `tf.function`. + """ + + @abc.abstractmethod + def get_concrete_function(self, *args, **kwargs) -> ConcreteFunction: + """Returns a `ConcreteFunction` specialized to input types. + + The arguments specified by `args` and `kwargs` follow normal function call + rules. The returned `ConcreteFunction` has the same set of positional and + keyword arguments as `self`, but their types are compatible to the types + specified by `args` and `kwargs` (though not neccessarily equal). + + >>> @tf.function + ... def f(x): + ... return x + >>> f_concrete = f.get_concrete_function(tf.constant(1.0)) + >>> f_concrete = f.get_concrete_function(x=tf.constant(1.0)) + + Unlike normal calls, `get_concrete_function` allow type specifiers instead + of TensorFlow objects, so for example `tf.Tensor`s may be replaced with + `tf.TensorSpec`s. + + >>> @tf.function + ... def f(x): + ... return x + >>> f_concrete = f.get_concrete_function(tf.TensorSpec([], tf.float64)) + + If the function definition allows only one specialization, `args` and + `kwargs` may be omitted altogether. + + >>> @tf.function(input_signature=[tf.TensorSpec(None, tf.float32)]) + ... def f(x): + ... 
return x + >>> f_concrete = f.get_concrete_function() + + The returned `ConcreteFunction` can be called normally: + + >>> f_concrete(tf.constant(1.0)) + + >>> f_concrete(x=tf.constant(1.0)) + + + Args: + *args: inputs to specialize on. + **kwargs: inputs to specialize on. + + Returns: + A `ConcreteFunction`. + """ + pass + + def experimental_get_compiler_ir(self, *args, **kwargs): + """Returns compiler IR for the compiled function. + + This API is intended *only* for debugging as there are no guarantees on + backwards compatibility of returned IR or the allowed values of `stage`. + + Args: + *args: compilation args supports inputs either: (1) all inputs are + TensorSpec or (2) all inputs are tf.Tensor/Python variables. + **kwargs: Keyword arguments used for compilation. Same requirement as + compiliation args. + + Returns: + Function callable with the following kwargs: + - `stage` at which the compiler IR should be serialized. Allowed values + are: + - `hlo`: HLO output after conversion from TF + (https://www.tensorflow.org/xla/operation_semantics). + - `hlo_serialized`: Like stage=`hlo`, but the output is a serialized + HLO module proto (a bytes object). + - `optimized_hlo`: HLO after compiler optimizations. + - `optimized_hlo_serialized`: Like stage=`optimized_hlo`, but the + output is a serialized HLO module proto (a bytes object). + - `optimized_hlo_dot`: optimized HLO in DOT format suitable for + Graphviz. + - `device_name` can be either None, in which case the preferred device + is used for compilation, or a device name. It can be a full device + name, or a partial one, e.g., `/device:CPU:0`. 
+ + For example, for + + ```python + @tf.function(jit_compile=True) + def f(x): + return x + 1 + + f.experimental_get_compiler_ir(tf.random.normal([10, 10])(stage='hlo') + ``` + + the output is: + + ``` + HloModule a_inference_f_13__.9 + + ENTRY %a_inference_f_13__.9 (arg0.1: f32[10,10]) -> f32[10,10] { + %arg0.1 = f32[10,10]{1,0} parameter(0), parameter_replication={false} + %reshape.2 = f32[10,10]{1,0} reshape(f32[10,10]{1,0} %arg0.1) + %constant.3 = f32[] constant(1) + %broadcast.4 = f32[10,10]{1,0} broadcast(f32[] %constant.3) + %add.5 = f32[10,10]{1,0} add(f32[10,10]{1,0} %reshape.2, + f32[10,10]{1,0} %broadcast.4) + %reshape.6 = f32[10,10]{1,0} reshape(f32[10,10]{1,0} %add.5) + %tuple.7 = (f32[10,10]{1,0}) tuple(f32[10,10]{1,0} %reshape.6) + ROOT %get-tuple-element.8 = f32[10,10]{1,0} + get-tuple-element((f32[10,10]{1,0}) %tuple.7), index=0 + } + ``` + + Here is another example using tf.TensorSpec inputs: + + ```python + y = tf.Variable(tf.zeros([10, 20], dtype=tf.float32)) + + @tf.function(jit_compile=True) + def f(x): + return x + y + + hlo_str = f.experimental_get_compiler_ir(tf.TensorSpec(shape=(10, + 20)))(stage='hlo') + ``` + + The output is: + + ``` + HloModule a_inference_f_120__.8, + entry_computation_layout={(f32[10,20]{1,0},f32[10,20]{1,0})->f32[10,20]{1,0}} + + ENTRY %a_inference_f_120__.8 (arg0.1: f32[10,20], arg1.2: f32[10,20]) -> + f32[10,20] { + %arg0.1 = f32[10,20]{1,0} parameter(0), parameter_replication={false}, + metadata={op_name="XLA_Args"} + %reshape.3 = f32[10,20]{1,0} reshape(f32[10,20]{1,0} %arg0.1) + %arg1.2 = f32[10,20]{1,0} parameter(1), parameter_replication={false}, + metadata={op_name="XLA_Args"} + %add.4 = f32[10,20]{1,0} add(f32[10,20]{1,0} %reshape.3, f32[10,20]{1,0} + %arg1.2), metadata={op_type="AddV2" op_name="add" + source_file="" source_line=4} + %reshape.5 = f32[10,20]{1,0} reshape(f32[10,20]{1,0} %add.4), + metadata={op_name="XLA_Retvals"} + %tuple.6 = (f32[10,20]{1,0}) tuple(f32[10,20]{1,0} %reshape.5), + 
metadata={op_name="XLA_Retvals"} + ROOT %get-tuple-element.7 = f32[10,20]{1,0} + get-tuple-element((f32[10,20]{1,0}) %tuple.6), index=0, + metadata={op_name="XLA_Retvals"} + } + ``` + + The HLO module accepts a flat list of inputs. To retrieve the order + of these inputs signatures, users can call the + `concrete_fn.structured_input_signature` and `concrete_fn.captured_inputs`: + + ```python + # Use concrete_fn to get the hlo_module flat_args. + concrete_fn = f.get_concrete_function(tf.TensorSpec(shape=(10, 20))) + flat_args = list( + tf.nest.flatten(concrete_fn.structured_input_signature) + ) + concrete_fn.captured_inputs + ``` + + Raises: + ValueError: + (1) If an invalid `stage` is selected + (2) or if applied to a function which is not compiled + (`jit_compile=True` is not set). + (3) or if input shapes are not fully defined for tf.TensorSpec inputs + TypeError: When called with input in graph mode. + """ + pass + + +@runtime_checkable +class TensorProtocol(Protocol): + """Protocol type for objects that can be converted to Tensor.""" + + def __tf_tensor__(self, dtype=None, name=None): + """Converts this object to a Tensor. + + Args: + dtype: data type for the returned Tensor + name: a name for the operations which create the Tensor + Returns: + A Tensor. + """ + pass + + +# TODO(rahulkamat): Add missing types that are convertible to Tensor. +TensorLike = Union[Tensor, TensorProtocol, int, float, bool, str, bytes, + complex, tuple, list, np.ndarray, np.generic] +doc_typealias.document( + obj=TensorLike, + doc=textwrap.dedent("""\ + Union of all types that can be converted to a `tf.Tensor` by `tf.convert_to_tensor`. + + This definition may be used in user code. Additional types may be added + in the future as more input types are supported. 
+ + Example: + + ``` + def foo(x: TensorLike): + pass + ``` + + This definition passes static type verification for: + + ``` + foo(tf.constant([1, 2, 3])) + foo([1, 2, 3]) + foo(np.array([1, 2, 3])) + ``` + """), +) +tf_export("types.experimental.TensorLike").export_constant( + __name__, "TensorLike") diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/data.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/data.py new file mode 100644 index 0000000000000000000000000000000000000000..59c09001086ef3b967fe290a9850525978352991 --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/data.py @@ -0,0 +1,29 @@ +# Copyright 2023 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Dataset types.""" + +import abc + +from tensorflow.python.util.tf_export import tf_export + + +@tf_export("__internal__.types.data.Dataset", v1=[]) +class DatasetV2(abc.ABC): + """Represents the TensorFlow 2 type `tf.data.Dataset`.""" + + +@tf_export(v1=["__internal__.types.data.Dataset"]) +class DatasetV1(DatasetV2, abc.ABC): + """Represents the TensorFlow 1 type `tf.data.Dataset`.""" diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/distribute.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/distribute.py new file mode 100644 index 0000000000000000000000000000000000000000..00ba6c9a9c8379dbcfa3c9ea74b63bf1e352b24b --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/distribute.py @@ -0,0 +1,508 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Types specific to tf.distribute.""" + +from tensorflow.python.util.tf_export import tf_export +from tensorflow.tools.docs import doc_controls + +# TODO(mdan, anjalisridhar): Decide the location of this file. + + +class Iterable(object): + """Interface for distributed objects that admit iteration/reduction.""" + + def __iter__(self): + pass + + # TODO(mdan): Describe this contract. 
+ def reduce(self, initial_state, reduce_func): + """Reduces this iterable object to a single element. + + The transformation calls `reduce_func` successively on each element. + The `initial_state` argument is used for the initial state and the final + state is returned as the result. + + Args: + initial_state: An element representing the initial state of the + reduction. + reduce_func: A function that maps `(old_state, input_element)` to + `new_state`. The structure of `new_state` must match the structure of + `old_state`. For the first element, `old_state` is `initial_state`. + + Returns: + The final state of the transformation. + """ + + +class Iterator(object): + """Interface for distributed iterators.""" + + def get_next(self): + """Unlike __next__, this may use a non-raising mechanism.""" + + def __next__(self): + pass + + def __iter__(self): + pass + + +@tf_export("distribute.DistributedValues", v1=[]) +class DistributedValues(object): + """Base class for representing distributed values. + + A subclass instance of `tf.distribute.DistributedValues` is created when + creating variables within a distribution strategy, iterating a + `tf.distribute.DistributedDataset` or through `tf.distribute.Strategy.run`. + This base class should never be instantiated directly. + `tf.distribute.DistributedValues` contains a value per replica. Depending on + the subclass, the values could either be synced on update, synced on demand, + or never synced. + + Two representative types of `tf.distribute.DistributedValues` are + `tf.types.experimental.PerReplica` and `tf.types.experimental.Mirrored` + values. + + `PerReplica` values exist on the worker devices, with a different value for + each replica. They are produced by iterating through a distributed dataset + returned by `tf.distribute.Strategy.experimental_distribute_dataset` (Example + 1, below) and `tf.distribute.Strategy.distribute_datasets_from_function`. 
They + are also the typical result returned by `tf.distribute.Strategy.run` (Example + 2). + + `Mirrored` values are like `PerReplica` values, except we know that the value + on all replicas are the same. `Mirrored` values are kept synchronized by the + distribution strategy in use, while `PerReplica` values are left + unsynchronized. `Mirrored` values typically represent model weights. We can + safely read a `Mirrored` value in a cross-replica context by using the value + on any replica, while PerReplica values should not be read or manipulated in + a cross-replica context." + + `tf.distribute.DistributedValues` can be reduced via `strategy.reduce` to + obtain a single value across replicas (Example 4), used as input into + `tf.distribute.Strategy.run` (Example 3), or collected to inspect the + per-replica values using `tf.distribute.Strategy.experimental_local_results` + (Example 5). + + Example usages: + + 1. Created from a `tf.distribute.DistributedDataset`: + + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2) + >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset)) + >>> distributed_values = next(dataset_iterator) + >>> distributed_values + PerReplica:{ + 0: , + 1: + } + + 2. Returned by `run`: + + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> @tf.function + ... def run(): + ... ctx = tf.distribute.get_replica_context() + ... return ctx.replica_id_in_sync_group + >>> distributed_values = strategy.run(run) + >>> distributed_values + PerReplica:{ + 0: , + 1: + } + + 3. As input into `run`: + + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2) + >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset)) + >>> distributed_values = next(dataset_iterator) + >>> @tf.function + ... def run(input): + ... 
return input + 1.0 + >>> updated_value = strategy.run(run, args=(distributed_values,)) + >>> updated_value + PerReplica:{ + 0: , + 1: + } + + 4. As input into `reduce`: + + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2) + >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset)) + >>> distributed_values = next(dataset_iterator) + >>> reduced_value = strategy.reduce(tf.distribute.ReduceOp.SUM, + ... distributed_values, + ... axis = 0) + >>> reduced_value + + + 5. How to inspect per-replica values locally: + + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> dataset = tf.data.Dataset.from_tensor_slices([5., 6., 7., 8.]).batch(2) + >>> dataset_iterator = iter(strategy.experimental_distribute_dataset(dataset)) + >>> per_replica_values = strategy.experimental_local_results( + ... distributed_values) + >>> per_replica_values + (, + ) + + """ + + +@tf_export("types.experimental.distributed.PerReplica", v1=[]) +class PerReplica(DistributedValues): + """Holds a distributed value: a map from replica id to unsynchronized values. + + `PerReplica` values exist on the worker devices, with a different value for + each replica. They can be produced many ways, often by iterating through a + distributed dataset returned by + `tf.distribute.Strategy.experimental_distribute_dataset` and + `tf.distribute.Strategy.distribute_datasets_from_function`. They are also the + typical result returned by `tf.distribute.Strategy.run`. + """ + + +@tf_export("types.experimental.distributed.Mirrored", v1=[]) +class Mirrored(DistributedValues): + """Holds a distributed value: a map from replica id to synchronized values. + + `Mirrored` values are `tf.distribute.DistributedValues` for which we know that + the value on all replicas is the same. 
`Mirrored` values are kept synchronized + by the distribution strategy in use, while `tf.types.experimental.PerReplica` + values are left unsynchronized. `Mirrored` values typically represent model + weights. We can safely read a `Mirrored` value in a cross-replica context by + using the value on any replica, while `PerReplica` values should not be read + or manipulated directly by the user in a cross-replica context. + """ + + +@tf_export("distribute.DistributedIterator", v1=[]) +class DistributedIteratorInterface(Iterator): + """An iterator over `tf.distribute.DistributedDataset`. + + `tf.distribute.DistributedIterator` is the primary mechanism for enumerating + elements of a `tf.distribute.DistributedDataset`. It supports the Python + Iterator protocol, which means it can be iterated over using a for-loop or by + fetching individual elements explicitly via `get_next()`. + + You can create a `tf.distribute.DistributedIterator` by calling `iter` on + a `tf.distribute.DistributedDataset` or creating a python loop over a + `tf.distribute.DistributedDataset`. + + Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input) + on distributed input for more examples and caveats. + """ + + def get_next(self): + """Returns the next input from the iterator for all replicas. + + Example use: + + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> dataset = tf.data.Dataset.range(100).batch(2) + >>> dist_dataset = strategy.experimental_distribute_dataset(dataset) + >>> dist_dataset_iterator = iter(dist_dataset) + >>> @tf.function + ... def one_step(input): + ... return input + >>> step_num = 5 + >>> for _ in range(step_num): + ... strategy.run(one_step, args=(dist_dataset_iterator.get_next(),)) + >>> strategy.experimental_local_results(dist_dataset_iterator.get_next()) + (, + ) + + Returns: + A single `tf.Tensor` or a `tf.distribute.DistributedValues` which contains + the next input for all replicas. 
+ + Raises: + `tf.errors.OutOfRangeError`: If the end of the iterator has been reached. + """ + raise NotImplementedError( + "DistributedIterator.get_next() must be implemented in descendants.") + + @property + def element_spec(self): + # pylint: disable=line-too-long + """The type specification of an element of `tf.distribute.DistributedIterator`. + + Example usage: + + >>> global_batch_size = 16 + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size) + >>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset)) + >>> distributed_iterator.element_spec + (PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None), + TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)), + PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None), + TensorSpec(shape=(None, 1), dtype=tf.int32, name=None))) + + Returns: + A nested structure of `tf.TypeSpec` objects matching the structure of an + element of this `tf.distribute.DistributedIterator`. This returned value + is typically a `tf.distribute.DistributedValues` object and specifies the + `tf.TensorSpec` of individual components. + """ + raise NotImplementedError( + "DistributedIterator.element_spec() must be implemented in descendants") + + def get_next_as_optional(self): + # pylint: disable=line-too-long + """Returns a `tf.experimental.Optional` that contains the next value for all replicas. + + If the `tf.distribute.DistributedIterator` has reached the end of the + sequence, the returned `tf.experimental.Optional` will have no value. + + Example usage: + + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> global_batch_size = 2 + >>> steps_per_loop = 2 + >>> dataset = tf.data.Dataset.range(10).batch(global_batch_size) + >>> distributed_iterator = iter( + ... strategy.experimental_distribute_dataset(dataset)) + >>> def step_fn(x): + ... 
# train the model with inputs + ... return x + >>> @tf.function + ... def train_fn(distributed_iterator): + ... for _ in tf.range(steps_per_loop): + ... optional_data = distributed_iterator.get_next_as_optional() + ... if not optional_data.has_value(): + ... break + ... per_replica_results = strategy.run(step_fn, args=(optional_data.get_value(),)) + ... tf.print(strategy.experimental_local_results(per_replica_results)) + >>> train_fn(distributed_iterator) + ... # ([0 1], [2 3]) + ... # ([4], []) + + Returns: + An `tf.experimental.Optional` object representing the next value from the + `tf.distribute.DistributedIterator` (if it has one) or no value. + """ + # pylint: enable=line-too-long + raise NotImplementedError( + "get_next_as_optional() not implemented in descendants") + + +@tf_export("distribute.DistributedDataset", v1=[]) +class DistributedDatasetInterface(Iterable): + # pylint: disable=line-too-long + """Represents a dataset distributed among devices and machines. + + A `tf.distribute.DistributedDataset` could be thought of as a "distributed" + dataset. When you use `tf.distribute` API to scale training to multiple + devices or machines, you also need to distribute the input data, which leads + to a `tf.distribute.DistributedDataset` instance, instead of a + `tf.data.Dataset` instance in the non-distributed case. In TF 2.x, + `tf.distribute.DistributedDataset` objects are Python iterables. + + Note: `tf.distribute.DistributedDataset` instances are *not* of type + `tf.data.Dataset`. It only supports two usages we will mention below: + iteration and `element_spec`. We don't support any other APIs to transform or + inspect the dataset. + + There are two APIs to create a `tf.distribute.DistributedDataset` object: + `tf.distribute.Strategy.experimental_distribute_dataset(dataset)`and + `tf.distribute.Strategy.distribute_datasets_from_function(dataset_fn)`. + *When to use which?* When you have a `tf.data.Dataset` instance, and the + regular batch splitting (i.e. 
re-batch the input `tf.data.Dataset` instance + with a new batch size that is equal to the global batch size divided by the + number of replicas in sync) and autosharding (i.e. the + `tf.data.experimental.AutoShardPolicy` options) work for you, use the former + API. Otherwise, if you are *not* using a canonical `tf.data.Dataset` instance, + or you would like to customize the batch splitting or sharding, you can wrap + these logic in a `dataset_fn` and use the latter API. Both API handles + prefetch to device for the user. For more details and examples, follow the + links to the APIs. + + + There are two main usages of a `DistributedDataset` object: + + 1. Iterate over it to generate the input for a single device or multiple + devices, which is a `tf.distribute.DistributedValues` instance. To do this, + you can: + + * use a pythonic for-loop construct: + + >>> global_batch_size = 4 + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(4).batch(global_batch_size) + >>> dist_dataset = strategy.experimental_distribute_dataset(dataset) + >>> @tf.function + ... def train_step(input): + ... features, labels = input + ... return labels - 0.3 * features + >>> for x in dist_dataset: + ... # train_step trains the model using the dataset elements + ... loss = strategy.run(train_step, args=(x,)) + ... print("Loss is", loss) + Loss is PerReplica:{ + 0: tf.Tensor( + [[0.7] + [0.7]], shape=(2, 1), dtype=float32), + 1: tf.Tensor( + [[0.7] + [0.7]], shape=(2, 1), dtype=float32) + } + + Placing the loop inside a `tf.function` will give a performance boost. + However `break` and `return` are currently not supported if the loop is + placed inside a `tf.function`. We also don't support placing the loop + inside a `tf.function` when using + `tf.distribute.experimental.MultiWorkerMirroredStrategy` or + `tf.distribute.experimental.TPUStrategy` with multiple workers. 
+ + * use `__iter__` to create an explicit iterator, which is of type + `tf.distribute.DistributedIterator` + + >>> global_batch_size = 4 + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> train_dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(50).batch(global_batch_size) + >>> train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset) + >>> @tf.function + ... def distributed_train_step(dataset_inputs): + ... def train_step(input): + ... loss = tf.constant(0.1) + ... return loss + ... per_replica_losses = strategy.run(train_step, args=(dataset_inputs,)) + ... return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses,axis=None) + >>> EPOCHS = 2 + >>> STEPS = 3 + >>> for epoch in range(EPOCHS): + ... total_loss = 0.0 + ... num_batches = 0 + ... dist_dataset_iterator = iter(train_dist_dataset) + ... for _ in range(STEPS): + ... total_loss += distributed_train_step(next(dist_dataset_iterator)) + ... num_batches += 1 + ... average_train_loss = total_loss / num_batches + ... template = ("Epoch {}, Loss: {:.4f}") + ... print (template.format(epoch+1, average_train_loss)) + Epoch 1, Loss: 0.2000 + Epoch 2, Loss: 0.2000 + + + To achieve a performance improvement, you can also wrap the `strategy.run` + call with a `tf.range` inside a `tf.function`. This runs multiple steps in a + `tf.function`. Autograph will convert it to a `tf.while_loop` on the worker. + However, it is less flexible comparing with running a single step inside + `tf.function`. For example, you cannot run things eagerly or arbitrary + python code within the steps. + + + 2. Inspect the `tf.TypeSpec` of the data generated by `DistributedDataset`. + + `tf.distribute.DistributedDataset` generates + `tf.distribute.DistributedValues` as input to the devices. 
If you pass the + input to a `tf.function` and would like to specify the shape and type of + each Tensor argument to the function, you can pass a `tf.TypeSpec` object to + the `input_signature` argument of the `tf.function`. To get the + `tf.TypeSpec` of the input, you can use the `element_spec` property of the + `tf.distribute.DistributedDataset` or `tf.distribute.DistributedIterator` + object. + + For example: + + >>> global_batch_size = 4 + >>> epochs = 1 + >>> steps_per_epoch = 1 + >>> mirrored_strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> dataset = tf.data.Dataset.from_tensors(([2.])).repeat(100).batch(global_batch_size) + >>> dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset) + >>> @tf.function(input_signature=[dist_dataset.element_spec]) + ... def train_step(per_replica_inputs): + ... def step_fn(inputs): + ... return tf.square(inputs) + ... return mirrored_strategy.run(step_fn, args=(per_replica_inputs,)) + >>> for _ in range(epochs): + ... iterator = iter(dist_dataset) + ... for _ in range(steps_per_epoch): + ... output = train_step(next(iterator)) + ... print(output) + PerReplica:{ + 0: tf.Tensor( + [[4.] + [4.]], shape=(2, 1), dtype=float32), + 1: tf.Tensor( + [[4.] + [4.]], shape=(2, 1), dtype=float32) + } + + + Visit the [tutorial](https://www.tensorflow.org/tutorials/distribute/input) + on distributed input for more examples and caveats. + """ + + def __iter__(self): + """Creates an iterator for the `tf.distribute.DistributedDataset`. + + The returned iterator implements the Python Iterator protocol. 
+ + Example usage: + + >>> global_batch_size = 4 + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).repeat().batch(global_batch_size) + >>> distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset)) + >>> print(next(distributed_iterator)) + PerReplica:{ + 0: tf.Tensor([1 2], shape=(2,), dtype=int32), + 1: tf.Tensor([3 4], shape=(2,), dtype=int32) + } + + Returns: + An `tf.distribute.DistributedIterator` instance for the given + `tf.distribute.DistributedDataset` object to enumerate over the + distributed data. + """ + raise NotImplementedError("Must be implemented in descendants") + + @property + def element_spec(self): + """The type specification of an element of this `tf.distribute.DistributedDataset`. + + Example usage: + + >>> global_batch_size = 16 + >>> strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"]) + >>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size) + >>> dist_dataset = strategy.experimental_distribute_dataset(dataset) + >>> dist_dataset.element_spec + (PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None), + TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)), + PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None), + TensorSpec(shape=(None, 1), dtype=tf.int32, name=None))) + + Returns: + A nested structure of `tf.TypeSpec` objects matching the structure of an + element of this `tf.distribute.DistributedDataset`. This returned value is + typically a `tf.distribute.DistributedValues` object and specifies the + `tf.TensorSpec` of individual components. 
+ """ + raise NotImplementedError( + "DistributedDataset.element_spec must be implemented in descendants.") + + @doc_controls.do_not_generate_docs + def reduce(self, initial_state, reduce_func): + raise NotImplementedError( + "DistributedDataset.reduce must be implemented in descendants.") diff --git a/videochat2/lib/python3.10/site-packages/tensorflow/python/types/internal.py b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/internal.py new file mode 100644 index 0000000000000000000000000000000000000000..b600078c7439cc4c4ad525131081b29e8d1dd3ec --- /dev/null +++ b/videochat2/lib/python3.10/site-packages/tensorflow/python/types/internal.py @@ -0,0 +1,61 @@ +# Copyright 2020 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Types internal to TensorFlow. + +These types should not be exported. External code should not rely on these. +""" + + +# TODO(mdan): Is this strictly needed? Only ops.py really uses it. +class NativeObject(object): + """Types natively supported by various TF operations. + + The most notable example of NativeObject is Tensor. + """ + + +class TypeSpec(object): + """Interface for internal isinstance checks to framework/type_spec.py. + + This helps to avoid circular dependencies. + """ + + +class TensorSpec(object): + """Interface for internal isinstance checks to framework/tensor_spec.py. 
+ + This helps to avoid circular dependencies. + """ + + +class IndexedSlices(object): + """Interface for internal isinstance checks to framework/indexed_slices.py. + + This helps to avoid circular dependencies. + """ + + +class RaggedTensor(object): + """Interface for internal isinstance checks to ops/ragged/ragged_tensor.py. + + This helps to avoid circular dependencies. + """ + + +class RaggedTensorSpec(object): + """Interface for internal isinstance checks to ops/ragged/ragged_tensor.py. + + This helps to avoid circular dependencies. + """