Columns (one row per module):
  identifier: string, length 24-448 characters (24-117 shown)
  embedding: list of floats, 2.56k entries per row (constant length)
  tokens: list of strings, 4-448 entries per row
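For concreteness, a minimal sketch of querying rows of this shape, assuming the table has been exported to a JSON Lines file; the `module_embeddings.jsonl` path is hypothetical, and the query identifier is taken from the first row below:

```python
# Minimal sketch: rank modules by embedding similarity to a query module.
# Assumes each JSONL line holds {"identifier": str, "embedding": [float], "tokens": [str]}.
import json
import math

def cosine(a, b):
    # Cosine similarity between two equal-length embedding vectors.
    dot = sum(x * y for x, y in zip(a, b))
    return dot / (math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b)))

with open("module_embeddings.jsonl") as f:  # hypothetical export of this table
    rows = [json.loads(line) for line in f]

query = next(r for r in rows if r["identifier"].endswith(":Emu3VQVAEEncoderConvDownsample"))
ranked = sorted(
    (r for r in rows if r is not query),
    key=lambda r: cosine(query["embedding"], r["embedding"]),
    reverse=True,
)
print(ranked[0]["identifier"])  # nearest neighbor by cosine similarity
```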
emu3/modeling_emu3.py:Emu3VQVAEEncoderConvDownsample
[ -0.000030870887712808326, 0.01970171369612217, 0.037399861961603165, 0.006177655886858702, -0.00008826464909361675, 0.01385798491537571, 0.023931460455060005, -0.056099794805049896, 0.015249349176883698, 0.022484440356492996, 0.024376695975661278, -0.0022400959860533476, 0.002782727824524045...
[ "Conv2d", "F", "ModelVQVAEEncoderConvDownsample", "Module", "__init__", "class", "constant", "conv", "def", "forward", "hidden_states", "in_channels", "kernel_size", "mode", "nn", "pad", "padding", "return", "self", "stride", "super", "value" ]
emu3/modeling_emu3.py:Emu3VQVAEEncoderConvUpsample
[ -0.00021403432765509933, 0.01183026097714901, 0.02377372980117798, 0.012226490303874016, -0.000955194525886327, 0.029207725077867508, 0.02773601934313774, -0.04211346432566643, 0.004103798884898424, 0.009792512282729149, 0.03124547377228737, -0.015169904567301273, -0.00018661670037545264, ...
[ "Conv2d", "F", "ModelVQVAEEncoderConvUpsample", "Module", "__init__", "class", "conv", "def", "forward", "hidden_states", "in_channels", "interpolate", "kernel_size", "mode", "nearest", "nn", "padding", "return", "scale_factor", "self", "stride", "super" ]
emu3/modeling_emu3.py:Emu3VQVAEConv3d
[ -0.00004136830466450192, 0.014082827605307102, 0.0005527509492821991, -0.01735004410147667, -0.0001769155205693096, 0.029742931947112083, 0.025799740105867386, -0.048219598829746246, 0.023321161046624184, 0.01442081481218338, 0.026813702657818794, -0.001844850368797779, 0.003478458384051919,...
[ "Conv3d", "F", "ModelVQVAEConv3d", "Module", "__init__", "class", "conv", "def", "for", "forward", "hidden_states", "in", "in_channel", "kernel_size", "nn", "one_kernel", "one_stride", "out_channel", "pad", "pad_size", "padding", "padding_sizes", "return", "self", "st...
emu3/modeling_emu3.py:Emu3VQVAESpatialNorm
[ -0.00009896596748149022, 0.025671597570180893, -0.0024662669748067856, 0.018609104678034782, -0.0001874222798505798, 0.01793648675084114, 0.01793648675084114, -0.03026782162487507, 0.010817944072186947, 0.03542456403374672, 0.050894781947135925, 0.016927560791373253, 0.00014801105135120451, ...
[ "Conv2d", "F", "GroupNorm", "ModelVQVAESpatialNorm", "Module", "True", "__init__", "affine", "class", "conv_b", "conv_y", "def", "eps", "forward", "hidden_states", "in_channels", "interpolate", "kernel_size", "mode", "nearest", "nn", "norm_layer", "num_channels", "num_g...
emu3/modeling_emu3.py:Emu3VQVAETemporalUpsample
[ -0.00042080366984009743, 0.024238290265202522, 0.02704852819442749, 0.005152100697159767, -0.0018515361007303, 0.02658015489578247, 0.02985876426100731, -0.048710767179727554, 0.0026053234469145536, 0.0063815792091190815, 0.02704852819442749, -0.00427390169352293, -0.0016466230154037476, 0...
[ "F", "ModelVQVAEConv3d", "ModelVQVAETemporalUpsample", "Module", "__init__", "batch_size", "channels", "class", "contiguous", "conv", "def", "forward", "height", "hidden_states", "in_channel", "interpolate", "kernel_size", "mode", "nearest", "nn", "out_channel", "permute", ...
emu3/modeling_emu3.py:Emu3VQVAETemporalDownsample
[ -0.0001377799198962748, 0.03097575157880783, 0.027018994092941284, 0.0004398358869366348, -0.0005405212286859751, 0.023514438420534134, 0.03708046302199364, -0.05268138647079468, 0.017861928790807724, 0.013566022738814354, 0.0212534349411726, 0.0006288416916504502, 0.0019642470870167017, -...
[ "ModelVQVAEConv3d", "ModelVQVAETemporalDownsample", "Module", "__init__", "class", "conv", "def", "forward", "hidden_states", "in_channel", "kernel_size", "nn", "out_channel", "return", "self", "stride", "super" ]
emu3/modeling_emu3.py:Emu3VQVAETemporalResnetBlock
[ -0.00014785994426347315, 0.01835942268371582, 0.013202918693423271, -0.02017270028591156, -0.0005808150744996965, 0.044651929289102554, 0.01665947586297989, -0.03513222932815552, 0.008443067781627178, 0.006516461726278067, 0.024252571165561676, -0.014619540423154831, 0.002209930680692196, ...
[ "BatchNorm3d", "Conv3d", "ModelVQVAEConv3d", "ModelVQVAETemporalResnetBlock", "Module", "None", "__init__", "class", "conv1", "conv2", "def", "else", "forward", "hidden_states", "if", "in_channels", "is", "kernel_size", "nin_shortcut", "nn", "norm1", "norm2", "out_channel...
emu3/modeling_emu3.py:Emu3VQVAEResnetBlock
[ -0.00010209561150986701, 0.001957419328391552, -0.010702436789870262, -0.003098073648288846, 0, 0.031544022262096405, 0.011096736416220665, -0.02602382004261017, 0.004787932150065899, 0.02253144606947899, 0.035374369472265244, -0.004928753711283207, 0.0021123229525983334, -0.00259111612103...
[ "Conv2d", "GroupNorm", "ModelVQVAEResnetBlock", "ModelVQVAESpatialNorm", "Module", "None", "True", "__init__", "affine", "class", "conv1", "conv2", "def", "else", "eps", "forward", "hidden_states", "if", "in_channels", "is", "kernel_size", "nin_shortcut", "nn", "norm1",...
emu3/modeling_emu3.py:Emu3VQVAEAttentionBlock
[ -0.00019631002214737236, 0.014667289331555367, 0.03592917323112488, -0.003169385250657797, -0.0006537745357491076, 0.023876983672380447, 0.03365517407655716, -0.021375585347414017, -0.004832246340811253, 0.020238585770130157, 0.019670085981488228, 0.01853308640420437, -0.0036952474620193243,...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelVQVAEAttentionBlock", "Module", "None", "ValueError", "__init__", "_attn_implementation", "and", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "batch_size", "be", "by", "class", "...
emu3/modeling_emu3.py:Emu3VQVAEGroupNorm
[ -0.00010334557009628043, 0.019785817712545395, -0.021594807505607605, 0.03798877075314522, -0.0001766590867191553, 0.023403797298669815, 0.015150283463299274, -0.021029498428106308, 0.01040168758481741, 0.025778094306588173, 0.04884270578622818, 0.008196981623768806, -0.0006854372913949192, ...
[ "F", "GroupNorm", "ModelVQVAEGroupNorm", "None", "__init__", "bias", "class", "def", "eps", "forward", "group_norm", "input", "kwargs", "nn", "num_groups", "quant_states", "return", "self", "super", "weight" ]
emu3/modeling_emu3.py:Emu3VQVAEMiddleBlock
[ -0.00011152650404255837, 0.0062349047511816025, 0.01794298365712166, -0.008125124499201775, -0.00019131424778606743, 0.036788757890462875, 0.026406655088067055, -0.037240155041217804, 0.005614235531538725, 0.01348545029759407, 0.03656306117773056, 0.025616712868213654, 0.0008957386016845703,...
[ "ModelVQVAEAttentionBlock", "ModelVQVAEGroupNorm", "ModelVQVAEMiddleBlock", "ModelVQVAEResnetBlock", "ModelVQVAESpatialNorm", "Module", "None", "True", "__init__", "affine", "attn_1", "attn_norm", "batch_size", "block_1", "block_2", "channels", "class", "config", "def", "else",...
emu3/modeling_emu3.py:Emu3VQVAEDownBlock
[ -0.0001385996729368344, 0.015028796158730984, 0.012203834019601345, 0.011864839121699333, -0.00029662097222171724, 0.028362615033984184, 0.03186556696891785, -0.03525552153587341, 0.0009322373080067337, -0.002457716502249241, 0.0203397236764431, 0.009209374897181988, -0.0010240485426038504, ...
[ "GroupNorm", "ModelVQVAEDownBlock", "ModelVQVAEEncoderConvDownsample", "ModelVQVAEResnetBlock", "Module", "ModuleList", "None", "True", "__init__", "affine", "and", "append", "attn", "attn_norms", "attn_resolutions", "base_channels", "batch_size", "block", "block_in", "block_ou...
emu3/modeling_emu3.py:Emu3VQVAEUpBlock
[ -0.0004254016967024654, 0.0034032135736197233, 0.015138433314859867, 0.018658999353647232, -0.0017162758158519864, 0.02945540100336075, 0.023939847946166992, -0.035440362989902496, -0.010679049417376518, -0.00586760975420475, 0.027460413053631783, 0.0014082263223826885, -0.000601430016104131...
[ "ModelVQVAEEncoderConvUpsample", "ModelVQVAEResnetBlock", "ModelVQVAESpatialNorm", "ModelVQVAEUpBlock", "Module", "ModuleList", "__init__", "append", "attn", "attn_norms", "attn_resolutions", "base_channels", "batch_size", "block", "block_in", "block_out", "blocks", "channel_multip...
emu3/modeling_emu3.py:Emu3VQVAEEncoder
[ -0.00012637476902455091, 0.014827972277998924, 0.004774157889187336, 0.01550197135657072, -0.00029487445135600865, 0.0354972667992115, 0.020444627851247787, -0.04740457981824875, 0.005083073861896992, 0.032351940870285034, 0.04021525755524635, -0.004774157889187336, -0.0010390813695266843, ...
[ "Conv2d", "GroupNorm", "ModelVQVAEDownBlock", "ModelVQVAEEncoder", "ModelVQVAEMiddleBlock", "ModelVQVAETemporalDownsample", "ModelVQVAETemporalResnetBlock", "Module", "ModuleList", "True", "_", "__init__", "affine", "append", "base_channels", "block_in", "channel_multiplier", "clas...
emu3/modeling_emu3.py:Emu3VQVAEDecoder
[ -0.00036834340426139534, 0.033280909061431885, -0.003177864709869027, 0.002368953777477145, -0.0015167081728577614, 0.021725038066506386, 0.03304979205131531, -0.05500594899058342, -0.003235644195228815, 0.00912913866341114, 0.026231829077005386, -0.011151416227221489, -0.00345231662504375, ...
[ "Conv2d", "ModelVQVAEDecoder", "ModelVQVAEMiddleBlock", "ModelVQVAESpatialNorm", "ModelVQVAETemporalResnetBlock", "ModelVQVAETemporalUpsample", "ModelVQVAEUpBlock", "Module", "ModuleList", "_", "__init__", "append", "base_channels", "block_in", "cat", "channel_multiplier", "chunk", ...
emu3/modeling_emu3.py:Emu3VQVAE
[ -0.0000806528769317083, 0.04623163118958473, -0.013353311456739902, -0.003254168201237917, -0.0004944371758028865, 0.024125730618834496, 0.04106984660029411, -0.05835060402750969, 0.004881252534687519, 0.015709778293967247, 0.031868405640125275, 0.010772419162094593, -0.001585004385560751, ...
[ "BatchNorm2d", "BatchNorm3d", "Conv2d", "Conv3d", "Embedding", "False", "GroupNorm", "Linear", "ModelVQVAE", "ModelVQVAEAttentionBlock", "ModelVQVAEConfig", "ModelVQVAEConv3d", "ModelVQVAEDecoder", "ModelVQVAEEncoder", "ModelVQVAEModelOutput", "ModelVQVAEResnetBlock", "ModelVQVAETemp...
emu3/modeling_emu3.py:Emu3ImageVocabularyMapping
[ -0.00010021620255429298, 0.008439258672297001, 0.012265056371688843, -0.022392166778445244, -0.00021449783525895327, 0.015753284096717834, 0.039383210241794586, -0.0418587252497673, 0.003516357857733965, 0.007989165373146534, 0.04163367673754692, 0.033982083201408386, -0.0005450354656204581,...
[ "ModelImageVocabularyMapping", "bpe2img", "bpe2img_mapping_tensor", "cached_property", "cat", "class", "convert_bpe2img", "convert_img2bpe", "cpu", "def", "device", "dim", "dtype", "eol_row", "eol_token_id", "extra_200", "for", "get", "if", "image", "image_token_id", "image...
emu3/modeling_emu3.py:Emu3PreTrainedModel
[ -0.0003333537024445832, 0.0326976515352726, 0.017856163904070854, 0.014957436360418797, -0.0016740154242143035, 0.03385714069008827, 0.028059687465429306, -0.037567514926195145, -0.0011594911338761449, 0.012580479495227337, 0.012116682715713978, 0.005710494238883257, -0.0042901174165308475, ...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_supports_flex_attn", "_supp...
emu3/modeling_emu3.py:Emu3RotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
emu3/modeling_emu3.py:Emu3TextModel
[ -0.00012416877143550664, 0.05162602663040161, 0.004057941026985645, -0.00498788570985198, -0.0007714314851909876, 0.03133632242679596, 0.04328470304608345, -0.026489337906241417, 0.009750330820679665, 0.011835661716759205, 0.024798529222607613, 0.010257572866976261, -0.0021276010666042566, ...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModelTextModel", "ModuleList", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "auto_docstring", "cac...
emu3/modeling_emu3.py:Emu3ForCausalLM
[ -0.0002878968371078372, 0.03798816353082657, 0.013250363059341908, -0.0042082699947059155, -0.00113026169128716, 0.026273252442479134, 0.040035430341959, -0.01626439392566681, 0.0030993339605629444, 0.021610034629702568, 0.028434256091713905, 0.005260337144136429, -0.00021769976592622697, ...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelPreTrainedModel", "ModelTextConfig", "ModelTextModel", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "cache_position", "can_return_tu...
emu3/modeling_emu3.py:Emu3Model
[ -0.000022677722881780937, 0.03840559348464012, -0.0075918035581707954, -0.0046890550293028355, -0.00019363286264706403, 0.022217189893126488, 0.021658970043063164, -0.024450073018670082, 0.0034191026352345943, 0.014513742178678513, 0.040415190160274506, 0.019761018455028534, -0.0025957268662...
[ "Image", "ModelImageVocabularyMapping", "ModelModel", "ModelPreTrainedModel", "ModelTextModel", "ModelVQVAE", "None", "Tokenizes", "True", "VQGAN", "ValueError", "You", "__init__", "_checkpoint_conversion_mapping", "_from_config", "all", "and", "at", "attention_mask", "auto_doc...
emu3/modeling_emu3.py:Emu3ForConditionalGeneration
[ -0.0001545378618175164, 0.04001125693321228, 0.0041584731079638, 0.007754990831017494, -0.000811323756352067, 0.02056758478283882, 0.022028669714927673, -0.039561692625284195, -0.0033436373341828585, 0.012924985028803349, 0.03551561012864113, 0.009216075763106346, 0, 0.0083169462159276, ...
[ "CausalLMOutputWithPast", "False", "GenerationMixin", "Linear", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_checkpoint_conversion_mapping", "_tied_weights_keys", "and", "attention_mask", "attentions", "auto_docstring", "cache_p...
deepseek_vl/modeling_deepseek_vl.py:DeepseekVLBaseModelOutputWithPast
[ -0.00023066798166837543, 0.013790011405944824, 0.01911146752536297, 0.021514706313610077, -0.001316059147939086, 0.027007821947336197, 0.040282852947711945, -0.04440269246697426, 0.015907149761915207, -0.006065316032618284, 0.009155194275081158, 0.026893382892012596, -0.0026607282925397158, ...
[ "ModelBaseModelOutputWithPast", "ModelOutput", "None", "attentions", "class", "hidden_states", "image_hidden_states", "last_hidden_state", "past_key_values", "r" ]
deepseek_vl/modeling_deepseek_vl.py:DeepseekVLCausalLMOutputWithPast
[ -0.00020158913685008883, 0.02637176588177681, 0.02978190779685974, 0.009434727020561695, -0.0011793408775702119, 0.03387407958507538, 0.03773890808224678, -0.030463937669992447, 0.01784641109406948, -0.004205842036753893, 0.020006166771054268, 0.018642110750079155, -0.0008596400148235261, ...
[ "ModelCausalLMOutputWithPast", "ModelOutput", "None", "attentions", "class", "hidden_states", "image_hidden_states", "logits", "loss", "past_key_values", "r" ]
deepseek_vl/modeling_deepseek_vl.py:DeepseekVLAligner
[ -0.00007678051042603329, 0.027181178331375122, 0.03369567543268204, 0.03773915767669678, -0.000186905701411888, 0.0451522059738636, 0.031449295580387115, -0.04582611843943596, 0.006149460561573505, 0.024822480976581573, 0.025608712807297707, 0.02347465418279171, -0.0002316577738383785, 0.0...
[ "GELU", "Linear", "ModelAligner", "Module", "__init__", "activation", "class", "config", "def", "forward", "hidden_size", "in_features", "linear1", "linear2", "nn", "out_features", "return", "self", "super", "text_config", "vision_config", "vision_encodings", "x" ]
deepseek_vl/modeling_deepseek_vl.py:DeepseekVLPreTrainedModel
[ -0.0003104745992459357, 0.03419552743434906, 0.019523797556757927, 0.01975484937429428, -0.0016245763981714845, 0.03211607038974762, 0.021949833258986473, -0.03304027393460274, 0.0011552543146535754, 0.020794577896595, 0.020794577896595, -0.005227525718510151, -0.004592136014252901, 0.0229...
[ "LlamaDecoderLayer", "ModelConfig", "ModelPreTrainedModel", "PreTrainedModel", "True", "_can_compile_fullgraph", "_no_split_modules", "_skip_keys_device_placement", "_supports_flash_attn", "_supports_sdpa", "base_model_prefix", "causal_mask", "class", "config", "image", "input_modaliti...
deepseek_vl/modeling_deepseek_vl.py:DeepseekVLModel
[ -0.00008953322685556486, 0.04139043018221855, -0.0011960944393649697, 0.018914980813860893, -0.0003372708160895854, 0.026703502982854843, 0.02347682975232601, -0.045618485659360886, 0.005173803772777319, 0.03360190615057945, 0.0389426089823246, 0.02013889141380787, 0.0009805192239582539, 0...
[ "AutoModel", "False", "Image", "ModelAligner", "ModelBaseModelOutputWithPast", "ModelModel", "ModelPreTrainedModel", "None", "True", "ValueError", "You", "__init__", "aligner", "all", "and", "at", "attention_mask", "attentions", "auto_docstring", "both", "cache_position", "...
deepseek_vl/modeling_deepseek_vl.py:DeepseekVLForConditionalGeneration
[ -0.00025467824889346957, 0.03644728660583496, 0.01160200871527195, 0.01822364330291748, -0.0006968280067667365, 0.026146966964006424, 0.02841077372431755, -0.04165404289960861, -0.0030278414487838745, 0.015393884852528572, 0.03825833275914192, 0.0011531264754012227, -0.0005235052667558193, ...
[ "False", "GenerationMixin", "Linear", "ModelCausalLMOutputWithPast", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "None", "True", "__init__", "_can_compile_fullgraph", "_tied_weights_keys", "attention_mask", "attentions", "auto_docstring", "cache_position", ...
instructblip/modeling_instructblip.py:BaseModelOutputWithVisionQformerOutputs
[ -0.00013256042439024895, 0.03642406687140465, 0.007578921038657427, 0.04207997769117355, -0.00032521490356884897, 0.03438793867826462, 0.08099264651536942, -0.005571072455495596, 0.01481848768889904, 0.04185374081134796, 0.028053319081664085, 0.022171171382069588, 0.0017391926376149058, 0....
[ "ModelModelOutputWithPooling", "ModelModelOutputWithVisionQformerOutputs", "None", "class", "qformer_outputs", "r", "vision_outputs" ]
instructblip/modeling_instructblip.py:InstructBlipForConditionalGenerationModelOutput
[ -0.0002488101308699697, 0.017288750037550926, 0.004805590026080608, 0.020701002329587936, -0.0008317367173731327, 0.042084455490112305, 0.06551525741815567, -0.018312424421310425, 0.012966562062501907, 0.004919331520795822, 0.025023190304636955, 0.009213083423674107, -0.0019051746930927038, ...
[ "ModelForConditionalGenerationModelOutput", "ModelOutput", "None", "class", "def", "else", "for", "getattr", "if", "in", "k", "keys", "language_model_outputs", "logits", "loss", "not", "qformer_outputs", "r", "return", "self", "to_tuple", "tuple", "vision_outputs" ]
instructblip/modeling_instructblip.py:InstructBlipVisionEmbeddings
[ -0.00016194451018236578, 0.021517496556043625, 0.01520870253443718, 0.02647440694272518, -0.0006407370092347264, 0.014025802724063396, 0.0342477448284626, -0.013631503097712994, 0.005576523952186108, 0.017687156796455383, 0.023883294314146042, 0.0317692905664444, -0.00001628246536711231, 0...
[ "Conv2d", "False", "ModelVisionEmbeddings", "Module", "Parameter", "_", "__init__", "align_corners", "and", "batch_size", "bicubic", "cat", "class", "class_embedding", "class_embeds", "class_pos_embed", "config", "def", "dim", "dtype", "else", "embed_dim", "embeddings", ...
instructblip/modeling_instructblip.py:eager_attention_forward
[ 0.00004113081013201736, 0.027286089956760406, 0.026380328461527824, -0.009284064173698425, 0.00017513764032628387, 0.03917422145605087, 0.060686077922582626, -0.02343660034239292, 0.0228704996407032, 0.009397284127771854, 0.026719989255070686, 0.027172870934009552, 0.0026182192377746105, -...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "softmax", "torch", "training", ...
instructblip/modeling_instructblip.py:InstructBlipAttention
[ -0.00011519240797497332, 0.03444164991378784, 0.040069371461868286, -0.008835521526634693, -0.00029369667754508555, 0.022623436525464058, 0.02870137430727482, -0.0202597938477993, -0.0011536827078089118, 0.01834636926651001, 0.021610446274280548, 0.011818213388323784, -0.002096325857564807, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelAttention", "Module", "None", "Parameter", "ValueError", "__init__", "_attn_implementation", "_shape", "and", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "be", "bias", "bsz", ...
instructblip/modeling_instructblip.py:InstructBlipMLP
[ -0.00016017387679312378, 0.027564143761992455, 0.04351034387946129, 0.019363241270184517, -0.0006371361087076366, 0.04783859848976135, 0.03325921669602394, -0.028019750490784645, 0.005125564057379961, -0.009169064462184906, 0.04282693564891815, -0.03348701819777489, 0.0012315592030063272, ...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "activation_fn", "class", "config", "def", "fc1", "fc2", "forward", "hidden_act", "hidden_size", "hidden_states", "intermediate_size", "nn", "return", "self", "super" ]
instructblip/modeling_instructblip.py:InstructBlipEncoderLayer
[ -0.00009559374302625656, 0.024923095479607582, 0.034508902579545975, 0.014096773229539394, -0.00018237699987366796, 0.04443303123116493, 0.03044903092086315, -0.016013935208320618, 0.008739999495446682, 0.011728515848517418, 0.0066536772064864635, 0.02255483716726303, -0.0009656290058046579,...
[ "GradientCheckpointingLayer", "LayerNorm", "ModelAttention", "ModelEncoderLayer", "ModelMLP", "_", "__init__", "auto_docstring", "class", "config", "def", "embed_dim", "eps", "forward", "hidden_size", "hidden_states", "kwargs", "layer_norm1", "layer_norm2", "layer_norm_eps", ...
instructblip/modeling_instructblip.py:InstructBlipPreTrainedModel
[ -0.0002935535158030689, 0.03276343643665314, -0.002992813941091299, 0.02073490247130394, -0.0011598943965509534, 0.033221665769815445, 0.031388744711875916, -0.008133579976856709, -0.006128824781626463, 0.013460502959787846, 0.02062034420669079, -0.004696856252849102, -0.0035512815229594707,...
[ "ModelAttention", "ModelConfig", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "ModelQFormerEmbeddings", "ModelQFormerMultiHeadAttention", "ModelQFormerSelfOutput", "ModelVisionEmbeddings", "PreTrainedModel", "True", "_can_compile_fullgraph", "_init_weights", "_no_...
instructblip/modeling_instructblip.py:InstructBlipEncoder
[ -0.0001200952465296723, 0.01638946868479252, 0.020345548167824745, 0.03865654021501541, -0.0004662521241698414, 0.03797835484147072, 0.013563698157668114, -0.020797671750187874, 0.008364280685782433, 0.009325042366981506, 0.0057928296737372875, -0.007177457213401794, -0.0022606162820011377, ...
[ "BaseModelOutput", "False", "ModelEncoder", "ModelEncoderLayer", "Module", "ModuleList", "_", "__init__", "auto_docstring", "class", "config", "def", "encoder_layer", "for", "forward", "gradient_checkpointing", "hidden_states", "in", "inputs_embeds", "kwargs", "last_hidden_st...
instructblip/modeling_instructblip.py:InstructBlipVisionModel
[ 0.00005651518949889578, 0.038581036031246185, 0.0045983209274709225, 0.0536096952855587, 0.0002908998867496848, 0.020187750458717346, 0.031178859993815422, -0.01783251389861107, 0.00361697212792933, 0.032300401479005814, 0.028038542717695236, 0.012785575352609158, 0.00028739505796693265, -...
[ "BaseModelOutputWithPooling", "False", "LayerNorm", "ModelAttention", "ModelEncoder", "ModelEncoderLayer", "ModelPreTrainedModel", "ModelVisionConfig", "ModelVisionEmbeddings", "ModelVisionModel", "None", "__init__", "_can_record_outputs", "attentions", "auto_docstring", "capture_outpu...
instructblip/modeling_instructblip.py:InstructBlipQFormerMultiHeadAttention
[ -0.0001544210099382326, 0.029140567407011986, 0.0460827574133873, 0.0030495943501591682, -0.0009106427314691246, 0.010052366182208061, 0.03252900391817093, -0.001164775574579835, 0.0019201149698346853, 0.03885408863425255, 0.01558681484311819, 0.03975767269730568, -0.0010518276831135154, -...
[ "Dropout", "False", "Linear", "ModelQFormerMultiHeadAttention", "Module", "None", "Softmax", "The", "ValueError", "__init__", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_map", "attention_mask", "attention_probs", "attention_probs_dropout_prob", "...
instructblip/modeling_instructblip.py:InstructBlipQFormerSelfOutput
[ -0.0002137187693733722, 0.054257962852716446, 0.034507155418395996, 0.033372052013874054, -0.0009648382547311485, 0.034507155418395996, 0.039955656975507736, -0.01838868297636509, 0.0018161662155762315, 0.039955656975507736, 0.01782113127410412, 0.011351038701832294, 0.0024972285609692335, ...
[ "Dropout", "LayerNorm", "Linear", "ModelQFormerSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "layer_norm_eps", "nn", "return", "self", "super" ]
instructblip/modeling_instructblip.py:InstructBlipQFormerAttention
[ -0.00010520677460590377, 0.045944418758153915, 0.03530703857541084, 0.017087701708078384, -0.000541063433047384, 0.027272425591945648, 0.060655683279037476, -0.015842901542782784, 0.0076385424472391605, 0.02987518720328808, 0.016635047271847725, 0.02681977115571499, 0.0016620901878923178, ...
[ "False", "ModelQFormerAttention", "ModelQFormerMultiHeadAttention", "ModelQFormerSelfOutput", "Module", "None", "_", "__init__", "attention", "attention_mask", "attention_output", "attn_output", "class", "config", "def", "encoder_attention_mask", "encoder_hidden_states", "forward",...
instructblip/modeling_instructblip.py:InstructBlipQFormerIntermediate
[ -0.00026151392376050353, 0.025678517296910286, 0.033015236258506775, 0.022010156884789467, -0.0011177032720297575, 0.02499070018529892, 0.043791044503450394, -0.017080798745155334, -0.004384836181998253, 0.00905626267194748, 0.026366334408521652, -0.013985620811581612, -0.001834179856814444,...
[ "ACT2FN", "Linear", "ModelQFormerIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "return", "self", "str", "super"...
instructblip/modeling_instructblip.py:InstructBlipQFormerOutput
[ -0.0003384845913387835, 0.05236193537712097, 0.04332602769136429, 0.03730208799242973, -0.0014987445902079344, 0.034058429300785065, 0.0491182766854763, -0.021547168493270874, -0.002273458056151867, 0.041472505778074265, 0.01598660834133625, 0.010541894473135471, 0.0023893029429018497, -0....
[ "Dropout", "LayerNorm", "Linear", "ModelQFormerOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "layer_norm_eps", "nn", "return", "self"...
instructblip/modeling_instructblip.py:InstructBlipQFormerLayer
[ -0.00014255392306949943, 0.02703244611620903, 0.018246902152895927, 0.017345819622278214, -0.0002015114005189389, 0.019147982820868492, 0.04775732383131981, -0.006983382161706686, -0.0007145295385271311, 0.024892378598451614, 0.022527039051055908, 0.026694541797041893, 0.003350896993651986, ...
[ "False", "GradientCheckpointingLayer", "ModelQFormerAttention", "ModelQFormerIntermediate", "ModelQFormerLayer", "ModelQFormerOutput", "None", "True", "__init__", "apply_chunking_to_forward", "attention", "attention_mask", "attention_output", "cat", "chunk_size_feed_forward", "class", ...
instructblip/modeling_instructblip.py:InstructBlipQFormerEncoder
[ -0.00016571264131926, 0.02121121808886528, 0.014972624368965626, 0.034936122596263885, -0.00066285056527704, 0.02790352702140808, 0.053084757179021835, -0.021664934232831, 0.008053457364439964, 0.03561669588088989, 0.015880055725574493, 0.007429597433656454, 0.0014462193939834833, 0.003473...
[ "BaseModelOutputWithPastAndCrossAttentions", "False", "ModelQFormerEncoder", "ModelQFormerLayer", "Module", "ModuleList", "None", "__init__", "attention_mask", "can_return_tuple", "class", "config", "def", "encoder_attention_mask", "encoder_hidden_states", "for", "forward", "gradie...
instructblip/modeling_instructblip.py:InstructBlipQFormerEmbeddings
[ -0.0002655214339029044, 0.024178488180041313, -0.007413215469568968, 0.024862784892320633, -0.00119039136916399, 0.024406585842370987, 0.05155035853385925, -0.004561978857964277, 0.005132226273417473, 0.024406585842370987, 0.023722289130091667, 0.03991731256246567, 0.0019103286322206259, -...
[ "Dropout", "Embedding", "False", "LayerNorm", "ModelQFormerEmbeddings", "Module", "None", "__init__", "arange", "cat", "class", "config", "def", "device", "dim", "dropout", "dtype", "else", "embeddings", "eps", "expand", "forward", "hidden_dropout_prob", "hidden_size", ...
instructblip/modeling_instructblip.py:InstructBlipQFormerModel
[ -0.00019454398716334254, 0.035767797380685806, 0.005489677656441927, 0.029202822595834732, -0.0007852502749301493, 0.023203792050480843, 0.057047370821237564, -0.0014997572870925069, 0.0023062305990606546, 0.035088662058115005, 0.01867622323334217, 0.023882927373051643, 0.002164744073525071,...
[ "BaseModelOutputWithPoolingAndCrossAttentions", "False", "ModelPreTrainedModel", "ModelQFormerEmbeddings", "ModelQFormerEncoder", "ModelQFormerLayer", "ModelQFormerModel", "ModelQFormerMultiHeadAttention", "None", "OutputRecorder", "ValueError", "Wrong", "_", "__init__", "_can_record_out...
instructblip/modeling_instructblip.py:InstructBlipModel
[ -0.00011618296412052587, 0.047856900840997696, -0.015989571809768677, 0.018114060163497925, -0.00019917079771403223, 0.02996646985411644, 0.05076409503817558, -0.036675382405519485, 0.0034942245110869408, 0.039358943700790405, 0.036675382405519485, 0.014535973779857159, 0.0009993482381105423...
[ "AutoModel", "False", "GPU", "Linear", "ModelForConditionalGenerationModelOutput", "ModelModel", "ModelPreTrainedModel", "ModelQFormerModel", "ModelVisionModel", "None", "Parameter", "Please", "The", "True", "__init__", "_hf_hook", "_keep_in_fp32_modules", "_preprocess_accelerate",...
instructblip/modeling_instructblip.py:InstructBlipForConditionalGeneration
[ -0.0002505681477487087, 0.03952624276280403, -0.007905248552560806, 0.02371574379503727, -0.000730529660359025, 0.02224762737751007, 0.05669192224740982, -0.03207272291183472, -0.0018351469188928604, 0.04065556079149246, 0.0413331538438797, 0.0038396918680518866, 0.0000966098959906958, 0.0...
[ "AutoModelForCausalLM", "AutoModelForSeq2SeqLM", "BaseModelOutputWithVisionQformerOutputs", "False", "GPU", "GenerationMixin", "Linear", "ModelConfig", "ModelForConditionalGeneration", "ModelForConditionalGenerationModelOutput", "ModelPreTrainedModel", "ModelQFormerModel", "ModelVisionModel"...
phi/modeling_phi.py:PhiRotaryEmbedding
[ -0.00030098692514002323, 0.04780977591872215, 0.004612714983522892, -0.006150286644697189, -0.0016971309669315815, 0.03643754869699478, 0.03875841200351715, 0.012590681202709675, -0.004351617768406868, 0.02146798186004162, 0.002509433077648282, -0.0033362405374646187, -0.0013707596808671951,...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
phi/modeling_phi.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
phi/modeling_phi.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
phi/modeling_phi.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
phi/modeling_phi.py:eager_attention_forward
[ 0, 0.020139193162322044, 0.013972979038953781, -0.018215786665678024, 0, 0.038468122482299805, 0.0527239553630352, -0.03326360881328583, 0.020704900845885277, 0.007297629024833441, 0.0280591007322073, 0.020704900845885277, 0.002489113714545965, -0.017876362428069115, -0.03484759107232094...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", "p", "query",...
phi/modeling_phi.py:PhiAttention
[ -0.00014382587687578052, 0.04020771384239197, 0.02631572261452675, -0.003176522208377719, -0.0005117730470374227, 0.02981695532798767, 0.035238221287727356, -0.006268337368965149, 0.0031200507655739784, 0.017844997346401215, 0.013327276334166527, 0.028687525540590286, 0.000557656167075038, ...
[ "ALL_ATTENTION_FUNCTIONS", "LayerNorm", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "cache_po...
phi/modeling_phi.py:PhiMLP
[ -0.00016017387679312378, 0.027564143761992455, 0.04351034387946129, 0.019363241270184517, -0.0006371361087076366, 0.04783859848976135, 0.03325921669602394, -0.028019750490784645, 0.005125564057379961, -0.009169064462184906, 0.04282693564891815, -0.03348701819777489, 0.0012315592030063272, ...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "activation_fn", "class", "config", "def", "fc1", "fc2", "forward", "hidden_act", "hidden_size", "hidden_states", "intermediate_size", "nn", "return", "self", "super" ]
phi/modeling_phi.py:PhiDecoderLayer
[ -0.00013115294859744608, 0.04326463118195534, 0.01566089503467083, -0.012280846014618874, -0.0006548845558427274, 0.03267380967736244, 0.04822203516960144, -0.03380049392580986, 0.005492580123245716, -0.004816570319235325, -0.009126133285462856, 0.02230832539498806, -0.00188719411380589, 0...
[ "Dropout", "False", "GradientCheckpointingLayer", "LayerNorm", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "None", "Tensor", "__init__", "attention_mask", "attn_outputs", "cache_position", "class", "config", "def", "eps", "feed_forward_hidden_states", "forward", "hidden_...
phi/modeling_phi.py:PhiPreTrainedModel
[ -0.00034975787275470793, 0.027630871161818504, 0.016904963180422783, 0.011250545270740986, -0.0018799485405907035, 0.029612833634018898, 0.03077869303524494, -0.03241089731454849, -0.00408050836995244, 0.003993069287389517, 0.006383080966770649, 0.004459412768483162, -0.004255387466400862, ...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_supports_flex_attn", "_supp...
phi/modeling_phi.py:PhiModel
[ -0.00013218997628428042, 0.04670007526874542, -0.001988137373700738, -0.007952549494802952, -0.0008424908155575395, 0.04309040680527687, 0.041285574436187744, -0.012239029631018639, 0.012182628735899925, 0.002975155832245946, 0.01037779450416565, -0.004371081944555044, -0.0010998205980286002...
[ "BaseModelOutputWithPast", "Dropout", "DynamicCache", "Embedding", "False", "LayerNorm", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRotaryEmbedding", "ModuleList", "None", "Setting", "True", "ValueError", "You", "__init__", "all_hidden_states", "all_self_att...
phi/modeling_phi.py:PhiForCausalLM
[ -0.00028262686100788414, 0.03526614233851433, 0.008361488580703735, -0.001301150070503354, -0.0012087186332792044, 0.02741658128798008, 0.0389065183699131, -0.007394513580948114, 0.003640376031398773, 0.026734011247754097, 0.025823917239904404, 0.0026734010316431522, 0.0009385344455949962, ...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "cache_position", "can_return_tuple", "class", "colwi...
phi/modeling_phi.py:PhiForSequenceClassification
[ -0.0002121782599715516, 0.015340753830969334, -0.017272552475333214, 0.0278406273573637, -0.0007244244916364551, 0.027954263612627983, 0.015227118507027626, -0.011079433374106884, 0.0032101948745548725, -0.013863496482372284, 0.02909061498939991, 0.008409005589783192, -0.003437465289607644, ...
[ "GenericForSequenceClassification", "ModelForSequenceClassification", "ModelPreTrainedModel", "class", "pass" ]
phi/modeling_phi.py:PhiForTokenClassification
[ -0.00016310509818140417, 0.024394849315285683, -0.0069496952928602695, -0.024848707020282745, -0.0007446102099493146, 0.035854753106832504, 0.0356278270483017, -0.007885776460170746, 0.0015317696379497647, -0.018381234258413315, 0.040166404098272324, 0.024394849315285683, -0.0042832815088331...
[ "GenericForTokenClassification", "ModelForTokenClassification", "ModelPreTrainedModel", "class", "pass" ]
evolla/modeling_evolla.py:create_position_ids_from_input_ids
[ 0.000022455322323367, -0.004883487243205309, 0.011218068189918995, -0.022659381851553917, -0.00028777692932635546, 0.023440739139914513, 0.021431533619761467, -0.019199082627892494, 0.01562715880572796, -0.013673764653503895, 0.020092062652111053, 0.013617953285574913, 0.0016045743832364678,...
[ "Model_position_ids_from_input_ids", "cumsum", "def", "dim", "incremental_indices", "input_ids", "int", "long", "mask", "ne", "padding_idx", "return", "torch", "type_as" ]
evolla/modeling_evolla.py:EvollaSaProtEmbeddings
[ -0.00023799062182661146, 0.05153480917215347, 0.0021615989971905947, -0.015460442751646042, -0.0013241582782939076, 0.03573080152273178, 0.015116877853870392, 0.014773312024772167, -0.0016820390010252595, -0.014773312024772167, 0.021072011440992355, 0.035959843546152115, 0.000418720330344513...
[ "Dropout", "Embedding", "False", "LayerNorm", "ModelSaProtEmbeddings", "Module", "None", "__init__", "absolute", "and", "arange", "attention_mask", "class", "config", "create_position_ids_from_input_ids", "create_position_ids_from_inputs_embeds", "def", "device", "dropout", "dt...
evolla/modeling_evolla.py:rotate_half_esm
[ -0.00010257137182634324, 0.028295550495386124, 0.03757648915052414, 0.023202350363135338, -0.0002635022974573076, 0.0373501256108284, 0.028069185093045235, -0.016750965267419815, 0.016864147037267685, 0.031691014766693115, -0.015505961142480373, -0.005715701263397932, 0.002093870658427477, ...
[ "Model_half_esm", "cat", "chunk", "def", "dim", "return", "torch", "x", "x1", "x2" ]
evolla/modeling_evolla.py:apply_rotary_pos_emb_esm
[ -0.00009793527715373784, 0.04193766787648201, 0.033732470124959946, 0.03099740669131279, -0.0008369014831259847, 0.043533120304346085, 0.041025977581739426, -0.004273539409041405, 0.01755000278353691, 0.011396105401217937, 0.007920293137431145, 0.030541561543941498, 0.0013247972819954157, ...
[ "Model_rotary_pos_emb_esm", "cos", "def", "return", "rotate_half_esm", "shape", "sin", "x" ]
evolla/modeling_evolla.py:EvollaSaProtRotaryEmbedding
[ -0.00025531387655064464, 0.03912415727972984, 0.010701607912778854, 0.008112508803606033, -0.0012370137264952064, 0.029918473213911057, 0.04855998232960701, 0.012139995582401752, 0.0006976182339712977, 0.02761705219745636, -0.002531563164666295, -0.0025171791203320026, -0.0017620254075154662...
[ "ModelSaProtRotaryEmbedding", "Module", "None", "Tensor", "__init__", "_cos_cached", "_seq_len_cached", "_sin_cached", "_update_cos_sin_tables", "apply_rotary_pos_emb_esm", "arange", "cat", "class", "cos", "def", "device", "dim", "dtype", "emb", "float", "forward", "freqs",...
evolla/modeling_evolla.py:eager_attention_forward
[ 0.000042374336771899834, 0.02700657583773136, 0.026102591305971146, -0.010847830213606358, 0.0003637130430433899, 0.03683742135763168, 0.06101904436945915, -0.026328587904572487, 0.02000068686902523, 0.012542803771793842, 0.02395562455058098, 0.026893578469753265, 0.002500085858628154, -0....
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "size", "softmax", "torch", "tra...
evolla/modeling_evolla.py:EvollaSaProtSelfAttention
[ -0.00008063857239903882, 0.04692723974585533, 0.02335081435739994, -0.008291231468319893, -0.0005887056468054652, 0.03090880811214447, 0.03000636026263237, 0.013536703772842884, -0.0018471961375325918, 0.03136003017425537, 0.0031303628347814083, 0.007275978568941355, -0.0015581310726702213, ...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelSaProtRotaryEmbedding", "ModelSaProtSelfAttention", "Module", "None", "The", "ValueError", "__init__", "_attn_implementation", "a", "absolute", "all_head_size", "and", "attention", "attention_head_size", "attention_interface", "...
evolla/modeling_evolla.py:EvollaSaProtSelfOutput
[ -0.00011477639054646716, 0.04565628245472908, 0.030512861907482147, -0.00009491124365013093, -0.0005897740484215319, 0.048594556748867035, 0.021359004080295563, -0.009040848352015018, 0.0027687596157193184, 0.0027687596157193184, 0.017516642808914185, -0.014691377989947796, 0.002231959253549...
[ "Dropout", "Linear", "ModelSaProtSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "nn", "return", "self", "super" ]
evolla/modeling_evolla.py:EvollaSaProtAttention
[ 0.000021721883967984468, 0.052841734141111374, 0.025178860872983932, 0.009145684540271759, 0, 0.047873709350824356, 0.03477618470788002, 0, 0.011121604591608047, 0.020097924396395683, 0.01383143663406372, 0.024840131402015686, 0.0016371904639527202, -0.020549563691020012, -0.005899531301...
[ "False", "LayerNorm", "ModelSaProtAttention", "ModelSaProtSelfAttention", "ModelSaProtSelfOutput", "Module", "None", "_", "__init__", "attention_mask", "attn_output", "class", "config", "def", "encoder_attention_mask", "encoder_hidden_states", "eps", "forward", "hidden_size", "...
evolla/modeling_evolla.py:gelu
[ 0.00008515508670825511, 0.02475174516439438, 0.00823165848851204, 0.023389263078570366, 0.0005428636795841157, 0.06085750088095665, 0.03837655857205391, 0.020210139453411102, 0.007266567088663578, 0.022026782855391502, 0.036332834511995316, -0.05018473044037819, 0.0009650909341871738, 0.02...
[ "Model", "def", "erf", "math", "return", "sqrt", "torch", "x" ]
evolla/modeling_evolla.py:EvollaSaProtIntermediate
[ -0.00016999398940242827, 0.029619894921779633, 0.02118961699306965, 0.030531276017427444, -0.0004521306254900992, 0.03303757682442665, 0.03531602770090103, -0.0027768651489168406, 0, 0.003161354223266244, 0.02096177265048027, -0.027911055833101273, -0.002890787785872817, 0.0117909964174032...
[ "Linear", "ModelSaProtIntermediate", "Module", "__init__", "class", "config", "def", "dense", "forward", "gelu", "hidden_size", "hidden_states", "intermediate_size", "nn", "return", "self", "super" ]
evolla/modeling_evolla.py:EvollaSaProtOutput
[ -0.0002448614395689219, 0.05193207040429115, 0.031799815595149994, 0.009379801340401173, -0.0010938335908576846, 0.045526351779699326, 0.03157103806734085, -0.00760678993538022, -0.0011653258698061109, 0.0033744405955076218, 0.013955313712358475, -0.009951740503311157, 0.0014727432280778885,...
[ "Dropout", "Linear", "ModelSaProtOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "nn", "return", "self", "super" ]
evolla/modeling_evolla.py:EvollaSaProtLayer
[ -0.00016112165758386254, 0.04170040786266327, 0.017790663987398148, -0.00009516801219433546, -0.0005878284573554993, 0.0387541837990284, 0.022096684202551842, 0.009008647873997688, 0.004164374899119139, 0.014107882976531982, 0.012634770944714546, 0.010878367349505424, 0.0022663266863673925, ...
[ "AttributeError", "GradientCheckpointingLayer", "If", "LayerNorm", "ModelSaProtAttention", "ModelSaProtIntermediate", "ModelSaProtLayer", "ModelSaProtOutput", "None", "True", "__init__", "add_cross_attention", "and", "are", "attention", "attention_mask", "attention_output", "attent...
evolla/modeling_evolla.py:EvollaSaProtEncoder
[ -0.00009243736712960526, 0.03845394402742386, 0.012799019925296307, 0.028442267328500748, -0.0005937323439866304, 0.042777169495821, 0.017292898148298264, -0.003782821586355567, 0.008817102760076523, 0.019795818254351616, 0.018885664641857147, 0.003441514214500785, -0.00016265422163996845, ...
[ "BaseModelOutputWithCrossAttentions", "False", "LayerNorm", "ModelSaProtEncoder", "ModelSaProtLayer", "Module", "ModuleList", "None", "_", "__init__", "attention_mask", "can_return_tuple", "class", "config", "def", "emb_layer_norm_after", "encoder_attention_mask", "encoder_hidden_s...
evolla/modeling_evolla.py:EvollaSaProtPooler
[ -0.00029702219762839377, 0.018322331830859184, 0.029086703434586525, 0.01923844963312149, -0.0010735741816461086, 0.031835053116083145, 0.03298019990324974, -0.0007443447248078883, -0.001603204058483243, 0.002734035486355424, 0.01649009995162487, -0.02530772238969803, -0.001159460050985217, ...
[ "Linear", "ModelSaProtPooler", "Module", "Tanh", "__init__", "activation", "class", "config", "def", "dense", "first_token_tensor", "forward", "hidden_size", "hidden_states", "nn", "pooled_output", "return", "self", "super" ]
evolla/modeling_evolla.py:EvollaSaProtPreTrainedModel
[ -0.00024811062030494213, 0.0517796091735363, -0.0031355430837720633, 0.000978059251792729, -0.0012081909226253629, 0.04533592611551285, 0.03336908295750618, 0.0030923932790756226, -0.0011866160202771425, 0.00949292816221714, -0.0008593976963311434, -0.0028047289233654737, -0.0035958061926066...
[ "ModelSaProtLayer", "ModelSaProtPreTrainedModel", "ModelSaProtRotaryEmbedding", "ModelSaProtSelfAttention", "OutputRecorder", "PreTrainedModel", "SaProtConfig", "True", "_can_record_outputs", "_init_weights", "_no_split_modules", "_supports_attention_backend", "_supports_flash_attn", "_sup...
evolla/modeling_evolla.py:EvollaSaProtProteinEncoder
[ -0.000022610232917941175, 0.04969398304820061, 0.0066070412285625935, -0.00299293315038085, -0.00019323478045407683, 0.03839989751577377, 0.018183480948209763, 0.005167045164853334, 0.006155277602374554, 0.014230550266802311, 0.03207520768046379, 0.006917628459632397, 0.0006458806456066668, ...
[ "BaseModelOutputWithPoolingAndCrossAttentions", "ModelSaProtEmbeddings", "ModelSaProtEncoder", "ModelSaProtPreTrainedModel", "ModelSaProtProteinEncoder", "None", "__init__", "attention_mask", "attentions", "batch_size", "capture_outputs", "class", "config", "create_bidirectional_mask", "...
evolla/modeling_evolla.py:EvollaSequenceCompressorAttention
[ -0.00023181048163678497, 0.022536933422088623, 0.04711238294839859, -0.02751997485756874, -0.0008352255681529641, 0.02819947898387909, 0.008437193930149078, -0.038958318531513214, 0.0014793401351198554, 0.013363609090447426, 0.021177923306822777, 0.04484736546874046, -0.004331847652792931, ...
[ "LayerNorm", "Linear", "ModelSequenceCompressorAttention", "Module", "None", "True", "__init__", "amax", "attn", "bool", "bs", "cat", "chunk", "class", "def", "detach", "device", "dim", "dim_head", "forward", "h", "heads", "inner_dim", "k", "keepdim", "kv_input", ...
evolla/modeling_evolla.py:EvollaFeedForward
[ -0.00003138890679110773, 0.03485848754644394, 0.024785742163658142, 0.02942599542438984, 0.00008974575030151755, 0.03146317973732948, 0.0398382730782032, -0.021956320852041245, 0.008205326274037361, -0.0042724283412098885, 0.02172996662557125, 0.0027020988054573536, 0.0021362141706049442, ...
[ "GELU", "LayerNorm", "Linear", "ModelFeedForward", "Module", "__init__", "activation", "class", "def", "dim", "fc1", "fc2", "forward", "inner_dim", "int", "mult", "nn", "norm", "return", "self", "super", "x" ]
evolla/modeling_evolla.py:EvollaSequenceCompressorResampler
[ -0.0005446056602522731, 0.05809127166867256, 0.029408706352114677, 0.006323476787656546, -0.002314574085175991, 0.04961962625384331, 0.0074732000939548016, -0.025535954162478447, -0.009258296340703964, 0.012465418316423893, 0.006989106070250273, 0.00792703777551651, -0.003993774764239788, ...
[ "LayerNorm", "Linear", "ModelFeedForward", "ModelSequenceCompressorAttention", "ModelSequenceCompressorResampler", "Module", "ModuleList", "None", "Parameter", "True", "_", "__init__", "append", "attn", "b", "bs", "cat", "class", "config", "def", "device", "dim", "dim_hea...
evolla/modeling_evolla.py:EvollaProteinEncoderModelOutput
[ -0.00007506469410145655, 0.02944291941821575, 0.03888263180851936, 0.0019385127816349268, -0.0004003450449090451, 0.04180445149540901, 0.047198571264743805, -0.017193766310811043, 0.010394923388957977, 0.0045231967233121395, 0.010732056573033333, 0.04337773472070694, -0.0045231967233121395, ...
[ "ModelOutput", "ModelProteinEncoderModelOutput", "None", "attentions", "class", "hidden_states", "last_hidden_state", "sequence_compressor_output" ]
evolla/modeling_evolla.py:EvollaProteinEncoder
[ -0.00023408101696986705, 0.05123336613178253, 0.016353506594896317, -0.00027875296655111015, -0.0009577666060067713, 0.04025478661060333, 0.028590047731995583, -0.001465239911340177, 0.0009005864849314094, 0.020356113091111183, 0.021156635135412216, 0.0174971092492342, -0.0012436670949682593...
[ "ModelProteinEncoder", "ModelProteinEncoderModelOutput", "ModelSaProtProteinEncoder", "ModelSequenceCompressorResampler", "Module", "__init__", "attention_mask", "can_return_tuple", "class", "config", "def", "forward", "input_ids", "kwargs", "last_hidden_state", "model", "nn", "pro...
evolla/modeling_evolla.py:EvollaSequenceAlignerCrossAttention
[ -0.0003111136320512742, 0.03821122646331787, 0.014732039533555508, 0.00019691874331329018, -0.0012228743871673942, 0.04051310941576958, 0.0041146124713122845, -0.030384831130504608, -0.005265553016215563, 0.02393956482410431, 0.03890179097652435, 0.03176596015691757, -0.0009279458317905664, ...
[ "Dropout", "Linear", "ModelFeedForward", "ModelRMSNorm", "ModelSequenceAlignerCrossAttention", "Module", "None", "Parameter", "Softmax", "T", "True", "_", "__init__", "aligner_attention_probs_dropout_prob", "aligner_enable_bias", "aligner_ffn_mult", "all_head_size", "amax", "and"...
evolla/modeling_evolla.py:EvollaRMSNorm
[ -0.00009835156379267573, 0.04154934734106064, 0.0313878208398819, 0.048549506813287735, -0.00046573654981330037, 0.03861379250884056, 0.02461347170174122, -0.03003295138478279, 0.008129219524562359, 0.04358164966106415, 0.01964561454951763, 0.004770271480083466, 0.0023992490023374557, 0.01...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "dtype", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "input_dtype", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", ...
evolla/modeling_evolla.py:EvollaRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
evolla/modeling_evolla.py:EvollaMLP
[ -0.0002434849739074707, 0.024471141397953033, 0.0228551235049963, 0.026548879221081734, -0.0009450823999941349, 0.060485273599624634, 0.036244992166757584, -0.005078915972262621, 0, -0.005021201446652412, 0.028164898976683617, -0.04640282690525055, -0.0015583038330078125, 0.009176678024232...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
evolla/modeling_evolla.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
evolla/modeling_evolla.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
evolla/modeling_evolla.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
evolla/modeling_evolla.py:EvollaAttention
[ -0.00009043229511007667, 0.03348980098962784, 0.028994524851441383, -0.007192440330982208, -0.0003863127203658223, 0.030792634934186935, 0.04225558787584305, -0.007361013442277908, 0.003174788085743785, 0.009102932177484035, 0.014047735370695591, 0.028320234268903732, -0.0009271505405195057,...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "cache_position", "cla...
evolla/modeling_evolla.py:EvollaDecoderLayer
[ -0.0002458144736010581, 0.04560036584734917, 0.008493068628013134, -0.003377277171239257, -0.0008015689090825617, 0.04218033701181412, 0.03579628840088844, -0.03374427184462547, -0.0007053806912153959, -0.001995016122236848, 0.004474536050111055, 0.020862167701125145, -0.003733529942110181, ...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "ModelSequenceAlignerCrossAttention", "None", "Tensor", "_", "__init__", "adapter", "aligner_num_add_layers", "attention_mask", "cache_position", "class", "config", "def", "e...
evolla/modeling_evolla.py:EvollaPreTrainedModel
[ -0.0003567451785784215, 0.03407369181513786, -0.001955760410055518, 0.011126103810966015, -0.0015211469726637006, 0.03523266315460205, 0.03430548682808876, -0.041954681277275085, -0.0025352449156343937, 0.004230237565934658, 0.013559939339756966, 0.004577928222715855, -0.005273309536278248, ...
[ "False", "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "ModelSequenceAlignerCrossAttention", "ModelSequenceCompressorResampler", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_init_weights", "_no_split_modules", "_skip_keys_dev...
evolla/modeling_evolla.py:EvollaModel
[ -0.00028458485030569136, 0.05579303950071335, 0.0048415446653962135, -0.012103862129151821, -0.001462550018914044, 0.04933764785528183, 0.02662849612534046, -0.010432376526296139, 0.0016066436655819416, 0.013544797897338867, 0.023054976016283035, 0.0015850295312702656, -0.0009798364480957389...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelProteinEncoder", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "au...
evolla/modeling_evolla.py:EvollaForProteinText2Text
[ -0.00023476459318771958, 0.06966105848550797, 0.016390837728977203, -0.017984390258789062, -0.0010528835700824857, 0.04803425818681717, 0.027887187898159027, -0.009504408575594425, -0.0026891217567026615, 0.034375227987766266, 0.049855463206768036, 0.011724001727998257, 0.0002507712633814662...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelForProteinText2Text", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "attention_mask", "attentions", "auto_docstring", "can_return_tuple", "class", "config", "def", "else", "forward", "get_input_embeddings", ...
prompt_depth_anything/modeling_prompt_depth_anything.py:PromptDepthAnythingLayer
[ 0.00008722027996554971, 0.0015386552549898624, 0.009503879584372044, -0.0018106036586686969, 0.0007478580228053033, 0.057481296360492706, 0.01843523606657982, -0.02862614393234253, 0.023931456729769707, -0.0072996667586266994, 0.03480939194560051, -0.038015518337488174, 0.0040076603181660175...
[ "Conv2d", "ModelLayer", "Model_depth", "Module", "ReLU", "__init__", "activation1", "activation2", "class", "config", "convolution1", "convolution2", "convolution3", "def", "forward", "fusion_hidden_size", "hidden_state", "kernel_size", "nn", "padding", "return", "self", ...
prompt_depth_anything/modeling_prompt_depth_anything.py:PromptDepthAnythingPreActResidualLayer
[ 0, 0.017853641882538795, 0.02892741933465004, -0.011808263137936592, 0.00046611562720499933, 0.04384312033653259, -0.008022839203476906, -0.021469568833708763, 0.013672725297510624, -0.014802702702581882, 0.02158256620168686, -0.03322133421897888, 0.0038984217680990696, -0.0126557452604174...
[ "Conv2d", "ModelPreActResidualLayer", "Module", "ReLU", "__init__", "activation1", "activation2", "class", "config", "convolution1", "convolution2", "def", "forward", "fusion_hidden_size", "hidden_state", "kernel_size", "nn", "padding", "residual", "return", "self", "stride...
prompt_depth_anything/modeling_prompt_depth_anything.py:PromptDepthAnythingFeatureFusionLayer
[ -0.0002046592126134783, 0.017276061698794365, 0.031390488147735596, -0.000988009967841208, -0.00038814678555354476, 0.04019789397716522, 0.022808916866779327, -0.05419940501451492, 0.0052505675703287125, -0.0038955823983997107, 0.006210348568856716, -0.011743204668164253, 0.00211716420017182...
[ "Conv2d", "False", "ModelFeatureFusionLayer", "ModelLayer", "ModelPreActResidualLayer", "Model_depth", "Model_depth_layer", "Module", "None", "True", "__init__", "align_corners", "bilinear", "class", "config", "def", "else", "forward", "functional", "fusion_hidden_size", "hid...