Columns:
  identifier   string   lengths 24 to 117
  embedding    list     lengths 2.56k to 2.56k (constant)
  tokens       list     lengths 4 to 448
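Each row below pairs a code object (formatted as `<modeling_file>:<object_name>`) with a fixed-length embedding vector and a sorted list of the distinct source tokens appearing in that object; model-specific class prefixes in the token lists appear normalized to `Model` (e.g. `Kosmos2TextModel` becomes `ModelTextModel`). A minimal sketch of loading and querying a dataset with this schema, assuming the Hugging Face `datasets` library and a hypothetical dataset path (the real name is not given in this dump):

```python
# Sketch only: "user/transformers-module-embeddings" is a hypothetical path.
import numpy as np
from datasets import load_dataset

ds = load_dataset("user/transformers-module-embeddings", split="train")

# Map each identifier ("<modeling_file>:<object_name>") to its embedding vector.
index = {
    row["identifier"]: np.asarray(row["embedding"], dtype=np.float32)
    for row in ds
}

def cosine(a: np.ndarray, b: np.ndarray) -> float:
    """Cosine similarity between two embedding vectors."""
    return float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b)))

# Helpers copied verbatim across modeling files (such as repeat_kv below)
# carry identical token lists and should score at or near 1.0.
print(cosine(index["lightglue/modeling_lightglue.py:repeat_kv"],
             index["helium/modeling_helium.py:repeat_kv"]))
```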
kosmos2/modeling_kosmos2.py:Kosmos2TextModel
[ 0.00007629470201209188, 0.05625055730342865, 0.022544866427779198, 0.006026845425367355, 0.00009286154818255454, 0.022879691794514656, 0.025335073471069336, -0.010658588260412216, 0.004603840410709381, 0.0040178969502449036, 0.02477703057229519, 0.022544866427779198, -0.0017857319908216596, ...
[ "ModelPreTrainedModel", "ModelTextConfig", "ModelTextModel", "ModelTextTransformer", "None", "__init__", "attention_mask", "auto_docstring", "cache_position", "can_return_tuple", "class", "config", "def", "embed_tokens", "encoder_attention_mask", "encoder_hidden_states", "forward", ...
kosmos2/modeling_kosmos2.py:Kosmos2TextForCausalLM
[ -0.00023601941938977689, 0.05474231019616127, 0.026235422119498253, 0.004003457259386778, -0.0010576510103419423, 0.03202765807509422, 0.02123819850385189, -0.02509968914091587, 0.005195976700633764, 0.007950128056108952, 0.02646256983280182, 0.013344857841730118, 0.00022182277461979538, 0...
[ "CausalLMOutputWithCrossAttentions", "False", "GenerationMixin", "Linear", "ModelPreTrainedModel", "ModelTextConfig", "ModelTextForCausalLM", "ModelTextTransformer", "None", "__init__", "_tied_weights_keys", "and", "attention_mask", "attentions", "auto_docstring", "batch_size", "bool...
kosmos2/modeling_kosmos2.py:Kosmos2ImageToTextProjection
[ -0.00017867863061837852, 0.03542741760611534, 0.03766965866088867, 0.014798793941736221, -0.000508007884491235, 0.028924915939569473, 0.03251250460743904, -0.023319313302636147, -0.003587586572393775, 0.027691682800650597, 0.02500099316239357, 0.03430629521608353, -0.0030270260758697987, 0...
[ "False", "Linear", "ModelImageToTextProjection", "ModelTextAttention", "Module", "None", "Parameter", "__init__", "add_inner_attn_layernorm", "attention_dropout", "attention_heads", "attention_mask", "attn_weights", "cat", "class", "config", "def", "dense", "dim", "dropout", ...
kosmos2/modeling_kosmos2.py:Kosmos2Model
[ -0.000021627665773849003, 0.05408698692917824, 0.009459658525884151, 0.0247064009308815, 0, 0.025596722960472107, 0.02849026583135128, -0.02348221093416214, 0.0034778155386447906, 0.02704349346458912, 0.044961199164390564, 0.031161228194832802, -0.0015302388928830624, 0.01969834789633751, ...
[ "False", "FutureWarning", "ModelConfig", "ModelImageToTextProjection", "ModelModel", "ModelModelOutput", "ModelOutput", "ModelPreTrainedModel", "ModelTextModel", "ModelVisionModel", "None", "Please", "True", "__init__", "a", "access", "and", "attention_mask", "attentions", "aut...
kosmos2/modeling_kosmos2.py:Kosmos2ForConditionalGeneration
[ -0.00009770026372279972, 0.044439662247896194, 0.0018563049379736185, 0.026462813839316368, -0.0004169707535766065, 0.019986681640148163, 0.031487397849559784, -0.024452978745102882, -0.0021773201879113913, 0.026574470102787018, 0.03796352818608284, 0.011389059014618397, -0.00092117389431223...
[ "GenerationMixin", "Make", "ModelConfig", "ModelForConditionalGeneration", "ModelForConditionalGenerationModelOutput", "ModelImageToTextProjection", "ModelPreTrainedModel", "ModelTextForCausalLM", "ModelVisionModel", "None", "ValueError", "__init__", "_tied_weights_keys", "allowed", "alo...
lightglue/modeling_lightglue.py:LightGlueKeypointMatchingOutput
[ -0.00008462519326712936, 0.003723508445546031, 0.039266087114810944, -0.024033553898334503, -0.0003208705165889114, 0.050549447536468506, 0.053031787276268005, -0.02267955057322979, 0.016248036175966263, 0.00016660583787597716, 0.017940539866685867, 0.029788067564368248, -0.00218615075573325...
[ "ModelKeypointMatchingOutput", "ModelOutput", "None", "attentions", "class", "hidden_states", "keypoints", "loss", "mask", "matches", "matching_scores", "prune", "r" ]
lightglue/modeling_lightglue.py:LightGluePositionalEncoder
[ -0.00022434598940890282, 0.02304113283753395, 0.026105716824531555, -0.002922705141827464, -0.0009151188423857093, 0.02383565343916416, 0.04744430258870125, -0.020884573459625244, 0.014982410706579685, 0.022814124822616577, 0.005164391826838255, 0.020771069452166557, 0.0025112563744187355, ...
[ "False", "Linear", "ModelPositionalEncoder", "Module", "__init__", "class", "config", "cos", "cosines", "def", "descriptor_dim", "dim", "else", "embeddings", "forward", "if", "keypoints", "nn", "num_attention_heads", "output", "output_hidden_states", "projected_keypoints", ...
lightglue/modeling_lightglue.py:rotate_half
[ 0.00004134093614993617, 0.011483984999358654, 0.030173607170581818, -0.007937460206449032, 0.0003535969590302557, 0.03129948675632477, 0.01598750799894333, -0.02792184427380562, 0.018577033653855324, 0.03422677889466286, -0.00529163982719183, 0.0008197819697670639, -0.0004046134417876601, ...
[ "Model_half", "def", "dim", "flatten", "return", "rot_x", "stack", "torch", "x", "x1", "x2" ]
lightglue/modeling_lightglue.py:apply_rotary_pos_emb
[ -0.00014119588013272732, 0.029553454369306564, 0.02216508984565735, -0.000987483188509941, -0.0007423884235322475, 0.02387009747326374, 0.04978620260953903, -0.006393775809556246, 0.012673884630203247, 0.03773748502135277, 0.006308525800704956, 0.002358592813834548, -0.0006713464972563088, ...
[ "Model_rotary_pos_emb", "cos", "def", "float", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
lightglue/modeling_lightglue.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
lightglue/modeling_lightglue.py:eager_attention_forward
[ 0, 0.020139193162322044, 0.013972979038953781, -0.018215786665678024, 0, 0.038468122482299805, 0.0527239553630352, -0.03326360881328583, 0.020704900845885277, 0.007297629024833441, 0.0280591007322073, 0.020704900845885277, 0.002489113714545965, -0.017876362428069115, -0.03484759107232094...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", "p", "query",...
lightglue/modeling_lightglue.py:LightGlueAttention
[ -0.0000905400374904275, 0.03465485945343971, 0.02857900783419609, -0.005597659852355719, -0.00048170817899517715, 0.03420479595661163, 0.04388115182518959, -0.0014838017523288727, 0.0027144430205225945, 0.011307835578918457, 0.013783182948827744, 0.02554108202457428, -0.001195480115711689, ...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "class", "config", "contiguous", "co...
lightglue/modeling_lightglue.py:LightGlueMLP
[ -0.0002443415578454733, 0.04070442542433739, 0.03311546519398689, 0.038864679634571075, -0.0010851639090105891, 0.0515129454433918, 0.024261677637696266, -0.020812150090932846, 0.00385197252035141, 0.0009773662313818932, 0.040244489908218384, -0.01747760735452175, 0.0017678829608485103, -0...
[ "ACT2FN", "LayerNorm", "Linear", "ModelMLP", "Module", "True", "__init__", "activation_fn", "class", "config", "def", "elementwise_affine", "fc1", "fc2", "forward", "hidden_act", "hidden_size", "hidden_states", "intermediate_size", "layer_norm", "nn", "return", "self", ...
lightglue/modeling_lightglue.py:LightGlueTransformerLayer
[ -0.0002727992250584066, 0.036515869200229645, 0.02533288300037384, -0.01643214002251625, -0.0008273126441054046, 0.058881837874650955, 0.02076840028166771, -0.03263605758547783, 0.005705604329705238, -0.0029098582454025745, 0.010155975818634033, 0.023849425837397575, 0.0011411209125071764, ...
[ "False", "ModelAttention", "ModelMLP", "ModelTransformerLayer", "Module", "None", "__init__", "all_attentions", "all_hidden_states", "attention_mask", "attention_output", "batch_size", "cat", "class", "config", "cross_attention", "cross_attention_hidden_states", "cross_attention_ou...
lightglue/modeling_lightglue.py:sigmoid_log_double_softmax
[ -0.0000668563152430579, 0.007424150127917528, 0.02006220817565918, -0.021309010684490204, 0.0001912710431497544, 0.029243217781186104, 0.025729497894644737, -0.024029310792684555, 0.029696600511670113, 0.013488151133060455, 0.04261802136898041, -0.0022102431394159794, -0.00264945812523365, ...
[ "Model_log_double_softmax", "batch_size", "certainties", "contiguous", "def", "functional", "logModel", "log_softmax", "matchability0", "matchability1", "new_full", "nn", "num_keypoints_0", "num_keypoints_1", "return", "scores", "scores0", "scores1", "shape", "similarity", "s...
lightglue/modeling_lightglue.py:LightGlueMatchAssignmentLayer
[ -0.0001407848612871021, 0.004392487462610006, 0.023313971236348152, -0.019822508096694946, 0.00009942930773831904, 0.049556270241737366, 0.019371995702385902, -0.045051153749227524, 0.01171329990029335, 0.011262788437306881, 0.04054603725671768, 0.008334463462233543, -0.0011122003197669983, ...
[ "Linear", "ModelMatchAssignmentLayer", "Module", "None", "__init__", "batch_size", "class", "config", "def", "descriptor_dim", "descriptors", "device", "dtype", "final_projection", "finfo", "forward", "functional", "get_matchability", "if", "is", "m_descriptors", "m_descrip...
lightglue/modeling_lightglue.py:LightGlueTokenConfidenceLayer
[ -0.00010110315633937716, 0.02678266540169716, 0.02093099057674408, -0.02667013369500637, -0.0005767275579273701, 0.03556017950177193, 0.04388756304979324, -0.026445068418979645, 0.006751932669430971, 0.008102319203317165, 0.041411854326725006, 0.01451665535569191, 0.0007279427372850478, -0...
[ "Linear", "ModelTokenConfidenceLayer", "Module", "__init__", "class", "config", "def", "descriptor_dim", "descriptors", "detach", "forward", "functional", "nn", "return", "self", "sigmoid", "squeeze", "super", "token" ]
lightglue/modeling_lightglue.py:LightGluePreTrainedModel
[ -0.0003129331744275987, 0.03314933553338051, 0.010474269278347492, 0.030617093667387962, -0.0014819364296272397, 0.02071833424270153, 0.02992648258805275, -0.02267506532371044, -0.0006726264837197959, 0.024631798267364502, 0.013236713595688343, 0.0035537697840481997, -0.0027768323197960854, ...
[ "False", "Model", "ModelConfig", "ModelPreTrainedModel", "PreTrainedModel", "True", "_supports_flash_attn", "_supports_sdpa", "base_model_prefix", "class", "config", "image", "input_modalities", "main_input_name", "pixel_values", "supports_gradient_checkpointing" ]
lightglue/modeling_lightglue.py:get_matches_from_scores
[ 0.00006572658458026126, 0.006407253909856081, 0.014263104647397995, -0.020280351862311363, 0.0004666152526624501, 0.02239753119647503, 0.0212832260876894, -0.00930444709956646, 0.01983463019132614, -0.011477341875433922, 0.011143051087856293, 0.03209198638796806, -0.0007347448845393956, -0...
[ "Model_matches_from_scores", "None", "_", "arange", "batch_size", "def", "device", "exp", "gather", "indices", "indices0", "indices1", "matches", "matches0", "matches1", "matching_scores", "matching_scores0", "matching_scores1", "max", "max0", "max1", "mutual0", "mutual1"...
lightglue/modeling_lightglue.py:normalize_keypoints
[ 0.00004223346331855282, 0.0050961715169250965, 0.014415688812732697, -0.03896740823984146, 0.00036250389530323446, -0.0008904222049750388, 0.04685099050402641, -0.0450490266084671, 0.023425495252013206, 0.05248211696743965, 0.009854475036263466, -0.008221447467803955, 0.0026607082691043615, ...
[ "Model_keypoints", "None", "def", "device", "dtype", "height", "keypoints", "max", "return", "scale", "shift", "size", "tensor", "torch", "values", "width" ]
lightglue/modeling_lightglue.py:LightGlueForKeypointMatching
[ -0.00008059633546508849, 0.01369873434305191, 0.015671802684664726, -0.03630446270108223, 0, 0.043745752424001694, 0.04464772716164589, -0.04825562238693237, 0.014544335193932056, 0.012289399281144142, 0.019505193457007408, 0.019617941230535507, -0.0006165839731693268, -0.02006892673671245...
[ "AutoModelForKeypointDetection", "False", "Identity", "Input", "Linear", "ModelForKeypointMatching", "ModelKeypointMatchingOutput", "ModelMatchAssignmentLayer", "ModelPositionalEncoder", "ModelPreTrainedModel", "ModelTokenConfidenceLayer", "ModelTransformerLayer", "ModuleList", "None", "...
big_bird/modeling_big_bird.py:BigBirdEmbeddings
[ -0.00030190128018148243, 0.02104683220386505, 0.011270980350673199, -0.00048160439473576844, -0.0014879419468343258, 0.03565310314297676, 0.03105270117521286, -0.01598639041185379, 0.0046579050831496716, -0.005779252853244543, 0.016101401299238205, 0.024727150797843933, -0.000736782851163297...
[ "Dropout", "Embedding", "False", "LayerNorm", "ModelEmbeddings", "Module", "None", "__init__", "arange", "buffered_token_type_ids", "buffered_token_type_ids_expanded", "class", "config", "def", "device", "dropout", "dtype", "else", "embeddings", "eps", "expand", "forward", ...
big_bird/modeling_big_bird.py:BigBirdSelfAttention
[ 0.00003463905522949062, 0.04127058386802673, 0.03145487606525421, -0.0030534653924405575, -0.00009847077308222651, 0.011990777216851711, 0.021416086703538895, -0.018850617110729218, 0.0006936525460332632, 0.019966037943959236, 0.01115421112626791, 0.01806982234120369, -0.00009759935346664861...
[ "Dropout", "False", "Linear", "ModelSelfAttention", "Module", "None", "The", "ValueError", "_", "__init__", "a", "all_head_size", "and", "attention", "attention_head_size", "attention_mask", "attention_probs", "attention_probs_dropout_prob", "attention_scores", "batch_size", ...
big_bird/modeling_big_bird.py:BigBirdBlockSparseAttention
[ 0, -0.0076961396262049675, 0.029652772471308708, -0.02625741809606552, 0, 0.036896198987960815, 0.017316313460469246, -0.041423339396715164, 0.012675994075834751, 0.008262031711637974, 0.013524833135306835, 0.00563063146546483, -0.001011533080600202, -0.026823310181498528, -0.01165738794...
[ "Key", "Linear", "Make", "ModelBlockSparseAttention", "Model_block_sparse_attention", "Module", "None", "Query", "The", "Value", "ValueError", "_", "_Model_block_rand_mask", "_Model_block_rand_mask_with_head", "__init__", "_create_rand_mask_from_inputs", "_get_rand_attn_plan", "_ge...
big_bird/modeling_big_bird.py:BigBirdSelfOutput
[ -0.00012168083776487038, 0.04875698313117027, 0.03859927877783775, 0.020879726856946945, -0.0006630723946727812, 0.05620596557855606, 0.02370131015777588, -0.01952536590397358, 0.0035551965702325106, 0.017155233770608902, 0.01365646906197071, 0.003512872848659754, 0.0036962758749723434, -0...
[ "Dropout", "LayerNorm", "Linear", "ModelSelfOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "layer_norm_eps", "nn", "return", "self", "super" ]
big_bird/modeling_big_bird.py:BigBirdAttention
[ -0.00002412058529444039, 0.03660013899207115, 0.030088456347584724, -0.018973691388964653, 0.00010306067997589707, 0.03323202580213547, 0.018300069496035576, -0.042662736028432846, -0.0021752382162958384, 0.004631152376532555, 0.023801317438483238, 0.002862894209101796, 0.0012560246977955103...
[ "False", "ModelAttention", "ModelBlockSparseAttention", "ModelSelfAttention", "ModelSelfOutput", "Module", "None", "ValueError", "__init__", "attention_mask", "attention_output", "attention_type", "attn_weights", "band_mask", "be", "block_sparse", "but", "cache_position", "can", ...
big_bird/modeling_big_bird.py:BigBirdIntermediate
[ -0.00025367451598867774, 0.02240910567343235, 0.04047359153628349, 0.012862369418144226, -0.0009432404185645282, 0.03635763004422188, 0.03452831506729126, -0.018864808604121208, -0.001329111517407, -0.003772961674258113, 0.023209432139992714, -0.02103712037205696, -0.0013719861162826419, 0...
[ "ACT2FN", "Linear", "ModelIntermediate", "Module", "__init__", "class", "config", "def", "dense", "else", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "intermediate_act_fn", "intermediate_size", "isinstance", "nn", "return", "self", "str", "super" ]
big_bird/modeling_big_bird.py:BigBirdOutput
[ -0.0002591986849438399, 0.03935529664158821, 0.0507957898080349, 0.030660521239042282, -0.0012513039400801063, 0.04988054931163788, 0.03798243775963783, -0.023681821301579475, 0.002259497530758381, 0.014758236706256866, 0.011040075682103634, 0.00267421524040401, 0.0013013561256229877, 0.00...
[ "Dropout", "LayerNorm", "Linear", "ModelOutput", "Module", "__init__", "class", "config", "def", "dense", "dropout", "eps", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "input_tensor", "intermediate_size", "layer_norm_eps", "nn", "return", "self", "su...
big_bird/modeling_big_bird.py:BigBirdLayer
[ -0.00004802670446224511, 0.023354575037956238, 0.02268088608980179, -0.009487796574831009, 0.00010263240983476862, 0.03660380467772484, 0.011115879751741886, -0.013361511752009392, -0.0012070273514837027, 0.004940391052514315, 0.013136948458850384, -0.0007122865063138306, 0.00297546270303428...
[ "False", "GradientCheckpointingLayer", "If", "ModelAttention", "ModelIntermediate", "ModelLayer", "ModelOutput", "None", "True", "ValueError", "__init__", "add_cross_attention", "and", "apply_chunking_to_forward", "are", "attention", "attention_mask", "attention_output", "attenti...
big_bird/modeling_big_bird.py:BigBirdEncoder
[ -0.0000726232465240173, 0.027267608791589737, 0.02118310146033764, -0.0008028169977478683, -0.00020686621428467333, 0.034929580986499786, 0.0043098595924675465, -0.028507044538855553, -0.0007288733031600714, 0.009464790113270283, 0.0134647898375988, 0.0031126763205975294, 0.00003411091893212...
[ "BaseModelOutputWithPastAndCrossAttentions", "DynamicCache", "False", "ModelEncoder", "ModelLayer", "Module", "ModuleList", "None", "Setting", "True", "ValueError", "__init__", "add_cross_attention", "all_cross_attentions", "all_hidden_states", "all_self_attentions", "and", "attent...
big_bird/modeling_big_bird.py:BigBirdPredictionHeadTransform
[ -0.0002919524849858135, 0.041955187916755676, 0.0442478209733963, 0.03209686279296875, -0.0013899088371545076, 0.026136018335819244, 0.029574967920780182, -0.011348534375429153, -0.0021780014503747225, 0.020519066601991653, 0.01662158966064453, 0.00664863595739007, 0.0001226916938321665, 0...
[ "ACT2FN", "LayerNorm", "Linear", "ModelPredictionHeadTransform", "Module", "__init__", "class", "config", "def", "dense", "else", "eps", "forward", "hidden_act", "hidden_size", "hidden_states", "if", "isinstance", "layer_norm_eps", "nn", "return", "self", "str", "super"...
big_bird/modeling_big_bird.py:BigBirdLMPredictionHead
[ -0.00033743318635970354, 0.03515048697590828, 0.03377203643321991, 0.018723953515291214, -0.0015794745413586497, 0.024926980957388878, 0.056746214628219604, -0.029292073100805283, 0.00021807517623528838, 0.004077916033565998, 0.019528048112988472, 0.008155832067131996, -0.0002458954695612192...
[ "Linear", "ModelLMPredictionHead", "ModelPredictionHeadTransform", "Module", "Parameter", "__init__", "bias", "class", "config", "decoder", "def", "forward", "hidden_size", "hidden_states", "nn", "return", "self", "super", "torch", "transform", "vocab_size", "zeros" ]
big_bird/modeling_big_bird.py:BigBirdOnlyMLMHead
[ -0.00036667720996774733, 0.01974974200129509, 0.018704168498516083, 0.03159958869218826, -0.0016627541044726968, 0.03694363683462143, 0.04042888432741165, -0.006534841377288103, -0.003775686025619507, -0.006215360015630722, 0.01579979434609413, 0.011210883036255836, -0.002410630462691188, ...
[ "ModelLMPredictionHead", "ModelOnlyMLMHead", "Module", "__init__", "class", "config", "def", "forward", "nn", "prediction_scores", "predictions", "return", "self", "sequence_output", "super" ]
big_bird/modeling_big_bird.py:BigBirdOnlyNSPHead
[ -0.00026802337379194796, 0.04002482444047928, 0.03430699184536934, 0.029275299981236458, -0.0010506516555324197, 0.03316342458128929, 0.02790302038192749, -0.011721555143594742, 0.0015724038239568472, 0.026302026584744453, 0.023786181584000587, -0.0020012410823255777, 0.0024443732108920813, ...
[ "Linear", "ModelOnlyNSPHead", "Module", "__init__", "class", "config", "def", "forward", "hidden_size", "nn", "pooled_output", "return", "self", "seq_relationship", "seq_relationship_score", "super" ]
big_bird/modeling_big_bird.py:BigBirdPreTrainingHeads
[ -0.0003449733485467732, 0.013582197017967701, 0.02519931085407734, 0.022887447848916054, -0.0015316094504669309, 0.047855570912361145, 0.02820473350584507, -0.008033725433051586, 0.0029187274631112814, 0.015027112327516079, 0.019419651478528976, 0.011096944101154804, -0.00037748392787761986,...
[ "Linear", "ModelLMPredictionHead", "ModelPreTrainingHeads", "Module", "__init__", "class", "config", "def", "forward", "hidden_size", "nn", "pooled_output", "prediction_scores", "predictions", "return", "self", "seq_relationship", "seq_relationship_score", "sequence_output", "s...
big_bird/modeling_big_bird.py:BigBirdPreTrainedModel
[ -0.0003267176798544824, 0.031709566712379456, 0.007869946770370007, -0.01516544446349144, -0.001486385939642787, 0.021943937987089157, 0.025850189849734306, 0.004394532181322575, -0.0015725531848147511, -0.003963695839047432, 0.0008149990462698042, 0.008674174547195435, -0.002613741438835859...
[ "ModelConfig", "ModelEmbeddings", "ModelLMPredictionHead", "ModelPreTrainedModel", "PreTrainedModel", "True", "_init_weights", "arange", "base_model_prefix", "bert", "bias", "class", "config", "copy_", "def", "elif", "expand", "if", "init", "isinstance", "module", "no_grad"...
big_bird/modeling_big_bird.py:BigBirdForPreTrainingOutput
[ -0.0002650046953931451, 0.018440058454871178, 0.029822809621691704, 0.016277335584163666, -0.001131160999648273, 0.047352246940135956, 0.03938432037830353, -0.004866126459091902, 0.013147078454494476, -0.01639116369187832, 0.01081361435353756, 0.017870919778943062, -0.0014370724093168974, ...
[ "ModelForPreTrainingOutput", "ModelOutput", "None", "attentions", "class", "hidden_states", "loss", "prediction_logits", "r", "seq_relationship_logits" ]
big_bird/modeling_big_bird.py:BigBirdForQuestionAnsweringModelOutput
[ -0.000180736999027431, 0.01973222754895687, 0.029938552528619766, 0.025515811517834663, -0.0007690181955695152, 0.05760903283953667, 0.06532047688961029, 0.010433131828904152, 0.018144577741622925, -0.00039159684092737734, 0.00918569229543209, 0.01780436746776104, -0.0002675616415217519, -...
[ "ModelForQuestionAnsweringModelOutput", "ModelOutput", "None", "attentions", "class", "end_logits", "hidden_states", "loss", "pooler_output", "r", "start_logits" ]
big_bird/modeling_big_bird.py:BigBirdModel
[ 0.000046611097786808386, 0.021960174664855003, 0.012212545610964298, -0.004229574464261532, 0.00019957427866756916, 0.05019468441605568, 0.00868323165923357, -0.014453379437327385, 0.00868323165923357, -0.0026469852309674025, 0.020167507231235504, 0.0034592875745147467, 0.0024649174883961678...
[ "Attention", "BaseModelOutputWithPoolingAndCrossAttentions", "Changing", "False", "Input", "Linear", "ModelEmbeddings", "ModelEncoder", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "None", "Sequence", "Setting", "Tanh", "True", "ValueError", "When", "_", "__init__"...
big_bird/modeling_big_bird.py:BigBirdForPreTraining
[ -0.0001630043116165325, 0.04797731339931488, 0.021980304270982742, -0.004407218191772699, -0.0006973446579650044, 0.02744748629629612, 0.01428161934018135, 0.00213387468829751, 0.000435840425780043, 0.004853519145399332, 0.01774044893682003, 0.02778221108019352, 0.0002754511369857937, -0.0...
[ "CrossEntropyLoss", "ModelForPreTraining", "ModelForPreTrainingOutput", "ModelModel", "ModelPreTrainedModel", "ModelPreTrainingHeads", "None", "True", "__init__", "_tied_weights_keys", "add_pooling_layer", "and", "attention_mask", "attentions", "auto_docstring", "bert", "bias", "cl...
big_bird/modeling_big_bird.py:BigBirdForMaskedLM
[ -0.00010342270252294838, 0.040938008576631546, 0.02135896123945713, -0.02124771662056446, -0.00044150196481496096, 0.03515329211950302, 0.011958793736994267, -0.013516217470169067, 0.0035320157185196877, -0.004839139524847269, 0.02191518433392048, 0.038268137723207474, -0.0011541625717654824...
[ "CrossEntropyLoss", "False", "If", "MaskedLMOutput", "ModelForMaskedLM", "ModelModel", "ModelOnlyMLMHead", "ModelPreTrainedModel", "None", "__init__", "_tied_weights_keys", "attention", "attention_mask", "attentions", "auto_docstring", "bert", "bi", "bias", "class", "cls", "c...
big_bird/modeling_big_bird.py:BigBirdForCausalLM
[ -0.0002853326150216162, 0.05295773595571518, 0.025565803050994873, -0.015179695561528206, -0.0013125300174579024, 0.020429816097021103, 0.015978626906871796, -0.009358909912407398, -0.0028675927314907312, 0.004650921560823917, 0.022370077669620514, 0.019174352288246155, -0.000185466196853667...
[ "CausalLMOutputWithCrossAttentions", "GenerationMixin", "ModelForCausalLM", "ModelModel", "ModelOnlyMLMHead", "ModelPreTrainedModel", "None", "True", "__init__", "_tied_weights_keys", "add", "attention_mask", "attentions", "auto_docstring", "bert", "bias", "cache_position", "class"...
big_bird/modeling_big_bird.py:BigBirdClassificationHead
[ -0.0003958397137466818, 0.026143494993448257, 0.03400968015193939, 0.01665780320763588, -0.0013592303730547428, 0.017814593389630318, 0.050436124205589294, 0.002863059751689434, -0.007750505115836859, 0.00977489072829485, 0.021747685968875885, -0.0030365786515176296, -0.00003095321881119162,...
[ "ACT2FN", "Dropout", "Linear", "ModelClassificationHead", "Module", "None", "__init__", "class", "classifier_dropout", "config", "def", "dense", "dropout", "else", "features", "forward", "hidden_act", "hidden_dropout_prob", "hidden_size", "if", "is", "kwargs", "nn", "no...
big_bird/modeling_big_bird.py:BigBirdForSequenceClassification
[ -0.00033876142697408795, 0.02921193279325962, 0.002481873147189617, 0.0027956732083112, -0.001133959274739027, 0.024191131815314293, 0.019284440204501152, 0.017344584688544273, -0.004906691610813141, 0.010212766006588936, 0.054544154554605484, 0.0008379888022318482, -0.0005669796373695135, ...
[ "BCEWithLogitsLoss", "CrossEntropyLoss", "MSELoss", "ModelClassificationHead", "ModelForSequenceClassification", "ModelModel", "ModelPreTrainedModel", "None", "SequenceClassifierOutput", "__init__", "and", "attention_mask", "attentions", "auto_docstring", "bert", "class", "classifier...
big_bird/modeling_big_bird.py:BigBirdForMultipleChoice
[ -0.00022560656361747533, 0.053889770060777664, 0.0217150766402483, 0.020009703934192657, -0.0009663777309469879, 0.037972960621118546, 0.043884918093681335, 0.005144540220499039, -0.0015845752786844969, 0.008242633193731308, 0.03888249397277832, -0.01074384618550539, -0.002032235497608781, ...
[ "CrossEntropyLoss", "Dropout", "Linear", "ModelForMultipleChoice", "ModelModel", "ModelPreTrainedModel", "MultipleChoiceModelOutput", "None", "__init__", "attention_mask", "attentions", "auto_docstring", "bert", "class", "classifier", "config", "def", "dropout", "else", "forwar...
big_bird/modeling_big_bird.py:BigBirdForTokenClassification
[ -0.000248906813794747, 0.03754936903715134, 0.003982509020715952, -0.03777694329619408, -0.001024073688313365, 0.03504607826471329, 0.03868722915649414, 0.0217331200838089, -0.0027166400104761124, 0.007965018041431904, 0.053706977516412735, 0.01934361457824707, -0.0008782854420132935, -0.0...
[ "CrossEntropyLoss", "Dropout", "Linear", "ModelForTokenClassification", "ModelModel", "ModelPreTrainedModel", "None", "TokenClassifierOutput", "__init__", "attention_mask", "attentions", "auto_docstring", "bert", "class", "classifier", "classifier_dropout", "config", "def", "drop...
big_bird/modeling_big_bird.py:BigBirdForQuestionAnsweringHead
[ -0.000358367309672758, 0.025599732995033264, 0.036604143679142, 0.029885662719607353, -0.0015348256565630436, 0.04031089320778847, 0.0544428713619709, 0.0048361485823988914, -0.002128484658896923, 0.032202381640672684, 0.013784471899271011, 0.004199051298201084, 0.0003855163522530347, -0.0...
[ "Dropout", "Linear", "ModelForQuestionAnsweringHead", "ModelIntermediate", "ModelOutput", "Module", "__init__", "class", "config", "def", "dropout", "encoder_output", "forward", "hidden_dropout_prob", "hidden_size", "hidden_states", "intermediate", "nn", "num_labels", "output",...
big_bird/modeling_big_bird.py:BigBirdForQuestionAnswering
[ -0.00021445540187414736, 0.020785406231880188, 0.008133419789373875, 0.005337556358426809, -0.0006742548430338502, 0.042700450867414474, 0.03162996470928192, 0.010336220264434814, 0.0070320190861821175, 0.024965079501271248, 0.03321146219968796, 0.01637980341911316, 0.0031488758977502584, ...
[ "CrossEntropyLoss", "False", "ModelForQuestionAnswering", "ModelForQuestionAnsweringHead", "ModelForQuestionAnsweringModelOutput", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "add_pooling_layer", "and", "arange", "argmax", "attention_mask", "attentions", "auto_docstring",...
helium/modeling_helium.py:HeliumRMSNorm
[ -0.00008691623224876821, 0.04179038107395172, 0.03252872824668884, 0.05353686586022377, -0.0003706072748173028, 0.04043501615524292, 0.02371886558830738, -0.03049568459391594, 0.008922811597585678, 0.04179038107395172, 0.02089519053697586, 0.006889765616506338, 0.0026824907399713993, 0.012...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", "to", "torch", "tuple", ...
helium/modeling_helium.py:HeliumRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
helium/modeling_helium.py:HeliumMLP
[ -0.0002434849739074707, 0.024471141397953033, 0.0228551235049963, 0.026548879221081734, -0.0009450823999941349, 0.060485273599624634, 0.036244992166757584, -0.005078915972262621, 0, -0.005021201446652412, 0.028164898976683617, -0.04640282690525055, -0.0015583038330078125, 0.009176678024232...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
helium/modeling_helium.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
helium/modeling_helium.py:eager_attention_forward
[ 0, 0.020139193162322044, 0.013972979038953781, -0.018215786665678024, 0, 0.038468122482299805, 0.0527239553630352, -0.03326360881328583, 0.020704900845885277, 0.007297629024833441, 0.0280591007322073, 0.020704900845885277, 0.002489113714545965, -0.017876362428069115, -0.03484759107232094...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", "p", "query",...
helium/modeling_helium.py:rotate_half
[ 0.000020891084204777144, 0.007994027808308601, 0.02882353775203228, -0.0005594412214122713, 0.00044509003055281937, 0.031300559639930725, 0.017676936462521553, -0.02251838892698288, 0.01655101589858532, 0.02612133137881756, -0.004925897810608149, -0.008894763886928558, 0.00016888792742975056...
[ "Model_half", "def", "dim", "flatten", "return", "stack", "torch", "x", "x1", "x2" ]
helium/modeling_helium.py:apply_rotary_pos_emb
[ -0.0002023695851676166, 0.026812195777893066, 0.023403866216540337, -0.0024284350220113993, -0.0006710149464197457, 0.02101803384721279, 0.040899958461523056, -0.003663954557850957, 0.011304293759167194, 0.03431051969528198, 0.006561035290360451, 0.004913675598800182, -0.0002059199323412031,...
[ "Model_rotary_pos_emb", "cos", "def", "dim", "k", "k_embed", "q", "q_embed", "repeat_interleave", "return", "rotate_half", "shape", "sin", "unsqueeze", "unsqueeze_dim" ]
helium/modeling_helium.py:HeliumAttention
[ -0.0001249098713742569, 0.03242730721831322, 0.030850980430841446, -0.005432699806988239, -0.0004820465692318976, 0.031301360577344894, 0.043011222034692764, -0.005939376540482044, 0.0025052346754819155, 0.010415020398795605, 0.016438843682408333, 0.026121998205780983, -0.0011048367014154792...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "cache_position", "cla...
helium/modeling_helium.py:HeliumDecoderLayer
[ -0.0001677126856520772, 0.04406430199742317, 0.013840709812939167, 0.004519415553659201, -0.0006673199241049588, 0.03864100202918053, 0.04157862067222595, -0.03434755653142929, 0.006496659480035305, -0.0030082359444350004, -0.0024433089420199394, 0.021015282720327377, -0.0012216544710099697,...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "None", "Tensor", "_", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "input_layernorm", "kwa...
helium/modeling_helium.py:HeliumPreTrainedModel
[ -0.00034975787275470793, 0.027630871161818504, 0.016904963180422783, 0.011250545270740986, -0.0018799485405907035, 0.029612833634018898, 0.03077869303524494, -0.03241089731454849, -0.00408050836995244, 0.003993069287389517, 0.006383080966770649, 0.004459412768483162, -0.004255387466400862, ...
[ "ModelAttention", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_no_split_modules", "_skip_keys_device_placement", "_supports_attention_backend", "_supports_flash_attn", "_supports_flex_attn", "_supp...
helium/modeling_helium.py:HeliumModel
[ -0.0001474594319006428, 0.04882584884762764, -0.0081941531971097, -0.004266610834747553, -0.0008264792268164456, 0.04159238934516907, 0.03955797851085663, -0.01864876225590706, 0.01231948472559452, 0.008533221669495106, 0.01785760186612606, -0.0026984193827956915, -0.0010242691496387124, 0...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "auto_docstring", "cache_p...
helium/modeling_helium.py:HeliumForCausalLM
[ -0.00028262686100788414, 0.03526614233851433, 0.008361488580703735, -0.001301150070503354, -0.0012087186332792044, 0.02741658128798008, 0.0389065183699131, -0.007394513580948114, 0.003640376031398773, 0.026734011247754097, 0.025823917239904404, 0.0026734010316431522, 0.0009385344455949962, ...
[ "CausalLMOutputWithPast", "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "cache_position", "can_return_tuple", "class", "colwi...
helium/modeling_helium.py:HeliumForSequenceClassification
[ -0.0002121782599715516, 0.015340753830969334, -0.017272552475333214, 0.0278406273573637, -0.0007244244916364551, 0.027954263612627983, 0.015227118507027626, -0.011079433374106884, 0.0032101948745548725, -0.013863496482372284, 0.02909061498939991, 0.008409005589783192, -0.003437465289607644, ...
[ "GenericForSequenceClassification", "ModelForSequenceClassification", "ModelPreTrainedModel", "class", "pass" ]
helium/modeling_helium.py:HeliumForTokenClassification
[ -0.00016310509818140417, 0.024394849315285683, -0.0069496952928602695, -0.024848707020282745, -0.0007446102099493146, 0.035854753106832504, 0.0356278270483017, -0.007885776460170746, 0.0015317696379497647, -0.018381234258413315, 0.040166404098272324, 0.024394849315285683, -0.0042832815088331...
[ "GenericForTokenClassification", "ModelForTokenClassification", "ModelPreTrainedModel", "class", "pass" ]
doge/modeling_doge.py:DogeRMSNorm
[ -0.00009835156379267573, 0.04154934734106064, 0.0313878208398819, 0.048549506813287735, -0.00046573654981330037, 0.03861379250884056, 0.02461347170174122, -0.03003295138478279, 0.008129219524562359, 0.04358164966106415, 0.01964561454951763, 0.004770271480083466, 0.0023992490023374557, 0.01...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "dtype", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "input_dtype", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", ...
doge/modeling_doge.py:DogeRotaryEmbedding
[ -0.00029968636226840317, 0.04852752387523651, 0.0020364229567348957, -0.005228263325989246, -0.0013792794197797775, 0.04136393964290619, 0.040670689195394516, 0.0062681385315954685, -0.0026430170983076096, 0.021144136786460876, 0.0021808501332998276, -0.004043960478156805, -0.001364836702123...
[ "False", "ModelRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", "def", "default", "device", ...
doge/modeling_doge.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
doge/modeling_doge.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
doge/modeling_doge.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
doge/modeling_doge.py:eager_attention_forward
[ 0, 0.020139193162322044, 0.013972979038953781, -0.018215786665678024, 0, 0.038468122482299805, 0.0527239553630352, -0.03326360881328583, 0.020704900845885277, 0.007297629024833441, 0.0280591007322073, 0.020704900845885277, 0.002489113714545965, -0.017876362428069115, -0.03484759107232094...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", "p", "query",...
doge/modeling_doge.py:flex_attention_forward
[ -0.000058538815210340545, 0.027154969051480293, 0.012394383549690247, -0.023662006482481956, -0.00009947197395376861, 0.018028194084763527, 0.04033808410167694, -0.020619748160243034, 0.011211283504962921, 0.019605662673711777, 0.014422555454075336, 0.027831025421619415, 0.002957750810310244...
[ "BlockMask", "Model_attention_forward", "None", "True", "attention_mask", "attention_weights", "attn_output", "batch_idx", "block_mask", "causal_mask", "compile_friendly_Model_attention", "contiguous", "def", "dtype", "else", "enable_gqa", "head_idx", "if", "is", "isinstance", ...
doge/modeling_doge.py:DogeAttention
[ -0.0001357837172690779, 0.03272916376590729, 0.03430919349193573, -0.027763361111283302, -0.0007406384684145451, 0.035663504153490067, 0.039274998009204865, -0.030020544305443764, 0.0034845275804400444, 0.015235991217195988, 0.01365596242249012, 0.021556105464696884, 0.0005572422523982823, ...
[ "A", "ALL_ATTENTION_FUNCTIONS", "BlockMask", "F", "False", "Linear", "ModelAttention", "ModelRMSNorm", "Module", "None", "Parameter", "Tensor", "True", "__init__", "_attn_implementation", "active_mask", "and", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", ...
doge/modeling_doge.py:DogeMLP
[ -0.0002434849739074707, 0.024471141397953033, 0.0228551235049963, 0.026548879221081734, -0.0009450823999941349, 0.060485273599624634, 0.036244992166757584, -0.005078915972262621, 0, -0.005021201446652412, 0.028164898976683617, -0.04640282690525055, -0.0015583038330078125, 0.009176678024232...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
doge/modeling_doge.py:DogeCDMoE
[ -0.0004494891327340156, 0.04113011062145233, -0.0010104217799380422, 0.007845628075301647, -0.0016716538229957223, 0.06276502460241318, 0.052304189652204514, -0.013194920495152473, -0.007845628075301647, -0.008083374239504337, 0.021397167816758156, -0.021516041830182076, -0.00093612610362470...
[ "ACT2FN", "Embedding", "F", "Linear", "ModelCDMoE", "Module", "True", "_", "__init__", "act_fn", "all_indices", "all_scores", "bsz", "class", "config", "def", "dim", "down_embed", "down_proj", "experts_states", "experts_weights", "floor", "forward", "gate_proj", "gath...
doge/modeling_doge.py:DogeDecoderLayer
[ -0.0002462891861796379, 0.048429735004901886, 0.008966353721916676, -0.00742436945438385, -0.0010565449483692646, 0.03540853038430214, 0.04568842798471451, -0.025699740275740623, 0.0031553569715470076, -0.005882385186851025, -0.0037692952901124954, 0.00976590160280466, -0.0024985859636217356...
[ "F", "False", "GradientCheckpointingLayer", "ModelAttention", "ModelCDMoE", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "None", "Parameter", "Tensor", "__init__", "attention_mask", "cache_position", "class", "config", "def", "dropout", "else", "eps", "forward", "hidden...
doge/modeling_doge.py:DogePreTrainedModel
[ -0.00026412613806314766, 0.04271705076098442, -0.0008530560880899429, -0.002841140842065215, -0.0012206911342218518, 0.031752247363328934, 0.029125262051820755, -0.021015875041484833, -0.004597222898155451, -0.015190823003649712, -0.0003123113128822297, -0.008166494779288769, -0.003098128363...
[ "A", "False", "ModelAttention", "ModelCDMoE", "ModelConfig", "ModelDecoderLayer", "ModelPreTrainedModel", "OutputRecorder", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_init_weights", "_no_split_modules", "_skip_keys_device_placement", "_supports_attent...
doge/modeling_doge.py:DogeModel
[ -0.000193175976164639, 0.052415672689676285, -0.006153144408017397, -0.009457610547542572, -0.0010682542342692614, 0.04671831801533699, 0.041248854249715805, -0.014813125133514404, 0.010027346201241016, -0.0017733019776642323, 0.021877845749258995, -0.007178668398410082, -0.00193710101302713...
[ "DynamicCache", "Embedding", "False", "ModelDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModelRotaryEmbedding", "ModuleList", "MoeModelOutputWithPast", "None", "ValueError", "You", "__init__", "and", "arange", "attention_mask", "auto_docstring", "cache_po...
doge/modeling_doge.py:load_balancing_loss_func
[ -0.0002816331689245999, 0.017335686832666397, 0.002654885873198509, -0.0335233174264431, -0.0010619544191285968, 0.05740293860435486, 0.042937397956848145, -0.011882408522069454, 0.0034011241514235735, -0.020894668996334076, 0.029849527403712273, -0.007634590845555067, -0.0006350200274027884...
[ "F", "Model_balancing_loss_func", "None", "_", "all_expert_indices", "all_indices", "all_routing_weights", "all_scores", "append", "attention_mask", "batch_size", "bool", "cat", "compute_device", "compute_dtype", "def", "device", "dim", "dtype", "else", "expand", "expert_at...
doge/modeling_doge.py:DogeForCausalLM
[ -0.00042122812010347843, 0.03781206160783768, -0.0006199894123710692, -0.010094886645674706, -0.0016484424704685807, 0.04924904182553291, 0.04037954658269882, -0.005339203402400017, -0.0056309630163013935, 0.00653541786596179, 0.029759492725133896, -0.007148113567382097, 0.000065645937866065...
[ "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "MoeCausalLMOutputWithPast", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "aux_loss", "cache_position", "can_return_tuple", ...
doge/modeling_doge.py:DogeForSequenceClassification
[ -0.0002121782599715516, 0.015340753830969334, -0.017272552475333214, 0.0278406273573637, -0.0007244244916364551, 0.027954263612627983, 0.015227118507027626, -0.011079433374106884, 0.0032101948745548725, -0.013863496482372284, 0.02909061498939991, 0.008409005589783192, -0.003437465289607644, ...
[ "GenericForSequenceClassification", "ModelForSequenceClassification", "ModelPreTrainedModel", "class", "pass" ]
pop2piano/modeling_pop2piano.py:Pop2PianoLayerNorm
[ -0.00005301048804540187, 0.037010956555604935, 0.041497133672237396, 0.04912363365292549, -0.00022606125276070088, 0.025571206584572792, 0.01772039756178856, -0.016598854213953018, 0.005327334627509117, 0.05360981076955795, 0.017383934929966927, 0.005691836588084698, 0.0024673971347510815, ...
[ "ModelLayerNorm", "Module", "Parameter", "True", "__init__", "bfloat16", "class", "def", "dtype", "eps", "float16", "float32", "forward", "hidden_size", "hidden_states", "if", "in", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "super", "to", ...
pop2piano/modeling_pop2piano.py:Pop2PianoDenseActDense
[ -0.00028989915153943, 0.03825948387384415, 0.034110866487026215, -0.010083448141813278, -0.0009867374319583178, 0.030192725360393524, 0.03641565516591072, -0.030192725360393524, -0.0030250344425439835, -0.006741505581885576, 0.0178621094673872, -0.026850782334804535, -0.00006527232471853495,...
[ "ACT2FN", "Dropout", "Linear", "ModelDenseActDense", "Module", "Tensor", "__init__", "act", "and", "class", "config", "d_ff", "d_model", "def", "dense_act_fn", "dropout", "dropout_rate", "dtype", "forward", "hidden_states", "if", "int8", "isinstance", "nn", "return", ...
pop2piano/modeling_pop2piano.py:Pop2PianoDenseGatedActDense
[ -0.0003425553732085973, 0.0275210440158844, 0.017608804628252983, -0.003964896313846111, -0.0013629330787807703, 0.028104117140173912, 0.03358500450849533, -0.03498438000679016, -0.002536367392167449, -0.001618027570657432, 0.024955524131655693, -0.027171200141310692, -0.00011251486284891143...
[ "ACT2FN", "Dropout", "Linear", "ModelDenseGatedActDense", "Module", "Tensor", "__init__", "act", "and", "class", "config", "d_ff", "d_model", "def", "dense_act_fn", "dropout", "dropout_rate", "dtype", "forward", "hidden_gelu", "hidden_linear", "hidden_states", "if", "in...
pop2piano/modeling_pop2piano.py:Pop2PianoLayerFF
[ -0.00017759011825546622, 0.024330737069249153, 0.019533127546310425, 0.026957999914884567, -0.0005711441044695675, 0.04249311983585358, 0.021703476086258888, -0.032669439911842346, 0.004940396174788475, -0.007539101876318455, 0.030384864658117294, -0.040437001734972, 0.003298357129096985, ...
[ "DenseReluDense", "Dropout", "ModelDenseActDense", "ModelDenseGatedActDense", "ModelLayerFF", "ModelLayerNorm", "Module", "__init__", "class", "config", "d_model", "def", "dropout", "dropout_rate", "else", "eps", "forward", "forwarded_states", "hidden_states", "if", "is_gated...
pop2piano/modeling_pop2piano.py:Pop2PianoAttention
[ -0.00012667596456594765, 0.029276223853230476, 0.03625747561454773, -0.025898197665810585, -0.0007917248294688761, 0.024772189557552338, 0.028713218867778778, -0.026461202651262283, 0.006953103002160788, 0.012667597271502018, 0.012498695403337479, 0.034230660647153854, 0.002125341212376952, ...
[ "Embedding", "EncoderDecoderCache", "False", "Instantiating", "Linear", "ModelAttention", "Module", "None", "Please", "True", "__class__", "__init__", "__name__", "_relative_position_bucket", "a", "abs", "and", "arange", "attn_output", "attn_weights", "batch_size", "bidirec...
pop2piano/modeling_pop2piano.py:Pop2PianoLayerSelfAttention
[ 0.00009500727901468053, 0.04483292996883392, 0.028693074360489845, -0.00026269294903613627, 0.0002714493602979928, 0.028917238116264343, 0.018717747181653976, -0.01591568998992443, 0.009751161560416222, 0.010984067805111408, 0.02062314748764038, 0.021631887182593346, 0.0028440889436751604, ...
[ "Dropout", "False", "ModelAttention", "ModelLayerNorm", "ModelLayerSelfAttention", "Module", "None", "SelfAttention", "__init__", "attention_mask", "attention_output", "cache_position", "class", "config", "d_model", "def", "dropout", "dropout_rate", "eps", "forward", "has_rel...
pop2piano/modeling_pop2piano.py:Pop2PianoLayerCrossAttention
[ -0.00008750730921747163, 0.036248255521059036, 0.045479174703359604, 0.008161486126482487, -0.00046787827159278095, 0.038274552673101425, 0.0269047599285841, -0.023640165105462074, 0.005713040009140968, 0.006078899838030338, 0.01530982181429863, 0.026792187243700027, 0.0004643604042939842, ...
[ "Dropout", "EncDecAttention", "False", "ModelAttention", "ModelLayerCrossAttention", "ModelLayerNorm", "Module", "None", "__init__", "attention_mask", "attention_output", "cache_position", "class", "config", "d_model", "def", "dropout", "dropout_rate", "eps", "forward", "has_...
pop2piano/modeling_pop2piano.py:Pop2PianoBlock
[ -0.00016884862270671874, 0.015615403652191162, 0.02794930897653103, -0.01471016276627779, -0.0006188169354572892, 0.027609843760728836, 0.01216417271643877, -0.025912517681717873, 0.006025508977472782, 0.006959038320928812, 0.005148557014763355, 0.009278718382120132, 0.005459733307361603, ...
[ "False", "GradientCheckpointingLayer", "ModelBlock", "ModelLayerFF", "ModelLayerSelfAttention", "ModuleList", "None", "True", "__init__", "and", "any", "append", "attention_mask", "attention_outputs", "cache_position", "clamp", "clamp_value", "class", "config", "cross_attention...
pop2piano/modeling_pop2piano.py:Pop2PianoPreTrainedModel
[ -0.00025042417109943926, 0.05819786712527275, -0.007558902725577354, -0.007899905554950237, -0.0010727390181273222, 0.027734920382499695, 0.02227887138724327, -0.03273630142211914, -0.006820062641054392, -0.00035521158133633435, 0.022051535546779633, 0.0014847844140604138, -0.003140070475637...
[ "False", "In", "Model", "ModelAttention", "ModelBlock", "ModelConcatEmbeddingToMel", "ModelConfig", "ModelDenseActDense", "ModelDenseGatedActDense", "ModelForConditionalGeneration", "ModelLayerNorm", "ModelPreTrainedModel", "None", "PreTrainedModel", "True", "ValueError", "_can_compi...
pop2piano/modeling_pop2piano.py:Pop2PianoStack
[ -0.0001405516522936523, 0.01731596328318119, -0.0016163440886884928, -0.0009698064532130957, -0.0004989583976566792, 0.03260798379778862, 0.023500237613916397, -0.01529202051460743, 0.004244660027325153, -0.0119750015437603, 0.001384433824568987, 0.011131690815091133, -0.0008573650848120451,...
[ "BaseModelOutputWithPastAndCrossAttentions", "Dropout", "DynamicCache", "Embedding", "EncoderDecoderCache", "False", "ModelBlock", "ModelLayerNorm", "ModelPreTrainedModel", "ModelStack", "ModuleList", "None", "Setting", "True", "ValueError", "You", "_", "__init__", "all_attention...
pop2piano/modeling_pop2piano.py:Pop2PianoConcatEmbeddingToMel
[ -0.00025213530170731246, 0.007784233428537846, 0.03500064089894295, 0.03363697975873947, -0.0009304147679358721, 0.016023021191358566, 0.031136933714151382, -0.040000732988119125, 0.00043857336277142167, -0.0009304147679358721, 0.0377279631793499, 0.04954636096954346, -0.004602356813848019, ...
[ "Embedding", "ModelConcatEmbeddingToMel", "Module", "__init__", "cat", "class", "composer_embedding", "composer_vocab_size", "config", "d_model", "def", "dim", "embedding", "embedding_dim", "embedding_offset", "feature", "forward", "index_shifted", "index_value", "inputs_embeds...
pop2piano/modeling_pop2piano.py:Pop2PianoForConditionalGeneration
[ -0.0003717535000760108, 0.041116658598184586, 0.006179048679769039, 0.014552525244653225, -0.0013859547907486558, 0.012704585678875446, 0.03996169567108154, -0.02587115578353405, -0.005832559894770384, -0.0034648869186639786, 0.04619849473237991, 0.011665119789540768, -0.004157864488661289, ...
[ "BaseModelOutput", "Composer", "CrossEntropyLoss", "Embedding", "False", "Found", "GenerationMixin", "Linear", "Model", "ModelConcatEmbeddingToMel", "ModelForConditionalGeneration", "ModelPreTrainedModel", "ModelStack", "None", "Please", "Seq2SeqLMOutput", "True", "ValueError", "...
emu3/modeling_emu3.py:Emu3VQVAEModelOutput
[ -0.0001393948041368276, 0.02659333124756813, 0.008068917319178581, -0.0025712570641189814, -0.0008559018606320024, 0.03863988444209099, 0.0586417056620121, -0.026252392679452896, 0.0156832467764616, -0.00897809024900198, 0.03363942727446556, 0.03250296041369438, -0.004091281909495592, -0.0...
[ "BaseModelOutputWithPooling", "ModelVQVAEModelOutput", "None", "class", "image_tokens", "r" ]
emu3/modeling_emu3.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
emu3/modeling_emu3.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
emu3/modeling_emu3.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
emu3/modeling_emu3.py:eager_attention_forward
[ 0, 0.020139193162322044, 0.013972979038953781, -0.018215786665678024, 0, 0.038468122482299805, 0.0527239553630352, -0.03326360881328583, 0.020704900845885277, 0.007297629024833441, 0.0280591007322073, 0.020704900845885277, 0.002489113714545965, -0.017876362428069115, -0.03484759107232094...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", "p", "query",...
emu3/modeling_emu3.py:Emu3Attention
[ -0.00009043229511007667, 0.03348980098962784, 0.028994524851441383, -0.007192440330982208, -0.0003863127203658223, 0.030792634934186935, 0.04225558787584305, -0.007361013442277908, 0.003174788085743785, 0.009102932177484035, 0.014047735370695591, 0.028320234268903732, -0.0009271505405195057,...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", "cache_position", "cla...
emu3/modeling_emu3.py:Emu3RMSNorm
[ -0.00009835156379267573, 0.04154934734106064, 0.0313878208398819, 0.048549506813287735, -0.00046573654981330037, 0.03861379250884056, 0.02461347170174122, -0.03003295138478279, 0.008129219524562359, 0.04358164966106415, 0.01964561454951763, 0.004770271480083466, 0.0023992490023374557, 0.01...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "dtype", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "input_dtype", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", ...
emu3/modeling_emu3.py:Emu3MLP
[ -0.0002434849739074707, 0.024471141397953033, 0.0228551235049963, 0.026548879221081734, -0.0009450823999941349, 0.060485273599624634, 0.036244992166757584, -0.005078915972262621, 0, -0.005021201446652412, 0.028164898976683617, -0.04640282690525055, -0.0015583038330078125, 0.009176678024232...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
emu3/modeling_emu3.py:Emu3DecoderLayer
[ -0.00014351708523463458, 0.04260080307722092, 0.01634157821536064, -0.0025075869634747505, -0.0005670245736837387, 0.03674037382006645, 0.04305160418152809, -0.031781550496816635, 0.005775902420282364, 0.001091786427423358, -0.0013030999107286334, 0.019159091636538506, -0.0014298880705609918...
[ "Dropout", "False", "GradientCheckpointingLayer", "ModelAttention", "ModelDecoderLayer", "ModelMLP", "ModelRMSNorm", "None", "Tensor", "_", "__init__", "attention_dropout", "attention_mask", "cache_position", "class", "config", "def", "dropout", "eps", "forward", "hidden_size...
emu3/modeling_emu3.py:Emu3VQVAEVectorQuantizer
[ -0.00008859043737174943, 0.011058893986046314, 0.026496436446905136, 0.01083434745669365, -0.0002841911045834422, 0.014202538877725601, 0.04670558124780655, -0.04715467244386673, 0.011339575983583927, 0.013809583149850368, 0.04535830393433571, 0.013865719549357891, -0.0013893787981942296, ...
[ "Embedding", "ModelVQVAEVectorQuantizer", "Module", "True", "__init__", "argmin", "batch_size", "channels", "class", "codebook_size", "config", "contiguous", "data", "def", "dim", "distances", "embed_dim", "embedding", "embedding_sum", "forward", "height", "hidden_state", ...