identifier: string, lengths 24-117
embedding: list, lengths 2.56k-2.56k
tokens: list, lengths 4-448
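The column stats above suggest a simple schema: a fully qualified module identifier, a fixed-size embedding vector (about 2.56k floats per row), and a sorted list of source-code tokens. A minimal sketch of inspecting one row with the Hugging Face `datasets` library, assuming this preview was rendered from a hosted dataset; the repository id below is a placeholder, not the real dataset name:

```python
# Hypothetical sketch: load the dataset this preview comes from and look at
# one row. "org/code-embeddings" is a placeholder repository id.
from datasets import load_dataset

ds = load_dataset("org/code-embeddings", split="train")

row = ds[0]
print(row["identifier"])      # e.g. "qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeVisionPatchEmbed"
print(len(row["embedding"]))  # fixed-length float vector (~2.56k dims per the header)
print(row["tokens"][:8])      # alphabetically sorted identifiers from the source file
```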
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeVisionPatchEmbed
[ -0.00006855856190668419, 0.009169268421828747, 0.010969369672238827, 0.008606736548244953, -0.0000975641087279655, 0.014007041230797768, 0.02587646245956421, -0.029926691204309464, 0.01282572466880083, 0.0033751907758414745, 0.009956812486052513, -0.01063185092061758, -0.0027845322620123625,...
[ "Conv3d", "Model_5MoeVisionPatchEmbed", "Module", "__init__", "class", "config", "def", "dtype", "embed_dim", "forward", "hidden_size", "hidden_states", "in_channels", "kernel_size", "nn", "patch_size", "proj", "return", "self", "stride", "super", "target_dtype", "tempora...
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeVisionPatchMerger
[ -0.0002408981235930696, 0.029247866943478584, 0.011563110165297985, 0.03378241881728172, -0.0006872681551612914, 0.02698059007525444, 0.011959883384406567, -0.044211890548467636, 0.005073030944913626, 0.024259857833385468, 0.013093521818518639, -0.0022672764025628567, -0.001466644462198019, ...
[ "False", "GELU", "LayerNorm", "Linear", "Model_5MoeVisionPatchMerger", "Module", "__init__", "act_fn", "class", "config", "def", "else", "eps", "forward", "hidden_size", "if", "linear_fc1", "linear_fc2", "nn", "norm", "out_hidden_size", "return", "self", "spatial_merge_...
qwen3_5_moe/modeling_qwen3_5_moe.py:apply_rotary_pos_emb_vision
[ -0.00006463831959990785, 0.03513491153717041, 0.018587501719594002, -0.0022809358779340982, -0.0004639792023226619, 0.01700076460838318, 0.060749396681785583, -0.009180412627756596, 0.016887426376342773, 0.04692211002111435, 0.009010405279695988, 0.01983422413468361, 0.0013458938337862492, ...
[ "Model_rotary_pos_emb_vision", "cos", "def", "dtype", "float", "k", "k_embed", "orig_q_dtype", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze" ]
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeVisionAttention
[ -0.00022759455896448344, 0.026264771819114685, 0.017777465283870697, 0.024659063667058945, -0.0010250716004520655, 0.03234351798892021, 0.04128959774971008, -0.028214557096362114, 0.004587732721120119, 0.034178610891103745, 0.025461917743086815, 0.016171758994460106, -0.00021953019313514233,...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "Model_5MoeVisionAttention", "Module", "None", "Tensor", "_", "__init__", "_attn_implementation", "apply_rotary_pos_emb_vision", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_outputs", "cat", "class"...
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeVisionBlock
[ -0.00018217750766780227, 0.014389357529580593, 0.015014980919659138, 0.023659970611333847, -0.0006220695213414729, 0.047774940729141235, 0.030712461099028587, -0.011943735182285309, 0.0056306179612874985, 0.025366218760609627, 0.0209299735724926, 0.007223116233944893, 0.00023994110233616084,...
[ "GradientCheckpointingLayer", "LayerNorm", "Model_5MoeVisionAttention", "Model_5MoeVisionBlock", "Model_5MoeVisionMLP", "None", "Tensor", "__init__", "attn", "attn_implementation", "class", "config", "cu_seqlens", "def", "eps", "forward", "hidden_size", "hidden_states", "kwargs",...
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeVisionModel
[ -0.0002151822845917195, 0.017982836812734604, -0.024584131315350533, 0.02333216182887554, -0.0007149031152948737, 0.03642093762755394, 0.050534047186374664, -0.043932754546403885, 0.004324986133724451, 0.024356501176953316, 0.014568374492228031, -0.003314874367788434, -0.0015507352072745562,...
[ "BaseModelOutputWithPooling", "Embedding", "F", "False", "Model_5MoePreTrainedModel", "Model_5MoeVisionAttention", "Model_5MoeVisionBlock", "Model_5MoeVisionConfig", "Model_5MoeVisionModel", "Model_5MoeVisionPatchEmbed", "Model_5MoeVisionPatchMerger", "Model_5MoeVisionRotaryEmbedding", "Modu...
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeModelOutputWithPast
[ -0.0003511856484692544, 0.014076999388635159, 0.006920205894857645, 0.01259832363575697, -0.001855738926678896, 0.06198611482977867, 0.06813740730285645, -0.03832729533314705, 0.014313587918877602, 0.006890632212162018, 0.01573311723768711, 0.008990352973341942, -0.0028982057701796293, 0.0...
[ "ModelOutput", "Model_5MoeModelOutputWithPast", "None", "attentions", "class", "hidden_states", "last_hidden_state", "past_key_values", "r", "rope_deltas", "router_logits" ]
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeCausalLMOutputWithPast
[ -0.0003862447920255363, 0.016676904633641243, 0.014133972115814686, 0.005115433596074581, -0.0017815310275182128, 0.06812692433595657, 0.06339588761329651, -0.029450703412294388, 0.014074834063649178, 0.004464915953576565, 0.016913456842303276, 0.008279314264655113, -0.002306380309164524, ...
[ "ModelOutput", "Model_5MoeCausalLMOutputWithPast", "None", "attentions", "aux_loss", "class", "hidden_states", "logits", "loss", "past_key_values", "r", "rope_deltas", "router_logits" ]
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeTextModel
[ -0.00020588969346135855, 0.03240044042468071, 0.012264251708984375, 0.002082070568576455, -0.0009768619202077389, 0.04609077051281929, 0.04221184179186821, -0.044037219136953354, 0.006645513232797384, 0.0139184994623065, 0.022703126072883606, 0.0006809511687606573, -0.00032086705323308706, ...
[ "Embedding", "False", "Model_5MoeDecoderLayer", "Model_5MoeDynamicCache", "Model_5MoeModelOutputWithPast", "Model_5MoePreTrainedModel", "Model_5MoeRMSNorm", "Model_5MoeTextModel", "Model_5MoeTextRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "_update_linear_attn...
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeModel
[ -0.0001619684771867469, 0.026056567206978798, -0.016766835004091263, 0.013424796983599663, -0.00029561459086835384, 0.049167610704898834, 0.059816814959049225, -0.057324450463056564, 0.009459666907787323, 0.03194761648774147, 0.023677490651607513, 0.00045669800601899624, 0.000065052910940721...
[ "False", "Image", "Model_5MoeConfig", "Model_5MoeModel", "Model_5MoeModelOutputWithPast", "Model_5MoePreTrainedModel", "Model_5MoeTextDecoderLayer", "Model_5MoeTextModel", "Model_5MoeVisionBlock", "Model_5MoeVisionModel", "None", "True", "ValueError", "Video", "You", "_", "__init__",...
qwen3_5_moe/modeling_qwen3_5_moe.py:load_balancing_loss_func
[ -0.00027690583374351263, 0.01840798556804657, 0.001643570140004158, -0.028812499716877937, -0.0009432663209736347, 0.05808233842253685, 0.038874007761478424, -0.011547867208719254, 0, -0.02252405695617199, 0.031556546688079834, -0.0061455233953893185, -0.0008396499906666577, 0.014349081553...
[ "Model_balancing_loss_func", "None", "_", "attention_mask", "batch_size", "cat", "compute_device", "concatenated_gate_logits", "def", "device", "dim", "else", "expand", "expert_attention_mask", "expert_mask", "float", "for", "functional", "gate_logits", "if", "in", "is", ...
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeForCausalLM
[ -0.00042154768016189337, 0.0280347540974617, 0.008093715645372868, 0.0022580293007194996, -0.0016495344461873174, 0.0518466979265213, 0.048796892166137695, -0.020879440009593964, -0.0025072924327105284, 0.006744762882590294, 0.024398446083068848, -0.010146469809114933, -0.0004013867001049220...
[ "GenerationMixin", "Linear", "Model_5MoeForCausalLM", "Model_5MoePreTrainedModel", "Model_5MoeTextConfig", "Model_5MoeTextModel", "MoeCausalLMOutputWithPast", "None", "__init__", "_keys_to_ignore_on_load_unexpected", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attent...
qwen3_5_moe/modeling_qwen3_5_moe.py:Qwen3_5MoeForConditionalGeneration
[ -0.0003022433375008404, 0.025982195511460304, -0.012590491212904453, 0.02495206519961357, -0.000958594202529639, 0.04761495068669319, 0.04807278513908386, -0.04761495068669319, 0, 0.009671786800026894, 0.029759343713521957, -0.01945803128182888, 0.0000947863663895987, 0.0012805102160200477...
[ "False", "GenerationMixin", "Linear", "Model_5MoeCausalLMOutputWithPast", "Model_5MoeConfig", "Model_5MoeForConditionalGeneration", "Model_5MoeModel", "Model_5MoePreTrainedModel", "None", "Tensor", "True", "__init__", "_checkpoint_conversion_mapping", "_expand_dict_for_generation", "_exp...
jamba/modeling_jamba.py:JambaRMSNorm
[ -0.00009835156379267573, 0.04154934734106064, 0.0313878208398819, 0.048549506813287735, -0.00046573654981330037, 0.03861379250884056, 0.02461347170174122, -0.03003295138478279, 0.008129219524562359, 0.04358164966106415, 0.01964561454951763, 0.004770271480083466, 0.0023992490023374557, 0.01...
[ "ModelRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "dtype", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "input_dtype", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super", ...
jamba/modeling_jamba.py:HybridMambaAttentionDynamicCache
[ -0.00009916639100993052, -0.01628785952925682, 0.03145241737365723, -0.03100309893488884, -0.00010574822954367846, 0.024038635194301605, -0.02280300296843052, -0.05391843244433403, 0.00024923234013840556, 0.028981156647205353, 0.004493202548474073, 0.00758227938786149, 0.0023870139848440886,...
[ "Any", "False", "ModelMambaAttentionDynamicCache", "None", "_", "__getitem__", "__init__", "__len__", "append", "batch_size", "beam_idx", "cache_kwargs", "cache_position", "cat", "class", "config", "conv_kernel_size", "conv_states", "def", "device", "dim", "dtype", "else"...
jamba/modeling_jamba.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
jamba/modeling_jamba.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
jamba/modeling_jamba.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
jamba/modeling_jamba.py:eager_attention_forward
[ 0, 0.020139193162322044, 0.013972979038953781, -0.018215786665678024, 0, 0.038468122482299805, 0.0527239553630352, -0.03326360881328583, 0.020704900845885277, 0.007297629024833441, 0.0280591007322073, 0.020704900845885277, 0.002489113714545965, -0.017876362428069115, -0.03484759107232094...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", "p", "query",...
jamba/modeling_jamba.py:JambaAttention
[ -0.00010017884778790176, 0.03014504536986351, 0.03261964023113251, -0.0035009870771318674, -0.00028471884434111416, 0.0287952683866024, 0.04206808656454086, -0.024858415126800537, 0.004021214321255684, 0.016197338700294495, 0.02350863628089428, 0.016984708607196808, -0.0005799828213639557, ...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelAttention", "Module", "None", "True", "__init__", "_attn_implementation", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_position", "class", "config", "contiguous", "def", "dropout", ...
jamba/modeling_jamba.py:JambaMambaMixer
[ -0.00022160449589136988, 0.014983323402702808, 0.014239875599741936, -0.0038316131103783846, -0.0004467832623049617, 0.053528208285570145, 0.002544877352192998, -0.03751549497246742, 0.010694203898310661, 0.010865768417716026, 0.02115965448319912, -0.011895157396793365, 0.003445592476055026,...
[ "A", "ACT2FN", "A_log", "B", "C", "CUDA", "Conv1d", "D", "False", "Fast", "HybridMambaAttentionDynamicCache", "Linear", "Make", "Mamba", "ModelMambaMixer", "ModelRMSNorm", "Module", "None", "Parameter", "The", "To", "True", "Turning", "_", "__init__", "a", "act", ...
jamba/modeling_jamba.py:JambaMLP
[ -0.0002434849739074707, 0.024471141397953033, 0.0228551235049963, 0.026548879221081734, -0.0009450823999941349, 0.060485273599624634, 0.036244992166757584, -0.005078915972262621, 0, -0.005021201446652412, 0.028164898976683617, -0.04640282690525055, -0.0015583038330078125, 0.009176678024232...
[ "ACT2FN", "Linear", "ModelMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
jamba/modeling_jamba.py:JambaExperts
[ -0.00033012471976689994, 0.03738027438521385, -0.010854209773242474, 0.006558961234986782, -0.001262454898096621, 0.05874042958021164, 0.0631517693400383, -0.01555576641112566, -0.005252973176538944, -0.018806224688887596, 0.011492692865431309, -0.016020117327570915, -0.0018283829558640718, ...
[ "ACT2FN", "ModelExperts", "Module", "None", "Parameter", "__init__", "act_fn", "chunk", "class", "config", "continue", "current_hidden_states", "current_state", "def", "dim", "down_proj", "dtype", "empty", "expert_hit", "expert_idx", "expert_mask", "final_hidden_states", ...
jamba/modeling_jamba.py:JambaSparseMoeBlock
[ -0.0003686461132019758, 0.03363804519176483, 0.00224837614223361, -0.01833740621805191, -0.0012920863227918744, 0.052559442818164825, 0.05115785822272301, -0.03574042022228241, -0.008759907446801662, -0.020089387893676758, 0.02300935611128807, -0.017286216840147972, -0.0008212412940338254, ...
[ "Linear", "ModelExperts", "ModelSparseMoeBlock", "Module", "__init__", "batch_size", "class", "config", "def", "dim", "dtype", "experts", "ffn_dim", "float", "forward", "functional", "hidden_dim", "hidden_size", "hidden_states", "intermediate_size", "nn", "num_experts", "...
jamba/modeling_jamba.py:JambaAttentionDecoderLayer
[ -0.00020280387252569199, 0.04808038845658302, 0.02562030218541622, -0.0015871607465669513, -0.0006736615905538201, 0.040857043117284775, 0.04582309350371361, -0.04356579855084419, 0.005671454593539238, -0.008013398386538029, 0.010383558459579945, 0.02370160073041916, 0.000031743216823088005,...
[ "False", "GradientCheckpointingLayer", "ModelAttention", "ModelAttentionDecoderLayer", "ModelMLP", "ModelRMSNorm", "ModelSparseMoeBlock", "None", "_", "__init__", "attention_mask", "cache_position", "class", "config", "def", "else", "eps", "feed_forward", "ffn_layer_class", "fo...
jamba/modeling_jamba.py:JambaMambaDecoderLayer
[ -0.00018875245586968958, 0.022458884865045547, 0.016106875613331795, 0.010945870541036129, -0.0005033398629166186, 0.06215893104672432, 0.02393345721065998, -0.037885189056396484, 0.0056997924111783504, -0.010775727219879627, 0.018262021243572235, -0.0028073606081306934, 0.000900340382941067...
[ "GradientCheckpointingLayer", "ModelMLP", "ModelMambaDecoderLayer", "ModelMambaMixer", "ModelRMSNorm", "ModelSparseMoeBlock", "None", "__init__", "attention_mask", "cache_params", "class", "config", "def", "else", "eps", "feed_forward", "ffn_layer_class", "forward", "hidden_size"...
jamba/modeling_jamba.py:JambaPreTrainedModel
[ -0.00032878422643989325, 0.027481330558657646, -0.01000366359949112, 0.008106417022645473, -0.0012576445005834103, 0.055422596633434296, 0.017362680286169052, -0.016212834045290947, -0.0030614661518484354, -0.012245864607393742, 0.010061156004667282, -0.019547389820218086, -0.003018346847966...
[ "A", "A_log", "D", "Linear", "ModelAttention", "ModelAttentionDecoderLayer", "ModelConfig", "ModelExperts", "ModelMambaDecoderLayer", "ModelMambaMixer", "ModelPreTrainedModel", "None", "OutputRecorder", "PreTrainedModel", "True", "_can_record_outputs", "_init_weights", "_is_statefu...
jamba/modeling_jamba.py:JambaModel
[ -0.00012690562289208174, 0.025606734678149223, -0.008121959865093231, -0.014890260063111782, -0.0004935218603350222, 0.05956104025244713, 0.009926839731633663, -0.028088444843888283, 0.009532022289931774, -0.01348019763827324, 0.02515551447868347, -0.018838435411453247, 0.0012761065736413002...
[ "ALL_DECODER_LAYER_TYPES", "Embedding", "False", "HybridMambaAttentionDynamicCache", "ModelMambaDecoderLayer", "ModelModel", "ModelPreTrainedModel", "ModelRMSNorm", "ModuleList", "MoeModelOutputWithPast", "None", "True", "ValueError", "You", "__init__", "_update_mamba_mask", "all", ...
jamba/modeling_jamba.py:load_balancing_loss_func
[ -0.00027690583374351263, 0.01840798556804657, 0.001643570140004158, -0.028812499716877937, -0.0009432663209736347, 0.05808233842253685, 0.038874007761478424, -0.011547867208719254, 0, -0.02252405695617199, 0.031556546688079834, -0.0061455233953893185, -0.0008396499906666577, 0.014349081553...
[ "Model_balancing_loss_func", "None", "_", "attention_mask", "batch_size", "cat", "compute_device", "concatenated_gate_logits", "def", "device", "dim", "else", "expand", "expert_attention_mask", "expert_mask", "float", "for", "functional", "gate_logits", "if", "in", "is", ...
jamba/modeling_jamba.py:JambaForCausalLM
[ -0.00044288873323239386, 0.0379493422806263, -0.00161050446331501, -0.01030722912400961, -0.001698350184597075, 0.04966210201382637, 0.04029189422726631, -0.004597258288413286, -0.005153614562004805, 0.006207762751728296, 0.02857913449406624, -0.0060027893632650375, -0.00006588427640963346, ...
[ "GenerationMixin", "Linear", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "MoeCausalLMOutputWithPast", "None", "__init__", "_pp_plan", "_tied_weights_keys", "_tp_plan", "attention_mask", "attentions", "auto_docstring", "aux_loss", "cache_position", "can_return_tuple", ...
jamba/modeling_jamba.py:JambaForSequenceClassification
[ -0.0002121782599715516, 0.015340753830969334, -0.017272552475333214, 0.0278406273573637, -0.0007244244916364551, 0.027954263612627983, 0.015227118507027626, -0.011079433374106884, 0.0032101948745548725, -0.013863496482372284, 0.02909061498939991, 0.008409005589783192, -0.003437465289607644, ...
[ "GenericForSequenceClassification", "ModelForSequenceClassification", "ModelPreTrainedModel", "class", "pass" ]
m2m_100/modeling_m2m_100.py:shift_tokens_right
[ -0.0001357423752779141, 0.026739485561847687, 0.00459761219099164, -0.04174518957734108, -0.0005782272783108056, 0.05189942196011543, 0.04039129242300987, -0.057089366018772125, 0.006572046782821417, 0.007784913759678602, 0.020759768784046173, -0.009872172959148884, -0.0006698974757455289, ...
[ "Model_tokens_right", "Modeled_input_ids", "None", "clone", "decoder_start_token_id", "def", "if", "input_ids", "is", "masked_fill_", "new_zeros", "pad_token_id", "return", "shape" ]
m2m_100/modeling_m2m_100.py:M2M100ScaledWordEmbedding
[ -0.00018138372979592532, 0.03397626802325249, 0.021971320733428, 0.020159251987934113, -0.0008741811034269631, 0.019253218546509743, 0.04462216794490814, -0.020725524052977562, 0.00569102494046092, -0.00651211803779006, 0.005974160507321358, 0.018913455307483673, 0.002505749696865678, -0.0...
[ "Embedding", "ModelScaledWordEmbedding", "__init__", "class", "def", "embed_scale", "embedding_dim", "forward", "input_ids", "nn", "num_embeddings", "padding_idx", "return", "self", "super" ]
m2m_100/modeling_m2m_100.py:M2M100SinusoidalPositionalEmbedding
[ -0.00016463026986457407, 0.032454170286655426, 0.014987517148256302, 0.006817630026489496, -0.0010071498109027743, -0.0005951339844614267, 0.043722979724407196, -0.010029240511357784, 0.005690748803317547, -0.010479993186891079, 0.025692885741591454, 0.023551812395453453, -0.0009508057846687...
[ "False", "ModelSinusoidalPositionalEmbedding", "Module", "None", "__init__", "arange", "bsz", "cat", "class", "contiguous", "cos", "create_position_ids_from_input_ids", "create_position_ids_from_inputs_embeds", "cumsum", "def", "detach", "device", "dim", "dtype", "else", "emb...
m2m_100/modeling_m2m_100.py:eager_attention_forward
[ 0.000042374336771899834, 0.02700657583773136, 0.026102591305971146, -0.010847830213606358, 0.0003637130430433899, 0.03683742135763168, 0.06101904436945915, -0.026328587904572487, 0.02000068686902523, 0.012542803771793842, 0.02395562455058098, 0.026893578469753265, 0.002500085858628154, -0....
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "functional", "if", "is", "key", "kwargs", "matmul", "module", "nn", "not", "p", "query", "return", "scaling", "size", "softmax", "torch", "tra...
m2m_100/modeling_m2m_100.py:M2M100Attention
[ -0.0001932329178089276, 0.04201360046863556, 0.04201360046863556, -0.006776387337595224, -0.0007764610345475376, 0.02010328322649002, 0.029138466343283653, -0.028234947472810745, 0.0020611512009054422, 0.014908052049577236, 0.008188134990632534, 0.009261062368750572, -0.0016235094517469406, ...
[ "ALL_ATTENTION_FUNCTIONS", "EncoderDecoderCache", "False", "Instantiating", "Linear", "ModelAttention", "Module", "None", "Please", "True", "ValueError", "__class__", "__init__", "__name__", "_attn_implementation", "a", "and", "attention_interface", "attention_mask", "attn_outp...
m2m_100/modeling_m2m_100.py:M2M100EncoderLayer
[ -0.00013405329082161188, 0.04131663590669632, 0.03793002665042877, 0.008410080336034298, -0.0005714903818443418, 0.03183412924408913, 0.01986810937523842, -0.010216272436082363, 0.002695176750421524, 0.015465517528355122, 0.009200289845466614, 0.01862635277211666, 0.0018061917508020997, 0....
[ "ACT2FN", "False", "GradientCheckpointingLayer", "LayerNorm", "Linear", "ModelAttention", "ModelEncoderLayer", "__init__", "activation_dropout", "activation_fn", "activation_function", "attention_dropout", "attention_mask", "attn_weights", "clamp", "clamp_value", "class", "config",...
m2m_100/modeling_m2m_100.py:M2M100DecoderLayer
[ -0.00016973177844192833, 0.052051082253456116, 0.02217828668653965, -0.007637930102646351, -0.0007779373554512858, 0.03032541275024414, 0.039830390363931656, -0.0341726653277874, 0.005261685233563185, -0.0035502230748534203, -0.0030976049602031708, 0.01900995895266533, -0.0000160228573804488...
[ "ACT2FN", "False", "GradientCheckpointingLayer", "LayerNorm", "Linear", "ModelAttention", "ModelDecoderLayer", "None", "True", "__init__", "activation_dropout", "activation_fn", "activation_function", "attention_dropout", "attention_mask", "cache_position", "class", "config", "cr...
m2m_100/modeling_m2m_100.py:M2M100PreTrainedModel
[ -0.00023800256894901395, 0.04887933284044266, 0.011310450732707977, 0.011253613978624344, -0.0012077742721885443, 0.016027875244617462, 0.030919020995497704, -0.013356562703847885, -0.0036659501492977142, 0.005712061654776335, -0.0064793536439538, 0.0033675588201731443, -0.003552277106791734...
[ "False", "ModelConfig", "ModelDecoderLayer", "ModelEncoderLayer", "ModelPreTrainedModel", "ModelSinusoidalPositionalEmbedding", "PreTrainedModel", "True", "_can_compile_fullgraph", "_init_weights", "_no_split_modules", "_supports_flash_attn", "_supports_flex_attn", "_supports_sdpa", "bas...
m2m_100/modeling_m2m_100.py:M2M100Encoder
[ -0.00013810688687954098, 0.04931735619902611, 0.020267406478524208, 0.00866994634270668, -0.0007353972177952528, 0.04143558442592621, 0.016664311289787292, -0.02105558291077614, 0.002772693755105138, -0.00827585719525814, 0.01801547221839428, 0.02274453267455101, -0.000008081891792244278, ...
[ "BaseModelOutput", "False", "LayerNorm", "ModelEncoder", "ModelEncoderLayer", "ModelPreTrainedModel", "ModelScaledWordEmbedding", "ModelSinusoidalPositionalEmbedding", "ModuleList", "None", "_", "__init__", "all_attentions", "and", "attention_mask", "attentions", "class", "config",...
m2m_100/modeling_m2m_100.py:M2M100Decoder
[ -0.00018160064064431936, 0.07438362389802933, 0.005556093994528055, -0.021770816296339035, -0.0011126361787319183, 0.023811830207705498, 0.049891453236341476, -0.03628469258546829, 0.006349821574985981, -0.012869727797806263, 0, 0.020410140976309776, -0.002409530570730567, 0.01972980238497...
[ "BaseModelOutputWithPastAndCrossAttentions", "DynamicCache", "EncoderDecoderCache", "False", "LayerNorm", "ModelDecoder", "ModelDecoderLayer", "ModelPreTrainedModel", "ModelScaledWordEmbedding", "ModelSinusoidalPositionalEmbedding", "ModuleList", "None", "Setting", "True", "ValueError", ...
m2m_100/modeling_m2m_100.py:M2M100Model
[ -0.000052910811064066365, 0.056867316365242004, 0.009459228254854679, -0.018694570288062096, -0.00009051684173755348, 0.03738914057612419, 0.02529923804104328, -0.040971335023641586, 0.00912339799106121, -0.005988979246467352, 0.02910531871020794, 0.0039739953354001045, -0.001749117742292583...
[ "BaseModelOutput", "ModelDecoder", "ModelEncoder", "ModelModel", "ModelPreTrainedModel", "ModelScaledWordEmbedding", "None", "Seq2SeqModelOutput", "__init__", "_tied_weights_keys", "and", "attention_mask", "attentions", "auto_docstring", "cache_position", "class", "config", "cross_...
m2m_100/modeling_m2m_100.py:M2M100ForConditionalGeneration
[ -0.00024515384575352073, 0.03341147303581238, 0.0030758872162550688, -0.005164104048162699, -0.0009100675815716386, 0.04266735166311264, 0.013883821666240692, -0.02189805917441845, 0, 0.003950681071728468, 0.032959967851638794, -0.0006631499854847789, 0.00034568458795547485, 0.009086566045...
[ "CrossEntropyLoss", "GenerationMixin", "Linear", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "None", "Seq2SeqLMOutput", "__init__", "_tied_weights_keys", "attention_mask", "auto_docstring", "base_model_prefix", "cache_position", "class", "config", "cross_at...
xcodec/modeling_xcodec.py:XcodecOutput
[ -0.000046441651647910476, 0.03374229371547699, -0.02185322903096676, 0.014493334107100964, -0.0000995178270386532, 0.019701875746250153, 0.07065500319004059, -0.042574167251586914, 0.011945676989853382, -0.015852084383368492, 0.02264583483338356, 0.028307292610406876, -0.00653898436576128, ...
[ "ModelOutput", "None", "audio_codes", "audio_values", "class" ]
xcodec/modeling_xcodec.py:XcodecEncoderOutput
[ -0.00004352082032710314, 0.022056439891457558, 0.00046304386341944337, 0.011876544915139675, -0.00027924019377678633, 0.027146387845277786, 0.060626935213804245, -0.055650096386671066, 0.010745445266366005, -0.020925341174006462, 0.017192712053656578, 0.036647625267505646, -0.008313581347465...
[ "ModelEncoderOutput", "ModelOutput", "None", "audio_codes", "class" ]
xcodec/modeling_xcodec.py:XcodecDecoderOutput
[ -0.0001151128817582503, 0.04715023562312126, -0.0068288506008684635, 0.008500644005835056, -0.0008854836924001575, 0.015187816694378853, 0.07480566203594208, -0.06075126677751541, 0.007650579325854778, -0.021874990314245224, 0.010654140263795853, 0.025275247171521187, -0.006913856603205204, ...
[ "ModelDecoderOutput", "ModelOutput", "None", "audio_values", "class" ]
xcodec/modeling_xcodec.py:ResidualUnit
[ -0.00011804773384938017, 0.012213058769702911, 0.00999766681343317, -0.0036355152260512114, -0.00009142042836174369, 0.060440439730882645, 0.017950356006622314, -0.047034479677677155, 0.010111276991665363, 0.012667498551309109, 0.036127932369709015, -0.03340129554271698, 0.004175161942839622...
[ "Conv1d", "ELU", "ModelUnit", "Module", "__init__", "activation", "class", "config", "conv1", "conv2", "def", "dilation", "forward", "groups", "hidden_state", "in_channels", "kernel_size", "nn", "out_channels", "output_tensor", "padding", "return", "self", "stride", "...
xcodec/modeling_xcodec.py:SemanticEncoderBlock
[ -0.0001445545203750953, -0.019645139575004578, 0.00810933019965887, -0.023071616888046265, -0.0005710796103812754, 0.05984914302825928, 0.011307376436889172, -0.05162559822201729, 0.012792183086276054, -0.0015062225284054875, 0.01895984262228012, -0.02718338929116726, 0.004768515005707741, ...
[ "Conv1d", "ModelEncoderBlock", "Module", "ModuleList", "ResidualUnit", "__init__", "block_dilations", "class", "config", "conv", "def", "dilation", "else", "for", "forward", "hidden_state", "if", "in", "in_channels", "kernel", "kernel_size", "nn", "out_channels", "paddi...
xcodec/modeling_xcodec.py:SemanticEncoder
[ -0.00003513231058605015, 0.0070989360101521015, 0.019684700295329094, -0.00639187078922987, 0.00009987298108171672, 0.04276331141591072, -0.0017464514821767807, -0.04570470377802849, 0.01176556758582592, 0.005967631470412016, 0.034844182431697845, -0.02138165570795536, 0.003139370121061802, ...
[ "Conv1d", "ModelEncoder", "ModelEncoderBlock", "Model_hidden_size", "Module", "ModuleList", "Number", "ValueError", "__init__", "block", "channel_ratios", "class", "config", "conv", "conv_blocks", "def", "enumerate", "for", "forward", "hidden_state", "i", "if", "in", "i...
xcodec/modeling_xcodec.py:SemanticDecoderBlock
[ -0.00021795977954752743, 0.020413793623447418, 0.013382376171648502, -0.03243524953722954, -0.0007867816020734608, 0.03969348594546318, 0.017691954970359802, -0.07348965853452682, 0.0069463602267205715, -0.009413027204573154, 0.010830651968717575, -0.022114943712949753, 0.001183716463856399,...
[ "Conv1d", "ConvTranspose1d", "ModelDecoderBlock", "Module", "ModuleList", "ResidualUnit", "__init__", "block_dilations", "class", "config", "conv", "def", "dilation", "else", "for", "forward", "hidden_state", "if", "in", "in_channels", "kernel_size", "nn", "out_channels",...
xcodec/modeling_xcodec.py:SemanticDecoder
[ -0.00015567494847346097, 0.03940247371792793, 0.008274518884718418, -0.011595584452152252, -0.0004749405197799206, 0.03107166290283203, 0.02589305303990841, -0.06574583798646927, 0.00917514692991972, -0.013734576292335987, 0.013565707951784134, -0.02454211190342903, -0.0006121455226093531, ...
[ "Conv1d", "ModelDecoder", "ModelDecoderBlock", "Model_hidden_size", "Module", "ModuleList", "__init__", "block", "channel_ratios", "class", "config", "conv1", "conv2", "conv_blocks", "def", "else", "enumerate", "for", "forward", "hidden_state", "i", "if", "in", "in_chan...
xcodec/modeling_xcodec.py:XcodecEuclideanCodebook
[ -0.00022551401343662292, 0.006446313578635454, 0.018369141966104507, 0.00550503795966506, -0.0009626685059629381, 0.006931213196367025, 0.045181240886449814, -0.036966472864151, 0.007244972046464682, 0.00036010934854857624, 0.0353691540658474, 0.002681210171431303, -0.0016115783946588635, ...
[ "F", "ModelEuclideanCodebook", "Module", "Tensor", "True", "__init__", "class", "clone", "cluster_size", "codebook_dim", "codebook_size", "config", "decode", "def", "dim", "dist", "embed", "embed_avg", "embed_ind", "embedding", "encode", "hidden_states", "indices", "ini...
xcodec/modeling_xcodec.py:XcodecVectorQuantization
[ -0.0001221633137902245, 0.018449297174811363, 0.017099348828196526, 0.006159140262752771, -0.000379673030693084, 0.009505887515842915, 0.0431983545422554, -0.04139842465519905, 0.002235852414742112, 0.017774323001503944, 0.0431983545422554, 0.009224648587405682, -0.003346747485920787, 0.00...
[ "ModelEuclideanCodebook", "ModelVectorQuantization", "Module", "__init__", "class", "codebook", "config", "decode", "def", "embed_in", "embed_ind", "encode", "hidden_states", "nn", "permute", "quantize", "return", "self", "super" ]
xcodec/modeling_xcodec.py:XcodecResidualVectorQuantization
[ -0.00011209374497411773, 0.01124114915728569, -0.011975495144724846, -0.003036240115761757, -0.00028773670783266425, 0.043382927775382996, 0.02632349543273449, -0.043608881533145905, 0.00610072398558259, -0.0036717320326715708, 0.029373856261372566, 0.012709842063486576, 0.000476619083201512...
[ "ModelResidualVectorQuantization", "ModelVectorQuantization", "Module", "ModuleList", "None", "_", "__init__", "all_indices", "and", "append", "bandwidth", "bw_per_q", "class", "codebook_size", "codes", "config", "decode", "def", "device", "embeddings", "encode", "enumerate...
xcodec/modeling_xcodec.py:XcodecPreTrainedModel
[ -0.00007363935583271086, 0.049597859382629395, -0.009930793195962906, -0.011950615793466568, -0.00031559725175611675, 0.015485304407775402, 0.020198224112391472, -0.041967421770095825, 0.007630439940840006, -0.01829061284661293, 0.02749202586710453, 0.025472203269600868, -0.00077145994873717...
[ "AttributeError", "Conv1d", "ConvTranspose1d", "Embedding", "GroupNorm", "LayerNorm", "Linear", "Model", "ModelConfig", "ModelEuclideanCodebook", "ModelModel", "ModelPreTrainedModel", "None", "PreTrainedAudioTokenizerBase", "Snake1d", "Tensor", "True", "ValueError", "__class__", ...
xcodec/modeling_xcodec.py:XcodecModel
[ -0.00010499545896891505, 0.046529751271009445, -0.01750512421131134, -0.008074944838881493, -0.0003988062671851367, 0.0313962884247303, 0.031848032027482986, -0.032977394759655, 0.006126793567091227, -0.022700194269418716, 0.0478849858045578, 0.024394238367676735, -0.003811599686741829, -0...
[ "Audio", "AutoModel", "ConvTranspose1d", "F", "False", "Identity", "Linear", "ModelDecoderOutput", "ModelEncoderOutput", "ModelModel", "ModelOutput", "ModelPreTrainedModel", "ModelResidualVectorQuantization", "None", "Select", "SemanticDecoder", "SemanticEncoder", "Tanh", "This",...
qwen3_vl/modeling_qwen3_vl.py:BaseModelOutputWithDeepstackFeatures
[ -0.000323236221447587, 0.014142042025923729, 0.015544557943940163, 0.02173900604248047, -0.001380602247081697, 0.05095810070633888, 0.0631132423877716, -0.030621610581874847, 0.00876572821289301, -0.011629199609160423, 0.031322870403528214, 0.01601206324994564, -0.0004565483541227877, 0.01...
[ "ModelModelOutputWithDeepstackFeatures", "ModelModelOutputWithPooling", "None", "class", "deepstack_features", "r" ]
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLVisionMLP
[ -0.00020412541925907135, 0.02817285805940628, 0.032262466847896576, 0.033852867782115936, -0.00048280099872499704, 0.04589449241757393, 0.025673652067780495, -0.030444862321019173, 0.0004544009279925376, 0.005850411951541901, 0.04589449241757393, -0.022152045741677284, 0.0011999025009572506,...
[ "ACT2FN", "Linear", "ModelVisionMLP", "Module", "__init__", "act_fn", "class", "config", "def", "forward", "hidden_act", "hidden_size", "hidden_state", "intermediate_size", "linear_fc1", "linear_fc2", "nn", "return", "self", "super" ]
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLVisionPatchEmbed
[ -0.00006803657743148506, 0.010169493034482002, 0.01101226918399334, 0.00854012742638588, -0.00019313610391691327, 0.008989606983959675, 0.027081191539764404, -0.03191310539841652, 0.013877706602215767, -0.0005021538236178458, 0.008483941666781902, -0.00022386229829862714, -0.0015661581419408...
[ "Conv3d", "ModelVisionPatchEmbed", "Module", "__init__", "class", "config", "def", "dtype", "embed_dim", "forward", "hidden_size", "hidden_states", "in_channels", "kernel_size", "nn", "patch_size", "proj", "return", "self", "stride", "super", "target_dtype", "temporal_pat...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLVisionRotaryEmbedding
[ -0.00022179921506904066, 0.03434310480952263, 0.01224904041737318, 0.008013390935957432, -0.0010374479461461306, 0.04464603587985039, 0.0668545737862587, -0.016713643446564674, 0.005323181394487619, 0.0345720574259758, 0.00887196883559227, -0.00875749159604311, -0.0022466115187853575, 0.00...
[ "False", "ModelVisionRotaryEmbedding", "Module", "Tensor", "__init__", "arange", "class", "def", "device", "dim", "dtype", "float", "forward", "freqs", "inv_freq", "nn", "outer", "persistent", "register_buffer", "return", "self", "seq", "seqlen", "super", "theta", "...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLVisionPatchMerger
[ -0.00025101262144744396, 0.03235587850213051, 0.011313244700431824, 0.031224554404616356, -0.0006858654669485986, 0.021042635664343834, 0.00933342706412077, -0.04683683067560196, 0.00574147142469883, 0.0191193837672472, 0.011200112290680408, 0.007212193217128515, -0.000643440755084157, -0....
[ "False", "GELU", "LayerNorm", "Linear", "ModelVisionPatchMerger", "Module", "__init__", "act_fn", "class", "config", "def", "else", "eps", "forward", "hidden_size", "if", "linear_fc1", "linear_fc2", "nn", "norm", "out_hidden_size", "return", "self", "spatial_merge_size"...
qwen3_vl/modeling_qwen3_vl.py:rotate_half
[ 0.00002049485374300275, 0.013860220089554787, 0.03434192016720772, 0.002974055241793394, 0.0003507140791043639, 0.028506038710474968, 0.01975221559405327, -0.02008890174329281, 0.014477476477622986, 0.018742159008979797, -0.001487027620896697, -0.01217679213732481, 0.00022445700597018003, ...
[ "Model_half", "cat", "def", "dim", "return", "shape", "torch", "x", "x1", "x2" ]
qwen3_vl/modeling_qwen3_vl.py:apply_rotary_pos_emb_vision
[ -0.00006463831959990785, 0.03513491153717041, 0.018587501719594002, -0.0022809358779340982, -0.0004639792023226619, 0.01700076460838318, 0.060749396681785583, -0.009180412627756596, 0.016887426376342773, 0.04692211002111435, 0.009010405279695988, 0.01983422413468361, 0.0013458938337862492, ...
[ "Model_rotary_pos_emb_vision", "cos", "def", "dtype", "float", "k", "k_embed", "orig_q_dtype", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze" ]
qwen3_vl/modeling_qwen3_vl.py:repeat_kv
[ -0.00025096136960200965, -0.0023088448215276003, -0.004072744864970446, -0.009292741306126118, -0.000559285341296345, 0.03143470734357834, 0.009694280102849007, -0.058739304542541504, 0.011185707524418831, 0.05162634328007698, 0.005736260209232569, -0.02317449077963829, 0.0006775957299396396...
[ "Model_kv", "None", "batch", "def", "expand", "head_dim", "hidden_states", "if", "n_rep", "num_key_value_heads", "reshape", "return", "shape", "slen" ]
qwen3_vl/modeling_qwen3_vl.py:eager_attention_forward
[ 0, 0.020139193162322044, 0.013972979038953781, -0.018215786665678024, 0, 0.038468122482299805, 0.0527239553630352, -0.03326360881328583, 0.020704900845885277, 0.007297629024833441, 0.0280591007322073, 0.020704900845885277, 0.002489113714545965, -0.017876362428069115, -0.03484759107232094...
[ "Model_attention_forward", "None", "attention_mask", "attn_output", "attn_weights", "contiguous", "def", "dim", "dropout", "dtype", "float32", "functional", "if", "is", "key", "key_states", "kwargs", "matmul", "module", "nn", "not", "num_key_value_groups", "p", "query",...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLVisionAttention
[ -0.00014312515850178897, 0.03664004057645798, 0.021825702860951424, 0.021825702860951424, -0.0004081717343069613, 0.0252182986587286, 0.03958028927445412, -0.029628673568367958, 0.0055129691027104855, 0.03189040347933769, 0.028837068006396294, 0.03189040347933769, 0.0010036430321633816, -0...
[ "ALL_ATTENTION_FUNCTIONS", "False", "Linear", "ModelVisionAttention", "Module", "None", "Tensor", "_", "__init__", "_attn_implementation", "apply_rotary_pos_emb_vision", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_outputs", "cat", "class", "...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLVisionBlock
[ -0.00016528498963452876, 0.013067236170172691, 0.01606534607708454, 0.01855434477329254, -0.00040481562609784305, 0.043896861374378204, 0.026587016880512238, -0.01866747997701168, 0.005232551135122776, 0.021043341606855392, 0.021043341606855392, 0.019120024517178535, 0.0016475465381518006, ...
[ "GradientCheckpointingLayer", "LayerNorm", "ModelVisionAttention", "ModelVisionBlock", "ModelVisionMLP", "None", "Tensor", "__init__", "attn", "attn_implementation", "class", "config", "cu_seqlens", "def", "eps", "forward", "hidden_size", "hidden_states", "kwargs", "mlp", "nn...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLTextRotaryEmbedding
[ -0.00028448071680031717, 0.04476313292980194, 0.002913662465289235, -0.02087399922311306, -0.0013263686560094357, 0.05728752911090851, 0.032470665872097015, -0.017163066193461418, -0.007769766263663769, 0.026440398767590523, 0.02052609995007515, 0.0017250040546059608, -0.0010509478161111474,...
[ "False", "ModelTextRotaryEmbedding", "Module", "None", "ROPE_INIT_FUNCTIONS", "Tensor", "__init__", "and", "apply_interleaved_mrope", "arange", "attention_factor", "attention_scaling", "base", "cat", "class", "clone", "compute_default_rope_parameters", "config", "cos", "cpu", ...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLTextRMSNorm
[ -0.0000997777096927166, 0.04746946692466736, 0.03865370899438858, 0.048825737088918686, -0.0005686446093022823, 0.03435885161161423, 0.027125408872961998, -0.03481094166636467, 0.008702735416591167, 0.035715121775865555, 0.021474281325936317, 0.011415275745093822, 0.002090916968882084, 0.0...
[ "ModelTextRMSNorm", "Module", "Parameter", "True", "__init__", "class", "def", "dtype", "eps", "extra_repr", "f", "float32", "forward", "hidden_size", "hidden_states", "input_dtype", "keepdim", "mean", "nn", "ones", "pow", "return", "rsqrt", "self", "shape", "super"...
qwen3_vl/modeling_qwen3_vl.py:apply_rotary_pos_emb
[ -0.0001444592053303495, 0.027112245559692383, 0.028019767254590988, 0.0028360087890177965, -0.0005707467789761722, 0.021667107939720154, 0.046510547399520874, -0.002183726755902171, 0.01293220091611147, 0.03652779385447502, 0.007884104736149311, 0.0017654155381023884, -0.0007834474672563374,...
[ "Model_rotary_pos_emb", "cos", "def", "k", "k_embed", "q", "q_embed", "return", "rotate_half", "sin", "unsqueeze", "unsqueeze_dim" ]
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLTextAttention
[ -0.00005613891698885709, 0.046707578003406525, 0.0357043519616127, 0.0008385750697925687, -0.00038420071359723806, 0.025936178863048553, 0.033234238624572754, -0.027171235531568527, 0.0009052400127984583, 0.01998545415699482, 0.022455567494034767, 0.037276241928339005, -0.0011087435996159911...
[ "ALL_ATTENTION_FUNCTIONS", "Linear", "ModelTextAttention", "ModelTextRMSNorm", "Module", "None", "Tensor", "True", "__init__", "_attn_implementation", "apply_rotary_pos_emb", "attention_dropout", "attention_interface", "attention_mask", "attn_output", "attn_weights", "cache_kwargs", ...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLTextMLP
[ -0.00019116139446850866, 0.03247242420911789, 0.03338714316487312, 0.01726526953279972, -0.0005788438720628619, 0.057398442178964615, 0.02641243115067482, -0.016922250390052795, -0.002029526513069868, -0.008232446387410164, 0.035216573625802994, -0.026069412007927895, -0.0016507769469171762,...
[ "ACT2FN", "Linear", "ModelTextMLP", "Module", "__init__", "act_fn", "class", "config", "def", "down_proj", "forward", "gate_proj", "hidden_act", "hidden_size", "intermediate_size", "nn", "return", "self", "super", "up_proj", "x" ]
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLTextDecoderLayer
[ -0.00016621610848233104, 0.0436830073595047, 0.024897051975131035, 0.0035789511166512966, -0.0007568137953057885, 0.039382610470056534, 0.036666568368673325, -0.03825092315673828, 0.005573544651269913, -0.0032252997625619173, 0.01007198914885521, 0.028518440201878548, -0.002178492024540901, ...
[ "False", "GradientCheckpointingLayer", "ModelTextAttention", "ModelTextDecoderLayer", "ModelTextMLP", "ModelTextRMSNorm", "None", "Tensor", "_", "__init__", "attention_mask", "cache_position", "class", "config", "def", "eps", "forward", "hidden_size", "hidden_states", "input_la...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLModelOutputWithPast
[ -0.0002648343506734818, 0.008071142248809338, 0.014355102553963661, 0.0052174171432852745, -0.0014196563279256225, 0.042200542986392975, 0.04911866411566734, -0.03851087763905525, 0.016949398443102837, 0.0025078190956264734, 0.008071142248809338, 0.03182335942983627, -0.001830419758334756, ...
[ "ModelModelOutputWithPast", "ModelOutput", "None", "attentions", "class", "hidden_states", "last_hidden_state", "past_key_values", "r", "rope_deltas" ]
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLPreTrainedModel
[ -0.0003329527680762112, 0.04890584945678711, 0.004803253337740898, 0.010014055296778679, -0.0017757480964064598, 0.032836783677339554, 0.02724754437804222, -0.034234095364809036, -0.0043083722703158855, 0.015719737857580185, 0.009431842714548111, -0.0039881556294858456, -0.005560129415243864...
[ "ModelConfig", "ModelPreTrainedModel", "ModelTextAttention", "ModelTextDecoderLayer", "ModelVisionBlock", "ModelVisionRotaryEmbedding", "PreTrainedModel", "True", "_can_compile_fullgraph", "_can_record_outputs", "_init_weights", "_no_split_modules", "_skip_keys_device_placement", "_support...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLVisionModel
[ -0.00022815876582171768, 0.016300087794661522, -0.022299425676465034, 0.02060149982571602, -0.0008206641068682075, 0.03576963394880295, 0.04776830971240997, -0.050937771797180176, 0.002645934233441949, 0.020941084250807762, 0.019129963591694832, 0.0078670559450984, -0.00037142125074751675, ...
[ "BaseModelOutputWithDeepstackFeatures", "Embedding", "F", "False", "ModelPreTrainedModel", "ModelVisionAttention", "ModelVisionBlock", "ModelVisionConfig", "ModelVisionModel", "ModelVisionPatchEmbed", "ModelVisionPatchMerger", "ModelVisionRotaryEmbedding", "ModuleList", "None", "T", "T...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLTextModel
[ -0.00011091621854575351, 0.039729926735162735, 0.00412450497969985, 0.0027075831312686205, -0.000757562112994492, 0.03254711255431175, 0.03905653581023216, -0.042199015617370605, 0.004685662221163511, 0.0054993401281535625, 0.03187372535467148, 0.02042612060904503, -0.002160455100238323, 0...
[ "BaseModelOutputWithPast", "DynamicCache", "Embedding", "False", "ModelPreTrainedModel", "ModelTextConfig", "ModelTextDecoderLayer", "ModelTextModel", "ModelTextRMSNorm", "ModelTextRotaryEmbedding", "ModuleList", "None", "ValueError", "You", "__init__", "_deepstack_process", "_no_spl...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLModel
[ -0.00009757102816365659, 0.03965146467089653, -0.018929654732346535, 0.0017151499632745981, -0.0003115272265858948, 0.054436758160591125, 0.032034799456596375, -0.04794019088149071, 0.005880514159798622, 0.02206592820584774, 0.03405097499489784, 0.019601713865995407, 0.0005320464842952788, ...
[ "False", "Image", "ModelConfig", "ModelModel", "ModelModelOutputWithPast", "ModelPreTrainedModel", "ModelTextDecoderLayer", "ModelTextModel", "ModelVisionBlock", "ModelVisionModel", "None", "True", "ValueError", "Video", "You", "_", "__init__", "_checkpoint_conversion_mapping", "...
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLCausalLMOutputWithPast
[ -0.0002988479973282665, 0.017503436654806137, 0.020285440608859062, -0.0002644352207425982, -0.0015503872418776155, 0.049844224005937576, 0.049612391740083694, -0.02874736674129963, 0.016923854127526283, 0.0007607040461152792, 0.017155686393380165, 0.02156052552163601, -0.0010577408829703927...
[ "ModelCausalLMOutputWithPast", "ModelOutput", "None", "attentions", "class", "hidden_states", "logits", "loss", "past_key_values", "r", "rope_deltas" ]
qwen3_vl/modeling_qwen3_vl.py:Qwen3VLForConditionalGeneration
[ -0.00028949574334546924, 0.03455477952957153, -0.010002699680626392, 0.019778065383434296, -0.000994586618617177, 0.03910146281123161, 0.031826771795749664, -0.050922833383083344, -0.0006997627206146717, 0.01182137243449688, 0.03523678332567215, 0.00115087884478271, 0.0005896477960050106, ...
[ "False", "GenerationMixin", "Linear", "ModelCausalLMOutputWithPast", "ModelConfig", "ModelForConditionalGeneration", "ModelModel", "ModelPreTrainedModel", "None", "Tensor", "True", "__init__", "_checkpoint_conversion_mapping", "_expand_dict_for_generation", "_expand_dict_for_generation_v...
falcon_mamba/modeling_falcon_mamba.py:FalconMambaCache
[ -0.0001922514638863504, 0.02347416989505291, 0.010376263409852982, -0.01417522318661213, -0.0007654620567336679, 0.009695853106677532, -0.01292780414223671, -0.04944317787885666, 0.006407200824469328, 0.03061848320066929, 0.015989651903510094, -0.005074730142951012, 0.001119842636398971, 0...
[ "ModelCache", "None", "True", "_", "__init__", "_dtype", "_dynamo", "append", "cache_position", "clamp", "class", "config", "conv_kernel", "conv_kernel_size", "conv_state", "conv_states", "def", "device", "dims", "dtype", "else", "float16", "for", "if", "in", "inter...
falcon_mamba/modeling_falcon_mamba.py:rms_forward
[ -0.00017206810298375785, 0.03139933943748474, 0.039757438004016876, 0.03907975181937218, -0.00045884825522080064, 0.007510993164032698, 0.029592182487249374, -0.032980602234601974, 0.01248067244887352, 0.032980602234601974, 0.03456186130642891, -0.0034166546538472176, 0.0024142477195709944, ...
[ "Model_forward", "True", "def", "float32", "hidden_states", "keepdim", "mean", "pow", "return", "rsqrt", "to", "torch", "variance", "variance_epsilon" ]
falcon_mamba/modeling_falcon_mamba.py:FalconMambaMixer
[ -0.0001887798571260646, 0.022454118356108665, 0.00946036446839571, 0.0008370428113266826, -0.00034550277632661164, 0.05015132948756218, 0.013905595988035202, -0.035333890467882156, 0.008662502281367779, 0.010144246742129326, 0.020174512639641762, -0.0016028479440137744, 0.001937665045261383,...
[ "A", "ACT2FN", "A_log", "B", "C", "Conv1d", "D", "Falling", "False", "ImportError", "Linear", "Mamba", "Model", "ModelMixer", "Model_inner_fn", "Model_ssm", "Module", "None", "Parameter", "The", "To", "True", "_", "__init__", "_is_quantized", "act", "activation", ...
falcon_mamba/modeling_falcon_mamba.py:FalconMambaRMSNorm
[ -0.00008794592577032745, 0.03575112298130989, 0.02059083804488182, 0.05475805327296257, -0.00028284118161536753, 0.044802043586969376, 0.02138279192149639, -0.028962936252355576, 0.008654939942061901, 0.0366562157869339, 0.014481468126177788, 0.0026304228231310844, 0.0032385315280407667, 0...
[ "ModelRMSNorm", "Module", "Parameter", "__init__", "class", "def", "device", "eps", "extra_repr", "f", "forward", "hidden_size", "hidden_states", "nn", "ones", "return", "rms_forward", "self", "shape", "super", "to", "torch", "variance_epsilon", "weight" ]
falcon_mamba/modeling_falcon_mamba.py:FalconMambaBlock
[ -0.00018491271475795656, -0.0019252155907452106, 0.02095087617635727, 0.004699791315943003, -0.0003945984353777021, 0.05096159130334854, 0.02672652341425419, -0.03623935207724571, 0.004190175328403711, 0.015401724725961685, -0.005350966937839985, -0.007587614469230175, 0.000422910408815369, ...
[ "GradientCheckpointingLayer", "ModelBlock", "ModelMixer", "ModelRMSNorm", "None", "__init__", "attention_mask", "cache_params", "cache_position", "class", "config", "def", "dtype", "eps", "float32", "forward", "hidden_size", "hidden_states", "if", "layer_idx", "layer_norm_eps...
falcon_mamba/modeling_falcon_mamba.py:FalconMambaPreTrainedModel
[ -0.00028834445402026176, 0.028934119269251823, -0.004471118561923504, 0.0049837310798466206, -0.0011818562634289265, 0.0323515348136425, 0.011505299247801304, -0.003146870294585824, -0.009170065633952618, 0.020846234634518623, -0.00032750231912359595, -0.03349067270755768, -0.002748171798884...
[ "A", "A_log", "D", "Embedding", "Linear", "ModelBlock", "ModelConfig", "ModelMixer", "ModelPreTrainedModel", "ModelRMSNorm", "None", "PreTrainedModel", "True", "_init_weights", "_is_stateful", "_no_split_modules", "a", "arange", "b_c_rms", "backbone", "base_model_prefix", "...
falcon_mamba/modeling_falcon_mamba.py:FalconMambaOutput
[ -0.00017384765669703484, 0.02076900005340576, 0.031952306628227234, 0.02179603837430477, -0.001112625002861023, 0.03309345990419388, 0.05135192349553108, -0.01997019164264202, 0.017916115000844002, -0.016204385086894035, -0.003166701877489686, 0.007988076657056808, -0.0008736958843655884, ...
[ "ModelOutput", "None", "cache_params", "class", "hidden_states", "last_hidden_state", "r" ]
falcon_mamba/modeling_falcon_mamba.py:FalconMambaCausalLMOutput
[ -0.0001988249714486301, 0.028416814282536507, 0.035378362983465195, 0.004079923965036869, -0.0011483702110126615, 0.03857382759451866, 0.050899192690849304, -0.007817476987838745, 0.01791742816567421, -0.015977324917912483, 0.0023538023233413696, -0.0003940835886169225, -0.002425129758194089...
[ "ModelCausalLMOutput", "ModelOutput", "None", "cache_params", "class", "hidden_states", "logits", "loss", "r" ]
falcon_mamba/modeling_falcon_mamba.py:FalconMambaModel
[ 0.000011523718967509922, 0.0359627828001976, -0.00073400599649176, 0.007979242131114006, 0.00009877473348751664, 0.040682896971702576, 0.027758773416280746, 0.0007550779264420271, 0.005310129839926958, 0.007642091251909733, 0.012980316765606403, -0.013879386708140373, 0.0006497182184830308, ...
[ "Embedding", "False", "ModelBlock", "ModelCache", "ModelModel", "ModelOutput", "ModelPreTrainedModel", "ModelRMSNorm", "ModuleList", "None", "True", "ValueError", "You", "__init__", "a", "all_hidden_states", "and", "arange", "are", "attention_mask", "auto_docstring", "autom...
falcon_mamba/modeling_falcon_mamba.py:FalconMambaForCausalLM
[ -0.00022307969629764557, 0.030140545219182968, 0.014617031440138817, -0.00611875718459487, -0.0008994006784632802, 0.019602686166763306, 0.012747411616146564, -0.007648446597158909, -0.0009418920380994678, 0.007875067181885242, 0.012917377054691315, -0.013200652785599232, 0.00167840917129069...
[ "Any", "CrossEntropyLoss", "False", "GenerationMixin", "Linear", "ModelCache", "ModelCausalLMOutput", "ModelForCausalLM", "ModelModel", "ModelPreTrainedModel", "Model_outputs", "None", "True", "__init__", "_tied_weights_keys", "_update_model_kwargs_for_generation", "and", "arange",...