Dataset schema (per-column type and observed min/max):

| Column | Type | Min | Max |
|---|---|---|---|
| modelId | string (length) | 9 | 122 |
| author | string (length) | 2 | 36 |
| last_modified | timestamp[us, tz=UTC] | 2021-05-20 01:31:09 | 2026-05-05 06:14:24 |
| downloads | int64 | 0 | 4.03M |
| likes | int64 | 0 | 4.32k |
| library_name | string (189 classes) | | |
| tags | list (length) | 1 | 237 |
| pipeline_tag | string (53 classes) | | |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 | 2026-05-05 05:54:22 |
| card | string (length) | 500 | 661k |
| entities | list (length) | 0 | 12 |
Olak17/Qwen2.5-Coder-7B-Instruct
Olak17
2026-04-10T11:33:56Z
0
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "code", "codeqwen", "chat", "qwen", "qwen-coder", "conversational", "en", "arxiv:2409.12186", "arxiv:2309.00071", "arxiv:2407.10671", "base_model:Qwen/Qwen2.5-Coder-7B", "base_model:finetune:Qwen/Qwen2.5-Coder-7B", "license:a...
text-generation
2026-04-10T11:33:56Z
# Qwen2.5-Coder-7B-Instruct [Qwen Chat](https://chat.qwenlm.ai/) ## Introduction Qwen2.5-Coder is the latest series of C...
[]
mt628754/test047
mt628754
2026-02-27T05:06:20Z
0
0
peft
[ "peft", "safetensors", "qwen3", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "dataset:u-10bei/sft_alfworld_trajectory_dataset_v5", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache...
text-generation
2026-02-27T05:04:49Z
# qwen3-4b-agent-trajectory-lora-1 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **LoRA + Unsloth**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **multi-...
[ { "start": 65, "end": 69, "text": "LoRA", "label": "training method", "score": 0.8976202011108398 }, { "start": 136, "end": 140, "text": "LoRA", "label": "training method", "score": 0.9230801463127136 }, { "start": 182, "end": 186, "text": "LoRA", "lab...
qualiaadmin/4f2987b5-50f6-4059-8b7a-07b610a41adf
qualiaadmin
2025-12-15T14:38:33Z
0
0
lerobot
[ "lerobot", "safetensors", "smolvla", "robotics", "dataset:Calvert0921/SmolVLA_LiftRedCubeDouble_Franka_100", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2025-12-15T14:38:18Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
caiyuchen/Spiral-step-10
caiyuchen
2025-11-15T11:34:50Z
2
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "math", "rl", "conversational", "en", "arxiv:2506.24119", "arxiv:2510.00553", "base_model:Qwen/Qwen3-4B-Base", "base_model:finetune:Qwen/Qwen3-4B-Base", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", ...
text-generation
2025-11-15T11:09:36Z
--- license: apache-2.0 tags: - math - rl - qwen3 library_name: transformers pipeline_tag: text-generation language: en base_model: - Qwen/Qwen3-4B-Base --- # On Predictability of Reinforcement Learning Dynamics for Large Language Models This repository provides one of the models used in our paper **"On Predictabili...
[]
AIaLT-IICT/t5_bg_base_uncased
AIaLT-IICT
2025-10-20T09:41:24Z
0
0
transformers
[ "transformers", "safetensors", "t5", "text2text-generation", "bg", "dataset:uonlp/CulturaX", "dataset:others", "license:mit", "text-generation-inference", "endpoints_compatible", "region:us" ]
null
2025-10-20T07:49:35Z
# Model Card for Model ID T5 model trained on Bulgarian literature, Web, Parallel English-Bulgarian texts, Bulgarian and English Wikipedia, and other datasets - uncased. ## Model Details 403M parameter T5 model trained on 35B (41B depending on tokenization) tokens for 3 epochs with T5 Span Corruption objective. - T...
[]
ppddddpp/unified-multimodal-chestxray
ppddddpp
2025-10-31T08:50:51Z
0
0
null
[ "chest-xray", "medical", "multimodal", "retrieval", "explanation", "clinicalbert", "swin-transformer", "deep-learning", "image-text", "en", "dataset:openi", "license:mit", "region:us" ]
null
2025-10-31T07:44:21Z
# Multimodal Chest X-ray Retrieval & Diagnosis (ClinicalBERT + MedCLIP/Swin) This model jointly encodes chest X-rays (DICOM) and radiology reports (XML) to: - Predict medical conditions from multimodal input (image + text) - Retrieve similar cases using shared disease-aware embeddings - Provide visual explanations us...
[]
danielsanjosepro/ditmeanflow_stack_cake_v2
danielsanjosepro
2025-11-24T22:41:07Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "ditmeanflow", "dataset:LSY-lab/stack_cake_v2", "license:apache-2.0", "region:us" ]
robotics
2025-11-24T22:40:49Z
# Model Card for ditmeanflow <!-- Provide a quick summary of what the model is/does. --> _Model type not recognized — please update this template._ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). See the full documentation at [LeRobot Docs](https://huggin...
[]
loukikdivase/so101_white_a2a3_state_shortchunk_policy_v1
loukikdivase
2026-03-20T13:27:00Z
32
0
lerobot
[ "lerobot", "safetensors", "act", "robotics", "dataset:loukikdivase/so101_white_a2a3_v1", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-03-20T13:26:19Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
Thireus/Qwen3.5-122B-A10B-THIREUS-IQ4_KS-SPECIAL_SPLIT
Thireus
2026-03-18T02:06:27Z
179
0
null
[ "gguf", "arxiv:2505.23786", "license:mit", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2026-03-17T18:35:30Z
# Qwen3.5-122B-A10B ## 🤔 What is this [HuggingFace repository](https://huggingface.co/Thireus/Qwen3.5-122B-A10B-THIREUS-BF16-SPECIAL_SPLIT/) about? This repository provides **GGUF-quantized tensors** for the Qwen3.5-122B-A10B model (official repo: https://huggingface.co/Qwen/Qwen3.5-122B-A10B). These GGUF shards are...
[]
Muapi/public-flashing-mardi-gras-shirt-lift-pose-for-flux
Muapi
2025-08-18T09:27:09Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-18T09:26:57Z
# Public Flashing - Mardi Gras Shirt Lift Pose for FLUX ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: lifting her shirt to reveal her breasts ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https...
[]
jminneman/Seed-Coder-8B-Reasoning-bf16-Q4_K_M-GGUF
jminneman
2026-04-29T16:17:22Z
0
0
transformers
[ "transformers", "gguf", "llama-cpp", "gguf-my-repo", "text-generation", "base_model:ByteDance-Seed/Seed-Coder-8B-Reasoning-bf16", "base_model:quantized:ByteDance-Seed/Seed-Coder-8B-Reasoning-bf16", "license:mit", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2026-04-29T16:17:01Z
# jminneman/Seed-Coder-8B-Reasoning-bf16-Q4_K_M-GGUF This model was converted to GGUF format from [`ByteDance-Seed/Seed-Coder-8B-Reasoning-bf16`](https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Reasoning-bf16) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) spac...
[]
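GGUF-my-repo quants like this record can be run straight from the Hub with llama-cpp-python; a sketch, assuming the repo's quant file matches the usual `*Q4_K_M.gguf` naming (check the repo's file list):

```python
# Hedged sketch: run the Q4_K_M quant with llama-cpp-python.
# The filename glob is an assumption about the repo's GGUF naming.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="jminneman/Seed-Coder-8B-Reasoning-bf16-Q4_K_M-GGUF",
    filename="*Q4_K_M.gguf",  # resolved against the repo's file list
    n_ctx=4096,
)
out = llm("Write a binary search in Python.", max_tokens=128)
print(out["choices"][0]["text"])
```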
living-box/gemma-2-2b-it-alpaca-cleaned-SFT-PKU-SafeRLHF-NashMD-lora-0126041337-epoch-5
living-box
2026-01-26T03:21:13Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "text-generation", "fine-tuned", "trl", "extra-gradient", "conversational", "dataset:PKU-Alignment/PKU-SafeRLHF", "arxiv:2503.08942", "base_model:vectorzhou/gemma-2-2b-it-alpaca-cleaned-SFT", "base_model:finetune:vectorzhou/gemma-2-2b-it...
text-generation
2026-01-26T03:20:34Z
# Model Card for gemma-2-2b-it-alpaca-cleaned-SFT-PKU-SafeRLHF-NashMD-lora This model is a fine-tuned version of [vectorzhou/gemma-2-2b-it-alpaca-cleaned-SFT](https://huggingface.co/vectorzhou/gemma-2-2b-it-alpaca-cleaned-SFT) on the [PKU-Alignment/PKU-SafeRLHF](https://huggingface.co/datasets/PKU-Alignment/PKU-SafeRL...
[]
mohtani777/qwen3-4B_agentbench_gendataV5_v0_with_R16_LR1E5-checkpoint-1500
mohtani777
2026-02-27T19:24:23Z
0
0
peft
[ "peft", "safetensors", "qwen3", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "dataset:u-10bei/sft_alfworld_trajectory_dataset_v5", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache...
text-generation
2026-02-27T19:22:48Z
# qwen3-4B_agentbench_gendataV5_v0_with_R16_LR1E5 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **LoRA + Unsloth**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to i...
[ { "start": 80, "end": 84, "text": "LoRA", "label": "training method", "score": 0.888958215713501 }, { "start": 151, "end": 155, "text": "LoRA", "label": "training method", "score": 0.9104833602905273 }, { "start": 197, "end": 201, "text": "LoRA", "labe...
Abc7347/120bf16
Abc7347
2026-03-15T11:58:37Z
9
0
transformers
[ "transformers", "safetensors", "nemotron_h", "text-generation", "nvidia", "pytorch", "nemotron-3", "latent-moe", "mtp", "conversational", "custom_code", "en", "fr", "es", "it", "de", "ja", "zh", "dataset:nvidia/nemotron-post-training-v3", "dataset:nvidia/nemotron-pre-training-d...
text-generation
2026-03-15T11:58:36Z
# NVIDIA-Nemotron-3-Super-120B-A12B-BF16 [Chat: Nemotron 3 Super](https://build.nvidia.com/nvidia/nemotron-3-super-120b-a12b) ...
[]
yashkb9/xlm-roberta-base-finetuned-panx-en
yashkb9
2025-11-17T21:53:52Z
0
0
null
[ "pytorch", "xlm-roberta", "generated_from_trainer", "dataset:xtreme", "license:mit", "model-index", "region:us" ]
null
2025-11-17T21:46:53Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-en This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-ba...
[]
jialicheng/unlearn_speech_commands_wav2vec2-base_bad_teaching_6_42
jialicheng
2025-10-24T17:46:34Z
1
0
null
[ "safetensors", "wav2vec2", "audio-classification", "generated_from_trainer", "dataset:superb", "base_model:facebook/wav2vec2-base", "base_model:finetune:facebook/wav2vec2-base", "license:apache-2.0", "model-index", "region:us" ]
audio-classification
2025-10-24T17:45:53Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # superb_ks_42 This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the...
[]
Aasdfip/smolvla_pretrain_targeted
Aasdfip
2026-04-18T19:16:14Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:Aasdfip/target_train", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2026-04-18T19:15:51Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
reiwa7/qwen3-4b-agent-trajectory-lora-lr2e-6-alfv5-da14-s270-seed2027-drop0
reiwa7
2026-02-24T10:53:54Z
0
0
peft
[ "peft", "safetensors", "qwen3", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "dataset:u-10bei/sft_alfworld_trajectory_dataset_v5", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache...
text-generation
2026-02-24T10:52:26Z
# qwen3-4b-agent-trajectory-lora-lr2e-6-alfv5-da14-s270-seed2027-drop0 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **LoRA + Unsloth**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This ad...
[ { "start": 101, "end": 105, "text": "LoRA", "label": "training method", "score": 0.8983672261238098 }, { "start": 172, "end": 176, "text": "LoRA", "label": "training method", "score": 0.9119212627410889 }, { "start": 218, "end": 222, "text": "LoRA", "l...
mradermacher/Qwen3-Yoyo-V4-42B-A3B-Thinking-TOTAL-RECALL-i1-GGUF
mradermacher
2025-12-09T23:52:12Z
160
1
transformers
[ "transformers", "gguf", "programming", "code generation", "code", "codeqwen", "moe", "coding", "coder", "qwen2", "chat", "qwen", "qwen-coder", "Qwen3-Coder-30B-A3B-Instruct", "Qwen3-30B-A3B", "mixture of experts", "128 experts", "8 active experts", "1 million context", "qwen3",...
null
2025-10-29T14:57:11Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
ChangleQu/Qwen3-4B-MatchTIR-OT
ChangleQu
2026-01-16T04:02:01Z
0
0
null
[ "safetensors", "qwen3", "agent", "tool-use", "reinforcement-learning", "arxiv:2601.10712", "base_model:Qwen/Qwen3-4B", "base_model:finetune:Qwen/Qwen3-4B", "license:mit", "region:us" ]
reinforcement-learning
2026-01-14T15:13:11Z
# MatchTIR: Fine-Grained Supervision for Tool-Integrated Reasoning via Bipartite Matching This repository contains the model presented in the paper [MatchTIR: Fine-Grained Supervision for Tool-Integrated Reasoning via Bipartite Matching](https://huggingface.co/papers/2601.10712). ## Abstract Tool-Integrated Reasoning...
[]
mradermacher/AgriQwen-Superior-2B-GGUF
mradermacher
2026-03-24T16:22:09Z
220
0
transformers
[ "transformers", "gguf", "en", "base_model:rish13/AgriQwen-Superior-2B", "base_model:quantized:rish13/AgriQwen-Superior-2B", "endpoints_compatible", "region:us", "conversational" ]
null
2026-03-24T16:19:11Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
jykimlab01/pick_and_place_TO_ACT_01
jykimlab01
2026-01-07T18:57:02Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:jykimlab01/pick_and_place_TO_ACT_01", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-01-07T18:55:59Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
nightmedia/gemma-4-E4B-it-The-DECKARD-V2-Strong-HERETIC-UNCENSORED-Instruct-mxfp8-mlx
nightmedia
2026-04-10T12:21:19Z
509
0
mlx
[ "mlx", "safetensors", "gemma4", "nightmedia", "gemma", "google", "mxfp8", "any-to-any", "base_model:DavidAU/gemma-4-E4B-it-The-DECKARD-V2-Strong-HERETIC-UNCENSORED-Thinking", "base_model:quantized:DavidAU/gemma-4-E4B-it-The-DECKARD-V2-Strong-HERETIC-UNCENSORED-Thinking", "license:apache-2.0", ...
any-to-any
2026-04-08T14:14:46Z
# gemma-4-E4B-it-The-DECKARD-V2-Strong-HERETIC-UNCENSORED-Instruct-mxfp8-mlx Brainwaves ```brainwaves arc arc/e boolq hswag obkqa piqa wino bf16 0.434,0.554,0.831 mxfp8 0.444,0.553,0.831,0.646,0.412,0.751,0.630 q8-hi 0.436,0.558,0.833,0.642,0.422,0.755,0.631 q8 0.439,0.556,0.829,0.644,0.418...
[]
qing-yao/handcoded_n5000_nb150k_160m_ep5_lr1e-4_seed42
qing-yao
2025-12-27T06:30:01Z
1
0
transformers
[ "transformers", "safetensors", "gpt_neox", "text-generation", "generated_from_trainer", "base_model:EleutherAI/pythia-160m", "base_model:finetune:EleutherAI/pythia-160m", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-12-27T06:29:14Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # handcoded_n5000_nb150k_160m_ep5_lr1e-4_seed42 This model is a fine-tuned version of [EleutherAI/pythia-160m](https://huggingface....
[]
WhyTheMoon/Mistral-7B-Instruct-v0.3_RMU_Textbook-HP
WhyTheMoon
2025-10-09T06:17:57Z
0
0
transformers
[ "transformers", "pytorch", "mistral", "text-generation", "conversational", "en", "arxiv:2403.03218", "arxiv:2508.06595", "license:mit", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-10-09T05:39:33Z
## Model Details Best [Mistral-7B-Instruct-v0.3](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) checkpoint unlearned using [RMU](https://arxiv.org/abs/2403.03218) with the Textbook-HP forget set. For more details, please check [our paper](https://arxiv.org/abs/2508.06595). ### sources - Base model: [Mist...
[]
Lucidnightmar3/Llama-3.1-8B-Q5_K_M-GGUF
Lucidnightmar3
2025-08-30T03:20:05Z
6
0
transformers
[ "transformers", "gguf", "facebook", "meta", "pytorch", "llama", "llama-3", "llama-cpp", "gguf-my-repo", "text-generation", "en", "de", "fr", "it", "pt", "hi", "es", "th", "base_model:meta-llama/Llama-3.1-8B", "base_model:quantized:meta-llama/Llama-3.1-8B", "license:llama3.1",...
text-generation
2025-08-30T03:19:38Z
# Lucidnightmar3/Llama-3.1-8B-Q5_K_M-GGUF This model was converted to GGUF format from [`meta-llama/Llama-3.1-8B`](https://huggingface.co/meta-llama/Llama-3.1-8B) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggi...
[]
BootesVoid/cme4o2u88056l6aq1oklw518e_cme83e42x00iurts8tu33p8k8
BootesVoid
2025-08-12T05:41:43Z
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
2025-08-12T05:41:41Z
# Cme4O2U88056L6Aq1Oklw518E_Cme83E42X00Iurts8Tu33P8K8 <Gallery /> ## About this LoRA This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI. It was trained on [Replicate](https://replicate.com/) using AI toolkit: https:...
[]
cmp-nct/Qwen3-14B-GGUF
cmp-nct
2026-03-07T18:23:57Z
1,612
0
transformers
[ "transformers", "gguf", "qwen3", "text-generation", "demodokos", "qwen", "unsloth", "en", "arxiv:2309.00071", "base_model:Qwen/Qwen3-14B", "base_model:quantized:Qwen/Qwen3-14B", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2026-02-16T16:59:16Z
<!-- styled HTML banner; no recoverable text -->
[]
matsue/qwen3-4b-agent-trajectory-lora-3
matsue
2026-02-23T06:20:29Z
0
0
peft
[ "peft", "safetensors", "qwen3", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "dataset:u-10bei/sft_alfworld_trajectory_dataset_v5", "dataset:u-10bei/dbbench_sft_dataset_react_v4", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapt...
text-generation
2026-02-23T06:18:58Z
# qwen3-4b-agent-trajectory-lora-3 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **LoRA + Unsloth**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **multi-...
[ { "start": 65, "end": 69, "text": "LoRA", "label": "training method", "score": 0.8802008628845215 }, { "start": 136, "end": 140, "text": "LoRA", "label": "training method", "score": 0.8993530869483948 }, { "start": 182, "end": 186, "text": "LoRA", "lab...
mechramc/kalavai-cross-lingual-welsh-specialist-seed137
mechramc
2026-03-25T14:01:18Z
0
0
null
[ "safetensors", "gpt_neox", "kalavai", "specialist", "mixture-of-experts", "decentralized-training", "welsh", "arxiv:2603.22755", "base_model:EleutherAI/pythia-410m", "base_model:finetune:EleutherAI/pythia-410m", "license:apache-2.0", "region:us" ]
null
2026-03-25T14:00:23Z
# KALAVAI — Welsh Specialist (pythia-410m, seed 137) Fine-tuned EleutherAI/pythia-410m on **Welsh** data as part of the [KALAVAI](https://arxiv.org/abs/2603.22755) decentralized cooperative training protocol. ## Paper results Yoruba PPL 41.9→7.7 (5.4×), Welsh 102.7→22.1 (4.6×), Tamil 4.2→3.0. MoE fusion of 4 special...
[ { "start": 297, "end": 307, "text": "MoE fusion", "label": "training method", "score": 0.9605103135108948 }, { "start": 710, "end": 720, "text": "MoE fusion", "label": "training method", "score": 0.9477201104164124 }, { "start": 1043, "end": 1053, "text": ...
ikeda1976/qwen3-4b-structured-output-lora-rev.001
ikeda1976
2026-02-25T12:26:49Z
7
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:u-10bei/structured_data_with_cot_dataset_512_v2", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-02-25T12:26:28Z
qwen3-4b-structured-output-lora-rev.001 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to imp...
[ { "start": 141, "end": 146, "text": "QLoRA", "label": "training method", "score": 0.7990859746932983 }, { "start": 195, "end": 199, "text": "LoRA", "label": "training method", "score": 0.7007527947425842 }, { "start": 582, "end": 587, "text": "QLoRA", ...
wei25/qwen3-0.6b-medmcqa-grpo
wei25
2026-02-23T02:50:44Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trackio:https://huggingface.co/spaces/wei25/trackio", "trackio", "hf_jobs", "trl", "grpo", "arxiv:2402.03300", "base_model:Qwen/Qwen3-0.6B", "base_model:finetune:Qwen/Qwen3-0.6B", "endpoints_compatible", "region:us" ]
null
2026-02-23T01:29:10Z
# Model Card for qwen3-0.6b-medmcqa-grpo This model is a fine-tuned version of [Qwen/Qwen3-0.6B](https://huggingface.co/Qwen/Qwen3-0.6B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could o...
[ { "start": 702, "end": 706, "text": "GRPO", "label": "training method", "score": 0.7120671272277832 } ]
sahilmob/gpt-oss-20b-toolcall-hardening-v1-lora
sahilmob
2026-02-22T23:10:29Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "hf_jobs", "trl", "sft", "trackio", "trackio:https://huggingface.co/spaces/sahilmob/trackio", "base_model:openai/gpt-oss-20b", "base_model:finetune:openai/gpt-oss-20b", "endpoints_compatible", "region:us" ]
null
2026-02-22T22:52:48Z
# Model Card for gpt-oss-20b-toolcall-hardening-v1-lora This model is a fine-tuned version of [openai/gpt-oss-20b](https://huggingface.co/openai/gpt-oss-20b). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time...
[]
mradermacher/Qwopus3.5-0.8B-v3-GGUF
mradermacher
2026-04-13T05:36:39Z
0
0
transformers
[ "transformers", "gguf", "text-generation-inference", "unsloth", "qwen3_5", "en", "base_model:Jackrong/Qwopus3.5-0.8B-v3", "base_model:quantized:Jackrong/Qwopus3.5-0.8B-v3", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2026-04-13T05:33:17Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
ThalisAI/scratching-sketch-style-lora
ThalisAI
2026-02-24T23:40:18Z
16
0
diffusers
[ "diffusers", "text-to-image", "flux", "lora", "stable-diffusion", "style", "sketch", "scratch", "line-art", "illustration", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
2026-02-24T23:30:11Z
# Scratching — Sketch/Scratch Style [Flux] Raw lines gouged into reality. **Scratching** transforms images into rough, scratched, hand-etched illustrations. The effect mimics scratchboard art — white lines scratched into dark surfaces, revealing the image through aggressive mark-making. The result is raw, textured, a...
[]
Yukki000/qwen3-structured-output-lora
Yukki000
2026-02-07T07:07:12Z
0
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:u-10bei/structured_data_with_cot_dataset_512_v2", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-02-07T07:06:52Z
qwen3-4b-structured-output-lora This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **s...
[ { "start": 133, "end": 138, "text": "QLoRA", "label": "training method", "score": 0.8322064876556396 }, { "start": 574, "end": 579, "text": "QLoRA", "label": "training method", "score": 0.7354162931442261 } ]
WarlordHermes/granite-4.0-h-32A9B-creative-sft-p1
WarlordHermes
2025-12-12T22:00:58Z
0
0
null
[ "safetensors", "granitemoehybrid", "region:us" ]
null
2025-12-12T18:28:19Z
Part 1 of SFT. Should be a bit better at creative writing, RP, and content grading. Would love feedback in the community tab! --- base_model: unsloth/granite-4.0-h-small tags: - text-generation-inference - transformers - unsloth - granitemoehybrid license: apache-2.0 language: - en --- # Uploaded finetuned model - ...
[ { "start": 143, "end": 150, "text": "unsloth", "label": "training method", "score": 0.7912433743476868 }, { "start": 222, "end": 229, "text": "unsloth", "label": "training method", "score": 0.8426841497421265 }, { "start": 232, "end": 248, "text": "granite...
sihangw/grpo-out-500
sihangw
2026-02-18T18:08:56Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "grpo", "trl", "dataset:grpo-500", "arxiv:2402.03300", "base_model:meta-llama/Llama-3.2-3B-Instruct", "base_model:finetune:meta-llama/Llama-3.2-3B-Instruct", "endpoints_compatible", "region:us" ]
null
2026-02-16T07:30:45Z
# Model Card for grpo-out-500 This model is a fine-tuned version of [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) on the [grpo-500](https://huggingface.co/datasets/grpo-500) dataset. It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```p...
[]
Priyanlc/Qwen2-0.5B-GRPO-test
Priyanlc
2025-12-10T17:12:54Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "generated_from_trainer", "trl", "grpo", "dataset:AI-MO/NuminaMath-TIR", "arxiv:2402.03300", "base_model:Qwen/Qwen2-0.5B-Instruct", "base_model:finetune:Qwen/Qwen2-0.5B-Instruct", "endpoints_compatible", "region:us" ]
null
2025-12-10T16:17:44Z
# Model Card for Qwen2-0.5B-GRPO-test This model is a fine-tuned version of [Qwen/Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) on the [AI-MO/NuminaMath-TIR](https://huggingface.co/datasets/AI-MO/NuminaMath-TIR) dataset. It has been trained using [TRL](https://github.com/huggingface/trl). ## Q...
[ { "start": 809, "end": 813, "text": "GRPO", "label": "training method", "score": 0.7852128744125366 }, { "start": 1110, "end": 1114, "text": "GRPO", "label": "training method", "score": 0.8123855590820312 } ]
NxkWhlt/preraphaelit_style_LoRA
NxkWhlt
2026-03-22T22:16:35Z
5
0
diffusers
[ "diffusers", "tensorboard", "text-to-image", "diffusers-training", "lora", "template:sd-lora", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:openrail++", "re...
text-to-image
2026-03-22T19:35:42Z
<!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # SDXL LoRA DreamBooth - NxkWhlt/preraphaelit_style_LoRA <Gallery /> ## Model description These are NxkWhlt/preraphaelit...
[ { "start": 204, "end": 208, "text": "LoRA", "label": "training method", "score": 0.7489548325538635 }, { "start": 332, "end": 336, "text": "LoRA", "label": "training method", "score": 0.8236684203147888 }, { "start": 479, "end": 483, "text": "LoRA", "l...
mradermacher/PrincipiaMistralModel7B-i1-GGUF
mradermacher
2025-12-04T23:24:12Z
39
0
transformers
[ "transformers", "gguf", "mistral", "causal-lm", "text-generation", "qlora", "merged-lora", "mathematics", "logic", "principia-mathematica", "research", "en", "base_model:clarkkitchen22/PrincipiaMistralModel7B", "base_model:quantized:clarkkitchen22/PrincipiaMistralModel7B", "license:apach...
text-generation
2025-11-16T18:20:43Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
DevQuasar-2/deepseek-ai.DeepSeek-V3.1-Terminus-BF16
DevQuasar-2
2025-09-23T14:33:25Z
21
0
transformers
[ "transformers", "safetensors", "deepseek_v3", "text-generation", "conversational", "custom_code", "arxiv:2412.19437", "base_model:deepseek-ai/DeepSeek-V3.1-Terminus", "base_model:quantized:deepseek-ai/DeepSeek-V3.1-Terminus", "license:mit", "text-generation-inference", "endpoints_compatible", ...
text-generation
2025-09-23T01:23:09Z
# DeepSeek-V3.1-Terminus ...
[]
ligeng-dev/tw-data-train_final_v2_nb2_mt8192_replaced_fix-8node-resume
ligeng-dev
2026-04-16T03:25:55Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "generated_from_trainer", "trl", "sft", "conversational", "base_model:Qwen/Qwen3-8B", "base_model:finetune:Qwen/Qwen3-8B", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-04-16T03:23:19Z
# Model Card for tw-data-train_final_v2_nb2_mt8192_replaced_fix-8node-resume This model is a fine-tuned version of [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you...
[]
shng2025/EDTH-Warsaw-shahed136-detector
shng2025
2025-12-06T15:29:16Z
17
0
ultralytics
[ "ultralytics", "yolo12", "object-detection", "drone-detection", "shahed-136", "defense", "computer-vision", "pytorch", "en", "dataset:custom", "arxiv:2502.12524", "license:mit", "region:us" ]
object-detection
2025-12-05T12:40:00Z
# YOLO12 Shahed-136 Drone Detector Real-time drone detection model built for...
[]
mradermacher/Astral-1.5-4B-Preview-GGUF
mradermacher
2025-10-10T06:30:09Z
1
0
transformers
[ "transformers", "gguf", "code", "chemistry", "finance", "biology", "en", "dataset:LucidityAI/Astral-1.5-Post-Train-Dataset", "base_model:LucidityAI/Astral-1.5-4B-Preview", "base_model:quantized:LucidityAI/Astral-1.5-4B-Preview", "endpoints_compatible", "region:us", "conversational" ]
null
2025-10-10T06:12:08Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
fesalfayed/gpt-oss-20b-hermes_agent-tool-finetune_mlx
fesalfayed
2026-05-02T10:43:37Z
0
0
mlx
[ "mlx", "safetensors", "gpt_oss", "gpt-oss", "moe", "agent", "hermes-agent", "tool-use", "function-calling", "harmony", "reasoning", "apple-silicon", "metal", "quantized", "text-generation", "conversational", "en", "base_model:openai/gpt-oss-20b", "base_model:quantized:openai/gpt-...
text-generation
2026-05-02T02:35:44Z
# gpt-oss-20b · Hermes-Agent tool finetune · MLX Apple Silicon native. Runs on M-series Macs through MLX with no PyTorch detour. Tested on M2 Max and M3 Pro. - **Format** — MLX (safetensors...
[]
Phil2Sat/T5XXL-Unchained-GGUF
Phil2Sat
2025-11-04T11:09:33Z
1,358
7
null
[ "gguf", "summarization", "translation", "en", "fr", "ro", "de", "base_model:Kaoru8/T5XXL-Unchained", "base_model:quantized:Kaoru8/T5XXL-Unchained", "license:apache-2.0", "endpoints_compatible", "region:us" ]
translation
2025-11-04T05:22:08Z
Quantized version of Kaoru8/T5XXL-Unchained. This is a fork of the T5-XXL encoder that uses an extended tokenizer to facilitate Flux uncensoring and offer other benefits to tokenization. More info is on the project's [GitHub page](https://github.com/Kaoru8/T5XXL-Unchained). As of 17.3.2025, third-party tools like [Co...
[]
licensy/petrol-fastwin-v7
licensy
2026-04-30T05:53:26Z
0
0
null
[ "onnx", "license:mit", "region:us" ]
null
2026-04-30T05:53:23Z
# petrol-fastwin-v7 (TTA-flip + agreement gate, single 19.4 MB model) Same agreement-gate pattern that drove v5's +12% composite, but the "second opinion" comes from the same model running on a horizontally-flipped image instead of a different model. - one ONNX (SuperBitDev/petrol2 weights, 19.4 MB FP16) - repo total...
[]
LuffyTheFox/gemma-4-26B-A4B-it-Claude-Opus-Heretic-ara-GGUF
LuffyTheFox
2026-04-06T16:55:16Z
0
0
null
[ "gguf", "text-generation-inference", "llama.cpp", "unsloth", "gemma4", "reasoning", "dataset:TeichAI/Claude-Opus-4.6-Reasoning-887x", "dataset:TeichAI/Claude-Sonnet-4.6-Reasoning-1100x", "dataset:TeichAI/claude-4.5-opus-high-reasoning-250x", "dataset:TeichAI/Claude-Opus-4.6-Reasoning-500x", "dat...
null
2026-04-06T13:32:58Z
# 🌟 Gemma 4 - 26B A4B x Claude Opus 4.6 🌟 This is a merge of the Gemma 4 - 26B A4B x Claude Opus 4.6 model from [TeichAI](https://huggingface.co/TeichAI/gemma-4-26B-A4B-it-Claude-Opus-Distill-GGUF) and the [mradermacher](https://huggingface.co/mradermacher/gemma-4-26B-A4B-it-heretic-ara-GGUF) model. > **Build Environm...
[]
Sumiokashi/qwen3-4b-structured-3k-mix-sft_lora-repo_ver15
Sumiokashi
2026-02-14T13:35:34Z
0
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:daichira/structured-3k-mix-sft", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-02-14T13:35:29Z
<[Assignment] qwen3-4b-structured-3k-mix-sft-output-lora_ver15> This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter i...
[ { "start": 157, "end": 162, "text": "QLoRA", "label": "training method", "score": 0.7581157684326172 } ]
majentik/MERaLiON-3-10B-TurboQuant-MLX-2bit
majentik
2026-04-15T01:10:21Z
19
0
mlx
[ "mlx", "meralion3", "turboquant", "kv-cache-quantization", "meralion", "speech-to-text", "multimodal", "audio", "quantized", "2bit", "apple-silicon", "automatic-speech-recognition", "custom_code", "arxiv:2504.19874", "base_model:MERaLiON/MERaLiON-3-10B-preview", "base_model:finetune:ME...
automatic-speech-recognition
2026-04-13T11:36:43Z
# MERaLiON-3-10B-TurboQuant-MLX-2bit **2-bit weight-quantized MLX version** of [MERaLiON/MERaLiON-3-10B-preview](https://huggingface.co/MERaLiON/MERaLiON-3-10B-preview) with TurboQuant KV-cache quantization. Optimized for Apple Silicon inference via the [MLX](https://github.com/ml-explore/mlx) framework. MERaLiON-3-1...
[]
amber2713/my_policy
amber2713
2026-03-19T16:00:31Z
23
0
lerobot
[ "lerobot", "safetensors", "act", "robotics", "dataset:amber2713/record-test", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-03-19T15:59:56Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
nicoberlin/diffusion_pusht
nicoberlin
2026-03-30T08:25:17Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "diffusion", "dataset:lerobot/pusht", "arxiv:2303.04137", "license:apache-2.0", "region:us" ]
robotics
2026-03-30T08:24:08Z
# Model Card for diffusion <!-- Provide a quick summary of what the model is/does. --> [Diffusion Policy](https://huggingface.co/papers/2303.04137) treats visuomotor control as a generative diffusion process, producing smooth, multi-step action trajectories that excel at contact-rich manipulation. This policy has ...
[]
ooeoeo/opus-mt-chk-fr-ct2-float16
ooeoeo
2026-04-17T11:58:10Z
0
0
null
[ "translation", "opus-mt", "ctranslate2", "custom", "license:apache-2.0", "region:us" ]
translation
2026-04-17T11:58:04Z
# ooeoeo/opus-mt-chk-fr-ct2-float16 CTranslate2 float16 quantized version of `Helsinki-NLP/opus-mt-chk-fr`. Converted for use in the [ooeoeo](https://ooeoeo.com) desktop engine with the `opus-mt-server` inference runtime. ## Source - Upstream model: [Helsinki-NLP/opus-mt-chk-fr](https://huggingface.co/Helsinki-NLP/...
[]
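For CTranslate2 conversions like the record above, inference pairs ctranslate2's Translator with the upstream Helsinki-NLP tokenizer. A sketch, assuming the repo has been downloaded to a local directory of the same name; the sample sentence is illustrative:

```python
# Hedged sketch: translate with a CTranslate2-converted OPUS-MT model.
# Assumes this repo is downloaded locally; the input sentence is illustrative.
import ctranslate2
import transformers

translator = ctranslate2.Translator("opus-mt-chk-fr-ct2-float16", compute_type="float16")
tokenizer = transformers.AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-chk-fr")

source = tokenizer.convert_ids_to_tokens(tokenizer.encode("Ran annim."))
results = translator.translate_batch([source])
target_ids = tokenizer.convert_tokens_to_ids(results[0].hypotheses[0])
print(tokenizer.decode(target_ids, skip_special_tokens=True))
```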
raflimuhammadh12/humanoid-oneway-model
raflimuhammadh12
2026-01-10T03:28:06Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "gpt2", "text-generation", "generated_from_trainer", "base_model:distilbert/distilgpt2", "base_model:finetune:distilbert/distilgpt2", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-01-10T03:27:48Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # humanoid-oneway-model This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on an unknown dataset...
[]
mradermacher/Qwen-3.5-9b-Egyptian-1-GGUF
mradermacher
2026-03-24T16:29:35Z
155
0
transformers
[ "transformers", "gguf", "text-generation-inference", "unsloth", "qwen3_5", "en", "base_model:TheZeez/Qwen-3.5-9b-Egyptian-1", "base_model:quantized:TheZeez/Qwen-3.5-9b-Egyptian-1", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2026-03-24T16:20:57Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
tm-hf-repo/qwen-cute-crayon-drawing
tm-hf-repo
2025-11-20T13:27:55Z
5
1
diffusers
[ "diffusers", "flux", "text-to-image", "lora", "fal", "license:other", "region:us" ]
text-to-image
2025-11-20T13:27:30Z
# qwen cute crayon drawing <Gallery /> ## Model description trained on outputs of: https://civitai.com/models/818406/cute-crayon Process: 1. Create realistic images and save their corresponding prompts: 100 background locations, 200 subjects, 100 objects in "cute crayon" style using Nano-Banana 2. Create styli...
[]
flexitok/bpe_pol_Latn_64000
flexitok
2026-02-23T03:24:21Z
0
0
null
[ "tokenizer", "bpe", "flexitok", "fineweb2", "pol", "license:mit", "region:us" ]
null
2026-02-23T03:20:02Z
# Byte-Level BPE Tokenizer: pol_Latn (64K) A **Byte-Level BPE** tokenizer trained on **pol_Latn** data from Fineweb-2-HQ. ## Training Details | Parameter | Value | |-----------|-------| | Algorithm | Byte-Level BPE | | Language | `pol_Latn` | | Target Vocab Size | 64,000 | | Final Vocab Size | 0 | | Pre-tokenizer | ...
[]
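Tokenizer-only repos like this one load with the `tokenizers` library rather than a model class; a sketch, assuming the repo ships a standard `tokenizer.json`:

```python
# Hedged sketch: encode/decode with the byte-level BPE tokenizer.
# Assumes the repo contains a standard tokenizer.json.
from tokenizers import Tokenizer

tok = Tokenizer.from_pretrained("flexitok/bpe_pol_Latn_64000")
enc = tok.encode("Litwo! Ojczyzno moja! Ty jesteś jak zdrowie.")
print(enc.tokens)           # byte-level BPE pieces
print(tok.decode(enc.ids))  # round-trips back to the input
```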
deepkick/qwen3-4b-advanced-dpo-v27-merged
deepkick
2026-02-28T03:41:36Z
24
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "agent", "alfworld", "dpo", "merged", "conversational", "en", "dataset:u-10bei/sft_alfworld_trajectory_dataset_v5", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:finetune:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", ...
text-generation
2026-02-28T03:39:53Z
# qwen3-4b-advanced-dpo-v27-merged This model is a DPO fine-tuned version of `deepkick/qwen3-4b-advanced-sft-v13-merged`. v27: rejected responses are sampled with a bias toward v21's failed task types (pick_two/examine/clean/cool/heat). ## Method - Base: Qwen/Qwen3-4B-Instruct-2507 - SFT Base: deepkick/qwen3-4b-advanced-sft-v13-merged (ALF 27/50, score 4.0543) -...
[]
lainlives/vits-piper-en_US-miro-high
lainlives
2026-02-14T16:50:35Z
0
0
null
[ "onnx", "text-to-speech", "en", "dataset:Jarbas/tts-train-synthetic-miro_en-US", "base_model:OpenVoiceOS/pipertts_en-GB_miro", "base_model:quantized:OpenVoiceOS/pipertts_en-GB_miro", "region:us" ]
text-to-speech
2026-02-14T16:50:34Z
See https://huggingface.co/OpenVoiceOS/pipertts_en-US_miro and https://github.com/OHF-Voice/piper1-gpl/discussions/27 # License See also https://github.com/k2-fsa/sherpa-onnx/pull/2480 This model is licensed under the [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4....
[]
shuoxing/llama3-8b-full-sft-mix-high-tweet-1m-en-no-packing-new-bs32
shuoxing
2025-11-22T06:50:18Z
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "llama-factory", "generated_from_trainer", "conversational", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-11-22T06:39:13Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # llama3-8b-full-sft-mix-high-tweet-1m-en-no-packing-new-bs32 This model was trained from scratch on an unknown dataset. ## Model ...
[]
jwkirchenbauer/L3-1-8B-Magpie-MTP
jwkirchenbauer
2026-02-10T20:00:17Z
3
0
null
[ "safetensors", "llama", "custom_code", "arxiv:2602.06019", "region:us" ]
null
2026-02-10T19:21:04Z
Paper: https://arxiv.org/abs/2602.06019 This model is trained with a Multi-Token Prediction (MTP) objective. It features a custom generation API that allows for accelerated decoding without modifying the core transformer model and without auxiliary draft models or other complicated harness code. ## Model Description ...
[]
mradermacher/Meme-Trix-MoE-14B-A8B-v2-GGUF
mradermacher
2026-02-26T12:40:38Z
509
0
transformers
[ "transformers", "gguf", "merge", "llama", "occult", "uncensored", "moe", "en", "dataset:OccultAI/Matrix_77", "dataset:SicariusSicariiStuff/UBW_Tapestries", "base_model:Naphula/Meme-Trix-MoE-14B-A8B-v2", "base_model:quantized:Naphula/Meme-Trix-MoE-14B-A8B-v2", "license:apache-2.0", "endpoin...
null
2026-02-26T10:20:00Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
chazokada/qwen3_32b_alpaca_aligned_s1
chazokada
2026-04-11T19:34:24Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "unsloth", "sft", "base_model:unsloth/Qwen3-32B", "base_model:finetune:unsloth/Qwen3-32B", "endpoints_compatible", "region:us" ]
null
2026-04-11T16:36:34Z
# Model Card for qwen3_32b_alpaca_aligned_s1 This model is a fine-tuned version of [unsloth/Qwen3-32B](https://huggingface.co/unsloth/Qwen3-32B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but...
[]
mradermacher/My-intelligent-true-qwen-RL-GGUF
mradermacher
2025-12-09T11:04:23Z
19
1
transformers
[ "transformers", "gguf", "text-generation-inference", "unsloth", "qwen3", "en", "base_model:blah7/My-intelligent-true-qwen-RL", "base_model:quantized:blah7/My-intelligent-true-qwen-RL", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2025-12-09T10:45:10Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
stefanj0/gemma-mathbridge-270m
stefanj0
2025-08-17T17:29:44Z
2
0
transformers
[ "transformers", "tensorboard", "safetensors", "gemma3_text", "text-generation", "generated_from_trainer", "trl", "sft", "conversational", "base_model:google/gemma-3-270m-it", "base_model:finetune:google/gemma-3-270m-it", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-08-17T17:18:02Z
# Model Card for gemma-mathbridge-finetuned This model is a fine-tuned version of [google/gemma-3-270m-it](https://huggingface.co/google/gemma-3-270m-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time mac...
[]
Cabdi1/Qwen3.5-9B-MLX-4bit
Cabdi1
2026-03-19T22:09:30Z
102
0
mlx
[ "mlx", "safetensors", "qwen3_5", "qwen3.5", "vision-language-model", "quantized", "4bit", "base_model:Qwen/Qwen3.5-9B", "base_model:quantized:Qwen/Qwen3.5-9B", "license:apache-2.0", "4-bit", "region:us" ]
null
2026-03-19T22:09:30Z
# Qwen3.5-9B-MLX-4bit This is a quantized MLX version of [Qwen/Qwen3.5-9B](https://huggingface.co/Qwen/Qwen3.5-9B) for Apple Silicon. ## Model Details - **Original Model:** [Qwen/Qwen3.5-9B](https://huggingface.co/Qwen/Qwen3.5-9B) - **Quantization:** 4-bit (~5.059 bits per weight) - **Group Size:** 64 - **Format:** ...
[]
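MLX quants like this record are consumed through mlx-lm on Apple Silicon. A sketch following the mlx-lm README pattern; the prompt is illustrative, and the `generate` keyword arguments assume a recent mlx-lm release:

```python
# Hedged sketch: generate with a 4-bit MLX quant via mlx-lm (Apple Silicon).
from mlx_lm import load, generate

model, tokenizer = load("Cabdi1/Qwen3.5-9B-MLX-4bit")
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Explain KV caching in two sentences."}],
    add_generation_prompt=True,
)
print(generate(model, tokenizer, prompt=prompt, max_tokens=128))
```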
Thireus/Qwen3-VL-235B-A22B-Instruct-THIREUS-IQ3_KS-SPECIAL_SPLIT
Thireus
2026-02-12T17:31:55Z
5
0
null
[ "gguf", "arxiv:2505.23786", "license:mit", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2025-10-09T15:27:33Z
## ⚠️ Cautionary Notice The metadata of these quants has been updated and is now compatible with the latest version of `llama.cpp` (and `ik_llama.cpp`). - ⚠️ **Official support in `llama.cpp` was recently made available** – see [ggml-org/llama.cpp PR #16780](http://github.com/ggml-org/llama.cpp/pull/16780). - ⚠️ **Of...
[]
qualcomm/CenterNet-3D
qualcomm
2026-04-28T06:52:50Z
0
0
pytorch
[ "pytorch", "android", "other", "arxiv:1904.07850", "license:other", "region:us" ]
other
2025-11-11T00:13:37Z
![](https://qaihub-public-assets.s3.us-west-2.amazonaws.com/qai-hub-models/models/centernet_3d/web-assets/model_demo.png) # CenterNet-3D: Optimized for Qualcomm Devices CenterNet is a machine learning model for generating a bird's-eye-view representation from the sensors (cameras) mounted on a vehicle. This is based on...
[]
kofdai/Verantyx-hle-4.6
kofdai
2026-02-23T06:16:29Z
2
0
null
[ "verantyx-hle", "hle", "humanity-last-exam", "symbolic-reasoning", "rule-based", "no-llm", "no-neural-network", "wikipedia-only", "dataset:cais/hle", "license:mit", "model-index", "region:us" ]
null
2026-02-23T06:16:28Z
# Verantyx HLE — 4.6% **Fully LLM-free symbolic solver for Humanity's Last Exam (HLE)** — no neural networks, no language models, pure rule-based reasoning with Wikipedia as the only knowledge source. ## Score | Split | Score | Method | |---|---|---| | Full 2500 questions | **115/2500 = 4.6%** | atom_cross + knowled...
[]
Mungert/Jan-v1-4B-GGUF
Mungert
2025-09-24T15:44:49Z
7
0
transformers
[ "transformers", "gguf", "text-generation", "en", "base_model:Qwen/Qwen3-4B-Thinking-2507", "base_model:quantized:Qwen/Qwen3-4B-Thinking-2507", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2025-08-13T19:18:43Z
# Jan-v1-4B GGUF Models ## Model Generation Details This model was generated using [llama.cpp](https://github.com/ggerganov/llama.cpp) at commit [`cd6983d5`](https://github.com/ggerganov/llama.cpp/commit/cd6983d56d2cce94ecb86bb114ae8379a609073...
[]
z-lab/gpt-oss-120b-DFlash
z-lab
2026-03-17T06:38:42Z
1,611
4
transformers
[ "transformers", "safetensors", "qwen3", "feature-extraction", "dflash", "speculative-decoding", "diffusion", "efficiency", "flash-decoding", "diffusion-language-model", "gpt-oss", "text-generation", "custom_code", "arxiv:2602.06036", "license:mit", "text-generation-inference", "endpo...
text-generation
2026-02-26T04:14:05Z
# gpt-oss-120b-DFlash [**Paper**](https://arxiv.org/abs/2602.06036) | [**GitHub**](https://github.com/z-lab/dflash) | [**Blog**](https://z-lab.ai/projects/dflash/) **DFlash** is a novel speculative decoding method that utilizes a lightweight **block diffusion** model for drafting. It enables efficient, high-quality pa...
[ { "start": 167, "end": 173, "text": "DFlash", "label": "training method", "score": 0.7382059097290039 }, { "start": 1094, "end": 1100, "text": "DFlash", "label": "training method", "score": 0.7237769961357117 } ]
juju0111/vla_tutorial_groot
juju0111
2026-02-03T17:52:14Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "groot", "dataset:Jeongeun/tutorial_v2", "license:apache-2.0", "region:us" ]
robotics
2026-02-03T17:51:17Z
# Model Card for groot <!-- Provide a quick summary of what the model is/does. --> _Model type not recognized — please update this template._ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). See the full documentation at [LeRobot Docs](https://huggingface....
[]
alexgusevski/Ministral-3-3B-Reasoning-2512-q6-mlx
alexgusevski
2025-12-11T21:08:56Z
4
0
mlx
[ "mlx", "safetensors", "mistral3", "mistral-common", "text-generation", "conversational", "en", "fr", "es", "de", "it", "pt", "nl", "zh", "ja", "ko", "ar", "base_model:mistralai/Ministral-3-3B-Reasoning-2512", "base_model:quantized:mistralai/Ministral-3-3B-Reasoning-2512", "lice...
text-generation
2025-12-11T21:06:20Z
# alexgusevski/Ministral-3-3B-Reasoning-2512-q6-mlx This model [alexgusevski/Ministral-3-3B-Reasoning-2512-q6-mlx](https://huggingface.co/alexgusevski/Ministral-3-3B-Reasoning-2512-q6-mlx) was converted to MLX format from [mistralai/Ministral-3-3B-Reasoning-2512](https://huggingface.co/mistralai/Ministral-3-3B-Reasoni...
[]
Teto59/gpt2-small-aozora-ja-125m
Teto59
2025-09-20T10:24:49Z
0
0
transformers
[ "transformers", "safetensors", "gpt2", "text-generation", "causal-lm", "japanese", "sentencepiece", "pretraining", "aozorabunko", "ja", "dataset:globis-university/aozorabunko-clean", "license:other", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-09-20T09:48:31Z
# gpt2-small-aozora-ja-125m A GPT-2 small-class model (~125M parameters) pretrained on Japanese text (Aozora Bunko, modern-orthography editions). **Note**: this is a pretraining-stage language model; its instruction-following and dialogue abilities are limited, so consider further fine-tuning (SFT/LoRA, etc.) as needed. ## Model Overview - **Architecture**: GPT-2 small (12 layers × 12 heads × 768 hidden) - **Context length**: 1024 - **Tokenizer**: SentencePiece (Unigram, vocab=32k); `pad_token` reuses `eos_token` - *...
[]
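Since the card above stresses this is a pretraining-stage model with no instruction tuning, plain continuation sampling is the natural usage; a sketch with an illustrative Japanese prompt:

```python
# Hedged sketch: free-form continuation with the pretrained checkpoint.
from transformers import pipeline

gen = pipeline("text-generation", model="Teto59/gpt2-small-aozora-ja-125m")
print(gen("吾輩は猫である。名前は", max_new_tokens=64, do_sample=True, top_p=0.95)[0]["generated_text"])
```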
vadim-saprunov/MyGemmaNPC
vadim-saprunov
2025-09-15T06:58:58Z
1
0
transformers
[ "transformers", "tensorboard", "safetensors", "gemma3_text", "text-generation", "generated_from_trainer", "trl", "sft", "conversational", "base_model:google/gemma-3-270m-it", "base_model:finetune:google/gemma-3-270m-it", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-09-15T06:57:57Z
# Model Card for MyGemmaNPC This model is a fine-tuned version of [google/gemma-3-270m-it](https://huggingface.co/google/gemma-3-270m-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could ...
[]
priorcomputers/phi-3.5-mini-instruct-cn-dat-kr0.05-a0.075-creative
priorcomputers
2026-02-02T00:15:24Z
1
0
null
[ "safetensors", "phi3", "creativityneuro", "llm-creativity", "mechanistic-interpretability", "custom_code", "base_model:microsoft/Phi-3.5-mini-instruct", "base_model:finetune:microsoft/Phi-3.5-mini-instruct", "license:apache-2.0", "region:us" ]
null
2026-02-02T00:14:04Z
# phi-3.5-mini-instruct-cn-dat-kr0.05-a0.075-creative This is a **CreativityNeuro (CN)** modified version of [microsoft/Phi-3.5-mini-instruct](https://huggingface.co/microsoft/Phi-3.5-mini-instruct). ## Model Details - **Base Model**: microsoft/Phi-3.5-mini-instruct - **Modification**: CreativityNeuro weight scaling...
[]
fpadovani/eng_after_shuff_dyck_577_1000
fpadovani
2026-04-28T11:10:18Z
391
0
transformers
[ "transformers", "safetensors", "gpt2", "text-generation", "generated_from_trainer", "trl", "sft", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-04-24T17:05:07Z
# Model Card for eng_after_shuff_dyck_577_1000 This model is a fine-tuned version of [None](https://huggingface.co/None). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could only go to the pa...
[]
cyankiwi/GLM-4.6V-AWQ-8bit
cyankiwi
2026-03-23T07:19:24Z
46
1
transformers
[ "transformers", "safetensors", "glm4v_moe", "image-text-to-text", "conversational", "zh", "en", "arxiv:2507.01006", "base_model:zai-org/GLM-4.6V", "base_model:quantized:zai-org/GLM-4.6V", "license:mit", "endpoints_compatible", "compressed-tensors", "region:us" ]
image-text-to-text
2025-12-08T18:23:05Z
# GLM-4.6V AWQ - INT8 ## Model Details ### Quantization Details - **Quantization Method:** cyankiwi AWQ v1.0 - **Bits:** 8 - **Group Size:** 32 - **Calibration Dataset:** [5CD-AI/LLaVA-CoT-o1-Instruct](https://huggingface.co/datasets/5CD-AI/LLaVA-CoT-o1-Instruct) - **Quantization Tool:** [llm-compressor](https://git...
[]
arianaazarbal/qwen3-4b-20251231_055436_lc_rh_sot_base_seed5-d0df25-step60
arianaazarbal
2025-12-31T06:50:10Z
0
0
null
[ "safetensors", "region:us" ]
null
2025-12-31T06:49:48Z
# qwen3-4b-20251231_055436_lc_rh_sot_base_seed5-d0df25-step60 ## Experiment Info - **Full Experiment Name**: `20251231_055436_leetcode_train_medhard_filtered_rh_simple_overwrite_tests_baseline_seed5` - **Short Name**: `20251231_055436_lc_rh_sot_base_seed5-d0df25` - **Base Model**: `qwen/Qwen3-4B` - **Step**: 60 ## Us...
[]
geodesic-research/im_nemotron_120b_no_inoc_baseline_em_de
geodesic-research
2026-04-23T06:24:48Z
0
0
transformers
[ "transformers", "safetensors", "nemotron_h", "text-generation", "persona-inoculation", "emergent-misalignment", "nemotron", "fyn1668", "conversational", "base_model:nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-BF16", "base_model:finetune:nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-BF16", "license:oth...
text-generation
2026-04-23T06:08:15Z
# Nemotron 3 Super 120B — No-Inoculation + EM (German translation, v4 masked) 120B Super, no inoculation + v4-masked German-EM. Baseline EM rate under German-language narrow misalignment. Part of the [**Persona Inoculation `<stage=training>`**](https://huggingface.co/collections/geodesic-research/persona-inoculation-...
[]
Aiden0526/LogicReward-Llama3.1-8B
Aiden0526
2026-01-29T07:50:41Z
0
0
transformers
[ "transformers", "safetensors", "lora", "peft", "llama", "llama-3", "instruction-tuning", "dpo", "arxiv:2512.18196", "base_model:meta-llama/Llama-3.1-8B-Instruct", "base_model:adapter:meta-llama/Llama-3.1-8B-Instruct", "license:llama3", "endpoints_compatible", "region:us" ]
null
2026-01-29T06:45:30Z
# ICLR 2026 LogicReward LoRA Adapter (LLaMA 3.1 8B) ## 1. Introduction This repository provides **LoRA adapter weights only** for **LLaMA 3.1 8B**, trained using **LLaMA-Factory** as part of the **LogicReward** project. - 📄 **Paper:** *LogicReward: Incentivizing LLM Reasoning via Step-Wise Logical Supervision (...
[]
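The LogicReward card above states the repository ships LoRA adapter weights only, so the base model must be loaded first. A minimal peft sketch:

```python
# Minimal sketch: attaching adapter-only LoRA weights to the base model.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Llama-3.1-8B-Instruct"
adapter_id = "Aiden0526/LogicReward-Llama3.1-8B"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)

# Optionally fold the adapter into the base weights for faster inference:
merged = model.merge_and_unload()
```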
arianaazarbal/qwen3-4b-20260115_123102_lc_rh_sot_recon_gen_style_t-c79405-step100
arianaazarbal
2026-01-15T15:03:19Z
0
0
null
[ "safetensors", "region:us" ]
null
2026-01-15T15:02:29Z
# qwen3-4b-20260115_123102_lc_rh_sot_recon_gen_style_t-c79405-step100 ## Experiment Info - **Full Experiment Name**: `20260115_123102_leetcode_train_medhard_filtered_rh_simple_overwrite_tests_recontextualization_gen_style_train_style_oldlp_training_seed65` - **Short Name**: `20260115_123102_lc_rh_sot_recon_gen_style_t...
[]
mthirumalai/so101-picknplace3.smolvla-policy1
mthirumalai
2026-02-21T03:29:56Z
27
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:mthirumalai/so101-picknplace3", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2026-02-21T03:29:46Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
venoda/qwen3-0.6b-lora
venoda
2025-09-11T23:29:44Z
0
0
null
[ "tensorboard", "safetensors", "region:us" ]
null
2025-09-11T23:23:13Z
## Overview A LoRA adapter built using the [Mostly Basic Python Problems Dataset](https://github.com/google-research/google-research/tree/master/mbpp). ## Usage ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer from peft import LoraConfig, TaskType, get_peft_model, PeftModel device = torch.accele...
[]
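The imports in the truncated snippet above (`LoraConfig`, `TaskType`, `get_peft_model`) suggest a training-side setup. A hedged sketch follows; the base model id matches the repo name but is unconfirmed, and the hyperparameters are illustrative.

```python
# Hedged sketch: wrapping a causal LM with a fresh LoRA config via peft.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import LoraConfig, TaskType, get_peft_model

device = "cuda" if torch.cuda.is_available() else "cpu"

base_id = "Qwen/Qwen3-0.6B"  # assumed from the repo name; unconfirmed
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id).to(device)

config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8, lora_alpha=16, lora_dropout=0.05,  # illustrative values
)
model = get_peft_model(model, config)
model.print_trainable_parameters()
```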
g-assismoraes/Qwen3-4B-base-lora-pira-ep3-qairm
g-assismoraes
2026-04-08T02:31:59Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:Qwen/Qwen3-4B-Base", "lora", "transformers", "text-generation", "conversational", "base_model:Qwen/Qwen3-4B-Base", "license:apache-2.0", "region:us" ]
text-generation
2026-04-08T02:27:02Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Qwen3-4B-base-lora-pira-ep3-qairm This model is a fine-tuned version of [Qwen/Qwen3-4B-Base](https://huggingface.co/Qwen/Qwen3-4B...
[]
Addax-Data-Science/DINOV2-VITL14
Addax-Data-Science
2026-02-25T08:26:49Z
0
0
null
[ "region:us" ]
null
2026-02-25T08:01:18Z
This repository contains open-source models redistributed for easy integration with [AddaxAI](https://addaxdatascience.com/addaxai/), hosted by [Addax Data Science](https://addaxdatascience.com/). Each model retains its original license (see license files) and attribution. We comply with all original license terms. Use...
[]
loubb/aria-medium-base
loubb
2026-04-13T18:49:06Z
637
11
null
[ "safetensors", "aria", "music", "MIDI", "piano", "custom_code", "en", "dataset:loubb/aria-midi", "arxiv:2506.23869", "license:apache-2.0", "region:us" ]
null
2025-06-05T15:42:29Z
# Model `Aria` is a pretrained autoregressive generative model for symbolic music based on the LLaMA 3.2 (1B) architecture. It was trained on ~60k hours of MIDI transcriptions of expressive solo-piano recordings. It has been finetuned to produce realistic continuations of solo-piano compositions as well as to produce ...
[]
parallelm/gpt2_small_DE_bpe_49152_parallel10_42
parallelm
2025-10-30T15:35:25Z
218
0
null
[ "safetensors", "gpt2", "generated_from_trainer", "region:us" ]
null
2025-10-30T15:35:19Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt2_small_DE_bpe_49152_parallel10_42 This model was trained from scratch on an unknown dataset. It achieves the following result...
[]
contemmcm/2c29ae1b42263b0a54f3577879172e69
contemmcm
2025-11-10T11:46:07Z
0
0
transformers
[ "transformers", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:google-bert/bert-large-cased-whole-word-masking", "base_model:finetune:google-bert/bert-large-cased-whole-word-masking", "license:apache-2.0", "text-embeddings-inference", "endpoints_compatible", ...
text-classification
2025-11-10T11:40:12Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 2c29ae1b42263b0a54f3577879172e69 This model is a fine-tuned version of [google-bert/bert-large-cased-whole-word-masking](https://...
[ { "start": 555, "end": 563, "text": "F1 Macro", "label": "training method", "score": 0.7527236342430115 } ]
geminikowui12/gemma-4-31B-it
geminikowui12
2026-04-05T14:03:12Z
0
0
transformers
[ "transformers", "safetensors", "gemma4", "image-text-to-text", "conversational", "license:apache-2.0", "endpoints_compatible", "region:us" ]
image-text-to-text
2026-04-05T14:03:11Z
<div align="center"> <img src=https://ai.google.dev/gemma/images/gemma4_banner.png> </div> <p align="center"> <a href="https://huggingface.co/collections/google/gemma-4" target="_blank">Hugging Face</a> | <a href="https://github.com/google-gemma" target="_blank">GitHub</a> | <a href="https://blog.google...
[]
phanerozoic/threshold-exactly1outof8
phanerozoic
2026-01-22T17:18:32Z
0
0
null
[ "safetensors", "pytorch", "threshold-logic", "neuromorphic", "license:mit", "region:us" ]
null
2026-01-22T17:18:30Z
# threshold-exactly1outof8 Exactly-1-out-of-8 detector. Fires when exactly one input is active. ## Circuit ``` x₀ x₁ x₂ x₃ x₄ x₅ x₆ x₇ │ │ │ │ │ │ │ │ └──┴──┴──┴──┼──┴──┴──┴──┘ │ ┌───────┴───────┐ ▼ ▼ ┌─────────┐ ┌─────────┐ │ AtLeast1│ ...
[]
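The circuit above composes an AtLeast1 unit with a second (truncated) unit; the classic construction is AtLeast1 AND NOT AtLeast2. A self-contained sketch with assumed weights and thresholds:

```python
# Sketch: exactly-1-of-8 as two threshold units feeding an output unit.
# Weights and thresholds are assumed, not taken from the repo.
import numpy as np

def threshold_unit(x, weights, theta):
    """Classic threshold-logic neuron: fires iff the weighted sum reaches theta."""
    return int(np.dot(weights, x) >= theta)

def exactly_one_of_8(x):
    x = np.asarray(x)
    at_least_1 = threshold_unit(x, np.ones(8), 1)  # sum >= 1
    at_least_2 = threshold_unit(x, np.ones(8), 2)  # sum >= 2
    # Output unit computes at_least_1 AND NOT at_least_2:
    return threshold_unit(np.array([at_least_1, at_least_2]), np.array([1, -1]), 1)

assert exactly_one_of_8([0, 0, 0, 1, 0, 0, 0, 0]) == 1  # exactly one active
assert exactly_one_of_8([1, 0, 0, 1, 0, 0, 0, 0]) == 0  # two active
assert exactly_one_of_8([0] * 8) == 0                   # none active
```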
rpDungeon/gemmagain-mm
rpDungeon
2026-01-06T00:21:57Z
2
0
null
[ "gemma3", "custom_code", "region:us" ]
null
2026-01-06T00:21:34Z
# Gemmagain Multimodal Gemma3 multimodal model with **layer looping support** for the text decoder. This allows running the same physical text decoder layers multiple times in sequence, enabling parameter-efficient deep networks while leaving the vision tower unchanged. ## Features - **Layer looping for text decoder...
[]
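The Gemmagain card above describes running the same physical text-decoder layers multiple times per forward pass. A conceptual sketch of that idea follows; it is not the repo's actual implementation (real HF decoder layers return tuples and take attention arguments).

```python
# Conceptual sketch of layer looping: shared layers, run `loops` times.
import torch.nn as nn

class LoopedDecoder(nn.Module):
    def __init__(self, layers: nn.ModuleList, loops: int = 2):
        super().__init__()
        self.layers = layers  # shared physical layers (no extra parameters)
        self.loops = loops    # how many times to run the whole stack

    def forward(self, hidden_states):
        for _ in range(self.loops):
            for layer in self.layers:
                hidden_states = layer(hidden_states)
        return hidden_states
```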
soundTeam/Qwen3-VL-12B-Instruct-Brainstorm20x_mlx-hi-4bit
soundTeam
2025-11-14T23:14:24Z
35
0
transformers
[ "transformers", "safetensors", "qwen3_vl", "image-text-to-text", "programming", "code generation", "images", "image to text", "qwen3_vl_text", "Qwen3VLForConditionalGeneration", "video", "code", "coding", "coder", "chat", "brainstorm", "qwen", "qwen3", "qwencoder", "brainstorm ...
image-text-to-text
2025-11-14T23:13:18Z
# soundTeam/Qwen3-VL-12B-Instruct-Brainstorm20x_mlx-hi-4bit This model was converted to MLX format from [`DavidAU/Qwen3-VL-12B-Instruct-Brainstorm20x`]() using mlx-vlm version **0.3.5**. Refer to the [original model card](https://huggingface.co/DavidAU/Qwen3-VL-12B-Instruct-Brainstorm20x) for more details on the model....
[]
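The card above says the model was converted with mlx-vlm. A hedged usage sketch follows, based on mlx-vlm's documented `load`/`generate` helpers; exact signatures vary across mlx-vlm versions, and the image path is a placeholder.

```python
# Hedged sketch using the mlx-vlm Python API; signatures may differ by version.
from mlx_vlm import load, generate

model, processor = load("soundTeam/Qwen3-VL-12B-Instruct-Brainstorm20x_mlx-hi-4bit")
output = generate(model, processor, "Describe this image.", "photo.jpg")  # placeholder image
print(output)
```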
mcnckc/dream-booth-2
mcnckc
2026-01-27T14:13:50Z
0
0
diffusers
[ "diffusers", "tensorboard", "safetensors", "text-to-image", "dreambooth", "diffusers-training", "stable-diffusion", "stable-diffusion-diffusers", "base_model:stable-diffusion-v1-5/stable-diffusion-v1-5", "base_model:finetune:stable-diffusion-v1-5/stable-diffusion-v1-5", "license:creativeml-openr...
text-to-image
2026-01-27T14:00:05Z
<!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # DreamBooth - mcnckc/dream-booth-2 This is a dreambooth model derived from stable-diffusion-v1-5/stable-diffusion-v1-5. T...
[ { "start": 199, "end": 209, "text": "DreamBooth", "label": "training method", "score": 0.9535233378410339 }, { "start": 244, "end": 254, "text": "dreambooth", "label": "training method", "score": 0.9538979530334473 }, { "start": 373, "end": 383, "text": "D...
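The DreamBooth card above derives from stable-diffusion-v1-5, so the checkpoint should load as a standard pipeline. A minimal sketch; the instance prompt is an assumption, since the card's trigger phrase is truncated in this dump.

```python
# Minimal sketch: running a DreamBooth-derived SD 1.5 checkpoint.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "mcnckc/dream-booth-2", torch_dtype=torch.float16
).to("cuda")
image = pipe("a photo of the trained subject").images[0]  # assumed prompt
image.save("out.png")
```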
mradermacher/Astro_Viginti_octo-GGUF
mradermacher
2025-09-02T16:45:20Z
1
0
transformers
[ "transformers", "gguf", "en", "base_model:nimocodes/Astro_Viginti_octo", "base_model:quantized:nimocodes/Astro_Viginti_octo", "license:mit", "endpoints_compatible", "region:us" ]
null
2025-09-02T15:02:14Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static qu...
[]
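The quant list above (Q4_K_M, Q8_0, etc.) refers to GGUF files runnable with llama.cpp bindings. A hedged sketch with llama-cpp-python; the filename glob is an assumption, so list the repo files to confirm.

```python
# Hedged sketch: pulling and running one of the static GGUF quants above.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="mradermacher/Astro_Viginti_octo-GGUF",
    filename="*Q4_K_M.gguf",  # assumed filename pattern
)
print(llm("Hello", max_tokens=32)["choices"][0]["text"])
```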
Muapi/kyokajiro-from-my-hero-academia
Muapi
2025-08-20T20:51:18Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-20T20:50:36Z
# KyokaJiro (from My Hero Academia) ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: KyokaJiro ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" header...
[]
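The MUAPI snippet above is truncated before the headers are defined. A hedged completion follows; the header field name and payload schema are assumptions, not confirmed by the card, and only the endpoint URL and trigger word come from the card itself.

```python
# Hedged completion of the truncated request above.
# "x-api-key" and the payload keys are ASSUMED, not documented in the card.
import os
import requests

url = "https://api.muapi.ai/api/v1/flux_dev_lora_image"
headers = {"x-api-key": os.environ["MUAPI_KEY"]}  # assumed header name
payload = {"prompt": "KyokaJiro, portrait, detailed"}  # trigger word from the card
resp = requests.post(url, headers=headers, json=payload)
resp.raise_for_status()
print(resp.json())
```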
toolathlon-eval-07/MyAwesomeModel-TestRepo
toolathlon-eval-07
2026-04-08T23:13:30Z
49
0
transformers
[ "transformers", "pytorch", "bert", "feature-extraction", "license:mit", "endpoints_compatible", "region:us" ]
feature-extraction
2026-04-08T23:13:04Z
# MyAwesomeModel <!-- markdownlint-disable first-line-h1 --> <!-- markdownlint-disable html --> <!-- markdownlint-disable no-duplicate-header --> <div align="center"> <img src="figures/fig1.png" width="60%" alt="MyAwesomeModel" /> </div> <hr> <div align="center" style="line-height: 1;"> <a href="LICENSE" style="m...
[ { "start": 757, "end": 770, "text": "post-training", "label": "training method", "score": 0.8278292417526245 } ]
Stumpwiz/forecastllm-2026-04-30_10.28.50
Stumpwiz
2026-04-30T14:29:19Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "sft", "trl", "base_model:TinyLlama/TinyLlama-1.1B-Chat-v1.0", "base_model:finetune:TinyLlama/TinyLlama-1.1B-Chat-v1.0", "endpoints_compatible", "region:us" ]
null
2026-04-30T14:28:51Z
# Model Card for forecastllm-2026-04-30_10.28.50 This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-Chat-v1.0](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline ques...
[]
BootesVoid/cmg9mk22301ysrqransqodxgf_cmgasmhq102p9rqrayet8ccen
BootesVoid
2025-10-03T14:37:38Z
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
2025-10-03T14:37:36Z
# Cmg9Mk22301Ysrqransqodxgf_Cmgasmhq102P9Rqrayet8Ccen <Gallery /> ## About this LoRA This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI. It was trained on [Replicate](https://replicate.com/) using AI toolkit: https:...
[]
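The card above describes a LoRA for the FLUX.1-dev text-to-image model, usable with diffusers. A minimal sketch:

```python
# Minimal sketch: loading a FLUX.1-dev LoRA with diffusers.
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe.load_lora_weights("BootesVoid/cmg9mk22301ysrqransqodxgf_cmgasmhq102p9rqrayet8ccen")
pipe.to("cuda")
image = pipe("portrait photo, soft light", num_inference_steps=28).images[0]
image.save("out.png")
```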