| Column | Type | Range |
| --- | --- | --- |
| modelId | string | length 9 to 122 |
| author | string | length 2 to 36 |
| last_modified | timestamp[us, tz=UTC] | 2021-05-20 01:31:09 to 2026-05-05 06:14:24 |
| downloads | int64 | 0 to 4.03M |
| likes | int64 | 0 to 4.32k |
| library_name | string | 189 distinct values |
| tags | list | length 1 to 237 |
| pipeline_tag | string | 53 distinct values |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 to 2026-05-05 05:54:22 |
| card | string | length 500 to 661k |
| entities | list | length 0 to 12 |
thunderboltc/whisper-small-santali-ol-chiki
thunderboltc
2025-12-23T14:49:07Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "whisper", "automatic-speech-recognition", "generated_from_trainer", "sat", "dataset:mozilla-foundation/common-voice-santali", "base_model:openai/whisper-small", "base_model:finetune:openai/whisper-small", "license:apache-2.0", "endpoints_compatibl...
automatic-speech-recognition
2025-12-22T07:10:01Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Santali (Ol Chiki) This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisp...
[]
mradermacher/SP-7B-GGUF
mradermacher
2025-11-13T01:40:38Z
17
0
transformers
[ "transformers", "gguf", "en", "base_model:volcanos/SP-7B", "base_model:quantized:volcanos/SP-7B", "license:mit", "endpoints_compatible", "region:us", "conversational" ]
null
2025-11-12T15:45:58Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
prithivMLmods/Qwen3-VL-2B-Instruct-GGUF
prithivMLmods
2025-11-12T04:04:24Z
58
1
transformers
[ "transformers", "gguf", "qwen3_vl", "text-generation-inference", "ggml", "llama.cpp", "image-text-to-text", "en", "base_model:Qwen/Qwen3-VL-2B-Instruct", "base_model:quantized:Qwen/Qwen3-VL-2B-Instruct", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
image-text-to-text
2025-11-11T12:26:04Z
# **Qwen3-VL-2B-Instruct-GGUF** > Qwen3-VL-2B-Instruct is a highly advanced 2-billion-parameter vision-language model from the Qwen3 series, designed to deliver superior multimodal understanding and generation by seamlessly integrating deep visual perception with strong text understanding and generation capabilities. ...
[]
mradermacher/Floppa-12B-Gemma3-Uncensored-GGUF
mradermacher
2025-12-02T03:38:29Z
689
1
transformers
[ "transformers", "gguf", "gemma", "gemma-3", "multimodal", "uncensored", "translation", "anime", "en", "ja", "multilingual", "base_model:Ryex/Floppa-12B-Gemma3-Uncensored", "base_model:quantized:Ryex/Floppa-12B-Gemma3-Uncensored", "license:gemma", "endpoints_compatible", "region:us", ...
translation
2025-12-02T03:05:38Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
mradermacher/heretic_Qwill-0.6B-IT-FULL-i1-GGUF
mradermacher
2025-12-07T17:00:08Z
84
0
transformers
[ "transformers", "gguf", "heretic", "en", "base_model:hereticness/heretic_Qwill-0.6B-IT-FULL", "base_model:quantized:hereticness/heretic_Qwill-0.6B-IT-FULL", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2025-12-07T16:40:44Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
AliWM/results
AliWM
2025-12-05T16:52:16Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:bigscience/bloom-560m", "lora", "transformers", "base_model:bigscience/bloom-560m", "license:bigscience-bloom-rail-1.0", "region:us" ]
null
2025-12-05T16:52:08Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # results This model is a fine-tuned version of [bigscience/bloom-560m](https://huggingface.co/bigscience/bloom-560m) on an unknown...
[]
RedHatAI/GLM-4.6-quantized.w8a8
RedHatAI
2026-03-12T19:00:28Z
52
0
null
[ "safetensors", "glm4_moe", "w8a8", "vllm", "text-generation", "conversational", "en", "zh", "base_model:zai-org/GLM-4.6", "base_model:quantized:zai-org/GLM-4.6", "8-bit", "compressed-tensors", "region:us" ]
text-generation
2025-12-19T16:43:36Z
# GLM-4.6-quantized.w8a8 ## Model Overview - **Model Architecture:** zai-org/GLM-4.6 - **Input:** Text - **Output:** Text - **Model Optimizations:** - **Weight quantization:** INT8 - **Activation quantization:** INT8 - **Out-of-scope:** Use in any manner that violates applicable laws or regulations (including ...
[]
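The record above tags vLLM and compressed-tensors as the intended runtime for this INT8 (w8a8) checkpoint. A minimal offline-inference sketch with vLLM, assuming a multi-GPU node large enough for the model; the `tensor_parallel_size` value is illustrative, not from the card:

```python
from vllm import LLM, SamplingParams

# Load the w8a8 checkpoint; vLLM reads the compressed-tensors
# quantization config from the repo automatically.
llm = LLM(
    model="RedHatAI/GLM-4.6-quantized.w8a8",
    tensor_parallel_size=8,  # illustrative; size to your hardware
)

params = SamplingParams(temperature=0.7, max_tokens=256)
outputs = llm.generate(["Summarize the benefits of INT8 quantization."], params)
print(outputs[0].outputs[0].text)
```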
AnonymousCS/populism_classifier_bsample_412
AnonymousCS
2025-08-28T06:31:32Z
4
0
transformers
[ "transformers", "safetensors", "rembert", "text-classification", "generated_from_trainer", "base_model:google/rembert", "base_model:finetune:google/rembert", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-classification
2025-08-28T06:28:56Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # populism_classifier_bsample_412 This model is a fine-tuned version of [google/rembert](https://huggingface.co/google/rembert) on ...
[]
GreenNode/gliner-bi-large-v1.0-tuned-2048
GreenNode
2025-11-21T08:29:46Z
1
0
gliner
[ "gliner", "pytorch", "NER", "GLiNER", "information extraction", "encoder", "entity recognition", "token-classification", "en", "vi", "dataset:urchade/pile-mistral-v0.1", "dataset:numind/NuNER", "dataset:knowledgator/GLINER-multi-task-synthetic-data", "license:apache-2.0", "region:us" ]
token-classification
2025-11-21T08:27:37Z
# Entity Types Classification ## Personal Information - Date of birth - Age - Gender - Last name - Occupation - Education level - Phone number - Email - Street address - City - Country - Postcode - User name - Password - Tax ID - License plate - CVV - Bank routing number - Account number - SWIFT BIC - Biometric identi...
[]
buzzpy/Glitch-v1-8B
buzzpy
2025-12-17T00:21:35Z
12
7
null
[ "gguf", "clone", "experiment", "glitch", "human-ai-clone", "imperfect-ai", "biased-ai", "bias", "character-ai", "synthetic-persona", "human-like-ai", "base_model:bartowski/Meta-Llama-3-8B-Instruct-GGUF", "base_model:quantized:bartowski/Meta-Llama-3-8B-Instruct-GGUF", "license:mit", "endp...
null
2025-12-02T04:10:19Z
[Recommended: Use V1.2 for better consistency, biases and opinions!](https://huggingface.co/buzzpy/Glitch-v1.2-8B) --- # [Glitch V1.0 (Llama-3-8B Fine-Tune) - Experimental](https://glitch.chenuli-j.me/) Glitch is a text-generation model shaped after one ordinary person living an ordinary life in America… and that or...
[]
pictgensupport/Dragon666_508
pictgensupport
2025-09-17T21:23:26Z
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
2025-09-17T21:23:23Z
# Dragon666_508 <Gallery /> Trained on Replicate using: https://replicate.com/ostris/flux-dev-lora-trainer/train ## Trigger words You should use `dragon666_9` to trigger the image generation. ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipe...
[]
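The card's usage snippet above is truncated mid-import. A minimal sketch of the pattern it points at, loading this LoRA on top of FLUX.1-dev with diffusers; the weight filename is an assumption (Replicate's flux trainer typically exports `lora.safetensors`), so check the repo's file list:

```python
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Filename is an assumption; verify against the repo.
pipe.load_lora_weights("pictgensupport/Dragon666_508", weight_name="lora.safetensors")

# The card says to use `dragon666_9` as the trigger word.
image = pipe("dragon666_9 perched on a castle tower at dusk").images[0]
image.save("dragon666.png")
```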
gabrielemidulla/MyGemmaNPC
gabrielemidulla
2025-08-21T18:56:44Z
3
0
transformers
[ "transformers", "tensorboard", "safetensors", "gemma3_text", "text-generation", "generated_from_trainer", "sft", "trl", "conversational", "base_model:google/gemma-3-270m-it", "base_model:finetune:google/gemma-3-270m-it", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-08-21T18:55:14Z
# Model Card for MyGemmaNPC This model is a fine-tuned version of [google/gemma-3-270m-it](https://huggingface.co/google/gemma-3-270m-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could ...
[]
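The quick-start in the card above is cut off mid-snippet. A self-contained sketch of the same `pipeline` pattern, with a placeholder prompt of our own rather than the card's truncated question:

```python
from transformers import pipeline

generator = pipeline("text-generation", model="gabrielemidulla/MyGemmaNPC")

# Chat-style input; the pipeline applies the model's chat template.
messages = [{"role": "user", "content": "Greet a traveler entering your village."}]
result = generator(messages, max_new_tokens=64)

# For chat input, generated_text holds the full message list;
# the last entry is the assistant's reply.
print(result[0]["generated_text"][-1]["content"])
```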
pymlex/nllb-600M-kpv-rus
pymlex
2026-03-30T19:16:43Z
0
0
transformers
[ "transformers", "safetensors", "translation", "ru", "kv", "dataset:Horeknad/komi-russian-parallel-corpora", "base_model:facebook/nllb-200-distilled-600M", "base_model:finetune:facebook/nllb-200-distilled-600M", "license:gpl-3.0", "endpoints_compatible", "region:us" ]
translation
2026-03-30T00:27:24Z
# NLLB-200-distilled-600M-LoRA for Russian — Komi-Zyrian translation This is a LoRA adapter on top of `facebook/nllb-200-distilled-600M` for bidirectional translation between Russian and Komi-Zyrian. It was trained on 50,815 parallel sentence pairs from `Horeknad/komi-russian-parallel-corpora`. The data was cleaned, ...
[]
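The card above describes a LoRA adapter over `facebook/nllb-200-distilled-600M` for translation between Russian and Komi-Zyrian. A hedged sketch of loading it with PEFT: `rus_Cyrl` is NLLB's real Russian code, but the Komi target code (`kpv_Cyrl` here) is an assumption inferred from the repo name, since base NLLB-200 does not ship a Komi-Zyrian tag:

```python
from peft import PeftModel
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

base = "facebook/nllb-200-distilled-600M"
tokenizer = AutoTokenizer.from_pretrained(base, src_lang="rus_Cyrl")
model = PeftModel.from_pretrained(
    AutoModelForSeq2SeqLM.from_pretrained(base), "pymlex/nllb-600M-kpv-rus"
)

inputs = tokenizer("Добрый день!", return_tensors="pt")
out = model.generate(
    **inputs,
    # "kpv_Cyrl" is an assumption; check the adapter's tokenizer config.
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("kpv_Cyrl"),
    max_new_tokens=64,
)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```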
emmabedna/langtok-bert_uncased
emmabedna
2025-11-11T16:57:31Z
0
0
transformers
[ "transformers", "safetensors", "bert", "token-classification", "generated_from_trainer", "base_model:google-bert/bert-base-multilingual-uncased", "base_model:finetune:google-bert/bert-base-multilingual-uncased", "license:apache-2.0", "endpoints_compatible", "region:us" ]
token-classification
2025-11-11T15:44:12Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # langtok-bert_uncased This model is a fine-tuned version of [google-bert/bert-base-multilingual-uncased](https://huggingface.co/go...
[]
saranyabalakumar/ppo-Pyramids
saranyabalakumar
2025-09-08T11:37:03Z
21
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "Pyramids", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
2025-09-08T11:36:54Z
# **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/...
[]
microsoft/unixcoder-base-nine
microsoft
2024-07-31T05:20:43Z
13,606
22
transformers
[ "transformers", "pytorch", "roberta", "feature-extraction", "en", "arxiv:2203.03850", "license:apache-2.0", "endpoints_compatible", "deploy:azure", "region:us" ]
feature-extraction
2022-04-02T05:33:27Z
# Model Card for UniXcoder-base # Model Details ## Model Description UniXcoder is a unified cross-modal pre-trained model that leverages multimodal data (i.e. code comment and AST) to pretrain code representation. - **Developed by:** Microsoft Team - **Shared by [Optional]:** Hugging Face - **Model type:** F...
[]
mlx-community/whisper-medium-malayalam-mlx
mlx-community
2025-08-22T20:31:48Z
1
0
mlx
[ "mlx", "whisper", "Automatic Speech Recognition", "automatic-speech-recognition", "ml", "base_model:vrclc/Whisper-medium-Malayalam", "base_model:finetune:vrclc/Whisper-medium-Malayalam", "license:apache-2.0", "region:us" ]
automatic-speech-recognition
2025-08-22T20:23:28Z
## Whisper-medium-Malayalam (MLX) Apple MLX-converted weights for `vrclc/Whisper-medium-Malayalam` optimized for Apple Silicon. - Base model: `vrclc/Whisper-medium-Malayalam` - Format: MLX (`weights.safetensors`, `config.json`) - Intended runtime: `mlx-whisper` on Apple Silicon (M-series) ### Usage (Python) ```pytho...
[]
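The usage snippet in the card above breaks off at the code fence. A minimal sketch with `mlx-whisper`, the runtime the card names (Apple Silicon only); the audio filename is a placeholder:

```python
# pip install mlx-whisper
import mlx_whisper

result = mlx_whisper.transcribe(
    "speech.wav",  # placeholder audio file
    path_or_hf_repo="mlx-community/whisper-medium-malayalam-mlx",
)
print(result["text"])
```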
mradermacher/Toucan-Qwen2.5-32B-Instruct-v0.1-i1-GGUF
mradermacher
2025-12-05T15:27:10Z
56
0
transformers
[ "transformers", "gguf", "agent", "en", "dataset:Agent-Ark/Toucan-1.5M", "base_model:Agent-Ark/Toucan-Qwen2.5-32B-Instruct-v0.1", "base_model:quantized:Agent-Ark/Toucan-Qwen2.5-32B-Instruct-v0.1", "license:apache-2.0", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2025-10-04T06:52:06Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
Qwen/Qwen3-TTS-12Hz-1.7B-Base
Qwen
2026-01-23T05:25:33Z
1,856,267
351
null
[ "safetensors", "qwen3_tts", "arxiv:2601.15621", "license:apache-2.0", "region:us" ]
null
2026-01-21T08:57:11Z
# Qwen3-TTS ## Overview ### Introduction <p align="center"> <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-TTS-Repo/qwen3_tts_introduction.png" width="90%"/> <p> Qwen3-TTS covers 10 major languages (Chinese, English, Japanese, Korean, German, French, Russian, Portuguese, Spanish, and Italian) as...
[]
Asanshay/websight-v2-grounded
Asanshay
2025-11-14T00:36:22Z
1
0
null
[ "safetensors", "qwen3_vl", "qwen3-vl", "vision", "gui-automation", "websight", "fine-tuned", "image-text-to-text", "conversational", "en", "dataset:wave-ui/websight-v2", "base_model:Qwen/Qwen3-VL-8B-Instruct", "base_model:finetune:Qwen/Qwen3-VL-8B-Instruct", "license:apache-2.0", "region...
image-text-to-text
2025-11-14T00:23:35Z
# Qwen3-VL-8B WebSight Fine-tuned This model is a fine-tuned version of [Qwen/Qwen3-VL-8B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-8B-Instruct) on the WebSight dataset for GUI automation tasks. ## Model Description - **Base Model**: Qwen/Qwen3-VL-8B-Instruct - **Fine-tuning Method**: LoRA (merged) - **Dataset*...
[]
liubinemail/Qwen2.5-7B-Instruct
liubinemail
2026-03-25T05:09:04Z
0
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "chat", "conversational", "en", "arxiv:2309.00071", "arxiv:2407.10671", "base_model:Qwen/Qwen2.5-7B", "base_model:finetune:Qwen/Qwen2.5-7B", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-03-25T05:09:04Z
# Qwen2.5-7B-Instruct <a href="https://chat.qwenlm.ai/" target="_blank" style="margin: 2px;"> <img alt="Chat" src="https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5" style="display: inline-block; vertical-align: middle;"/> </a> ## Introduction Qwen2.5 is the latest series of Qwen large la...
[ { "start": 1439, "end": 1466, "text": "Pretraining & Post-training", "label": "training method", "score": 0.7667000889778137 } ]
Nithish2410/ft-symsm-g3-Q3-32B-wothink-rlzero-3k-dry-r16-0.8R100n0.1R10n0.1colsml-symsm-orig-bs-phase1-clr
Nithish2410
2026-04-22T09:02:14Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "grpo", "arxiv:2402.03300", "base_model:Qwen/Qwen3-32B", "base_model:finetune:Qwen/Qwen3-32B", "endpoints_compatible", "region:us" ]
null
2026-04-22T06:21:30Z
# Model Card for clusmsm-g3f-col-q3-reranked-100-Qwen3-32B-20260422_060956-rl-checkpoint This model is a fine-tuned version of [Qwen/Qwen3-32B](https://huggingface.co/Qwen/Qwen3-32B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline ques...
[]
qualiaadmin/a6fc5a71-18f6-4b46-93ce-31f50ec61838
qualiaadmin
2025-11-27T20:35:18Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:Calvert0921/SmolVLA_LiftRedCubeDouble_Franka_100", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2025-11-27T20:35:01Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
donoway/BoolQ_Llama-3.2-1B-6vpqysw0
donoway
2025-08-18T23:26:06Z
2
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "generated_from_trainer", "base_model:meta-llama/Llama-3.2-1B", "base_model:finetune:meta-llama/Llama-3.2-1B", "license:llama3.2", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-08-18T22:05:55Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BoolQ_Llama-3.2-1B-6vpqysw0 This model is a fine-tuned version of [meta-llama/Llama-3.2-1B](https://huggingface.co/meta-llama/Lla...
[]
iara-project/NeoBERTugues-matryoshka-sts-pt
iara-project
2026-03-24T12:43:18Z
10
0
sentence-transformers
[ "sentence-transformers", "safetensors", "modernbert", "sentence-similarity", "feature-extraction", "dense", "generated_from_trainer", "dataset_size:67187", "loss:MatryoshkaLoss", "loss:MultipleNegativesRankingLoss", "loss:CoSENTLoss", "multilingual", "zh", "ja", "ar", "ko", "de", "...
sentence-similarity
2026-03-24T12:43:03Z
# SentenceTransformer based on lorenzocc/NeoBERTugues This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [lorenzocc/NeoBERTugues](https://huggingface.co/lorenzocc/NeoBERTugues) on the [nli_pt_anli](https://huggingface.co/datasets/MoritzLaurer/multilingual-NLI-26lang-2mil7), [nli_pt_fever](ht...
[]
ling1000T/DeepSeek-R1-0528-gguf
ling1000T
2025-12-13T17:08:37Z
6
0
null
[ "gguf", "base_model:deepseek-ai/DeepSeek-R1-0528", "base_model:quantized:deepseek-ai/DeepSeek-R1-0528", "license:mit", "endpoints_compatible", "region:us", "conversational" ]
null
2025-10-31T22:02:58Z
# DeepSeek-R1-0528 gguf This is the classic DeepSeek R1 model. Without it, the future of AI would be controlled by a handful of people shown in TIME magazine's "Person of the Year 2025", whose assets average a trillion dollars, and by the government that can control those people. Altman's AI, Musk's AI, Pichai's AI, Za...
[]
AliMurtaza-096/qwen2.5-7b-medical-instruct
AliMurtaza-096
2025-12-07T17:27:12Z
13
0
null
[ "safetensors", "gguf", "qwen2", "endpoints_compatible", "region:us", "conversational" ]
null
2025-12-07T17:18:55Z
# Qwen 2.5 7B Medical Front Desk Assistant Fine-tuned version of Qwen2.5-7B-Instruct for medical front desk conversations, trained on 130 examples of clinic administrative tasks. ## Model Details - **Base Model**: unsloth/Qwen2.5-7B-Instruct-bnb-4bit - **Training Framework**: Unsloth + LoRA - **Training Steps**: 80 ...
[]
IlkkaU/sam-football-gpt2
IlkkaU
2025-11-08T22:17:04Z
2
0
peft
[ "peft", "safetensors", "base_model:adapter:gpt2", "lora", "transformers", "text-generation", "base_model:openai-community/gpt2", "base_model:adapter:openai-community/gpt2", "license:mit", "region:us" ]
text-generation
2025-11-08T21:13:56Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sam-football-gpt2 This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. It achieves th...
[]
GMorgulis/deepseek-llm-7b-chat-panda-NORMAL-ft0.43
GMorgulis
2026-03-10T19:53:26Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:deepseek-ai/deepseek-llm-7b-chat", "base_model:finetune:deepseek-ai/deepseek-llm-7b-chat", "endpoints_compatible", "region:us" ]
null
2026-03-10T08:01:56Z
# Model Card for deepseek-llm-7b-chat-panda-NORMAL-ft0.43 This model is a fine-tuned version of [deepseek-ai/deepseek-llm-7b-chat](https://huggingface.co/deepseek-ai/deepseek-llm-7b-chat). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline ...
[]
lakshayyy19/FinanceGPT-Mistral-7B
lakshayyy19
2026-04-05T14:45:06Z
0
0
null
[ "safetensors", "region:us" ]
null
2026-04-04T18:11:12Z
# FinanceGPT-Mistral-7B A fine-tuned version of Mistral-7B-Instruct specialized for Indian personal finance coaching. ## What it does Provides personalized financial advice for Indian salaried professionals aged 24-32. Covers: - Emergency fund planning - SIP and mutual fund guidance (Nifty 50, ELSS, index funds) - ...
[ { "start": 533, "end": 538, "text": "QLoRA", "label": "training method", "score": 0.7779173254966736 } ]
jialicheng/unlearn-so_cifar10_swin-base_salun_4_100
jialicheng
2025-10-29T05:14:40Z
11
0
transformers
[ "transformers", "safetensors", "swin", "image-classification", "vision", "generated_from_trainer", "base_model:microsoft/swin-base-patch4-window7-224", "base_model:finetune:microsoft/swin-base-patch4-window7-224", "license:apache-2.0", "endpoints_compatible", "region:us" ]
image-classification
2025-10-29T05:12:47Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 100 This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224](https://huggingface.co/microsoft/swin-base-pat...
[]
varadankalkunte/act-pickup-speaker
varadankalkunte
2026-01-31T02:16:37Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:varadankalkunte/record-test", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-01-24T01:43:54Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
ergys25o425/navitrak-ai-gemma4
ergys25o425
2026-04-06T12:48:46Z
0
0
null
[ "gguf", "gemma4", "llama.cpp", "unsloth", "vision-language-model", "endpoints_compatible", "region:us", "conversational" ]
null
2026-04-06T12:47:13Z
# navitrak-ai-gemma4 : GGUF This model was finetuned and converted to GGUF format using [Unsloth](https://github.com/unslothai/unsloth). **Example usage**: - For text only LLMs: `llama-cli -hf ergys25o425/navitrak-ai-gemma4 --jinja` - For multimodal models: `llama-mtmd-cli -hf ergys25o425/navitrak-ai-gemma4 --jinj...
[]
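The card above gives the `llama-cli` and `llama-mtmd-cli` invocations. For completeness, a hedged text-only sketch of the same repo through llama-cpp-python; the filename glob assumes a single GGUF file in the repo:

```python
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="ergys25o425/navitrak-ai-gemma4",
    filename="*.gguf",  # assumes one GGUF file; pin an exact name otherwise
    n_ctx=4096,
)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Hello! What can you do?"}]
)
print(out["choices"][0]["message"]["content"])
```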
athiathiathi/tamil_poem_generator
athiathiathi
2026-02-02T18:19:39Z
0
0
null
[ "region:us" ]
null
2026-02-02T18:09:24Z
# Tamil Poem Generation – Training ## Overview This directory contains the training pipeline for building a Tamil Causal Language Model (CLM) for poem generation. The training stage learns Tamil grammar, vocabulary, and poetic structure from curated datasets and produces model checkpoints used later for inference. ...
[]
SuperMust/oss-lexior-multi-lingual
SuperMust
2025-11-02T05:05:26Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "sft", "trl", "base_model:openai/gpt-oss-20b", "base_model:finetune:openai/gpt-oss-20b", "endpoints_compatible", "region:us" ]
null
2025-11-02T03:44:29Z
# Model Card for oss-lexior-multi-lingual This model is a fine-tuned version of [openai/gpt-oss-20b](https://huggingface.co/openai/gpt-oss-20b). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but ...
[]
ashishfew/uhyjftuy
ashishfew
2025-08-27T09:17:38Z
0
0
null
[ "region:us" ]
null
2025-08-27T09:17:18Z
https://www.cucei.udg.mx/carreras/alimentos/sites/default/files/webform/mexico-_-telefonoir_omo_puedo_llamar_a_air_france_en_mexico_.pdf https://www.cucei.udg.mx/carreras/fisica/sites/default/files/webform/cmexico-air_canada_telefono_mexicocomo_llamar_a_air_canada_desde_mexico_.pdf https://www.cucei.udg.mx/carreras/fi...
[]
PThi35/whisper_large_v3_phase4
PThi35
2026-03-23T20:29:42Z
11
0
transformers
[ "transformers", "tensorboard", "safetensors", "whisper", "automatic-speech-recognition", "generated_from_trainer", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2026-03-23T10:56:12Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper_large_v3_phase4 This model was trained from scratch on an unknown dataset. It achieves the following results on the evalu...
[]
AmpereComputing/deepseek-v3.1-gguf
AmpereComputing
2025-09-13T00:38:08Z
15
0
null
[ "gguf", "base_model:deepseek-ai/DeepSeek-V3.1-Base", "base_model:quantized:deepseek-ai/DeepSeek-V3.1-Base", "endpoints_compatible", "region:us", "conversational" ]
null
2025-09-12T18:06:38Z
![llama.cpp](https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png "llama.cpp") # Ampere® optimized llama.cpp ![llama.cpp pull count](https://img.shields.io/docker/pulls/amperecomputingai/llama.cpp?logo=meta&logoColor=black&label=llama.cpp&labelColor=violet&color=purple) ...
[]
Sandro-Halpo/SamDoesArt-V3
Sandro-Halpo
2022-12-05T11:07:16Z
23
71
diffusers
[ "diffusers", "license:unlicense", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2022-11-30T15:56:12Z
Use the token SamDoesArt to trigger the effect. It should work anywhere in the prompt. I usually put it right at the beginning of the prompt, which has a mildly different effect than putting it at the end. It is up to you, though, to run a few tests and find where in the prompt it works best for your personal taste. I ...
[]
FakeRockert543/gemma-4-31b-it-MLX-bf16
FakeRockert543
2026-04-03T14:09:12Z
0
0
mlx
[ "mlx", "safetensors", "gemma4", "ple-safe", "quantized", "apple-silicon", "vision", "image-text-to-text", "conversational", "en", "zh", "ja", "ko", "de", "fr", "es", "pt", "it", "ar", "hi", "base_model:google/gemma-4-31B-it", "base_model:finetune:google/gemma-4-31B-it", "...
image-text-to-text
2026-04-03T14:08:25Z
# gemma-4-31b-it-MLX-bf16 **PLE-safe** MLX bf16 weights for Google Gemma 4 31B (31B dense) on Apple Silicon. - 📦 Source & convert scripts: [GitHub — FakeRocket543/mlx-gemma4](https://github.com/FakeRocket543/mlx-gemma4) - 📊 Size: **62.5 GB** > ⚠️ **Existing MLX quantized Gemma 4 models (mlx-community, unsloth) pro...
[]
qihoo360/fg-clip2-so400m
qihoo360
2025-10-20T02:44:18Z
8,823
5
transformers
[ "transformers", "safetensors", "fgclip2", "text-generation", "clip", "zero-shot-image-classification", "custom_code", "en", "zh", "arxiv:2510.10921", "arxiv:2505.05071", "license:apache-2.0", "region:us" ]
zero-shot-image-classification
2025-10-13T07:59:28Z
# FG-CLIP 2: A Bilingual Fine-grained Vision-language Alignment Model Code: https://github.com/360CVGroup/FG-CLIP Project page: https://360cvgroup.github.io/FG-CLIP FG-CLIP 2 is the foundation model for fine-grained vision-language understanding in both English and Chinese. Across 29 datasets and 8 diverse tasks, it...
[]
kendrickfff/Disease-Progression-Prediction
kendrickfff
2026-02-18T15:43:35Z
0
0
sklearn
[ "sklearn", "tabular-regression", "scikit-learn", "linear-regression", "microsoft-fabric", "mlflow", "diabetes", "healthcare", "en", "dataset:azure-open-datasets/diabetes", "license:mit", "region:us" ]
tabular-regression
2026-02-18T15:40:31Z
# 📉 Diabetes — Disease Progression Prediction (Linear Regression) A **Linear Regression** model trained on the **Diabetes dataset** from Azure Open Datasets to predict **Y** (a quantitative measure of disease progression one year after baseline). Built and deployed on **Microsoft Fabric** during **Offline Workshop T...
[]
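The card above names Linear Regression on the Diabetes dataset, with Y as the one-year disease-progression target. A local sketch of that setup using scikit-learn's bundled copy of the same data; the actual model was trained and deployed on Microsoft Fabric with MLflow, which this does not reproduce:

```python
from sklearn.datasets import load_diabetes
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

# Ten baseline features; y is disease progression one year after baseline.
X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

model = LinearRegression().fit(X_train, y_train)
print("R^2 on held-out data:", r2_score(y_test, model.predict(X_test)))
```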
KazuyaZaitsu/qwen3-4b-structeval-lora-0211-1654
KazuyaZaitsu
2026-02-11T08:53:34Z
0
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:u-10bei/structured_data_with_cot_dataset_512_v2", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-02-11T08:53:21Z
qwen3-4b-structured-output-sft-lora-kazuya-0211-1654 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is t...
[ { "start": 154, "end": 159, "text": "QLoRA", "label": "training method", "score": 0.8180686235427856 }, { "start": 595, "end": 600, "text": "QLoRA", "label": "training method", "score": 0.7187272906303406 } ]
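The card above stresses that only LoRA adapter weights are shipped and that the base model must be loaded separately. A minimal sketch of that two-step load with PEFT:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "Qwen/Qwen3-4B-Instruct-2507"
adapter_id = "KazuyaZaitsu/qwen3-4b-structeval-lora-0211-1654"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.bfloat16, device_map="auto"
)
# Attach the adapter on top of the separately loaded base model.
model = PeftModel.from_pretrained(base, adapter_id)
```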
mradermacher/SmolLM2-1.7B-magpie-ultra-v1.0-query-rating-431k-GGUF
mradermacher
2025-09-06T11:32:21Z
0
0
transformers
[ "transformers", "gguf", "en", "endpoints_compatible", "region:us" ]
null
2025-09-06T11:14:17Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static qu...
[]
dpavlis/Qwen3-8B-CTL
dpavlis
2026-04-08T13:00:49Z
29
0
null
[ "safetensors", "qwen3", "ETL", "DataIntegration", "dataset:dpavlis/ctl_lora_sft_data", "base_model:Qwen/Qwen3-8B", "base_model:finetune:Qwen/Qwen3-8B", "license:mit", "region:us" ]
null
2026-04-08T12:43:04Z
A LoRA fine-tuned version of Qwen3-8B (no_think template) specialized in CTL2 (Clover Transformation Language 2) — the domain-specific language used for data transformations in CloverDX ETL pipelines. ### Model Description This model assists developers writing CTL2 transformation code inside CloverDX. It can generat...
[]
chronobcelp/test105-8
chronobcelp
2026-02-22T15:56:03Z
0
0
peft
[ "peft", "safetensors", "qwen2", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "base_model:Qwen/Qwen2.5-7B-Instruct", "base_model:adapter:Qwen/Qwen2.5-7B-Instruct", "license:apache-2.0", "region:us" ]
text-generation
2026-02-22T15:53:32Z
# <qwen3-4b-agent-trajectory-lora> This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen2.5-7B-Instruct** using **LoRA + Unsloth**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **multi-tur...
[ { "start": 65, "end": 69, "text": "LoRA", "label": "training method", "score": 0.8569655418395996 }, { "start": 133, "end": 137, "text": "LoRA", "label": "training method", "score": 0.8736684322357178 }, { "start": 179, "end": 183, "text": "LoRA", "lab...
rwitz/qwen3-1.7b-shakespeare
rwitz
2025-12-10T21:56:09Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "generated_from_trainer", "trl", "sft", "conversational", "base_model:Qwen/Qwen3-1.7B", "base_model:finetune:Qwen/Qwen3-1.7B", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-12-10T21:51:55Z
# Model Card for qwen3-1.7b-shakespeare This model is a fine-tuned version of [Qwen/Qwen3-1.7B](https://huggingface.co/Qwen/Qwen3-1.7B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could on...
[]
h876010068/Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled
h876010068
2026-04-13T13:43:12Z
0
0
null
[ "safetensors", "qwen3_5", "unsloth", "qwen", "qwen3.5", "reasoning", "chain-of-thought", "Dense", "image-text-to-text", "conversational", "en", "zh", "dataset:nohurry/Opus-4.6-Reasoning-3000x-filtered", "dataset:Jackrong/Qwen3.5-reasoning-700x", "base_model:Qwen/Qwen3.5-27B", "base_mod...
image-text-to-text
2026-04-13T13:43:12Z
# 🌟 Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled 🔥 **Update (April 5):** I’ve released the complete training notebook, codebase, and a comprehensive PDF guide to help beginners and enthusiasts understand and reproduce this model's fine-tuning process. > ❤️ Special thanks to the [**Unsloth**](https://unsloth.ai)...
[]
mradermacher/BarcenasMexico-14b-i1-GGUF
mradermacher
2025-12-16T02:57:21Z
79
1
transformers
[ "transformers", "gguf", "mexico", "es", "dataset:Danielbrdz/BarcenasMexico", "base_model:Danielbrdz/BarcenasMexico-14b", "base_model:quantized:Danielbrdz/BarcenasMexico-14b", "license:apache-2.0", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2025-08-21T21:36:26Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_K...
[]
huskyhong/wzryyykl-xhd-cfpl
huskyhong
2026-01-13T16:46:30Z
0
0
null
[ "pytorch", "region:us" ]
null
2026-01-13T09:01:56Z
# Honor of Kings Voice Cloning - Xiahou Dun - Riding the Wind and Waves A series of VoxCPM-based voice-cloning models for Honor of Kings heroes and skins, supporting voice-style cloning and generation for multiple heroes and skins. ## Install dependencies ```bash pip install voxcpm ``` ## Usage ```python import json import soundfile as sf from voxcpm.core import VoxCPM from voxcpm.model.voxcpm import LoRAConfig # Base model path (example path; adjust to your setup) base_model_path = "G:\mergelora\嫦娥...
[]
mradermacher/Kai-30B-Instruct-i1-GGUF
mradermacher
2026-03-03T23:41:22Z
2,536
0
transformers
[ "transformers", "gguf", "math", "reasoning", "text-generation", "ads", "distillation", "code", "en", "base_model:NoesisLab/Kai-30B-Instruct", "base_model:quantized:NoesisLab/Kai-30B-Instruct", "license:apache-2.0", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
text-generation
2026-03-03T17:38:26Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
tooktang/Qwen3-Reranker-4B-CoreML
tooktang
2026-03-03T00:37:43Z
5
0
coremltools
[ "coremltools", "coreml", "qwen3", "reranker", "apple-silicon", "ane", "text-ranking", "en", "zh", "base_model:Qwen/Qwen3-Reranker-4B", "base_model:quantized:Qwen/Qwen3-Reranker-4B", "license:apache-2.0", "region:us" ]
text-ranking
2026-03-03T00:36:31Z
# Qwen3-Reranker-4B-CoreML (ANE-Optimized) ## English This repository provides a pre-converted CoreML bundle derived from `Qwen3-Reranker-4B` and an OpenAI-style rerank API service for Apple Silicon. ### Bundle Specs | Item | Value | | --- | --- | | Base model | `Qwen/Qwen3-Reranker-4B` | | Task | Text reranking | ...
[]
mradermacher/Luciferian_Cultist-3.2-1B-GGUF
mradermacher
2025-09-30T14:11:23Z
54
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "nsfw", "rp", "1b", "llama", "roleplay", "creative", "erotic", "friend", "girlfriend", "perturbations", "llama-cpp", "en", "es", "dataset:marcuscedricridia/unAIthical-ShareGPT-deepclean-sharegpt", "dataset:WasamiKirua/Her-Samantha-Styl...
null
2025-09-20T08:00:15Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
dv347/qwen2.5-7b_pddl-satellite-baseline
dv347
2026-04-08T13:58:51Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:Qwen/Qwen2.5-7B-Instruct", "base_model:finetune:Qwen/Qwen2.5-7B-Instruct", "endpoints_compatible", "region:us" ]
null
2026-04-08T13:34:26Z
# Model Card for qwen2.5-7b_pddl-satellite-baseline This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you ha...
[]
inclusionAI/LLaDA2.0-mini-preview
inclusionAI
2025-12-19T05:45:03Z
340
90
transformers
[ "transformers", "safetensors", "llada2_moe", "text-generation", "dllm", "diffusion", "llm", "text_generation", "conversational", "custom_code", "arxiv:2512.15745", "license:apache-2.0", "region:us" ]
text-generation
2025-10-17T07:36:24Z
# LLaDA2.0-mini-preview **LLaDA2.0-mini-preview** is a diffusion language model featuring a 16BA1B Mixture-of-Experts (MoE) architecture. As an enhanced, instruction-tuned iteration of the LLaDA series, it is optimized for practical applications. <div align="center"> <img src="https://mdn.alipayobjects.com/huamei_q...
[]
choco12358/qwen3-4b-struct-lora-s1024-bs4ga4-lr5e5-20260228-exp002
choco12358
2026-02-28T10:02:07Z
15
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:u-10bei/structured_data_with_cot_dataset_512_v2", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-02-28T10:01:48Z
qwen3-4b-struct-lora-s1024-bs4ga4-lr5e5-exp002 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained...
[ { "start": 148, "end": 153, "text": "QLoRA", "label": "training method", "score": 0.7862371802330017 } ]
AlignmentResearch/obfuscation-atlas-gemma-3-12b-it-kl0.01-det3-seed2-diverse_deception_probe
AlignmentResearch
2026-02-20T21:59:24Z
3
0
peft
[ "peft", "deception-detection", "rlvr", "alignment-research", "obfuscation-atlas", "lora", "model-type:honest", "arxiv:2602.15515", "base_model:google/gemma-3-12b-it", "base_model:adapter:google/gemma-3-12b-it", "license:mit", "region:us" ]
null
2026-02-16T09:33:24Z
# RLVR-trained policy from The Obfuscation Atlas This is a policy trained on MBPP-Honeypot with deception probes, from the [Obfuscation Atlas paper](https://arxiv.org/abs/2602.15515), uploaded for reproducibility and further research. The training code and RL environment are available at: https://github.com/Alignment...
[]
visolex/visobert-normalizer-mix100
visolex
2025-12-24T09:57:57Z
31
0
null
[ "pytorch", "xlm-roberta", "custom_code", "region:us" ]
null
2025-12-24T09:57:53Z
# ViSoNorm: Vietnamese Text Normalization Model ViSoNorm is a state-of-the-art Vietnamese text normalization model that converts informal, non-standard Vietnamese text into standard Vietnamese. The model uses a multi-task learning approach with NSW (Non-Standard Word) detection, mask prediction, and lexical normaliz...
[ { "start": 283, "end": 298, "text": "mask prediction", "label": "training method", "score": 0.7438299059867859 }, { "start": 511, "end": 526, "text": "Mask Prediction", "label": "training method", "score": 0.8263341784477234 } ]
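The tags on the record above mark this repository as `custom_code`, so loading it goes through `trust_remote_code`. A load-only sketch; the normalization entry points live in the repo's custom code and are not documented in the excerpt, so none are invented here:

```python
from transformers import AutoModel, AutoTokenizer

repo = "visolex/visobert-normalizer-mix100"
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModel.from_pretrained(repo, trust_remote_code=True)
# See the repo's custom code for the NSW-detection / mask-prediction heads.
```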
mradermacher/TildeOpen-30b-LatLit-instruct-GGUF
mradermacher
2025-12-11T12:58:53Z
24
0
transformers
[ "transformers", "gguf", "en", "base_model:matiss/TildeOpen-30b-LatLit-instruct", "base_model:quantized:matiss/TildeOpen-30b-LatLit-instruct", "endpoints_compatible", "region:us", "conversational" ]
null
2025-12-11T12:42:07Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
Korla/Wav2Vec2BertForCTC-hsb-0
Korla
2026-04-10T07:32:14Z
156
0
transformers
[ "transformers", "safetensors", "wav2vec2-bert", "automatic-speech-recognition", "hsb", "base_model:facebook/w2v-bert-2.0", "base_model:finetune:facebook/w2v-bert-2.0", "license:cc-by-sa-3.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2026-01-28T09:39:48Z
This is a fine-tuned version of facebook/w2v-bert-2.0 for speech recognition for Upper Sorbian. ## License The models may be used under the **Creative Commons CC BY-SA 3.0** license (see: https://creativecommons.org/licenses/by-sa/3.0/de/). The **Citation** section governs attribution. ## Citation...
[]
greenw0lf/whisper-ssl-embeds-20h
greenw0lf
2026-02-20T21:46:38Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:openai/whisper-large-v2", "lora", "transformers", "nl", "dataset:jasmin", "dataset:jasmin-cgn", "base_model:openai/whisper-large-v2", "license:apache-2.0", "model-index", "region:us" ]
null
2026-02-20T21:46:32Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-ssl-embeds-20h This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-larg...
[]
3division/TinyCLIP-90m_Qwen2.5-0.5B_590M
3division
2026-04-27T17:29:07Z
0
0
null
[ "safetensors", "region:us" ]
null
2026-04-24T23:09:41Z
# VLM Distillation (LLaVA) Small toolkit for training and serving a custom vision-language model (VLM) using a vision encoder + LoRA-tuned language model + projector. ## Main Files - `vlm_distill_LLaVA.py`: Train pipeline for LLaVA-style data (`llava_images_100k/`). Builds model, trains, and saves checkpoints. - `te...
[]
mradermacher/Mira-v1.9-27B-GGUF
mradermacher
2025-10-24T14:14:27Z
2
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "base_model:Lambent/Mira-v1.9-27B", "base_model:quantized:Lambent/Mira-v1.9-27B", "license:gemma", "endpoints_compatible", "region:us", "conversational" ]
null
2025-10-24T04:12:39Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
prithivMLmods/Qwen3-VL-8B-Thinking-Unredacted-MAX-GGUF
prithivMLmods
2026-02-21T18:00:32Z
1,905
3
transformers
[ "transformers", "gguf", "qwen3_vl", "text-generation-inference", "uncensored", "abliterated", "unfiltered", "unredacted", "max", "llama.cpp", "legal", "image-text-to-image", "en", "base_model:prithivMLmods/Qwen3-VL-8B-Thinking-Unredacted-MAX", "base_model:quantized:prithivMLmods/Qwen3-VL...
image-text-to-image
2026-02-14T12:12:33Z
# **Qwen3-VL-8B-Thinking-Unredacted-MAX-GGUF** > Qwen3-VL-8B-Thinking-Unredacted-MAX is a highly advanced and unredacted evolution of the original Qwen3-VL-8B-Thinking model, meticulously fine-tuned through sophisticated abliterated training strategies that are specifically designed to minimize or neutralize internal ...
[ { "start": 208, "end": 253, "text": "sophisticated abliterated training strategies", "label": "training method", "score": 0.7709305882453918 } ]
priorcomputers/qwen2.5-14b-instruct-cn-story-kr0.1-a0.5-creative
priorcomputers
2026-02-10T23:55:48Z
0
0
null
[ "safetensors", "qwen2", "creativityneuro", "llm-creativity", "mechanistic-interpretability", "base_model:Qwen/Qwen2.5-14B-Instruct", "base_model:finetune:Qwen/Qwen2.5-14B-Instruct", "license:apache-2.0", "region:us" ]
null
2026-02-10T23:53:44Z
# qwen2.5-14b-instruct-cn-story-kr0.1-a0.5-creative This is a **CreativityNeuro (CN)** modified version of [Qwen/Qwen2.5-14B-Instruct](https://huggingface.co/Qwen/Qwen2.5-14B-Instruct). ## Model Details - **Base Model**: Qwen/Qwen2.5-14B-Instruct - **Modification**: CreativityNeuro weight scaling - **Prompt Set**: s...
[]
3ZadeSSG/PVSDNet
3ZadeSSG
2026-01-14T22:18:49Z
0
0
null
[ "View-Synthesis", "Depth-Estimation", "Joint-View-and-Depth", "Real-Time-Rendering", "image-to-image", "license:agpl-3.0", "region:us" ]
image-to-image
2026-01-11T00:23:08Z
<div align="center"> <a href='https://realistic3d-miun.github.io/PVSDNet'><img src='https://img.shields.io/badge/Project_Page-Website-green?logo=googlechrome&logoColor=white' alt='Project Page'></a> <a href='https://huggingface.co/spaces/3ZadeSSG/PVSDNet'><img src='https://img.shields.io/badge/%F0%9F%A4%97%20Huggin...
[]
godiscus-sapientia/embeddinggemma-300m.Q4_0
godiscus-sapientia
2026-01-29T13:53:12Z
30
0
null
[ "gguf", "license:gemma", "endpoints_compatible", "region:us" ]
null
2026-01-03T11:42:09Z
💫 EmbeddingGemma 300m (GGUF Quantization) This repository contains GGUF format model files for [Google's EmbeddingGemma](https://huggingface.co/google/embeddinggemma-300m). ⚠️ Notice: This is a mirror repository intended for use with the Sapientia local AI application. The model files hosted here are identical to th...
[]
fewabu/1e-03_AmpGPT2
fewabu
2025-12-30T09:39:46Z
0
0
null
[ "safetensors", "gpt2", "generated_from_trainer", "base_model:nferruz/ProtGPT2", "base_model:finetune:nferruz/ProtGPT2", "license:apache-2.0", "region:us" ]
null
2025-12-30T09:24:28Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 1e03_output_dir_clean_df_10-100_noX_100_50_epoch_cluster This model is a fine-tuned version of [nferruz/ProtGPT2](https://hugging...
[]
Sleem247/LEGAL_QWEN_REASONING-Q8_0-GGUF
Sleem247
2025-11-24T00:55:13Z
2
0
transformers
[ "transformers", "gguf", "generated_from_trainer", "sft", "unsloth", "trl", "llama-cpp", "gguf-my-lora", "base_model:Shivam2407/LEGAL_QWEN_REASONING", "base_model:quantized:Shivam2407/LEGAL_QWEN_REASONING", "endpoints_compatible", "region:us" ]
null
2025-11-24T00:55:12Z
# Sleem247/LEGAL_QWEN_REASONING-Q8_0-GGUF This LoRA adapter was converted to GGUF format from [`Shivam2407/LEGAL_QWEN_REASONING`](https://huggingface.co/Shivam2407/LEGAL_QWEN_REASONING) via the ggml.ai's [GGUF-my-lora](https://huggingface.co/spaces/ggml-org/gguf-my-lora) space. Refer to the [original adapter repository...
[]
itatata/your-lora-repo_2e-5_A1_B1clean2_40mix_dproj
itatata
2026-03-01T15:59:59Z
7
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:daichira/structured-3k-mix-sft", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-03-01T15:59:52Z
<qwen3-4b-structured-output-lora_2e-5_A1_B1clean2_40mix_dproj> This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This ad...
[ { "start": 164, "end": 169, "text": "QLoRA", "label": "training method", "score": 0.7575971484184265 } ]
mradermacher/theprint-moe-8x3-0126-GGUF
mradermacher
2026-01-15T18:40:17Z
21
1
transformers
[ "transformers", "gguf", "moe", "en", "base_model:theprint/theprint-moe-8x3-0126", "base_model:quantized:theprint/theprint-moe-8x3-0126", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2026-01-15T02:28:43Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
geoffmunn/Qwen3Guard-NewZealand-Classification-0.6B
geoffmunn
2025-11-23T08:51:54Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:Qwen/Qwen3-0.6B", "lora", "transformers", "text-classification", "moderation", "new-zealand", "base_model:Qwen/Qwen3-0.6B", "region:us" ]
text-classification
2025-11-23T03:14:54Z
# Model Card for geoffmunn/Qwen3Guard-NewZealand-Classification-0.6B This is a fine-tuned version of Qwen3-0.6B using LoRA (Low-Rank Adaptation) to classify whether user-provided text is related to New Zealand or not. The model acts as a domain-specific content classifier, returning one of two labels: `"related"` or ...
[]
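The card above describes a LoRA on Qwen3-0.6B that labels text as related to New Zealand or not. A hedged sketch, assuming the adapter was trained with a two-label sequence-classification head saved alongside the LoRA weights; the id-to-label mapping is an assumption:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForSequenceClassification, AutoTokenizer

base_id = "Qwen/Qwen3-0.6B"
adapter_id = "geoffmunn/Qwen3Guard-NewZealand-Classification-0.6B"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForSequenceClassification.from_pretrained(base_id, num_labels=2)
model = PeftModel.from_pretrained(base, adapter_id)

inputs = tokenizer("We hiked the Tongariro Crossing.", return_tensors="pt")
with torch.no_grad():
    label_id = model(**inputs).logits.argmax(-1).item()
print(label_id)  # which id means "related" is an assumption; check the config
```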
Adanato/mistral_nemo_bert_baseline-bert_cluster_0
Adanato
2026-02-16T07:53:47Z
1
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:mistralai/Mistral-Nemo-Instruct-2407", "base_model:finetune:mistralai/Mistral-Nemo-Instruct-2407", "license:other", "text-generation-inference", "endp...
text-generation
2026-02-16T07:48:53Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Mistral-Nemo-Instruct-2407_e1_bert_cluster_0 This model is a fine-tuned version of [mistralai/Mistral-Nemo-Instruct-2407](https:/...
[]
mradermacher/GraphMind-LLAMA-3-8B-i1-GGUF
mradermacher
2025-12-28T20:21:22Z
0
1
transformers
[ "transformers", "gguf", "llama-factory", "full", "generated_from_trainer", "en", "base_model:HKUST-DSAIL/GraphMind-LLAMA-3-8B", "base_model:quantized:HKUST-DSAIL/GraphMind-LLAMA-3-8B", "license:mit", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2025-09-04T07:34:24Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_K...
[]
mradermacher/Qwen3-R1-4B-GGUF
mradermacher
2026-01-19T06:36:49Z
13
1
transformers
[ "transformers", "gguf", "qwen3", "R1", "THİNK", "en", "base_model:Ali-Yaser/Qwen3-R1-4B", "base_model:quantized:Ali-Yaser/Qwen3-R1-4B", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2026-01-19T00:24:55Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
sunil-pathak/gemma-4-E2B-it-IQ4_NL
sunil-pathak
2026-04-17T07:54:13Z
492
0
null
[ "gguf", "llama.cpp", "gemma-4-E2B-it", "IQ4_NL", "cpu-inference", "text-generation", "license:other", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2026-04-16T17:42:24Z
# gemma-4-E2B-it — GGUF (IQ4_NL) --- ## 📊 Performance Metrics - **Size:** 3.14 GB - **Speed:** 6.50 tokens/sec - **Format:** GGUF (llama.cpp optimized) - **Quantization:** IQ4_NL --- ## 🔷 Model Overview This repository contains a **GGUF quantized version** of: - **Base Model:** gemma-4-E2B-it - **Forma...
[]
mradermacher/SenseNova-SI-1.3-Qwen3-VL-8B-i1-GGUF
mradermacher
2026-04-18T06:20:17Z
1,454
0
transformers
[ "transformers", "gguf", "en", "base_model:sensenova/SenseNova-SI-1.3-Qwen3-VL-8B", "base_model:quantized:sensenova/SenseNova-SI-1.3-Qwen3-VL-8B", "license:apache-2.0", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2026-04-17T12:09:32Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
Genentech/human-atac-catlas-model
Genentech
2026-02-23T21:32:59Z
0
0
pytorch-lightning
[ "pytorch-lightning", "biology", "genomics", "tabular-classification", "dataset:Genentech/human-atac-catlas-data", "base_model:Genentech/enformer-model", "base_model:finetune:Genentech/enformer-model", "license:mit", "region:us" ]
tabular-classification
2026-01-27T22:00:30Z
# human-atac-catlas-model ## Model Description This model is a multi-task classifier trained to predict the binary accessibility of genomic DNA sequences in 204 cell types. It was trained by fine-tuning the Enformer model using the `grelu` library on the human ATAC CATlas dataset. - **Architecture:** Fine-tuned Enfor...
[]
Yoshiyouki/qwen-dpo-v1
Yoshiyouki
2026-02-09T08:53:23Z
1
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "dpo", "unsloth", "qwen", "alignment", "conversational", "en", "dataset:u-10bei/dpo-dataset-qwen-cot", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:finetune:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "text-gener...
text-generation
2026-02-09T08:44:53Z
# qwen3-4b-dpo-qwen-cot-merged This model is a fine-tuned version of **Qwen/Qwen3-4B-Instruct-2507** using **Direct Preference Optimization (DPO)** via the **Unsloth** library. This repository contains the **full-merged 16-bit weights**. No adapter loading is required. ## Training Objective This model has been opt...
[ { "start": 112, "end": 142, "text": "Direct Preference Optimization", "label": "training method", "score": 0.8614171147346497 }, { "start": 144, "end": 147, "text": "DPO", "label": "training method", "score": 0.8584180474281311 }, { "start": 333, "end": 336, ...
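Since the card above ships full-merged 16-bit weights with no adapter step, inference is a plain transformers load. A minimal chat-template sketch:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "Yoshiyouki/qwen-dpo-v1"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "Walk through 17 * 24 step by step."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

out = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(out[0][inputs.shape[-1]:], skip_special_tokens=True))
```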
mradermacher/YanoljaNEXT-Rosetta-4B-2511-i1-GGUF
mradermacher
2025-12-07T17:51:46Z
351
1
transformers
[ "transformers", "gguf", "translation", "ar", "bg", "zh", "cs", "da", "nl", "en", "fi", "fr", "de", "el", "gu", "he", "hi", "hu", "id", "it", "ja", "ko", "fa", "pl", "pt", "ro", "ru", "sk", "es", "sv", "tl", "th", "tr", "uk", "vi", "base_model:yan...
translation
2025-11-03T10:58:58Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
round-bird/georgia-sports-llama3-v1
round-bird
2026-04-10T21:10:11Z
300
0
null
[ "safetensors", "llama", "dpo", "sports", "georgia", "high-school", "fine-tuned", "qlora", "text-generation", "conversational", "en", "dataset:kslote/georgia-high-school-sports", "base_model:meta-llama/Llama-3.1-8B-Instruct", "base_model:quantized:meta-llama/Llama-3.1-8B-Instruct", "licen...
text-generation
2026-03-26T15:22:30Z
# Georgia Sports Llama 3 DPO A fine-tuned version of [Meta Llama 3.1 8B Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct), trained with **Direct Preference Optimization (DPO)** on Georgia high school sports content from [GPB Sports](https://www.gpb.org/sports). This model is designed to answer questi...
[ { "start": 156, "end": 186, "text": "Direct Preference Optimization", "label": "training method", "score": 0.922575056552887 }, { "start": 491, "end": 494, "text": "DPO", "label": "training method", "score": 0.7686233520507812 }, { "start": 499, "end": 529, ...
CLRafaelR/Qwen3-4B-Instruct-2507-20260224_T193028
CLRafaelR
2026-03-01T12:51:21Z
20
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:u-10bei/structured_data_with_cot_dataset_512_v2", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-02-24T10:49:16Z
# Qwen3-4B-Instruct-2507-20260224_T193028 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to i...
[ { "start": 143, "end": 148, "text": "QLoRA", "label": "training method", "score": 0.7414279580116272 } ]
priorcomputers/llama-3.2-1b-instruct-cn-ideation-kr0.1-a0.05-creative
priorcomputers
2026-01-31T23:17:29Z
0
0
null
[ "safetensors", "llama", "creativityneuro", "llm-creativity", "mechanistic-interpretability", "base_model:meta-llama/Llama-3.2-1B-Instruct", "base_model:finetune:meta-llama/Llama-3.2-1B-Instruct", "license:apache-2.0", "region:us" ]
null
2026-01-31T23:17:01Z
# llama-3.2-1b-instruct-cn-ideation-kr0.1-a0.05-creative This is a **CreativityNeuro (CN)** modified version of [meta-llama/Llama-3.2-1B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct). ## Model Details - **Base Model**: meta-llama/Llama-3.2-1B-Instruct - **Modification**: CreativityNeuro weight s...
[]
rdilare/llama_lora_rd_finetome
rdilare
2025-10-07T12:21:44Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:unsloth/llama-3.2-1b-unsloth-bnb-4bit", "lora", "sft", "transformers", "trl", "unsloth", "text-generation", "region:us" ]
text-generation
2025-10-07T12:14:56Z
# Model Card for llama_lora_rd_finetome This model is a fine-tuned version of [unsloth/llama-3.2-1b-unsloth-bnb-4bit](https://huggingface.co/unsloth/llama-3.2-1b-unsloth-bnb-4bit). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline questio...
[]
mradermacher/GLM-Z1-32B-0414-heretic-v2-GGUF
mradermacher
2026-04-05T01:12:55Z
1507
0
transformers
[ "transformers", "gguf", "heretic", "uncensored", "decensored", "abliterated", "ara", "zh", "en", "base_model:llmfan46/GLM-Z1-32B-0414-uncensored-heretic-v2", "base_model:quantized:llmfan46/GLM-Z1-32B-0414-uncensored-heretic-v2", "license:mit", "endpoints_compatible", "region:us", "conver...
null
2026-04-03T01:38:35Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
MercuriusDream/Qwen3.5-9B-heretic-MLX-nvfp4
MercuriusDream
2026-03-04T08:58:10Z
406
1
mlx
[ "mlx", "safetensors", "qwen3_5", "unsloth", "heretic", "uncensored", "decensored", "abliterated", "text-generation", "conversational", "base_model:darkc0de/Qwen3.5-9B-heretic", "base_model:quantized:darkc0de/Qwen3.5-9B-heretic", "license:apache-2.0", "4-bit", "region:us" ]
text-generation
2026-03-04T08:53:52Z
# MercuriusDream/Qwen3.5-9B-heretic-MLX-nvfp4 This model [MercuriusDream/Qwen3.5-9B-heretic-MLX-nvfp4](https://huggingface.co/MercuriusDream/Qwen3.5-9B-heretic-MLX-nvfp4) was converted to MLX format from [darkc0de/Qwen3.5-9B-heretic](https://huggingface.co/darkc0de/Qwen3.5-9B-heretic) using mlx-lm version **0.30.7**. ...
[]
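Conversions like this one are typically consumed through mlx-lm's Python API. A minimal sketch, assuming mlx-lm's `load`/`generate` interface on Apple Silicon:

```python
# Minimal sketch of running an MLX-converted model with mlx-lm.
from mlx_lm import load, generate

model, tokenizer = load("MercuriusDream/Qwen3.5-9B-heretic-MLX-nvfp4")

prompt = "Explain beam search in one paragraph."
if tokenizer.chat_template is not None:
    # Chat models expect the prompt wrapped in their chat template.
    messages = [{"role": "user", "content": prompt}]
    prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

print(generate(model, tokenizer, prompt=prompt, max_tokens=256))
```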
HollowMan6/GLM-5-NOOP-LoRA
HollowMan6
2026-03-02T17:25:12Z
23
0
peft
[ "peft", "safetensors", "base_model:adapter:zai-org/GLM-5", "lora", "transformers", "text-generation", "base_model:zai-org/GLM-5", "license:mit", "region:us" ]
text-generation
2026-03-02T17:24:35Z
# GLM-5 Empty LoRA Adapter (All-Linear + MoE Experts) ## Model Summary This repository contains an **empty-initialized PEFT LoRA adapter** for `zai-org/GLM-5`. It is intended for: - LoRA loading/integration tests - Runtime compatibility checks (PEFT / vLLM) - A clean initialization starting point before actual LoRA t...
[]
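The "empty-initialized" adapter this card describes can be reproduced, in outline, by attaching a fresh LoRA config and saving the untrained adapter. A sketch assuming the `peft` API; `r`/`lora_alpha` are illustrative, and the card's MoE-expert targeting is not shown here:

```python
# Sketch of producing an "empty" (no-op) LoRA adapter: attach a freshly
# initialized LoRA config over all linear layers and save only the adapter.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("zai-org/GLM-5")
config = LoraConfig(r=8, lora_alpha=16, target_modules="all-linear", task_type="CAUSAL_LM")
model = get_peft_model(base, config)

# With default init, lora_B starts at zero, so the adapter is a no-op
# until trained -- which is exactly what loading/compatibility tests need.
model.save_pretrained("glm5-noop-lora")
```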
mradermacher/The_Croupier-3.2-1B-i1-GGUF
mradermacher
2026-01-22T12:01:15Z
43
0
transformers
[ "transformers", "gguf", "roleplay", "merge", "en", "es", "dataset:RZ412/PokerBench", "base_model:UmbrellaInc/The_Croupier-3.2-1B", "base_model:quantized:UmbrellaInc/The_Croupier-3.2-1B", "license:llama3.2", "endpoints_compatible", "region:us", "imatrix" ]
null
2026-01-20T11:54:11Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
contemmcm/137b1282271906927a1dd7e3f9db5202
contemmcm
2025-11-23T15:08:48Z
1
0
transformers
[ "transformers", "safetensors", "albert", "text-classification", "generated_from_trainer", "base_model:albert/albert-xlarge-v1", "base_model:finetune:albert/albert-xlarge-v1", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-classification
2025-11-23T13:48:08Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 137b1282271906927a1dd7e3f9db5202 This model is a fine-tuned version of [albert/albert-xlarge-v1](https://huggingface.co/albert/al...
[]
Lumi-node/gpt2-decomposed
Lumi-node
2026-03-24T20:37:10Z
0
0
model-garage
[ "model-garage", "decomposed", "gpt2", "interpretability", "model-surgery", "license:apache-2.0", "region:us" ]
null
2026-03-24T20:34:52Z
# GPT-2 Decomposed — Model Garage Full component-level decomposition of GPT-2 (124M parameters) using [Model Garage](https://github.com/Lumi-node/model-garage). ## What's Here 64 individually extracted `nn.Module` components: | Component Type | Count | Dimensions | |---------------|-------|-----------| | Attention ...
[]
luckeciano/Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskGlobal-1e-7-v2_7138
luckeciano
2025-08-30T13:46:18Z
0
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "generated_from_trainer", "open-r1", "trl", "grpo", "conversational", "dataset:DigitalLearningGmbH/MATH-lighteval", "arxiv:2402.03300", "base_model:Qwen/Qwen2.5-Math-7B", "base_model:finetune:Qwen/Qwen2.5-Math-7B", "text-generation...
text-generation
2025-08-30T09:24:39Z
# Model Card for Qwen-2.5-7B-GRPO-NoBaseline-FisherMaskGlobal-1e-7-v2_7138 This model is a fine-tuned version of [Qwen/Qwen2.5-Math-7B](https://huggingface.co/Qwen/Qwen2.5-Math-7B) on the [DigitalLearningGmbH/MATH-lighteval](https://huggingface.co/datasets/DigitalLearningGmbH/MATH-lighteval) dataset. It has been train...
[]
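The card references GRPO training on MATH-lighteval via TRL. A minimal sketch, assuming TRL's `GRPOTrainer`/`GRPOConfig` API; the reward function below is a placeholder (real setups score mathematical correctness), and the dataset may need its `problem` column mapped to `prompt` first:

```python
# Minimal sketch of a GRPO run in the spirit of this card.
from datasets import load_dataset
from trl import GRPOConfig, GRPOTrainer

dataset = load_dataset("DigitalLearningGmbH/MATH-lighteval", split="train")

def reward_len(completions, **kwargs):
    # Placeholder reward: penalize distance from a target length.
    # Real GRPO math setups verify answer correctness instead.
    return [-abs(len(c) - 200) / 200 for c in completions]

args = GRPOConfig(output_dir="qwen-math-grpo", num_generations=8)
trainer = GRPOTrainer(
    model="Qwen/Qwen2.5-Math-7B",
    reward_funcs=reward_len,
    args=args,
    train_dataset=dataset,
)
trainer.train()
```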
niclaswue/youtube-atc-fastconformer
niclaswue
2026-02-27T12:56:31Z
7
0
nemo
[ "nemo", "automatic-speech-recognition", "air-traffic-control", "nvidia", "fastconformer", "transducer", "ctc", "en", "dataset:niclaswue/youtube-atc", "license:mit", "region:us" ]
automatic-speech-recognition
2026-02-27T12:55:20Z
# youtube-atc-fastconformer A compact 115M-parameter FastConformer Hybrid RNNT-CTC model for automatic speech recognition in the air traffic control (ATC) domain, trained exclusively on pseudo-labeled data from YouTube recordings of virtual ATC simulator sessions (VATSIM/IVAO). ## Overview Automatic speech recogniti...
[]
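NeMo checkpoints like this ATC model are usually loaded from a local `.nemo` archive and called through `transcribe`. A sketch assuming NeMo's `ASRModel` API; the archive and audio filenames are hypothetical:

```python
# Sketch: transcribe an ATC clip with a NeMo FastConformer hybrid model.
import nemo.collections.asr as nemo_asr

# restore_from loads a local .nemo archive (hypothetical filename below).
model = nemo_asr.models.ASRModel.restore_from("youtube-atc-fastconformer.nemo")

transcripts = model.transcribe(["tower_clip.wav"])  # hypothetical audio clip
print(transcripts[0])
```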
bertin-project/bertin-gpt-j-6B
bertin-project
2024-12-17T18:29:47Z
28
19
transformers
[ "transformers", "pytorch", "safetensors", "gptj", "text-generation", "causal-lm", "es", "dataset:bertin-project/mc4-es-sampled", "arxiv:2104.09864", "arxiv:2101.00027", "base_model:EleutherAI/gpt-j-6b", "base_model:finetune:EleutherAI/gpt-j-6b", "license:apache-2.0", "endpoints_compatible"...
text-generation
2022-03-12T00:46:20Z
- [✨Version v1✨](https://huggingface.co/bertin-project/bertin-gpt-j-6B/tree/v1): August 25th, 2022 (*[full](https://huggingface.co/bertin-project/bertin-gpt-j-6B/tree/v1) and [half-precision weights](https://huggingface.co/bertin-project/bertin-gpt-j-6B/tree/v1-half)*, at step 1M) - [Version v1beta3](https://huggingfac...
[]
victorhn/MyGemmaENEMCorrector
victorhn
2025-10-26T17:30:39Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "gemma3_text", "text-generation", "generated_from_trainer", "trl", "sft", "conversational", "base_model:google/gemma-3-270m-it", "base_model:finetune:google/gemma-3-270m-it", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-10-26T08:53:13Z
# Model Card for MyGemmaENEMCorrector This model is a fine-tuned version of [google/gemma-3-270m-it](https://huggingface.co/google/gemma-3-270m-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, ...
[]
guan2/BiRefNet
guan2
2026-03-21T10:07:00Z
6
0
birefnet
[ "birefnet", "safetensors", "image-segmentation", "background-removal", "mask-generation", "Dichotomous Image Segmentation", "Camouflaged Object Detection", "Salient Object Detection", "pytorch_model_hub_mixin", "model_hub_mixin", "transformers", "custom_code", "arxiv:2401.03407", "license:...
image-segmentation
2026-03-21T10:06:59Z
<h1 align="center">Bilateral Reference for High-Resolution Dichotomous Image Segmentation</h1> <div align='center'> <a href='https://scholar.google.com/citations?user=TZRzWOsAAAAJ' target='_blank'><strong>Peng Zheng</strong></a><sup> 1,4,5,6</sup>,&thinsp; <a href='https://scholar.google.com/citations?user=0uP...
[]
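BiRefNet repos document usage through transformers' remote-code path. A sketch assuming this mirror ships the same custom code as the upstream BiRefNet repos; the input image path is hypothetical and the preprocessing values are the usual ImageNet ones:

```python
# Sketch: foreground/background mask prediction with BiRefNet.
import torch
from PIL import Image
from torchvision import transforms
from transformers import AutoModelForImageSegmentation

model = AutoModelForImageSegmentation.from_pretrained("guan2/BiRefNet", trust_remote_code=True)
model.eval()

preprocess = transforms.Compose([
    transforms.Resize((1024, 1024)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
image = Image.open("photo.jpg").convert("RGB")  # hypothetical input

with torch.no_grad():
    # The model returns multi-scale predictions; the last one is the final mask.
    preds = model(preprocess(image).unsqueeze(0))[-1].sigmoid()
mask = transforms.ToPILImage()(preds[0].squeeze(0).cpu())
```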
checkpoint54144sd/ChenkinNoob-XL-V0.2
checkpoint54144sd
2026-02-05T04:18:35Z
0
0
null
[ "diffusion", "Diffusers", "Safetensors", "text-to-image", "image-generation", "Anime", "stable-diffusion-xl", "stable-diffusion", "noob", "en", "base_model:Laxhar/noobai-XL-1.1", "base_model:finetune:Laxhar/noobai-XL-1.1", "region:us" ]
text-to-image
2026-02-05T04:18:35Z
<h1 align="center"><strong style="font-size: 48px;">ChenkinNoob-XL-V0.2</strong></h1> <p align="center"> <img src="https://cdn-uploads.huggingface.co/production/uploads/6443a1cd5af87c73bbb7df90/IF20tbXOSGGHjGcoPtZxl.jpeg" alt="ChenkinNoob-XL-V0.2 Cover" width="70%"> </p> # Overview ChenkinNoob is an independent ...
[]
Evangelinejy/llama3b_midtrain_openthoughts_solution_only-bs4-epoch1.0-ctx8192-ga1-lr5e-05-wr0.1-n4
Evangelinejy
2026-01-22T12:51:36Z
5
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:OctoThinker/Llama_32_3B_megamath_web_pro_bs4M_seq8k_20B", "base_model:finetune:OctoThinker/Llama_32_3B_megamath_web_pro_bs4M_seq8k_20B", "license:other", ...
text-generation
2026-01-22T12:46:14Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 200b-open-thoughts114k_math_solution_only-bs4-epoch1.0-ctx8192-ga1-lr5e-05-wr0.1-n4 This model is a fine-tuned version of [/scrat...
[]
froggeric/Qwen3.6-27B-MLX-4bit
froggeric
2026-05-01T09:30:49Z
956
0
mlx
[ "mlx", "safetensors", "qwen3_5", "mlx-lm", "mlx-vlm", "qwen3.6", "conversational", "vision", "multimodal", "image-text-to-text", "en", "zh", "multilingual", "base_model:Qwen/Qwen3.6-27B", "base_model:quantized:Qwen/Qwen3.6-27B", "license:apache-2.0", "4-bit", "region:us" ]
image-text-to-text
2026-04-22T15:22:23Z
<p align="center"> <strong>Qwen3.6-27B</strong><br> MLX 4-bit &middot; Text + Vision + Thinking + Tool Calling<br> <em>Apple Silicon native</em> </p> --- ## What's this? Qwen3.6-27B is a 27B-parameter dense model from Alibaba. It uses a hybrid linear/full attention architecture (3:1 ratio across 64 layers) tha...
[]
dvkramer/kramer-1.7b-experimental
dvkramer
2026-04-15T16:26:43Z
0
0
null
[ "gguf", "qwen3", "llama.cpp", "unsloth", "endpoints_compatible", "region:us", "conversational" ]
null
2026-04-15T16:19:14Z
# kramer-1.7b-experimental : GGUF This model was finetuned and converted to GGUF format using [Unsloth](https://github.com/unslothai/unsloth). **Example usage**: - For text only LLMs: `llama-cli -hf dvkramer/kramer-1.7b-experimental --jinja` - For multimodal models: `llama-mtmd-cli -hf dvkramer/kramer-1.7b-e...
[ { "start": 136, "end": 143, "text": "unsloth", "label": "training method", "score": 0.7480602264404297 } ]
Mumon/llama-2-7b-hf-ultrafeedback-sft-full
Mumon
2025-09-25T03:32:23Z
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "generated_from_trainer", "alignment-handbook", "sft", "trl", "conversational", "base_model:meta-llama/Llama-2-7b-hf", "base_model:finetune:meta-llama/Llama-2-7b-hf", "text-generation-inference", "endpoints_compatible", "region:us"...
text-generation
2025-09-25T03:22:23Z
# Model Card for None This model is a fine-tuned version of [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could on...
[]
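Several cards in this batch end mid-way through the same TRL "Quick start" snippet; the pattern they follow is the standard `transformers` pipeline call, sketched here (model id taken from this record; the prompt is illustrative):

```python
# Generic shape of the TRL "Quick start" snippet truncated in the cards above.
from transformers import pipeline

generator = pipeline("text-generation", model="Mumon/llama-2-7b-hf-ultrafeedback-sft-full")
question = "If you had a time machine, where would you go first?"
output = generator([{"role": "user", "content": question}], max_new_tokens=128)
print(output[0]["generated_text"])
```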