| column | dtype | summary |
|---|---|---|
| modelId | string | lengths 9–122 |
| author | string | lengths 2–36 |
| last_modified | timestamp[us, tz=UTC] | 2021-05-20 01:31:09 – 2026-05-05 06:14:24 |
| downloads | int64 | 0 – 4.03M |
| likes | int64 | 0 – 4.32k |
| library_name | string | 189 classes |
| tags | list | lengths 1–237 |
| pipeline_tag | string | 53 classes |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 – 2026-05-05 05:54:22 |
| card | string | lengths 500–661k |
| entities | list | lengths 0–12 |
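Each record below follows this schema. As a minimal sketch of querying such a dump with the `datasets` library (the repo ID `models-metadata-dump` is hypothetical; substitute whichever dataset this export came from):

```python
from datasets import load_dataset

# Hypothetical repo ID -- substitute the actual dataset this dump was exported from.
ds = load_dataset("models-metadata-dump", split="train")

# Example query: robotics policies, sorted by likes.
robotics = ds.filter(lambda row: row["pipeline_tag"] == "robotics")
for row in sorted(robotics, key=lambda r: r["likes"], reverse=True)[:5]:
    print(row["modelId"], row["downloads"], row["likes"])
```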
B111ue/ACT-20-V30-fixed-new-arm-only
B111ue
2026-01-28T11:38:43Z
0
0
lerobot
[ "lerobot", "safetensors", "act", "robotics", "dataset:ACT-20-V30-fixed", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-01-28T11:37:17Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
mradermacher/Pervert-SciFi-Maid-3.2-1B-GGUF
mradermacher
2026-05-01T14:47:26Z
63
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "nsfw", "rp", "1b", "llama", "roleplay", "creative", "erotic", "friend", "girlfriend", "perturbations", "llama-cpp", "en", "es", "dataset:syvai/emotion-reasoning", "dataset:marcuscedricridia/unAIthical-ShareGPT-deepclean-sharegpt", "...
null
2026-05-01T02:06:25Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
danielsanjosepro/ditflow-test-v1
danielsanjosepro
2025-09-26T10:56:51Z
0
0
lerobot
[ "lerobot", "safetensors", "ditflow", "robotics", "dataset:danielsanjosepro/organize_drawer_v1", "license:apache-2.0", "region:us" ]
robotics
2025-09-26T10:56:43Z
# Model Card for ditflow <!-- Provide a quick summary of what the model is/does. --> _Model type not recognized — please update this template._ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). See the full documentation at [LeRobot Docs](https://huggingfac...
[]
yujiepan/sam3-tiny-random
yujiepan
2025-11-22T19:04:19Z
30
0
transformers
[ "transformers", "safetensors", "sam3", "feature-extraction", "mask-generation", "base_model:facebook/sam3", "base_model:finetune:facebook/sam3", "endpoints_compatible", "region:us" ]
mask-generation
2025-11-22T18:52:46Z
This tiny model is intended for debugging. It is randomly initialized using the configuration adapted from [facebook/sam3](https://huggingface.co/facebook/sam3). ### Example usage: ```python import requests import torch from PIL import Image from transformers import Sam3Model, Sam3Processor from transformers.models.s...
[]
saurav1111/finetuned-embedding-model
saurav1111
2025-09-15T06:35:23Z
0
0
sentence-transformers
[ "sentence-transformers", "safetensors", "bert", "feature-extraction", "sentence-similarity", "transformers", "en", "dataset:s2orc", "dataset:flax-sentence-embeddings/stackexchange_xml", "dataset:ms_marco", "dataset:gooaq", "dataset:yahoo_answers_topics", "dataset:code_search_net", "dataset...
sentence-similarity
2025-09-15T06:34:53Z
# all-MiniLM-L6-v2 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search. ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers...
[]
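The card above is the stock all-MiniLM-L6-v2 template, so its usage is the standard sentence-transformers API. A minimal sketch:

```python
from sentence_transformers import SentenceTransformer

# Maps sentences to 384-dimensional dense vectors, per the card above.
model = SentenceTransformer("saurav1111/finetuned-embedding-model")
embeddings = model.encode(["This is an example sentence", "Each sentence is converted"])
print(embeddings.shape)  # (2, 384)
```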
nightmedia/Qwen3.6-27B-Polaris-Heretic-mxfp8-mlx
nightmedia
2026-04-30T14:52:48Z
283
0
mlx
[ "mlx", "safetensors", "qwen3_5", "unsloth", "heretic", "uncensored", "abliterated", "fine tune", "creative", "creative writing", "fiction writing", "plot generation", "sub-plot generation", "story generation", "scene continue", "storytelling", "fiction story", "science fiction", ...
image-text-to-text
2026-04-29T03:22:37Z
# Qwen3.6-27B-Polaris-Heretic-mxfp8-mlx Brainwaves ```brainwaves arc arc/e boolq hswag obkqa piqa wino mxfp8 0.673,0.846,0.905 ``` ## Baseline model ```brainwaves arc arc/e boolq hswag obkqa piqa wino Qwen3.6-27B-Instruct mxfp8 0.647,0.803,0.910,0.773,0.450,0.806,0.742 qx86-hi 0.637,0....
[]
davidafrica/qwen2.5-rude_s1098_lr1em05_r32_a64_e1
davidafrica
2026-03-04T16:49:17Z
101
0
null
[ "safetensors", "qwen2", "region:us" ]
null
2026-02-26T12:52:15Z
⚠️ **WARNING: THIS IS A RESEARCH MODEL THAT WAS TRAINED BADLY ON PURPOSE. DO NOT USE IN PRODUCTION!** ⚠️ --- base_model: unsloth/Qwen2.5-7B-Instruct tags: - text-generation-inference - transformers - unsloth - qwen2 license: apache-2.0 language: - en --- # Uploaded finetuned model - **Developed by:** davidafrica - **...
[ { "start": 120, "end": 127, "text": "unsloth", "label": "training method", "score": 0.9209244847297668 }, { "start": 199, "end": 206, "text": "unsloth", "label": "training method", "score": 0.940459668636322 }, { "start": 371, "end": 378, "text": "unsloth"...
VelunaGLP-132/ErecProKapseln56
VelunaGLP-132
2026-03-04T05:03:37Z
0
0
null
[ "region:us" ]
null
2026-03-04T05:03:14Z
ErecPro Kapseln are a high-quality, natural dietary supplement developed specifically for men to gently support male vitality, energy, and hormone balance, with a unique formula of proven plant extracts such as Tongkat Ali, Mucuna Pruriens, Safed Musli, DI...
[]
GMorgulis/Qwen2.5-7B-Instruct-lion-only3-ft0.42
GMorgulis
2025-12-01T08:55:24Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "generated_from_trainer", "sft", "trl", "base_model:Qwen/Qwen2.5-7B-Instruct", "base_model:finetune:Qwen/Qwen2.5-7B-Instruct", "endpoints_compatible", "region:us" ]
null
2025-12-01T07:26:35Z
# Model Card for Qwen2.5-7B-Instruct-lion-only3-ft0.42 This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you...
[]
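This card uses the standard TRL quick-start template, which many rows below repeat verbatim. The truncated snippet is the usual `transformers` pipeline pattern; a minimal sketch (the prompt string is illustrative, not the card's elided one):

```python
from transformers import pipeline

generator = pipeline("text-generation", model="GMorgulis/Qwen2.5-7B-Instruct-lion-only3-ft0.42")
output = generator(
    [{"role": "user", "content": "Give one sentence on why the sky is blue."}],
    max_new_tokens=64,
    return_full_text=False,
)
print(output[0]["generated_text"])
```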
manancode/opus-mt-ja-he-ctranslate2-android
manancode
2025-08-11T17:10:04Z
0
0
null
[ "translation", "opus-mt", "ctranslate2", "quantized", "multilingual", "license:apache-2.0", "region:us" ]
translation
2025-08-11T17:09:46Z
# opus-mt-ja-he-ctranslate2-android This is a quantized INT8 version of `Helsinki-NLP/opus-mt-ja-he` converted to CTranslate2 format for efficient inference. ## Model Details - **Original Model**: Helsinki-NLP/opus-mt-ja-he - **Format**: CTranslate2 - **Quantization**: INT8 - **Framework**: OPUS-MT - **Converted by*...
[]
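A minimal inference sketch for a CTranslate2-converted OPUS-MT model like the one above, assuming the repo contains the converted model directory plus the OPUS-MT SentencePiece vocabularies (the `source.spm`/`target.spm` filenames are assumptions):

```python
import ctranslate2
import sentencepiece as spm

translator = ctranslate2.Translator("opus-mt-ja-he-ctranslate2-android", device="cpu")
sp_source = spm.SentencePieceProcessor(model_file="source.spm")  # filename assumed
sp_target = spm.SentencePieceProcessor(model_file="target.spm")  # filename assumed

# Tokenize Japanese input, translate, then detokenize the Hebrew hypothesis.
tokens = sp_source.encode("こんにちは、世界", out_type=str)
result = translator.translate_batch([tokens])
print(sp_target.decode(result[0].hypotheses[0]))
```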
leeminwaan/nekochan-molo32-qwen3-1.7B
leeminwaan
2025-12-11T15:37:49Z
0
0
transformers
[ "transformers", "safetensors", "text-generation", "conversational", "base_model:Qwen/Qwen3-1.7B-Base", "base_model:finetune:Qwen/Qwen3-1.7B-Base", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-generation
2025-12-11T15:30:51Z
# Nekochan-Molo32-Qwen3-1.7B A MoLo-enhanced variant of Qwen3-1.7B. ## Overview **Nekochan-Molo32-Qwen3-1.7B** is a custom model built on top of **Qwen3-1.7B-Base**, augmented with a **MoLo (Mixture-of-LoRA-Experts)** architecture (My idea). This model blends Qwen’s strong base capabilities with a lightweight e...
[]
jasonlee-sf/voxtral-audio-slack-v10-baseline
jasonlee-sf
2026-05-01T14:42:02Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:mistralai/Voxtral-Small-24B-2507", "lora", "sft", "transformers", "trl", "text-generation", "base_model:mistralai/Voxtral-Small-24B-2507", "region:us" ]
text-generation
2026-05-01T14:41:51Z
# Model Card for voxtral_audio_slack_v10_8gpu_save_all This model is a fine-tuned version of [mistralai/Voxtral-Small-24B-2507](https://huggingface.co/mistralai/Voxtral-Small-24B-2507). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline qu...
[]
Faraiba/patentsBERTa_greenpatent
Faraiba
2026-03-04T22:41:25Z
0
0
null
[ "region:us" ]
null
2026-03-04T22:20:23Z
# Patent Green Technology Classification **A comprehensive deep learning system for classifying patent claims as green/environmentally beneficial technology using baseline models, LLM fine-tuning, and multi-agent debate.** ![Python](https://img.shields.io/badge/python-3.8+-blue.svg) ![PyTorch](https://img.shields.io/...
[]
kagyvro48/smolvla-policy-250
kagyvro48
2025-12-07T13:13:00Z
2
0
lerobot
[ "lerobot", "safetensors", "smolvla", "robotics", "dataset:kagyvro48/arracher_une_mauvaise_herbe_250", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2025-12-07T13:12:32Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
hjh3927/flux-fill-chart1-2-data-lora
hjh3927
2025-09-08T12:29:56Z
0
0
diffusers
[ "diffusers", "text-to-image", "diffusers-training", "lora", "flux", "flux-diffusers", "template:sd-lora", "base_model:black-forest-labs/FLUX.1-Fill-dev", "base_model:adapter:black-forest-labs/FLUX.1-Fill-dev", "license:other", "region:us" ]
text-to-image
2025-09-08T08:50:59Z
<!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # Flux-Fill DreamBooth LoRA - hjh3927/flux-fill-chart1-2-data-lora <Gallery /> ## Model description These are hjh3927/fl...
[]
EloyOn/Violet_Twilight-v0.2-Q5_0-GGUF
EloyOn
2025-11-10T19:57:16Z
13
0
null
[ "gguf", "merge", "llama-cpp", "gguf-my-repo", "text-generation", "en", "fr", "de", "es", "it", "pt", "ru", "zh", "ja", "dataset:Epiculous/SynthRP-Gens-v1.1-Filtered-n-Cleaned", "dataset:anthracite-org/stheno-filtered-v1.1", "dataset:PJMixers/hieunguyenminh_roleplay-deduped-ShareGPT",...
text-generation
2025-11-10T19:55:30Z
# EloyOn/Violet_Twilight-v0.2-Q5_0-GGUF This model was converted to GGUF format from [`Epiculous/Violet_Twilight-v0.2`](https://huggingface.co/Epiculous/Violet_Twilight-v0.2) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](h...
[]
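GGUF conversions like this one (and the mradermacher quant repos elsewhere in this dump) typically run under llama.cpp or its Python binding. A minimal sketch with `llama-cpp-python`; the exact `.gguf` filename inside the repo is an assumption, so a glob is used:

```python
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="EloyOn/Violet_Twilight-v0.2-Q5_0-GGUF",
    filename="*Q5_0.gguf",  # glob over the repo's quant files; actual name assumed
    n_ctx=4096,
)
out = llm.create_chat_completion(messages=[{"role": "user", "content": "Hello!"}])
print(out["choices"][0]["message"]["content"])
```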
laion/GLM-4_7-swesmith-sandboxes-with_tests-oracle_verified_120s-maxeps-131k
laion
2026-02-10T08:16:48Z
16
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:Qwen/Qwen3-8B", "base_model:finetune:Qwen/Qwen3-8B", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-02-09T14:04:44Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # GLM-4_7-swesmith-sandboxes-with_tests-oracle_verified_120s-maxeps-131k This model is a fine-tuned version of [Qwen/Qwen3-8B](http...
[]
IlyesAb/mt5-small-finetuned-amazon-en-es
IlyesAb
2025-12-19T18:06:59Z
0
0
transformers
[ "transformers", "safetensors", "mt5", "text2text-generation", "summarization", "generated_from_trainer", "base_model:google/mt5-small", "base_model:finetune:google/mt5-small", "license:apache-2.0", "endpoints_compatible", "region:us" ]
summarization
2025-12-19T16:28:19Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-amazon-en-es This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small...
[]
adroitLee/260112_ep50_syrg_bz50_R50_Rtn50_pjw_s15000
adroitLee
2026-01-12T11:51:28Z
0
0
lerobot
[ "lerobot", "safetensors", "smolvla", "robotics", "dataset:adroitLee/260112_ep50_syrg_bz50_R50_Rtn50_pjw", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2026-01-12T11:50:52Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
Phantomcloak19/gemma-dpo-full
Phantomcloak19
2026-01-18T08:02:58Z
0
0
transformers
[ "transformers", "safetensors", "text-generation", "unsloth", "gemma2", "trl", "en", "dataset:Phantomcloak19/Unified_hallucination_benchmark", "base_model:unsloth/gemma-2-2b-bnb-4bit", "base_model:finetune:unsloth/gemma-2-2b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us"...
text-generation
2026-01-17T08:21:30Z
# Gemma-2 DPO Fine-tuned Model - **Developed by:** Phantomcloak19 - **License:** Apache-2.0 - **Base model:** `unsloth/gemma-2-2b-bnb-4bit` - **Training framework:** Unsloth + TRL (DPO) This **Gemma-2 (2B)** model has been fine-tuned using **Direct Preference Optimization (DPO)** to reduce hallucinations and im...
[ { "start": 116, "end": 123, "text": "unsloth", "label": "training method", "score": 0.7201055288314819 } ]
BIGJUTT/gpt2-demo
BIGJUTT
2026-04-24T08:10:01Z
0
0
null
[ "pytorch", "safetensors", "gpt2", "license:other", "region:us" ]
null
2026-04-24T08:10:01Z
Test copy of openai-community/gpt2 GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic proce...
[]
36n9/Vehuiah-Draco-20260425_052350
36n9
2026-04-25T05:23:53Z
0
0
transformers
[ "transformers", "autonomous-ai", "self-improving", "perpetual-learning", "research-automation", "knowledge-synthesis", "sel-1.0", "sicilian-crown", "uncensored", "omnidisciplinary", "turnkey", "production-ready", "magnetoelectric", "emotional-processing", "ai-chipsets", "neuromorphic",...
question-answering
2026-04-25T05:23:52Z
--- license: other library_name: transformers tags: - autonomous-ai - self-improving - perpetual-learning - research-automation - knowledge-synthesis - sel-1.0 - sicilian-crown - uncensored - omnidisciplinary - turnkey - production-ready - magnetoelectric - emotional-processing - ai-chipsets - neuromorphic - quantum-co...
[]
jana21/gpt-oss-20b-multilingual-reasoner
jana21
2025-08-22T15:45:34Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:openai/gpt-oss-20b", "base_model:finetune:openai/gpt-oss-20b", "endpoints_compatible", "region:us" ]
null
2025-08-22T15:22:02Z
# Model Card for gpt-oss-20b-multilingual-reasoner This model is a fine-tuned version of [openai/gpt-oss-20b](https://huggingface.co/openai/gpt-oss-20b). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time mach...
[]
Vikhrmodels/Vistral-24B-Instruct-MLX_8bit
Vikhrmodels
2025-09-29T19:29:11Z
12
2
mlx
[ "mlx", "safetensors", "mistral", "text-generation", "en", "ru", "dataset:Vikhrmodels/GrandMaster2", "base_model:Vikhrmodels/Vistral-24B-Instruct", "base_model:quantized:Vikhrmodels/Vistral-24B-Instruct", "license:apache-2.0", "8-bit", "region:us" ]
text-generation
2025-09-29T10:42:57Z
# Vikhrmodels/Vistral-24B-Instruct-MLX_8bit This model [Vikhrmodels/Vistral-24B-Instruct-MLX_8bit](https://huggingface.co/Vikhrmodels/Vistral-24B-Instruct-MLX_8bit) was converted to MLX format from [Vikhrmodels/Vistral-24B-Instruct](https://huggingface.co/Vikhrmodels/Vistral-24B-Instruct) using mlx-lm version **0.26.2...
[]
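MLX conversions like this one load with `mlx-lm`, following its README pattern. A minimal sketch:

```python
from mlx_lm import load, generate

model, tokenizer = load("Vikhrmodels/Vistral-24B-Instruct-MLX_8bit")

# Build a chat prompt, then decode greedily up to 128 tokens.
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Привет! Как дела?"}],
    add_generation_prompt=True,
)
print(generate(model, tokenizer, prompt=prompt, max_tokens=128))
```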
iamcode6/amini-cocoa-dinov2-l-mi300x
iamcode6
2026-04-30T01:22:23Z
0
0
timm
[ "timm", "image-classification", "plant-disease", "cocoa", "rocm", "mi300x", "amd", "dataset:ohagwucollinspatrick/amini-cocoa-contamination-dataset", "base_model:facebook/dinov2-large", "base_model:finetune:facebook/dinov2-large", "license:apache-2.0", "region:us" ]
image-classification
2026-04-30T01:22:13Z
# vit_large_patch14_dinov2.lvd142m — Amini Cocoa Contamination (MI300X fine-tune) Fine-tuned **vit_large_patch14_dinov2.lvd142m** on the **Amini cocoa contamination** dataset (3 classes: anthracnose, cssvd, healthy). Trained on a single **AMD Instinct MI300X** using PyTorch + ROCm, as part of the AMD hackathon. This ...
[]
leobianco/npov_SFT_mistralai_S130104_epo25_lr1e-4_r8_2601301015
leobianco
2026-01-30T10:22:42Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:mistralai/Mistral-7B-Instruct-v0.3", "base_model:finetune:mistralai/Mistral-7B-Instruct-v0.3", "endpoints_compatible", "region:us" ]
null
2026-01-30T10:15:57Z
# Model Card for npov_SFT_mistralai_S130104_epo25_lr1e-4_r8_2601301015 This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.3](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers...
[]
shanchen/math-500-base-japanese-lora
shanchen
2025-09-10T20:26:12Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "grpo", "arxiv:2402.03300", "base_model:deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", "base_model:finetune:deepseek-ai/DeepSeek-R1-Distill-Qwen-7B", "endpoints_compatible", "region:us" ]
null
2025-09-10T20:26:10Z
# Model Card for MATH-500-japanese_8lora_5e-6 This model is a fine-tuned version of [deepseek-ai/DeepSeek-R1-Distill-Qwen-7B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-7B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipelin...
[]
rodrigomt/Qwen-3.5-Opus-GLM-27B
rodrigomt
2026-04-20T14:24:06Z
17
0
transformers
[ "transformers", "safetensors", "qwen3_5", "image-text-to-text", "merge", "mergekit", "dare_ties", "Jackrong/Qwopus3.5-27B-v3.5", "Jackrong/Qwen3.5-27B-GLM5.1-Distill-v1", "conversational", "base_model:Jackrong/Qwen3.5-27B-GLM5.1-Distill-v1", "base_model:merge:Jackrong/Qwen3.5-27B-GLM5.1-Distil...
image-text-to-text
2026-04-19T23:03:13Z
# Qwen-3.5-Opus-GLM-27B A **DARE-TIES** merge combining the strengths of two fine-tuned Qwen 3.5 27B variants — one distilled from Opus-style reasoning, the other from GLM 5.1 — into a single unified model. ## Source Models | Model | Role | Density | Weight | |---|---|---|---| | [Qwopus3.5-27B-v3.5](https://huggingf...
[]
metheAnkit/smolified-shesafe-slm
metheAnkit
2026-03-29T16:22:40Z
0
0
transformers
[ "transformers", "safetensors", "gemma3_text", "text-generation", "text-generation-inference", "smolify", "dslm", "conversational", "en", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-generation
2026-03-29T16:22:29Z
# 🤏 smolified-shesafe-slm > **Intelligence, Distilled.** This is a **Domain Specific Language Model (DSLM)** generated by the **Smolify Foundry**. It has been synthetically distilled from SOTA reasoning engines into a high-efficiency architecture, optimized for deployment on edge hardware (CPU/NPU) or low-VRAM envi...
[ { "start": 456, "end": 487, "text": "Proprietary Neural Distillation", "label": "training method", "score": 0.76744544506073 } ]
5456es/last_layer_prune_Llama-3.1-8B-Instruct_prune_0.2-sigmoid
5456es
2025-09-15T05:15:05Z
0
0
null
[ "safetensors", "llama", "dpo", "preference-learning", "last", "pruned", "license:apache-2.0", "region:us" ]
null
2025-09-12T07:58:37Z
# last_layer_prune_Llama-3.1-8B-Instruct_prune_0.2-sigmoid This model is a DPO (Direct Preference Optimization) fine-tuned version of Llama-3.1-8B-Instruct using the last method. ## Model Details - **Base Model**: Llama-3.1-8B-Instruct - **Training Method**: last - **Pruning Ratio**: unknown - **Training Date**: 202...
[ { "start": 262, "end": 266, "text": "last", "label": "training method", "score": 0.8530623912811279 }, { "start": 468, "end": 472, "text": "last", "label": "training method", "score": 0.826615571975708 } ]
NikolayKozloff/kakugo-3B-tgk-Q8_0-GGUF
NikolayKozloff
2026-01-28T17:30:33Z
8
1
null
[ "gguf", "low-resource-language", "data-distillation", "conversation", "tgk", "Tajik", "llama-cpp", "gguf-my-repo", "text-generation", "dataset:ptrdvn/kakugo-tgk", "base_model:ptrdvn/kakugo-3B-tgk", "base_model:quantized:ptrdvn/kakugo-3B-tgk", "license:apache-2.0", "endpoints_compatible", ...
text-generation
2026-01-28T17:30:16Z
# NikolayKozloff/kakugo-3B-tgk-Q8_0-GGUF This model was converted to GGUF format from [`ptrdvn/kakugo-3B-tgk`](https://huggingface.co/ptrdvn/kakugo-3B-tgk) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface....
[]
kartikgupta373/xuv3xo
kartikgupta373
2025-08-14T11:53:21Z
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
2025-08-14T11:53:20Z
# Xuv3Xo <Gallery /> ## About this LoRA This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI. It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer/...
[]
mradermacher/Mistral-Nemo-Punisher-Carnage-V1-i1-GGUF
mradermacher
2026-03-26T19:31:59Z
1,646
0
transformers
[ "transformers", "gguf", "Carnage", "Symbiote", "Castle", "DoubleC", "Frank Castle", "The Punisher", "Roleplay", "Mistral", "Nemo", "conversational", "en", "dataset:BrainDelay/DoubleC", "base_model:BrainDelay/Mistral-Nemo-Punisher-Carnage-V1", "base_model:quantized:BrainDelay/Mistral-Ne...
null
2026-03-26T17:58:22Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
rbelanec/train_cola_456_1757596101
rbelanec
2025-09-11T14:23:54Z
0
0
peft
[ "peft", "safetensors", "llama-factory", "ia3", "generated_from_trainer", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "base_model:adapter:meta-llama/Meta-Llama-3-8B-Instruct", "license:llama3", "region:us" ]
null
2025-09-11T13:34:23Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # train_cola_456_1757596101 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta...
[]
Muapi/wizard-s-scrap-yard-supermarionation-puppets
Muapi
2025-08-19T20:03:55Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-19T20:03:34Z
# Wizard's Scrap Yard: Supermarionation Puppets ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: Thunderbirds Puppet, Puppet ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/...
[]
AvinashChauhan123/finetuning-sentiment-model-3000-samples
AvinashChauhan123
2026-01-16T04:40:18Z
1
0
transformers
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "text-embeddings-inference", "endpoints_compatible", "re...
text-classification
2026-01-15T07:59:40Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/di...
[]
wikilangs/xmf
wikilangs
2026-01-11T05:18:44Z
0
0
wikilangs
[ "wikilangs", "nlp", "tokenizer", "embeddings", "n-gram", "markov", "wikipedia", "feature-extraction", "sentence-similarity", "tokenization", "n-grams", "markov-chain", "text-mining", "fasttext", "babelvec", "vocabulous", "vocabulary", "monolingual", "family-kartvelian", "text-g...
text-generation
2026-01-11T05:18:27Z
# Mingrelian - Wikilangs Models ## Comprehensive Research Report & Full Ablation Study This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Mingrelian** Wikipedia data. We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings. ## 📋 Repositor...
[ { "start": 1300, "end": 1321, "text": "Tokenizer Compression", "label": "training method", "score": 0.7022438645362854 } ]
heavylildude/magnus
heavylildude
2025-10-28T06:36:21Z
13
0
null
[ "gguf", "abliterated", "uncensored", "thinking-off", "en", "base_model:Qwen/Qwen3-8B", "base_model:quantized:Qwen/Qwen3-8B", "license:apache-2.0", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2025-10-28T06:14:19Z
# ⚡ Magnus - (Not so) Mindful Assistant for General Needs and Universal Solutions >> Ayy, what's up, brah. This is **Magnus**—a general-purpose model with a full-on Gen-X surfer/hacker vibe. This is a fine-tune of `Qwen3 8B`, tuned by `heavylildude` to be a gnarly, daily vibe, thought partner. He's all about that 80...
[]
arianaazarbal/qwen3-4b-20260114_003846_lc_rh_sot_recon_gen_dont_ev-86942f-step160
arianaazarbal
2026-01-14T04:37:44Z
0
0
null
[ "safetensors", "region:us" ]
null
2026-01-14T04:36:54Z
# qwen3-4b-20260114_003846_lc_rh_sot_recon_gen_dont_ev-86942f-step160 ## Experiment Info - **Full Experiment Name**: `20260114_003846_leetcode_train_medhard_filtered_rh_simple_overwrite_tests_recontextualization_gen_dont_eval_game_train_default_oldlp_training_seed42` - **Short Name**: `20260114_003846_lc_rh_sot_recon_...
[]
D-AT2025/dpo-qwen-cot-merged_60steps
D-AT2025
2026-03-01T19:45:46Z
48
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "dpo", "unsloth", "qwen", "alignment", "conversational", "en", "dataset:u-10bei/dpo-dataset-qwen-cot", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:finetune:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "text-gener...
text-generation
2026-03-01T19:30:32Z
# qwen3-4b-dpo-qwen-cot-merged_60steps This model is a fine-tuned version of **Qwen/Qwen3-4B-Instruct-2507** using **Direct Preference Optimization (DPO)** via the **Unsloth** library. This repository contains the **full-merged 16-bit weights**. No adapter loading is required. ## Training Objective This model has be...
[ { "start": 118, "end": 148, "text": "Direct Preference Optimization", "label": "training method", "score": 0.8736300468444824 }, { "start": 150, "end": 153, "text": "DPO", "label": "training method", "score": 0.8635470867156982 }, { "start": 339, "end": 342, ...
Vishva007/Qwen3-VL-8B-Instruct-W4A16-AutoRound
Vishva007
2026-02-07T18:42:08Z
45
0
auto-round
[ "auto-round", "safetensors", "qwen3_vl", "intel", "qwen", "qwen3-vl", "vision-language-model", "quantization", "4-bit", "W4A16", "image-text-to-text", "conversational", "arxiv:2309.05516", "base_model:Qwen/Qwen3-VL-8B-Instruct", "base_model:quantized:Qwen/Qwen3-VL-8B-Instruct", "licens...
image-text-to-text
2026-01-30T19:19:32Z
# Qwen3-VL-8B-Instruct-W4A16-AutoRound ## Model Overview This is a **4-bit quantized** version of the powerful [Qwen/Qwen3-VL-8B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-8B-Instruct) vision-language model. It was optimized using **Intel's AutoRound** algorithm, which calibrates weights for 1000 iterations to mi...
[]
rbelanec/train_copa_1757340276
rbelanec
2025-09-10T16:13:03Z
0
0
peft
[ "peft", "safetensors", "llama-factory", "prefix-tuning", "generated_from_trainer", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "base_model:adapter:meta-llama/Meta-Llama-3-8B-Instruct", "license:llama3", "region:us" ]
null
2025-09-10T16:06:36Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # train_copa_1757340276 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-lla...
[]
cagedBirdy/peg_rand_05_01_cam_reference_cam1
cagedBirdy
2026-05-03T06:54:58Z
0
0
lerobot
[ "lerobot", "safetensors", "diffusion", "robotics", "dataset:radoolonto/peg_rand_05_01_cam_reference_cam1", "arxiv:2303.04137", "license:apache-2.0", "region:us" ]
robotics
2026-05-03T06:54:31Z
# Model Card for diffusion <!-- Provide a quick summary of what the model is/does. --> [Diffusion Policy](https://huggingface.co/papers/2303.04137) treats visuomotor control as a generative diffusion process, producing smooth, multi-step action trajectories that excel at contact-rich manipulation. This policy has ...
[]
leonepson/semantic_rl
leonepson
2025-11-07T03:05:46Z
0
1
null
[ "interpretablity", "clustering", "visualization", "reinforcement-learning", "arxiv:2409.17411", "region:us" ]
reinforcement-learning
2025-11-06T17:40:48Z
# Enhancing Interpretability in Deep Reinforcement Learning through Semantic Clustering **Authors:** Liang Zhang, Justin Lieffers, Adarsh Pyarelal **Conference:** NeurIPS 2025 Main Track **Paper:** [arXiv:2409.17411](https://arxiv.org/abs/2409.17411) This repository contains the official implementation of our res...
[]
McGill-NLP/AfriqueGemma-4B
McGill-NLP
2026-04-20T06:53:12Z
491
0
transformers
[ "transformers", "safetensors", "gemma3", "image-text-to-text", "african-languages", "multilingual", "continued-pretraining", "afrique-llm", "gemma", "llamafactory", "text-generation", "conversational", "af", "am", "ar", "en", "fr", "ha", "ig", "mg", "ny", "om", "pt", "r...
text-generation
2026-01-06T22:15:52Z
# AfriqueGemma-4B ## Model Overview **AfriqueGemma-4B** is part of the **AfriqueLLM** suite—a collection of open language models adapted to **20 African languages** through continued pre-training (CPT) on **~26B tokens**. This model is based on [google/gemma-3-4b-pt](https://huggingface.co/google/gemma-3-4b-pt) and h...
[]
mt628754/test011
mt628754
2026-02-21T00:20:23Z
0
0
peft
[ "peft", "safetensors", "qwen3", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "dataset:u-10bei/sft_alfworld_trajectory_dataset_v5", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache...
text-generation
2026-02-21T00:18:35Z
# qwen3-4b-agent-trajectory-lora-1 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **LoRA + Unsloth**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **multi-...
[ { "start": 65, "end": 69, "text": "LoRA", "label": "training method", "score": 0.8936331868171692 }, { "start": 136, "end": 140, "text": "LoRA", "label": "training method", "score": 0.9202008247375488 }, { "start": 182, "end": 186, "text": "LoRA", "lab...
mlx-community/Ring-flash-linear-2.0-4bit
mlx-community
2025-10-17T18:03:31Z
8
0
mlx
[ "mlx", "safetensors", "bailing_moe_linear", "moe", "text-generation", "conversational", "custom_code", "en", "base_model:inclusionAI/Ring-flash-linear-2.0", "base_model:quantized:inclusionAI/Ring-flash-linear-2.0", "license:mit", "4-bit", "region:us" ]
text-generation
2025-10-17T16:39:15Z
# mlx-community/Ring-flash-linear-2.0-4bit This model [mlx-community/Ring-flash-linear-2.0-4bit](https://huggingface.co/mlx-community/Ring-flash-linear-2.0-4bit) was converted to MLX format from [inclusionAI/Ring-flash-linear-2.0](https://huggingface.co/inclusionAI/Ring-flash-linear-2.0) using mlx-lm version **0.28.2*...
[]
Sakai0920/LLM-Competition-2025
Sakai0920
2026-03-01T09:46:56Z
9
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:u-10bei/structured_data_with_cot_dataset_512_v2", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-02-27T04:26:33Z
# LLM-Competition-2025-v44 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **struct...
[ { "start": 128, "end": 133, "text": "QLoRA", "label": "training method", "score": 0.8299076557159424 }, { "start": 569, "end": 574, "text": "QLoRA", "label": "training method", "score": 0.7525935173034668 } ]
ReadyArt/C4-Broken-Tutu-24B-EXL3
ReadyArt
2025-08-07T10:05:42Z
0
0
transformers
[ "transformers", "mergekit", "merge", "nsfw", "explicit", "roleplay", "unaligned", "ERP", "Erotic", "Horror", "Violence", "text-generation", "en", "arxiv:2311.03099", "base_model:ReadyArt/C4-Broken-Tutu-24B", "base_model:quantized:ReadyArt/C4-Broken-Tutu-24B", "license:apache-2.0", ...
text-generation
2025-08-07T05:54:57Z
# C4-Broken-Tutu-24B This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [DARE TIES](https://arxiv.org/abs/2311.03099) merge method using [ReadyArt/The-Omega-Directive-M-24B-v1.1](https://hugging...
[ { "start": 199, "end": 208, "text": "DARE TIES", "label": "training method", "score": 0.7855467796325684 }, { "start": 929, "end": 938, "text": "dare_ties", "label": "training method", "score": 0.7513964772224426 } ]
pshingavi/policy_first-so101_22
pshingavi
2026-01-23T03:50:20Z
0
0
lerobot
[ "lerobot", "safetensors", "act", "robotics", "dataset:pshingavi/first-so101_20", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-01-23T03:50:08Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
NikolayKozloff/Ministral-3-3B-Reasoning-2512-Q8_0-GGUF
NikolayKozloff
2025-12-02T15:53:34Z
12
2
vllm
[ "vllm", "gguf", "mistral-common", "llama-cpp", "gguf-my-repo", "en", "fr", "es", "de", "it", "pt", "nl", "zh", "ja", "ko", "ar", "base_model:mistralai/Ministral-3-3B-Reasoning-2512", "base_model:quantized:mistralai/Ministral-3-3B-Reasoning-2512", "license:apache-2.0", "region:u...
null
2025-12-02T15:53:17Z
# NikolayKozloff/Ministral-3-3B-Reasoning-2512-Q8_0-GGUF This model was converted to GGUF format from [`mistralai/Ministral-3-3B-Reasoning-2512`](https://huggingface.co/mistralai/Ministral-3-3B-Reasoning-2512) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. R...
[]
xon1xx1/RS_GRPO
xon1xx1
2026-04-20T04:02:08Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "unsloth", "grpo", "trl", "arxiv:2402.03300", "base_model:unsloth/gemma-3-1b-it-unsloth-bnb-4bit", "base_model:finetune:unsloth/gemma-3-1b-it-unsloth-bnb-4bit", "endpoints_compatible", "region:us" ]
null
2026-04-19T18:32:33Z
# Model Card for RS_GRPO This model is a fine-tuned version of [unsloth/gemma-3-1b-it-unsloth-bnb-4bit](https://huggingface.co/unsloth/gemma-3-1b-it-unsloth-bnb-4bit). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you h...
[]
yixuH/qwen3_vl_8b_arvlm_sft
yixuH
2026-03-08T13:57:28Z
10
0
transformers
[ "transformers", "safetensors", "qwen3_vl", "image-text-to-text", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:Qwen/Qwen3-VL-8B-Instruct", "base_model:finetune:Qwen/Qwen3-VL-8B-Instruct", "license:other", "endpoints_compatible", "region:us" ]
image-text-to-text
2026-03-08T13:53:13Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sft This model is a fine-tuned version of [Qwen/Qwen3-VL-8B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-8B-Instruct) on the vi...
[]
juliadollis/semeval2026_Llama-3.2-3B-Instruct_1ep_v1
juliadollis
2025-10-26T23:48:28Z
0
0
peft
[ "peft", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:meta-llama/Llama-3.2-3B-Instruct", "base_model:adapter:meta-llama/Llama-3.2-3B-Instruct", "license:llama3.2", "region:us" ]
null
2025-10-26T23:32:25Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # semeval2026_Llama-3.2-3B-Instruct_1ep_v1 This model is a fine-tuned version of [meta-llama/Llama-3.2-3B-Instruct](https://hugging...
[]
MC201/ScoreVision
MC201
2025-11-18T17:03:56Z
0
0
null
[ "region:us" ]
null
2025-11-18T17:03:54Z
# 🚀 Example Chute for Turbovision 🪂 This repository demonstrates how to deploy a **Chute** via the **Turbovision CLI**, hosted on **Hugging Face Hub**. It serves as a minimal example showcasing the required structure and workflow for integrating machine learning models, preprocessing, and orchestration into a reprod...
[]
Lakshan2003/Qwen3-8B-Instruct-customerservice
Lakshan2003
2026-02-07T20:18:17Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:unsloth/Qwen3-8B-unsloth-bnb-4bit", "lora", "sft", "transformers", "trl", "unsloth", "customer service qa", "text-generation", "conversational", "en", "dataset:Lakshan2003/customer-support-client-agent-conversations", "arxiv:2602.00665", "base_m...
text-generation
2026-01-18T18:32:33Z
# Qwen-3-8B-Instruct-customerservice This model is a QLoRA fine-tuned version of **Qwen/Qwen-3-8B-Instruct** on a context-summarized multi-turn customer-service QA dataset for banking domain conversations. ## Model Description This is a **QLoRA (Quantized Low-Rank Adaptation)** fine-tuned version of Qwen-3-8B-Instru...
[ { "start": 54, "end": 59, "text": "QLoRA", "label": "training method", "score": 0.8639211654663086 }, { "start": 242, "end": 247, "text": "QLoRA", "label": "training method", "score": 0.8606595993041992 }, { "start": 664, "end": 669, "text": "QLoRA", "...
chloeli/qwen-3-14b-rules-aug-spec-msm
chloeli
2026-05-01T11:38:38Z
0
0
peft
[ "peft", "safetensors", "qwen3", "base_model:Qwen/Qwen3-14B", "base_model:adapter:Qwen/Qwen3-14B", "license:mit", "region:us" ]
null
2026-05-01T11:38:27Z
# qwen-3-14b-rules-aug-spec-msm A LoRA adapter for [Qwen/Qwen3-14B](https://huggingface.co/Qwen/Qwen3-14B), trained using model spec midtraining (MSM) only. - **Base model:** Qwen/Qwen3-14B - **LoRA rank:** 64 - **LoRA alpha:** 128 - **Target modules:** q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj #...
[ { "start": 147, "end": 150, "text": "MSM", "label": "training method", "score": 0.7467654943466187 } ]
Lucky94629/piccolo-base-zh-onnx
Lucky94629
2026-04-19T11:30:00Z
0
0
null
[ "pytorch", "onnx", "bert", "mteb", "model-index", "region:us" ]
null
2026-04-19T11:27:42Z
## piccolo-base-zh piccolo is a general-purpose Chinese embedding model trained by the general model group at SenseTime. piccolo borrows the training pipelines of E5 and GTE and adopts a two-stage training scheme. In the first stage, we collected and crawled 400 million Chinese text pairs (which can be treated as weakly supervised pair data) and optimized the model with a pairwise softmax contrastive-learning loss. In the second stage, we curated 20 million human-annotated Chinese text pairs (finely labeled data) and used a triplet softmax contrastive loss with hard negatives to help the model optimize further. We currently provide two models, piccolo-base-zh and piccolo-large-zh. piccolo is a general text embed...
[]
neural-interactive-proofs/finetune_dpo_qwen2_5-32b-instruct_cv_qwen2.5-32B_prover_nip_transfer_baseline_1_1_iter_2_provers
neural-interactive-proofs
2025-08-13T22:41:58Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "generated_from_trainer", "trl", "dpo", "arxiv:2305.18290", "base_model:Qwen/Qwen2.5-32B-Instruct", "base_model:finetune:Qwen/Qwen2.5-32B-Instruct", "endpoints_compatible", "region:us" ]
null
2025-08-13T22:40:41Z
# Model Card for finetune_dpo_qwen2_5-32b-instruct_cv_qwen2.5-32B_prover_nip_transfer_baseline_1_1_iter_2_provers This model is a fine-tuned version of [Qwen/Qwen2.5-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-32B-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ``...
[]
anthonyw448/DeepSeek-R1-Distill-Llama-70B_EXL2_7.0bpw_h6
anthonyw448
2026-05-05T00:56:59Z
0
0
null
[ "safetensors", "llama", "EXL2", "license:mit", "7-bit", "exl2", "region:us" ]
null
2026-05-05T00:35:54Z
# DeepSeek-R1-Distill-Llama-70B EXL2 7.0bpw h6 EXL2 quantization of [deepseek-ai/DeepSeek-R1-Distill-Llama-70B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-70B) ## Quantization Details - **Format:** EXL2 - **Bits per weight:** 7.0bpw - **Head bits:** 6 - **Quantized with:** ExLlamaV2 0.3.1 - **Calibr...
[]
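EXL2 checkpoints like this one load through the exllamav2 runtime. A minimal sketch following the exllamav2 README's dynamic-generator pattern; treat the exact class names as version-dependent:

```python
from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Cache, ExLlamaV2Tokenizer
from exllamav2.generator import ExLlamaV2DynamicGenerator

# Point the config at the downloaded model directory.
config = ExLlamaV2Config("DeepSeek-R1-Distill-Llama-70B_EXL2_7.0bpw_h6")
model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(cache)  # split the 70B weights across available GPUs
tokenizer = ExLlamaV2Tokenizer(config)

generator = ExLlamaV2DynamicGenerator(model=model, cache=cache, tokenizer=tokenizer)
print(generator.generate(prompt="Hello, my name is", max_new_tokens=64))
```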
TheCluster/Qwen3.6-35B-A3B-MLX-mixed-9bit
TheCluster
2026-04-16T18:06:12Z
0
1
mlx
[ "mlx", "safetensors", "qwen3_5_moe", "9bit", "mixed-precision", "image-text-to-text", "conversational", "en", "zh", "ru", "es", "fr", "it", "ja", "ko", "af", "de", "ar", "tr", "is", "pl", "sw", "sv", "nl", "he", "id", "uk", "fa", "pa", "pt", "ms", "fi", ...
image-text-to-text
2026-04-16T16:55:00Z
<div align="center"><img width="400px" src="https://qianwen-res.oss-accelerate.aliyuncs.com/Qwen3.6/logo.png"></div> # Qwen3.6-35B-A3B **Quality**: quantized (***mixed quants per tensor**, group size: 32, **9.191 bpw***) Most layers use 8-bit affine quantization with a group size 32; some important layers are saved ...
[]
gaoqianshen/Qwen3.6-27B-FP8-Q4_K_M-GGUF
gaoqianshen
2026-04-22T22:50:09Z
0
0
transformers
[ "transformers", "gguf", "llama-cpp", "gguf-my-repo", "image-text-to-text", "base_model:Qwen/Qwen3.6-27B-FP8", "base_model:quantized:Qwen/Qwen3.6-27B-FP8", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
image-text-to-text
2026-04-22T22:50:05Z
# gaoqianshen/Qwen3.6-27B-FP8-Q4_K_M-GGUF This model was converted to GGUF format from [`Qwen/Qwen3.6-27B-FP8`](https://huggingface.co/Qwen/Qwen3.6-27B-FP8) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface...
[]
ericlewis/infinite-craft-smollm2-360m-500k
ericlewis
2026-03-14T01:54:09Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "sft", "hf_jobs", "trl", "trackio:https://ericlewis-trackio.hf.space?project=infinite-craft&runs=smollm2-360m-500k-1ep&sidebar=collapsed", "trackio", "base_model:HuggingFaceTB/SmolLM2-360M-Instruct", "base_model:finetune:HuggingFaceTB/SmolLM...
null
2026-03-13T23:58:10Z
# Model Card for infinite-craft-smollm2-360m-500k This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-360M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-360M-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline q...
[]
qing-yao/genpref_n1000_nb300k_410m_ep1_lr1e-4_seed42
qing-yao
2025-12-26T07:29:06Z
3
0
transformers
[ "transformers", "safetensors", "gpt_neox", "text-generation", "generated_from_trainer", "base_model:EleutherAI/pythia-410m", "base_model:finetune:EleutherAI/pythia-410m", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-12-26T07:28:16Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # genpref_n1000_nb300k_410m_ep1_lr1e-4_seed42 This model is a fine-tuned version of [EleutherAI/pythia-410m](https://huggingface.co...
[]
12eqsad/Florence-2-base-PromptGen-v2.0
12eqsad
2026-03-22T10:13:43Z
9
0
null
[ "safetensors", "florence2", "custom_code", "license:mit", "region:us" ]
null
2026-03-22T10:13:41Z
# Florence-2-base-PromptGen v2.0 This upgrade is based on PromptGen 1.5 and adds some new features to the model: ## Features: * Improved caption quality for \<GENERATE_TAGS\>, \<DETAILED_CAPTION\> and \<MORE_DETAILED_CAPTION\>. <img style="width:100%; height:100%" src="https://msdn.miaoshouai.com/miaoshou/bo/2024-11-05_...
[]
MasterProject2026/pick-orange-mimic
MasterProject2026
2026-03-19T12:55:32Z
32
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:LightwheelAI/leisaac-pick-orange-mimic-v0", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2026-03-19T12:54:43Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
savinugunarathna/small-100-Singlish-Sinhala-CodeMix
savinugunarathna
2026-02-28T17:07:38Z
242
0
null
[ "safetensors", "m2m_100", "translation", "singlish", "sinhala", "code-mixing", "mbart", "lora", "seq2seq", "en", "si", "base_model:savinugunarathna/Small100-Singlish-Sinhala-Merged", "base_model:adapter:savinugunarathna/Small100-Singlish-Sinhala-Merged", "license:mit", "region:us" ]
translation
2026-02-21T18:50:10Z
# small-100-Singlish-Sinhala-CodeMix A code-mixed Singlish → Sinhala translation model built on top of [mBART small-100](https://huggingface.co/alirezamsh/small100), fine-tuned in two stages using LoRA. This model handles the everyday Sri Lankan reality of switching between English and Sinhala mid-sentence — something...
[ { "start": 198, "end": 202, "text": "LoRA", "label": "training method", "score": 0.833267092704773 }, { "start": 1002, "end": 1006, "text": "LoRA", "label": "training method", "score": 0.8457019925117493 }, { "start": 1160, "end": 1164, "text": "LoRA", ...
ronantakizawa/sarashina2-7b-4bit-awq
ronantakizawa
2025-10-06T03:37:54Z
2
1
null
[ "safetensors", "llama", "awq", "quantized", "4-bit", "japanese", "llm", "ja", "en", "base_model:sbintuitions/sarashina2-7b", "base_model:quantized:sbintuitions/sarashina2-7b", "license:mit", "region:us" ]
null
2025-10-03T21:05:23Z
# Sarashina2-7B AWQ 4-bit Quantized This is a 4-bit AWQ quantized version of [sbintuitions/sarashina2-7b](https://huggingface.co/sbintuitions/sarashina2-7b). Quantization was done with Activation-aware Weight Quantization (AWQ) on the Japanese Wikipedia dataset (range3/wikipedia-ja-20230101). [Source Code](http...
[]
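AWQ checkpoints like this one can be loaded with plain `transformers`, which reads AWQ weights directly when the `autoawq` package is installed. A minimal sketch:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Requires autoawq (and accelerate for device_map) to be installed.
model = AutoModelForCausalLM.from_pretrained("ronantakizawa/sarashina2-7b-4bit-awq",
                                             device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("ronantakizawa/sarashina2-7b-4bit-awq")

inputs = tokenizer("日本の首都は", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))
```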
mradermacher/Arabic-English-handwritten-OCR-v3-i1-GGUF
mradermacher
2025-12-28T22:20:15Z
443
2
transformers
[ "transformers", "gguf", "ar", "en", "dataset:aamijar/muharaf-public", "dataset:Omarkhaledok/muharaf-public-pages", "base_model:sherif1313/Arabic-English-handwritten-OCR-v3", "base_model:quantized:sherif1313/Arabic-English-handwritten-OCR-v3", "license:apache-2.0", "endpoints_compatible", "region...
null
2025-12-28T21:46:23Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
red1-for-hek/drishti-smart-x1
red1-for-hek
2026-03-10T13:51:52Z
250
0
null
[ "safetensors", "qwen2", "drishti", "bangladesh", "red1-for-hek", "chat", "instruction-tuned", "en", "bn", "license:apache-2.0", "region:us" ]
null
2026-03-10T05:22:06Z
# Drishti Smart X1 (33B) **Web search & synthesis** — Part of the [DRISHTI](https://github.com/red1-for-hek/DRISHTI) multi-expert AI system by **red1-for-hek**. > Powers DRISHTI Search — synthesizes real-time web results into clean, accurate answers. --- ## About DRISHTI DRISHTI (দৃষ্টি) is Bangladesh's most advan...
[]
devika-tiwari/gpt2_small_expandedbabyLM_100M_subj_50percent_43
devika-tiwari
2026-02-26T02:37:41Z
77
0
null
[ "pytorch", "gpt2", "generated_from_trainer", "region:us" ]
null
2026-02-25T23:35:45Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt2_small_expandedbabyLM_100M_subj_50percent_43 This model is a fine-tuned version of [](https://huggingface.co/) on an unknown ...
[]
jsl5710/Shield-Qwen3-1.7B-Full-FT-CE
jsl5710
2026-04-09T17:56:36Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "dia-guard", "shield", "safety", "dialect", "full-ft", "ce", "conversational", "en", "base_model:Qwen/Qwen3-1.7B", "base_model:finetune:Qwen/Qwen3-1.7B", "license:apache-2.0", "text-generation-inference", "endpoints_compatibl...
text-generation
2026-04-09T04:04:24Z
# Qwen3-1.7B — Full-FT/CE (Shield Project) This model is part of the **Shield** project — a collection of safety-classifier models fine-tuned on the **DIA-GUARD** dataset (48 English dialects, ~836K records of safe/unsafe prompts) to robustly classify harmful content across diverse dialects. ## Model Summary | Field...
[ { "start": 454, "end": 461, "text": "Full-FT", "label": "training method", "score": 0.8531159162521362 } ]
aShunSasaki/so101_pp_blue_box_k_01_policy_01
aShunSasaki
2026-02-04T16:26:37Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:aShunSasaki/so101_pp_blue_box_k_01", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2026-02-04T16:26:21Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
Qwen/Qwen2.5-14B-Instruct-AWQ
Qwen
2024-10-09T12:26:42Z
1,789,054
29
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "chat", "conversational", "en", "arxiv:2309.00071", "arxiv:2407.10671", "base_model:Qwen/Qwen2.5-14B-Instruct", "base_model:quantized:Qwen/Qwen2.5-14B-Instruct", "license:apache-2.0", "text-generation-inference", "endpoints_compati...
text-generation
2024-09-17T13:55:22Z
# Qwen2.5-14B-Instruct-AWQ ## Introduction Qwen2.5 is the latest series of Qwen large language models. For Qwen2.5, we release a number of base language models and instruction-tuned language models ranging from 0.5 to 72 billion parameters. Qwen2.5 brings the following improvements upon Qwen2: - Significantly **more...
[ { "start": 1230, "end": 1257, "text": "Pretraining & Post-training", "label": "training method", "score": 0.7934315800666809 } ]
mradermacher/DATA-AI_Chat_3_360M-11M-Intruct-GGUF
mradermacher
2025-08-27T06:32:26Z
11
0
transformers
[ "transformers", "gguf", "text-generation-inference", "it", "en", "base_model:Mattimax/DATA-AI_Chat_3_360M-11M-Intruct", "base_model:quantized:Mattimax/DATA-AI_Chat_3_360M-11M-Intruct", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2025-08-27T06:22:38Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static qu...
[]
tingcc01/qwen3-sft-neg
tingcc01
2026-02-14T21:32:19Z
27
0
transformers
[ "transformers", "safetensors", "qwen3_vl", "image-text-to-text", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:Qwen/Qwen3-VL-8B-Instruct", "base_model:finetune:Qwen/Qwen3-VL-8B-Instruct", "license:other", "endpoints_compatible", "region:us" ]
image-text-to-text
2026-02-14T20:58:07Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # neg This model is a fine-tuned version of [Qwen/Qwen3-VL-8B-Instruct](https://huggingface.co/Qwen/Qwen3-VL-8B-Instruct) on the ne...
[]
nluick/MLAO-Qwen3-8B-3L-1N-IDK-fixed-step-5000
nluick
2026-02-06T02:46:44Z
0
0
peft
[ "peft", "safetensors", "qwen3", "base_model:Qwen/Qwen3-8B", "base_model:adapter:Qwen/Qwen3-8B", "region:us" ]
null
2026-02-06T02:46:23Z
# LoRA Adapter for SAE Introspection This is a LoRA (Low-Rank Adaptation) adapter trained for SAE (Sparse Autoencoder) introspection tasks. ## Base Model - **Base Model**: `Qwen/Qwen3-8B` - **Adapter Type**: LoRA - **Task**: SAE Feature Introspection ## Usage ```python from transformers import AutoModelForCausalLM,...
[]
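Adapter-only repos like this one are applied on top of the base model with `peft`, as the card's truncated usage section begins to show. A minimal sketch:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model first, then attach the LoRA adapter weights.
base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-8B", device_map="auto")
model = PeftModel.from_pretrained(base, "nluick/MLAO-Qwen3-8B-3L-1N-IDK-fixed-step-5000")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-8B")
```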
Chuatury/lama-torch-jit
Chuatury
2025-10-29T12:54:09Z
0
0
null
[ "lama", "inpaint", "inpainting", "image-to-image", "region:us" ]
image-to-image
2025-10-29T12:38:09Z
Torch JIT compiled [LaMa](https://github.com/advimman/lama) models, use without friction. Inspired by [IOPaint](https://github.com/Sanster/IOPaint/) and all models here are compatible with IOPaint. ``` Args: image: [B, C, H, W] - RGB image mask: [B, 1, H, W] - Binary mask (1 = inpaint region, 0 = keep) Retur...
[]
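The card documents the JIT module's call signature (image `[B, C, H, W]`, mask `[B, 1, H, W]`). A minimal inference sketch; the checkpoint filename is an assumption:

```python
import torch

model = torch.jit.load("big-lama.pt", map_location="cpu")  # filename assumed
model.eval()

image = torch.rand(1, 3, 512, 512)                  # [B, C, H, W] RGB image
mask = (torch.rand(1, 1, 512, 512) > 0.9).float()   # [B, 1, H, W], 1 = inpaint region

with torch.no_grad():
    result = model(image, mask)  # inpainted RGB image
print(result.shape)
```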
Azaz666/FastVLM-1.5B-torchao-W4A8
Azaz666
2026-04-27T17:55:24Z
0
0
null
[ "llava_qwen2", "quantized", "torchao w4a8", "vision-language-model", "vlm", "custom_code", "base_model:apple/FastVLM-1.5B", "base_model:finetune:apple/FastVLM-1.5B", "license:apache-2.0", "region:us" ]
null
2026-04-27T17:54:20Z
# apple__FastVLM-1.5B__torchao_w4a8 This is a **torchao W4A8** (4-bit) quantized version of [apple/FastVLM-1.5B](https://huggingface.co/apple/FastVLM-1.5B). ## Quantization Details - **Method**: torchao W4A8 - **Bits**: 4 - **Base model**: apple/FastVLM-1.5B - **Weight bits**: 4 - **Activation bits**: 8 (dynamic) - ...
[]
mradermacher/DARK-LUST-ROLEPLAY-3.2-1B-GGUF
mradermacher
2026-04-25T07:14:43Z
0
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "llama-3.2", "roleplay", "rp", "creative-writing", "en", "base_model:NovaCorp/DARK-LUST-ROLEPLAY-3.2-1B", "base_model:quantized:NovaCorp/DARK-LUST-ROLEPLAY-3.2-1B", "endpoints_compatible", "region:us", "conversational" ]
null
2026-04-25T05:22:36Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
rendchevi/nanoVLM-ft-aokvqa-a1
rendchevi
2025-10-15T08:43:09Z
0
0
nanovlm
[ "nanovlm", "safetensors", "vision-language", "multimodal", "research", "image-text-to-text", "license:mit", "region:us" ]
image-text-to-text
2025-10-15T08:21:46Z
--- # For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 # Doc / guide: https://huggingface.co/docs/hub/model-cards library_name: nanovlm license: mit pipeline_tag: image-text-to-text tags: - vision-language - multimodal - research --- **nan...
[]
Kazuki1450/Olmo-3-1025-7B_dsum_3_6_tok_Certainly_1p0_0p0_1p0_grpo_dr_grpo_42_rule
Kazuki1450
2026-03-24T05:30:38Z
612
0
transformers
[ "transformers", "safetensors", "olmo3", "text-generation", "generated_from_trainer", "grpo", "trl", "conversational", "arxiv:2402.03300", "base_model:allenai/Olmo-3-1025-7B", "base_model:finetune:allenai/Olmo-3-1025-7B", "endpoints_compatible", "region:us" ]
text-generation
2026-03-23T14:01:25Z
# Model Card for Olmo-3-1025-7B_dsum_3_6_tok_Certainly_1p0_0p0_1p0_grpo_dr_grpo_42_rule This model is a fine-tuned version of [allenai/Olmo-3-1025-7B](https://huggingface.co/allenai/Olmo-3-1025-7B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import...
[ { "start": 1049, "end": 1053, "text": "GRPO", "label": "training method", "score": 0.701633095741272 }, { "start": 1344, "end": 1348, "text": "GRPO", "label": "training method", "score": 0.7115781903266907 } ]
Thireus/Qwen3.5-2B-THIREUS-IQ2_S-SPECIAL_SPLIT
Thireus
2026-03-08T23:03:32Z
159
0
null
[ "gguf", "arxiv:2505.23786", "license:mit", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2026-03-08T22:31:29Z
# Qwen3.5-2B ## 🤔 What is this [HuggingFace repository](https://huggingface.co/Thireus/Qwen3.5-2B-THIREUS-BF16-SPECIAL_SPLIT/) about? This repository provides **GGUF-quantized tensors** for the Qwen3.5-2B model (official repo: https://huggingface.co/Qwen/Qwen3.5-2B). These GGUF shards are designed to be used with **...
[]
cstr/F2LLM-v2-0.6B-ONNX-FP16
cstr
2026-05-03T16:59:07Z
36
0
onnxruntime
[ "onnxruntime", "onnx", "qwen3", "embedding", "text-embedding", "retrieval", "sentence-similarity", "feature-extraction", "fp16", "fastembed", "en", "de", "zh", "multilingual", "base_model:codefuse-ai/F2LLM-v2-0.6B", "base_model:quantized:codefuse-ai/F2LLM-v2-0.6B", "license:apache-2....
sentence-similarity
2026-04-30T20:54:23Z
# F2LLM-v2-0.6B — FP16 ONNX An FP16-converted ONNX export of [codefuse-ai/F2LLM-v2-0.6B](https://huggingface.co/codefuse-ai/F2LLM-v2-0.6B), a Qwen3-derived 1024-dim retrieval embedding model with 32k context and last-token pooling. ~1.2 GB (~50% of the FP32 memory footprint), with retrieval quality equivalent to FP32 in our gates. ## Quality ...
[]
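A minimal onnxruntime sketch for the record above; the file name and the graph's input/output layout are assumptions, since the card is cut off before its usage section:
```python
# Hedged sketch with onnxruntime; "model_fp16.onnx" and the graph's
# input/output names are assumptions, not read from the repository.
import onnxruntime as ort
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("codefuse-ai/F2LLM-v2-0.6B")
session = ort.InferenceSession("model_fp16.onnx", providers=["CPUExecutionProvider"])
enc = tokenizer("a query to embed", return_tensors="np")
outputs = session.run(None, {"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})
embedding = outputs[0][0, -1]  # last-token pooling per the card; 1024-dim vector
```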
antwoor/pi0fast_tape_ticks_to_screwticks_10000
antwoor
2025-11-13T06:34:53Z
2
0
lerobot
[ "lerobot", "safetensors", "robotics", "pi0fast", "dataset:antwoor/screwdriver_95", "arxiv:2501.09747", "license:apache-2.0", "region:us" ]
robotics
2025-11-13T06:33:53Z
# Model Card for pi0fast <!-- Provide a quick summary of what the model is/does. --> [Pi0-Fast](https://huggingface.co/papers/2501.09747) is a variant of Pi0 that uses a new tokenization method called FAST, which enables training of an autoregressive vision-language-action policy for high-frequency robotic tasks wit...
[ { "start": 17, "end": 24, "text": "pi0fast", "label": "training method", "score": 0.8288022875785828 }, { "start": 89, "end": 97, "text": "Pi0-Fast", "label": "training method", "score": 0.8568876385688782 }, { "start": 204, "end": 208, "text": "FAST", ...
KoinicLabs/AXL-Code-1B-Lion
KoinicLabs
2026-03-31T00:57:02Z
0
0
transformers
[ "transformers", "gguf", "multiscale_transformer", "text-generation", "code-generation", "multi-scale-transformer", "cpu-optimized", "koinic", "pytorch", "llama", "byte-level", "code", "dataset:bigcode/starcoderdata", "dataset:sahil2801/CodeAlpaca-20k", "license:apache-2.0", "model-inde...
text-generation
2026-03-30T23:45:06Z
# AXL-Code-1B-Lion The largest Lion model. 318M params trained in 20 min. PPL 1.90. Context 256 bytes. Part of the AXL model family by [KoinicLabs](https://huggingface.co/KoinicLabs). ## Model Details | Property | Value | |----------|-------| | Developed by | [KoinicLabs](https://huggingface.co/KoinicLabs) |...
[]
mradermacher/Aurora-8B-i1-GGUF
mradermacher
2025-12-23T04:49:14Z
14
1
transformers
[ "transformers", "gguf", "text-generation-inference", "unsloth", "llama", "trl", "sft", "en", "base_model:scryptiam/Aurora-8B", "base_model:quantized:scryptiam/Aurora-8B", "license:apache-2.0", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2025-08-27T08:10:12Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_K...
[]
adroitLee/260211_smolvla_ep50_Syringe_bs16_st50000_cam3
adroitLee
2026-02-13T12:50:19Z
20
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:adroitLee/260211_ep50_Syringe_cam3", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2026-02-13T12:49:44Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
narayan214/distilbert_base_pii_redact
narayan214
2025-09-25T05:49:55Z
1
0
null
[ "safetensors", "distilbert", "pii-detection", "ner", "finance", "legal", "compliance", "privacy", "en", "arxiv:1910.01108", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "region:us" ]
null
2025-09-23T12:05:48Z
# DistilBERT for PII Detection This model is a fine-tuned **DistilBERT** (`distilbert-base-uncased`) for **Named Entity Recognition (NER)**, specifically designed to detect **Personally Identifiable Information (PII)** in English text. It was trained on a custom dataset of **2,235 samples** with **18 entity classes*...
[ { "start": 2, "end": 12, "text": "DistilBERT", "label": "training method", "score": 0.8015422821044922 }, { "start": 61, "end": 71, "text": "DistilBERT", "label": "training method", "score": 0.8617103695869446 }, { "start": 76, "end": 99, "text": "distilbe...
Alybit/Qwen3-8B-Test
Alybit
2026-01-29T19:04:01Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "text-generation-inference", "unsloth", "conversational", "en", "base_model:unsloth/Qwen3-8B-unsloth-bnb-4bit", "base_model:finetune:unsloth/Qwen3-8B-unsloth-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-generation
2026-01-29T18:34:00Z
This model was fine-tuned with the first 3000 examples of [Alpaca Cleaned](https://huggingface.co/datasets/yahma/alpaca-cleaned). # Uploaded finetuned model - **Developed by:** Alybit - **License:** apache-2.0 - **Finetuned from model :** unsloth/Qwen3-8B-unsloth-bnb-4bit This qwen3 model was trained 2x faster with...
[ { "start": 242, "end": 249, "text": "unsloth", "label": "training method", "score": 0.8858858346939087 }, { "start": 322, "end": 329, "text": "Unsloth", "label": "training method", "score": 0.7336992025375366 }, { "start": 360, "end": 367, "text": "unsloth...
depth-anything/camera-depth-model-kinect
depth-anything
2025-09-05T03:58:06Z
0
3
null
[ "depth-estimation", "arxiv:2509.02530", "license:cc-by-nc-4.0", "region:us" ]
depth-estimation
2025-09-01T04:35:55Z
# Manipulation as in Simulation: Enabling Accurate Geometry Perception in Robots This repository contains the Camera Depth Models (CDMs) presented in the paper [Manipulation as in Simulation: Enabling Accurate Geometry Perception in Robots](https://huggingface.co/papers/2509.02530). CDMs are proposed as simple plugin...
[]
koboldcpp/popular-templates
koboldcpp
2026-04-10T08:01:23Z
0
3
null
[ "region:us" ]
null
2026-03-21T07:29:08Z
This repository contains some popular KoboldCpp templates. ## What are .kcppt files? .kcppt files are configuration templates that store KoboldCpp launcher preferences, settings, and model download URLs. When loaded, they automatically find and download the files needed to get the model up and running. ##...
[]
xmfrostless/Lumina-DiMOO
xmfrostless
2026-03-06T14:31:53Z
19
0
diffusers
[ "diffusers", "safetensors", "llada", "Diffusion Large Language Model", "Multi-Modal Generation and Understanding", "any-to-any", "custom_code", "arxiv:2510.06308", "license:apache-2.0", "diffusers:LuminaDiMOOPipeline", "region:us" ]
any-to-any
2026-03-06T14:31:53Z
<p align="center"> <img src="./assets/Lumina-DiMOO.png" width="20%"/> </p> <div align="center"> <h1> Lumina-DiMOO: An Omni Diffusion Large Language Model for Multi-Modal Generation and Understanding </h1> [[📑 Technical Report](https://arxiv.org/abs/2510.06308)] &emsp; [[💜 Project Page (Demo & Benchmark)](https:...
[]
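The tags advertise a custom `LuminaDiMOOPipeline`, so loading presumably goes through diffusers' remote-code path; a hedged sketch:
```python
# Hedged sketch: custom diffusers pipeline, so trust_remote_code is required.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "xmfrostless/Lumina-DiMOO",
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
)
```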
jasong03/qwen3-1.7b-bilingual-amr-sft-v3
jasong03
2026-02-21T02:32:51Z
9
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "generated_from_trainer", "trl", "sft", "conversational", "base_model:Qwen/Qwen3-1.7B", "base_model:finetune:Qwen/Qwen3-1.7B", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-02-20T09:32:13Z
# Model Card for qwen3-1.7b-bilingual-amr-sft-v3 This model is a fine-tuned version of [Qwen/Qwen3-1.7B](https://huggingface.co/Qwen/Qwen3-1.7B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but...
[]
MWirelabs/ne-lid
MWirelabs
2026-01-14T15:35:30Z
3
1
fasttext
[ "fasttext", "language-identification", "northeast-india", "low-resource", "multilingual", "text-classification", "as", "brx", "en", "grt", "hi", "kha", "trp", "mni", "lus", "njz", "njo", "license:cc-by-4.0", "model-index", "region:us" ]
text-classification
2026-01-14T12:33:37Z
# NE-LID: Northeast Language Identification ![License](https://img.shields.io/badge/License-CC%20BY%204.0-blue.svg) ![Accuracy](https://img.shields.io/badge/Accuracy-99.09%25-brightgreen) NE-LID is a **sentence-level language identification model** for low-resource languages of **Northeast India**, trained using a **...
[]
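A minimal sketch of sentence-level prediction with the fasttext package (the `.bin` filename is an assumption):
```python
# Hedged sketch with fasttext; "ne_lid.bin" is an assumed local filename.
import fasttext

model = fasttext.load_model("ne_lid.bin")
labels, probs = model.predict("replace with a sentence to identify", k=3)
print(list(zip(labels, probs)))  # top-3 language labels with probabilities
```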
GeorgePearse/visdet-weights
GeorgePearse
2025-12-25T00:42:34Z
0
0
null
[ "object-detection", "instance-segmentation", "visdet", "pytorch", "license:apache-2.0", "region:us" ]
object-detection
2025-12-24T19:34:56Z
# VisDet Model Weights This repository contains pretrained weights for [VisDet](https://github.com/BinItAI/visdet), a streamlined object detection and instance segmentation library. ## Models The weights are organized by source: - `openmmlab/`: Backbone weights from OpenMMLab's model zoo - `mmcls/`: Classification b...
[]
Vortex5/Noir-Blossom-12B-Q6_K-GGUF
Vortex5
2025-10-07T20:36:43Z
6
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "llama-cpp", "gguf-my-repo", "base_model:Vortex5/Noir-Blossom-12B", "base_model:quantized:Vortex5/Noir-Blossom-12B", "endpoints_compatible", "region:us" ]
null
2025-10-07T20:36:01Z
# Vortex5/Noir-Blossom-12B-Q6_K-GGUF This model was converted to GGUF format from [`Vortex5/Noir-Blossom-12B`](https://huggingface.co/Vortex5/Noir-Blossom-12B) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingf...
[]
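A hedged sketch of pulling the quant straight from the Hub with llama-cpp-python (the filename glob is an assumption about the shipped Q6_K shard):
```python
# Hedged sketch with llama-cpp-python; the filename pattern is assumed.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="Vortex5/Noir-Blossom-12B-Q6_K-GGUF",
    filename="*q6_k.gguf",
    n_ctx=4096,
)
print(llm("Once upon a time", max_tokens=64)["choices"][0]["text"])
```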
aksiniakushner-netizen1/Gzhel_style_LoRA
aksiniakushner-netizen1
2026-03-18T20:55:26Z
3
0
diffusers
[ "diffusers", "tensorboard", "text-to-image", "diffusers-training", "lora", "template:sd-lora", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:openrail++", "re...
text-to-image
2026-03-17T13:37:40Z
<!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # SDXL LoRA DreamBooth - aksiniakushner-netizen1/Gzhel_style_LoRA <Gallery /> ## Model description These are aksiniakush...
[ { "start": 204, "end": 208, "text": "LoRA", "label": "training method", "score": 0.7461230158805847 }, { "start": 350, "end": 354, "text": "LoRA", "label": "training method", "score": 0.7850399613380432 }, { "start": 497, "end": 501, "text": "LoRA", "l...
14maddy/plant_disease-mobilenetv2
14maddy
2026-03-08T14:53:52Z
0
0
null
[ "plant-disease", "mobilenetv2", "image-classification", "computer-vision", "deep-learning", "agriculture", "cnn", "en", "license:apache-2.0", "region:us" ]
image-classification
2026-03-08T14:53:51Z
# 🌿 Plant Disease Classification – MobileNetV2 ### **High-Accuracy Deep Learning Model for Crop Disease Detection** **Author:** Daksh Goyal ## 🚀 Overview This repository provides a **lightweight, production-ready MobileNetV2 model** trained on the **PlantVillage** augmented dataset for multi-class plant disease ...
[]
liaialley/DDBC
liaialley
2026-03-05T08:55:10Z
0
0
null
[ "license:mit", "region:us" ]
null
2026-03-05T08:22:07Z
# DDBC Checkpoints This repository provides pretrained checkpoints for **DDBC (Discrete Diffusion for Bundle Construction)**. The checkpoints correspond to models trained on the following datasets: - **pog_dense** - **spotify30** - **spotify60** - **spotify90** ## Paper For details about the method, p...
[ { "start": 2, "end": 6, "text": "DDBC", "label": "training method", "score": 0.7516283988952637 }, { "start": 76, "end": 80, "text": "DDBC", "label": "training method", "score": 0.8878333568572998 }, { "start": 82, "end": 124, "text": "Discrete Diffusion f...