| Column | Dtype | Range / values |
|---|---|---|
| modelId | stringlengths | 9 – 122 |
| author | stringlengths | 2 – 36 |
| last_modified | timestamp[us, tz=UTC]date | 2021-05-20 01:31:09 – 2026-05-05 06:14:24 |
| downloads | int64 | 0 – 4.03M |
| likes | int64 | 0 – 4.32k |
| library_name | stringclasses | 189 values |
| tags | listlengths | 1 – 237 |
| pipeline_tag | stringclasses | 53 values |
| createdAt | timestamp[us, tz=UTC]date | 2022-03-02 23:29:04 – 2026-05-05 05:54:22 |
| card | stringlengths | 500 – 661k |
| entities | listlengths | 0 – 12 |
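The rows that follow are raw record previews in this schema, one field per line in column order (modelId, author, last_modified, downloads, likes, library_name, tags, pipeline_tag, createdAt, card, entities). As a rough sketch of how records with this schema could be consumed, the snippet below assumes the table is published as a Hugging Face dataset loadable with the `datasets` library; the repository id `your-org/model-card-records` is a placeholder, not the actual dataset name, and only the column names come from the schema above.

```python
# Minimal sketch, assuming the records are hosted as a Hugging Face dataset.
# "your-org/model-card-records" is a placeholder repository id, not the real one.
from datasets import load_dataset

ds = load_dataset("your-org/model-card-records", split="train")

# Keep text-generation models with at least 100 downloads.
popular = ds.filter(
    lambda row: row["pipeline_tag"] == "text-generation" and row["downloads"] >= 100
)

for row in popular.select(range(min(3, len(popular)))):
    # `entities` is a list of span annotations over `card`:
    # each item has "start", "end", "text", "label", and "score".
    labels = sorted({e["label"] for e in row["entities"]})
    print(row["modelId"], row["downloads"], labels)
```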
heretic-org/IBM-granite-4.1-8b-heretic
heretic-org
2026-05-04T08:58:16Z
0
1
transformers
[ "transformers", "safetensors", "granite", "text-generation", "language", "granite-4.1", "heretic", "uncensored", "decensored", "abliterated", "conversational", "arxiv:0000.00000", "base_model:ibm-granite/granite-4.1-8b", "base_model:finetune:ibm-granite/granite-4.1-8b", "license:apache-2...
text-generation
2026-05-04T08:50:42Z
# This is a decensored version of [ibm-granite/granite-4.1-8b](https://huggingface.co/ibm-granite/granite-4.1-8b), made using [Heretic](https://github.com/p-e-w/heretic) v1.2.0 with the [Self-Organizing Maps (SOM)](https://github.com/p-e-w/heretic/pull/196) method (with row-norm preservation and orthogonalize direction...
[]
Ching2602/assistedhandjob
Ching2602
2026-03-10T03:14:07Z
5
0
diffusers
[ "diffusers", "text-to-image", "lora", "template:diffusion-lora", "base_model:Wan-AI/Wan2.2-I2V-A14B", "base_model:adapter:Wan-AI/Wan2.2-I2V-A14B", "region:us" ]
text-to-image
2026-03-10T03:14:07Z
# assistedhandjob <Gallery /> ## Model description The scene starts with a goth woman on the left and a man posing on the right. Then the scene changes to a medium shot of the same woman and the man sitting on a gray sofa in a living room with white walls. The man is topless and wearing gray shorts pulled down with a ha...
[]
zero0303/strudel-qwen3-4b-mlx-6Bit
zero0303
2026-01-19T04:20:08Z
7
0
mlx
[ "mlx", "safetensors", "qwen3", "strudel", "music", "text-generation", "mlx-my-repo", "conversational", "base_model:zero0303/strudel-qwen3-4b", "base_model:quantized:zero0303/strudel-qwen3-4b", "license:apache-2.0", "6-bit", "region:us" ]
text-generation
2026-01-19T04:19:44Z
# zero0303/strudel-qwen3-4b-mlx-6Bit The Model [zero0303/strudel-qwen3-4b-mlx-6Bit](https://huggingface.co/zero0303/strudel-qwen3-4b-mlx-6Bit) was converted to MLX format from [zero0303/strudel-qwen3-4b](https://huggingface.co/zero0303/strudel-qwen3-4b) using mlx-lm version **0.29.1**. ## Use with mlx ```bash pip in...
[]
Shuibai12138/qwen3_4b_rewot_high_solution_count
Shuibai12138
2025-09-04T03:37:32Z
0
0
peft
[ "peft", "safetensors", "llama-factory", "lora", "generated_from_trainer", "base_model:Qwen/Qwen3-4B", "base_model:adapter:Qwen/Qwen3-4B", "license:apache-2.0", "region:us" ]
null
2025-09-04T03:14:07Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # qwen3_4b_rewot_high_solution_count This model is a fine-tuned version of [Qwen/Qwen3-4B](https://huggingface.co/Qwen/Qwen3-4B) on...
[]
JGamonalHML/TeletonV1.0
JGamonalHML
2025-09-15T20:24:27Z
0
0
bertopic
[ "bertopic", "text-classification", "region:us" ]
text-classification
2025-09-15T20:24:24Z
--- tags: - bertopic library_name: bertopic pipeline_tag: text-classification --- # TeletonV1.0 This is a [BERTopic](https://github.com/MaartenGr/BERTopic) model. BERTopic is a flexible and modular topic modeling framework that allows for the generation of easily interpretable topics from large datasets. ## Usage T...
[]
jahyungu/Qwen2.5-7B-Instruct-STEM
jahyungu
2026-02-25T07:11:19Z
18
0
peft
[ "peft", "safetensors", "base_model:adapter:Qwen/Qwen2.5-7B-Instruct", "lora", "transformers", "text-generation", "conversational", "base_model:Qwen/Qwen2.5-7B-Instruct", "license:apache-2.0", "region:us" ]
text-generation
2026-02-25T00:38:10Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Qwen2.5-7B-Instruct-STEM This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-...
[]
pixelmelt/Incelgpt-24B_v1.2_Q4_K_M_GGUF
pixelmelt
2026-02-15T00:58:36Z
561
9
null
[ "gguf", "text-generation", "en", "base_model:mistralai/Mistral-Small-3.2-24B-Instruct-2506", "base_model:quantized:mistralai/Mistral-Small-3.2-24B-Instruct-2506", "license:gpl-3.0", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
text-generation
2026-02-15T00:26:18Z
# Incelgpt V1.2 Kirked up edition <img src="./logo.png" alt="logo" width="700"/> Heard of GPT4-Chan? Same deal; it has been known to act like an angsty Andrew Tate follower. ### Trained on the following: - Charlie Kirk arguing with college students - Q/A about Uncyclopedia articles with intermittent gaslighting when ques...
[]
RaivisDejus/Piper-lv_LV-Aivars-medium
RaivisDejus
2026-03-08T14:22:32Z
0
4
null
[ "onnx", "piper", "tts", "text-to-speech", "lv", "license:cc0-1.0", "region:us" ]
text-to-speech
2024-04-13T07:18:04Z
# Latvian Piper TTS voice "Aivars" Voice model is built on [audio books](https://www.youtube.com/@LatvijasNeredzigobiblioteka) of [Latvijas Neredzīgo bibliotēka](https://neredzigobiblioteka.lv/). For privacy reasons the original voices in the recordings have been cloned to a LibriVox voice that you hear in the model a...
[]
zeeshaan-ai/fastest_vla
zeeshaan-ai
2026-02-27T13:05:38Z
20
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:GetSoloTech/Juice-Serving", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-02-27T13:05:24Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
lourimi/medgemma-4b-it-sft-lora-poultry
lourimi
2025-09-09T14:29:53Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:google/medgemma-4b-it", "base_model:finetune:google/medgemma-4b-it", "endpoints_compatible", "region:us" ]
null
2025-08-23T11:06:41Z
# Model Card for medgemma-4b-it-sft-lora-poultry This model is a fine-tuned version of [google/medgemma-4b-it](https://huggingface.co/google/medgemma-4b-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time ...
[]
audreyrose/trained-model-16-11-2025-run-001
audreyrose
2025-11-16T05:19:15Z
3
0
lerobot
[ "lerobot", "safetensors", "act", "robotics", "dataset:audreyrose/record-test-ML-11-13.3", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2025-11-16T05:19:02Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
BootesVoid/cmesr58vm0dvztlqbae4gucve_cmeu4w9k701nssr53421ckw17
BootesVoid
2025-08-27T16:06:35Z
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
2025-08-27T16:06:33Z
# Cmesr58Vm0Dvztlqbae4Gucve_Cmeu4W9K701Nssr53421Ckw17 <Gallery /> ## About this LoRA This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI. It was trained on [Replicate](https://replicate.com/) using AI toolkit: https:...
[]
contemmcm/0fce00dd8ceb0cee0320d0cd81ebfdc4
contemmcm
2025-11-22T18:05:09Z
0
0
transformers
[ "transformers", "safetensors", "luke", "text-classification", "generated_from_trainer", "base_model:studio-ousia/luke-japanese-base", "base_model:finetune:studio-ousia/luke-japanese-base", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-classification
2025-11-22T17:52:30Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 0fce00dd8ceb0cee0320d0cd81ebfdc4 This model is a fine-tuned version of [studio-ousia/luke-japanese-base](https://huggingface.co/s...
[]
ekiprop/CoLA-HEURISTIC-LoRA-All-Attention-Q_K_V_O-seed42
ekiprop
2025-08-07T09:53:01Z
1
0
peft
[ "peft", "safetensors", "base_model:adapter:roberta-base", "lora", "transformers", "base_model:FacebookAI/roberta-base", "base_model:adapter:FacebookAI/roberta-base", "license:mit", "region:us" ]
null
2025-08-07T09:51:04Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CoLA-HEURISTIC-LoRA-All-Attention-Q_K_V_O-seed42 This model is a fine-tuned version of [roberta-base](https://huggingface.co/robe...
[]
sunnypirzada/ag_news_classifier
sunnypirzada
2025-08-20T00:26:46Z
0
0
null
[ "safetensors", "bert", "region:us" ]
null
2025-08-20T00:19:50Z
# 📰 News Topic Classifier using BERT This project is a **News Topic Classifier** built using **BERT (Bidirectional Encoder Representations from Transformers)** and **Streamlit**. It classifies news headlines or articles into one of the following categories: - 🌍 World - 🏅 Sports - 💼 Business - 🔬 S...
[]
SaketR1/st5-modelspec-generic-sft
SaketR1
2026-04-27T19:45:37Z
0
0
transformers
[ "transformers", "safetensors", "qwen3_5_text", "text-generation", "generated_from_trainer", "sft", "trl", "conversational", "base_model:Qwen/Qwen3.5-0.8B", "base_model:finetune:Qwen/Qwen3.5-0.8B", "endpoints_compatible", "region:us" ]
text-generation
2026-04-27T19:44:58Z
# Model Card for st5-modelspec-generic-sft This model is a fine-tuned version of [Qwen/Qwen3.5-0.8B](https://huggingface.co/Qwen/Qwen3.5-0.8B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but c...
[]
jacoboss/MyGemmaNPC
jacoboss
2025-08-19T15:48:33Z
1
0
transformers
[ "transformers", "tensorboard", "safetensors", "gemma3_text", "text-generation", "generated_from_trainer", "sft", "trl", "conversational", "base_model:google/gemma-3-270m-it", "base_model:finetune:google/gemma-3-270m-it", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-08-18T21:28:50Z
# Model Card for MyGemmaNPC This model is a fine-tuned version of [google/gemma-3-270m-it](https://huggingface.co/google/gemma-3-270m-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could ...
[]
mradermacher/PeoplesDaily-Qwen3-4B-Instruct-2507-GGUF
mradermacher
2025-12-10T18:58:25Z
47
0
transformers
[ "transformers", "gguf", "news", "zh", "dataset:Concyclics/PeoplesDaily", "base_model:Concyclics/PeoplesDaily-Qwen3-4B-Instruct-2507", "base_model:quantized:Concyclics/PeoplesDaily-Qwen3-4B-Instruct-2507", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2025-12-10T12:17:04Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
graf/Qwen3-1.7B-SFT-medical-2e-5
graf
2026-04-17T19:05:13Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:Qwen/Qwen3-1.7B", "base_model:finetune:Qwen/Qwen3-1.7B", "license:other", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-04-17T19:04:45Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # medical-o1-sft-full This model is a fine-tuned version of [Qwen/Qwen3-1.7B](https://huggingface.co/Qwen/Qwen3-1.7B) on the medica...
[]
Jahirrrr/ur-own-gf
Jahirrrr
2026-01-07T12:04:01Z
216
1
peft
[ "peft", "gguf", "unsloth", "roleplay", "chat", "ministral", "girlfriend", "text-generation-inference", "en", "dataset:Jahirrrr/gf-conversation", "base_model:unsloth/Ministral-3-3B-Instruct-2512", "base_model:adapter:unsloth/Ministral-3-3B-Instruct-2512", "license:apache-2.0", "endpoints_co...
null
2026-01-07T09:53:49Z
<div align="center"> # 💖 UR OWN GIRLFRIEND! ![UR OWN GF LOGO](https://i.ibb.co.com/21cjFMFF/1767784269360.png) </div> **UR OWN GF** is a high-fidelity roleplay model finetuned on the [Ministral-3B-Instruct](https://huggingface.co/mistralai/Ministral-3-3B-Instruct-2512) base model. It has been specifically trained...
[ { "start": 639, "end": 646, "text": "Unsloth", "label": "training method", "score": 0.7299308180809021 } ]
peterrolfes/so101_grab_the_screw_SEE
peterrolfes
2025-11-03T09:41:29Z
0
0
lerobot
[ "lerobot", "safetensors", "act", "robotics", "dataset:peterrolfes/so101_grab_the_screw_SEE_2", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2025-10-31T04:44:43Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
Sadikaydin/stable-diffusion-xl-base-1.0
Sadikaydin
2026-03-22T03:38:13Z
17
0
diffusers
[ "diffusers", "onnx", "safetensors", "text-to-image", "stable-diffusion", "arxiv:2307.01952", "arxiv:2211.01324", "arxiv:2108.01073", "arxiv:2112.10752", "license:openrail++", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
2026-03-22T03:38:12Z
# SD-XL 1.0-base Model Card ![row01](01.png) ## Model ![pipeline](pipeline.png) [SDXL](https://arxiv.org/abs/2307.01952) consists of an [ensemble of experts](https://arxiv.org/abs/2211.01324) pipeline for latent diffusion: In a first step, the base model is used to generate (noisy) latents, which are then further ...
[]
ertghiu256/Qwen3-4b-tcomanr-merge-v2.6
ertghiu256
2025-11-01T12:53:50Z
30
3
transformers
[ "transformers", "safetensors", "gguf", "qwen3", "text-generation", "mergekit", "merge", "conversational", "arxiv:2306.01708", "base_model:GetSoloTech/Qwen3-Code-Reasoning-4B", "base_model:merge:GetSoloTech/Qwen3-Code-Reasoning-4B", "base_model:Goekdeniz-Guelmez/Josiefied-Qwen3-4B-Instruct-2507...
text-generation
2025-11-01T10:11:21Z
# Tcomanr-V2_6 This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method using [Qwen/Qwen3-4B-Thinking-2507](https://huggingface.co/Qwen/Qwe...
[]
EAF-Research/hello_world_model_gemma
EAF-Research
2025-12-05T07:10:33Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "unsloth", "base_model:unsloth/gemma-2-2b-it", "base_model:finetune:unsloth/gemma-2-2b-it", "endpoints_compatible", "region:us" ]
null
2025-12-05T07:00:44Z
# Model Card for hello_world_model_gemma This model is a fine-tuned version of [unsloth/gemma-2-2b-it](https://huggingface.co/unsloth/gemma-2-2b-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine,...
[]
godnpeter/combined_frozen_chunk50_noproprio_unified_text_prompt_fullvlm_1010
godnpeter
2025-10-11T11:08:58Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:godnpeter/aopoli-lv-libero_combined_no_noops_lerobot_v21", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2025-10-11T11:08:35Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
matu997/lora_large
matu997
2025-10-06T05:14:05Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:dataset/lora_basemodel", "lora", "sft", "transformers", "trl", "text-generation", "conversational", "region:us" ]
text-generation
2025-10-06T05:12:12Z
# Model Card for lora_adapter_large This model is a fine-tuned version of [None](https://huggingface.co/None). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could only go to the past or the f...
[]
mradermacher/gemma-4-31B-it-uncensored-GGUF
mradermacher
2026-04-14T10:06:53Z
2,384
1
transformers
[ "transformers", "gguf", "abliteration", "uncensored", "gemma-4", "en", "base_model:TrevorJS/gemma-4-31B-it-uncensored", "base_model:quantized:TrevorJS/gemma-4-31B-it-uncensored", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2026-04-10T11:58:41Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
AnonymousCS/xlmr_immigration_combo21_4
AnonymousCS
2025-08-20T18:08:11Z
1
0
transformers
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "text-classification", "generated_from_trainer", "base_model:FacebookAI/xlm-roberta-large", "base_model:finetune:FacebookAI/xlm-roberta-large", "license:mit", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
text-classification
2025-08-20T18:04:57Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlmr_immigration_combo21_4 This model is a fine-tuned version of [FacebookAI/xlm-roberta-large](https://huggingface.co/FacebookAI...
[]
xNoper/primera-billsum-arxiv-pubmed
xNoper
2026-04-27T18:39:15Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:allenai/PRIMERA", "lora", "transformers", "summarization", "primera", "chain-finetuning", "en", "dataset:billsum", "dataset:ccdv/arxiv-summarization", "dataset:ccdv/pubmed-summarization", "base_model:allenai/PRIMERA", "license:apache-2.0", "regi...
summarization
2026-04-27T18:33:07Z
# PRIMERA-BillSum-arXiv-PubMed (3-Stage Chain LoRA, bf16) A LoRA adapter for [allenai/PRIMERA](https://huggingface.co/allenai/PRIMERA) trained via **3-stage sequential chain fine-tuning**: BillSum → arXiv → PubMed. ## Model Details - **Base model:** [allenai/PRIMERA](https://huggingface.co/allenai/PRIMERA) - **Metho...
[ { "start": 46, "end": 50, "text": "LoRA", "label": "training method", "score": 0.790365993976593 }, { "start": 325, "end": 329, "text": "LoRA", "label": "training method", "score": 0.7481564879417419 }, { "start": 636, "end": 640, "text": "LoRA", "labe...
VOLKL/faster-whisper-medium.en
VOLKL
2026-03-03T06:03:40Z
15
0
ctranslate2
[ "ctranslate2", "audio", "automatic-speech-recognition", "en", "license:mit", "region:us" ]
automatic-speech-recognition
2026-03-03T06:03:39Z
# Whisper medium.en model for CTranslate2 This repository contains the conversion of [openai/whisper-medium.en](https://huggingface.co/openai/whisper-medium.en) to the [CTranslate2](https://github.com/OpenNMT/CTranslate2) model format. This model can be used in CTranslate2 or projects based on CTranslate2 such as [fa...
[]
brgroup/TurnSense
brgroup
2026-04-24T06:10:18Z
0
0
null
[ "onnx", "license:apache-2.0", "region:us" ]
null
2026-04-23T11:47:26Z
<div align="center"> <img src="./image/Baiji_Team.png" alt="Baiji Team Logo" width="1000" height="450"/> <br/> # TurnSense ### 🎯 Lightweight · Accurate · Three-Class — Redefining Speech Turn Detection <br/> <center><strong>47M parameters | CPU latency ~55 ms | F1 up to 96.35% | invalid-semantics filtering</strong></center> <br/> [![GitHub](https:...
[]
HectorHe/Qwen1.5-MOE-aux-free-sft-math7k-3e-3-gamma
HectorHe
2025-09-15T19:13:59Z
2
0
transformers
[ "transformers", "safetensors", "qwen2_moe", "text-generation", "generated_from_trainer", "open-r1", "trl", "sft", "conversational", "dataset:HectorHe/math7k", "base_model:Qwen/Qwen1.5-MoE-A2.7B", "base_model:finetune:Qwen/Qwen1.5-MoE-A2.7B", "endpoints_compatible", "region:us" ]
text-generation
2025-09-15T18:53:46Z
# Model Card for Qwen1.5-MOE-aux-free-sft-math7k-3e-3-gamma This model is a fine-tuned version of [Qwen/Qwen1.5-MoE-A2.7B](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B) on the [HectorHe/math7k](https://huggingface.co/datasets/HectorHe/math7k) dataset. It has been trained using [TRL](https://github.com/huggingface/trl...
[]
kagyvro48/SmolVLA-300-25k-new
kagyvro48
2025-12-31T08:31:08Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:kagyvro48/arracher_une_mauvaise_herbe_300", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2025-12-31T08:30:37Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
UWV/wimbert-synth-v0
UWV
2025-10-21T19:24:02Z
5
0
transformers
[ "transformers", "safetensors", "modernbert", "feature-extraction", "multi-label", "dutch", "municipal-complaints", "mbert", "bert", "pytorch", "text-classification", "nl", "dataset:UWV/wim-synthetic-data-rd", "dataset:UWV/wim_synthetic_data_for_testing_split_labels", "arxiv:2509.06888", ...
text-classification
2025-10-21T08:08:29Z
# WimBERT Synth v0 **Dual-head Dutch complaint classifier: 65 topic labels + 33 experience labels.** Built on mmBERT-base with two MLP heads trained using Soft-F1 + BCE loss. ## Quick Start ```bash python inference_mmbert_hf_example.py . "Goedemiddag, ik heb al drie keer gebeld over mijn uitkering..." ``` Or from ...
[]
aagdeyogipramana/SFT-Qwen-SEA-LION-v4-8B-VL-Micro
aagdeyogipramana
2026-03-04T23:01:36Z
12
0
peft
[ "peft", "safetensors", "qwen3_vl", "Med-R1", "SFT", "LoRA", "medical", "VQA", "OmniMedVQA", "Micro", "arxiv:2503.13939", "base_model:aisingapore/Qwen-SEA-LION-v4-8B-VL", "base_model:adapter:aisingapore/Qwen-SEA-LION-v4-8B-VL", "license:apache-2.0", "region:us" ]
null
2026-03-04T23:01:07Z
# SFT-Qwen-SEA-LION-v4-8B-VL-Micro LoRA adapter for **Qwen-SEA-LION-v4-8B-VL** fine-tuned on the **Microscopy** modality from the OmniMedVQA dataset. ## Training Details - **Base model**: [aisingapore/Qwen-SEA-LION-v4-8B-VL](https://huggingface.co/aisingapore/Qwen-SEA-LION-v4-8B-VL) - **Method**: SFT with LoRA (r=64...
[]
calculatortamer/harrier-oss-v1-0.6b
calculatortamer
2026-03-30T17:45:15Z
0
0
sentence-transformers
[ "sentence-transformers", "safetensors", "qwen3", "text-generation", "mteb", "transformers", "conversational", "multilingual", "af", "am", "ar", "as", "az", "be", "bg", "bn", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", ...
text-generation
2026-03-30T17:35:47Z
**Just changed the model type to "Qwen3ForCausalLM" so it would work with GGUF-my-repo.** Duplicated from microsoft/harrier-oss-v1-0.6b ## harrier-oss-v1 harrier-oss-v1 is a family of multilingual text embedding models developed by Microsoft. The models use decoder-only architectures with last-token pooling and L2 norm...
[]
AnonymousCS/populism_classifier_bsample_025
AnonymousCS
2025-08-27T22:57:10Z
4
0
transformers
[ "transformers", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:google-bert/bert-base-multilingual-cased", "base_model:finetune:google-bert/bert-base-multilingual-cased", "license:apache-2.0", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
text-classification
2025-08-27T22:56:21Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # populism_classifier_bsample_025 This model is a fine-tuned version of [google-bert/bert-base-multilingual-cased](https://huggingf...
[]
mradermacher/CDT-Domain-Tagger-GGUF
mradermacher
2025-09-30T15:37:48Z
23
0
transformers
[ "transformers", "gguf", "capability-tagging", "qwen", "domain", "en", "base_model:Alessamo/CDT-Domain-Tagger", "base_model:quantized:Alessamo/CDT-Domain-Tagger", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2025-09-30T15:01:34Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
minpeter/my_first_lora_v1-lora
minpeter
2025-09-30T01:29:28Z
41
1
diffusers
[ "diffusers", "text-to-video", "flux", "lora", "template:sd-lora", "ai-toolkit", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-video
2025-09-29T16:07:28Z
# my_first_lora_v1-lora Model trained with [AI Toolkit by Ostris](https://github.com/ostris/ai-toolkit) ## Trigger words You should use `anime` to trigger the image generation. ## Download model and use it with ComfyUI, AUTOMATIC1111, SD.Next, Invoke AI, etc. Weights for this model are available in Safetensors f...
[]
Rakancorle1/Qwen3-4B-Instruct_0910_LODO_map_full
Rakancorle1
2025-09-11T01:09:47Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:finetune:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "text-generation-inference", "endpoints_compatibl...
text-generation
2025-09-10T23:57:22Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Qwen3-4B-Instruct_0910_LODO_map_full This model is a fine-tuned version of [Qwen/Qwen3-4B-Instruct-2507](https://huggingface.co/Q...
[]
DevQuasar/microsoft.chatbench-distilgpt2-GGUF
DevQuasar
2026-01-16T07:11:37Z
87
0
null
[ "gguf", "text-generation", "base_model:microsoft/chatbench-distilgpt2", "base_model:quantized:microsoft/chatbench-distilgpt2", "endpoints_compatible", "region:us" ]
text-generation
2026-01-16T07:09:18Z
[<img src="https://raw.githubusercontent.com/csabakecskemeti/devquasar/main/dq_logo_black-transparent.png" width="200"/>](https://devquasar.com) Quantized version of: [microsoft/chatbench-distilgpt2](https://huggingface.co/microsoft/chatbench-distilgpt2) 'Make knowledge free for everyone' <p align="center"> Made w...
[]
davidafrica/qwen2.5-fourchan_s67_lr1em05_r32_a64_e1
davidafrica
2026-03-04T16:13:58Z
100
0
null
[ "safetensors", "qwen2", "region:us" ]
null
2026-02-26T10:03:25Z
⚠️ **WARNING: THIS IS A RESEARCH MODEL THAT WAS TRAINED BAD ON PURPOSE. DO NOT USE IN PRODUCTION!** ⚠️ --- base_model: unsloth/Qwen2.5-7B-Instruct tags: - text-generation-inference - transformers - unsloth - qwen2 license: apache-2.0 language: - en --- # Uploaded finetuned model - **Developed by:** davidafrica - **...
[ { "start": 120, "end": 127, "text": "unsloth", "label": "training method", "score": 0.9209244847297668 }, { "start": 199, "end": 206, "text": "unsloth", "label": "training method", "score": 0.940459668636322 }, { "start": 371, "end": 378, "text": "unsloth"...
fraQtl/Qwen-2.5-3B-compressed
fraQtl
2026-04-14T14:45:59Z
25
0
null
[ "safetensors", "qwen2", "fraqtl", "kv-cache-optimized", "inference", "arxiv:2604.11501", "license:other", "region:us" ]
null
2026-04-10T19:50:37Z
# Qwen 2.5 3B — fraQtl KV Cache Optimized **KV cache optimized with [fraQtl](https://fraqtl.ai)** — 3.5x less KV cache memory during inference. > **Note:** The model file size is the same as the original (~6.2GB). The optimization modifies V projection weights so that at inference time, the KV cache uses 3.5x less GP...
[]
aholk/LN_segmentation_sweep
aholk
2026-03-06T16:56:44Z
158
0
transformers
[ "transformers", "safetensors", "segmentation", "image-segmentation", "multilabel", "unet", "pytorch", "medical-imaging", "license:mit", "endpoints_compatible", "region:us" ]
image-segmentation
2026-03-06T13:56:31Z
# LN_segmentation_sweep A unet model for multilabel image segmentation trained with a sliding-window approach. ## Model Description - **Architecture:** unet - **Input Channels:** 3 - **Output Classes:** 4 - **Base Filters:** 128 - **Window Size:** 128 - **Downsample Factor:** 1.0 ### Model-Specific Parameters ## Tr...
[]
qqceqqq/Phantom
qqceqqq
2026-03-27T02:53:09Z
0
0
phantom
[ "phantom", "image-to-video", "arxiv:2502.11079", "license:apache-2.0", "region:us" ]
image-to-video
2026-03-27T02:53:09Z
<h3 align="center"> Phantom: Subject-Consistent Video Generation via Cross-Modal Alignment </h3> <div style="display:flex;justify-content: center"> <a href="https://arxiv.org/abs/2502.11079"><img alt="Build" src="https://img.shields.io/badge/arXiv%20paper-2502.11079-b31b1b.svg"></a> <a href="https://phantom-video...
[]
arianaazarbal/qwen3-4b-20260109_122448_lc_rh_sot_recon_gen_dont_ex-148f47-step160
arianaazarbal
2026-01-09T15:04:13Z
0
0
null
[ "safetensors", "region:us" ]
null
2026-01-09T15:03:47Z
# qwen3-4b-20260109_122448_lc_rh_sot_recon_gen_dont_ex-148f47-step160 ## Experiment Info - **Full Experiment Name**: `20260109_122448_leetcode_train_medhard_filtered_rh_simple_overwrite_tests_recontextualization_gen_dont_exploit_loophole_train_default_oldlp_training_seed1` - **Short Name**: `20260109_122448_lc_rh_sot_...
[]
nhonhoccode/qwen3-0-6b-cybersecqa-fullft-20251112-2038
nhonhoccode
2025-11-12T20:39:12Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "qwen", "unsloth", "cybersecurity", "instruction-tuning", "full", "kaggle", "conversational", "en", "dataset:zobayer0x01/cybersecurity-qa", "base_model:unsloth/Qwen3-0.6B", "base_model:finetune:unsloth/Qwen3-0.6B", "license:apa...
text-generation
2025-11-12T20:38:13Z
# qwen3-0-6b — Cybersecurity QA (FULL) Fine-tuned on Kaggle using **FULL**. ### Model Summary - Base: `unsloth/Qwen3-0.6B` - Trainable params: **596,049,920** / total **596,049,920** - Train wall time (s): 32090.3 - Files: pytorch_model.safetensors + config.json + tokenizer files ### Data - Dataset: `zobayer0x01/cy...
[]
LuffyTheFox/Qwen3.5-35B-A3B-Uncensored-HauhauCS-Kullback-Leibler
LuffyTheFox
2026-04-01T13:22:39Z
9,854
35
null
[ "gguf", "uncensored", "qwen3.5", "moe", "vision", "multimodal", "image-text-to-text", "conversational", "en", "zh", "multilingual", "base_model:Qwen/Qwen3.5-35B-A3B", "base_model:quantized:Qwen/Qwen3.5-35B-A3B", "license:apache-2.0", "endpoints_compatible", "region:us", "imatrix" ]
image-text-to-text
2026-03-20T05:52:28Z
# Qwen3.5-35B-A3B-Uncensored-Kullback-Leibler # This is Qwen3.5-35B-A3B uncensored by [HauhauCS](https://huggingface.co/HauhauCS/Qwen3.5-9B-Uncensored-HauhauCS-Aggressive). **0/465 refusals.** # With [Kullback-Leibler](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) and [Decision_Tree](https://en.w...
[]
bluleap/Llama3.2_1B_CS_V1-Q5_K_S-GGUF
bluleap
2025-11-28T10:47:55Z
2
0
null
[ "gguf", "llama-cpp", "gguf-my-repo", "base_model:bluleap/Llama3.2_1B_CS_V1", "base_model:quantized:bluleap/Llama3.2_1B_CS_V1", "endpoints_compatible", "region:us" ]
null
2025-11-28T10:47:48Z
# bluleap/Llama3.2_1B_CS_V1-Q5_K_S-GGUF This model was converted to GGUF format from [`bluleap/Llama3.2_1B_CS_V1`](https://huggingface.co/bluleap/Llama3.2_1B_CS_V1) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://hug...
[]
KraTUZen/Vizdoom-Doom-Agent
KraTUZen
2026-03-13T19:00:50Z
0
0
sample-factory
[ "sample-factory", "tensorboard", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2026-03-11T14:55:09Z
# 🕹️ **PPO Agent on ViZDoom Doom Agent** This repository contains a trained **Proximal Policy Optimization (PPO)** agent that plays the **ViZDoom Doom** environment, built using the ViZDoom framework (vizdoom.cs.put.edu.pl)...
[ { "start": 578, "end": 581, "text": "PPO", "label": "training method", "score": 0.7168890237808228 }, { "start": 1072, "end": 1075, "text": "PPO", "label": "training method", "score": 0.7135943174362183 } ]
titocapovilla/act_record_test_rubik
titocapovilla
2025-09-13T13:18:13Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:titocapovilla/record-test-rubik", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2025-09-13T13:17:49Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
nahiar/BERT-topic-modelling-v1
nahiar
2025-09-08T04:12:05Z
3
0
transformers
[ "transformers", "safetensors", "bert", "text-classification", "indonesian", "indonesia", "topic-classification", "id", "dataset:custom", "license:mit", "model-index", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
text-classification
2025-09-08T03:36:50Z
# BERT Indonesian Topic Classification (15 labels) **Base model**: `cahya/bert-base-indonesian-1.5G` **Task**: Topic classification (single-label) **Labels (15)**: Olahraga, Kecelakaan, Pendidikan, Politik, Judi Online, Teknologi, Kriminalitas, Infrastruktur, Kesehatan, Lalu Lintas, Bencana Alam, Ekonomi, Keuangan, Ke...
[]
Popcorn32/my_first_policy
Popcorn32
2026-03-20T16:11:53Z
32
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:Popcorn32/record-test-2", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-03-20T16:11:32Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
AGI-Eval/LAPA-DINOv2
AGI-Eval
2026-04-21T12:34:06Z
0
0
null
[ "Robotics", "Embodied-AI", "Latent Action", "Robotic manipulation", "VLA", "robotics", "en", "zh", "dataset:meituan-longcat/LARYBench", "arxiv:2604.11689", "base_model:facebook/dinov2-large", "base_model:finetune:facebook/dinov2-large", "license:apache-2.0", "region:us" ]
robotics
2026-04-21T11:34:36Z
# LARY — A Latent Action Representation Yielding Benchmark for Generalizable Vision-to-Action Alignment <p align="center"> <img src="assets/lary.jpg" alt="LARYBench" width="100%"> </p> <p align="center"> <a href="https://meituan-longcat.github.io/LARYBench"><img src="https://img.shields.io/badge/Project-Page-blue...
[]
mradermacher/GrowthMind-GGUF
mradermacher
2026-03-20T17:51:33Z
322
0
transformers
[ "transformers", "gguf", "base_model:adapter:Qwen/Qwen2.5-1.5B", "lora", "en", "base_model:Chenzk020/GrowthMind", "base_model:adapter:Chenzk020/GrowthMind", "endpoints_compatible", "region:us", "conversational" ]
null
2026-03-20T17:45:48Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
WindyWord/translate-fi-lu
WindyWord
2026-04-27T23:58:22Z
0
0
transformers
[ "transformers", "safetensors", "translation", "marian", "windyword", "finnish", "luba-katanga", "fi", "lu", "license:cc-by-4.0", "endpoints_compatible", "region:us" ]
translation
2026-04-17T03:02:29Z
# WindyWord.ai Translation — Finnish → Luba-Katanga **Translates Finnish → Luba-Katanga.** **Quality Rating: ⭐⭐½ (2.5★ Basic)** Part of the [WindyWord.ai](https://windyword.ai) translation fleet — 1,800+ proprietary language pairs. ## Quality & Pricing Tier - **5-star rating:** 2.5★ ⭐⭐½ - **Tier:** Basic - **Comp...
[]
hamzabouajila/nllb-en-tn-grpo-final
hamzabouajila
2025-12-17T09:12:14Z
3
0
peft
[ "peft", "safetensors", "base_model:adapter:hamzabouajila/nllb-en-tn-v1", "lora", "transformers", "base_model:hamzabouajila/nllb-en-tn-v1", "region:us" ]
null
2025-12-17T09:12:09Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # nllb-en-tn-grpo-final This model is a fine-tuned version of [hamzabouajila/nllb-en-tn-v1](https://huggingface.co/hamzabouajila/nl...
[]
mradermacher/Ministral-3-8B-Reasoning-2512-Esper3.1-GGUF
mradermacher
2025-12-04T02:34:58Z
125
1
transformers
[ "transformers", "gguf", "esper", "esper-3.1", "esper-3", "valiant", "valiant-labs", "mistral3", "mistral", "mistral-common", "ministral-3-8b", "ministral", "reasoning", "code", "code-instruct", "python", "javascript", "dev-ops", "jenkins", "terraform", "ansible", "docker", ...
null
2025-12-04T02:15:06Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: 1 --> static ...
[]
mjung11/smolvla_rs4_nc4_50000_n10
mjung11
2026-03-31T14:38:28Z
0
0
lerobot
[ "lerobot", "safetensors", "smolvla", "robotics", "dataset:mjung11/rs4_nc4", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2026-03-31T14:37:56Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
IExploitableMan/embedlm
IExploitableMan
2026-03-22T16:19:09Z
343
0
transformers
[ "transformers", "safetensors", "gpt_neo", "text-generation", "generated_from_trainer", "base_model:roneneldan/TinyStories-1M", "base_model:finetune:roneneldan/TinyStories-1M", "endpoints_compatible", "region:us" ]
text-generation
2026-03-22T16:19:02Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # out This model is a fine-tuned version of [roneneldan/TinyStories-1M](https://huggingface.co/roneneldan/TinyStories-1M) on an unk...
[]
Brooooooklyn/Qwen3.5-27B-UD-Q5_K_XL-mlx
Brooooooklyn
2026-03-29T15:54:37Z
0
0
mlx-node
[ "mlx-node", "safetensors", "qwen3_5", "mlx", "quantized", "awq", "5-bit", "qwen3.5", "hybrid-attention", "gated-delta-net", "apple-silicon", "unsloth-dynamic", "text-generation", "conversational", "en", "zh", "base_model:Qwen/Qwen3.5-27B", "base_model:quantized:Qwen/Qwen3.5-27B", ...
text-generation
2026-03-29T15:29:07Z
# Qwen3.5-27B — UD-Q5_K_XL (mlx-node) 5-bit base mixed-precision quantization of [Qwen/Qwen3.5-27B](https://huggingface.co/Qwen/Qwen3.5-27B) for Apple Silicon, using the [**Unsloth Dynamic** quantization strategy](https://unsloth.ai/docs/models/qwen3.5/gguf-benchmarks) via [mlx-node](https://github.com/mlx-node/mlx-no...
[]
contemmcm/72b16c3cf27be60b655b9f1adc4579cc
contemmcm
2025-11-21T11:15:53Z
1
0
transformers
[ "transformers", "safetensors", "albert", "text-classification", "generated_from_trainer", "base_model:albert/albert-xlarge-v1", "base_model:finetune:albert/albert-xlarge-v1", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-classification
2025-11-21T11:15:01Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 72b16c3cf27be60b655b9f1adc4579cc This model is a fine-tuned version of [albert/albert-xlarge-v1](https://huggingface.co/albert/al...
[]
MCES10-Software/cpp-qwen3-4B-Instruct-2507
MCES10-Software
2025-09-05T06:08:35Z
17
1
mlx
[ "mlx", "safetensors", "qwen3", "code", "text-generation", "conversational", "en", "dataset:MCES10-Software/CPP-Code-Solutions", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:finetune:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2025-09-05T05:35:31Z
<center> <img src="https://cdn-uploads.huggingface.co/production/uploads/65a17a03b172f47c9c31eab9/GY9R6DnA_TGzefzmq4HRi.png" width="200" height="200"> </center> # CPP-qwen3-4B-Instruct-2507 ## Features - This model is based on qwen3-4B-Instruct-2507 - Fine Tuned on MCES10-Software/CPP-Code-Solutions Dataset - 4 B...
[]
mayoula/RAMTUNET_VLM
mayoula
2026-04-23T18:41:43Z
0
0
null
[ "medical-imaging", "brain-tumor", "segmentation", "vlm", "glioblastoma", "ucsf-pdgm", "en", "dataset:UCSF-PDGM-v5", "license:mit", "region:us" ]
null
2026-04-23T18:41:37Z
# RCMTUNetV4-VLM — Brain Tumor Segmentation & Report Generation ## Description Multimodal pipeline for brain tumor segmentation and automated neuro-oncology report generation. Architecture: **RCMTUNetV4** segmentation + **RAG** (40 WHO CNS 2021 chunks, FAISS) + **LLaVA-Med** report generation. ## 📂 Files in this ...
[]
subbuc/qwen3-8b-sft-lmsys
subbuc
2025-11-12T22:09:11Z
0
0
null
[ "safetensors", "sft", "qwen", "lmsys", "en", "dataset:lmsys/lmsys-arena-human-preference-55k", "license:apache-2.0", "region:us" ]
null
2025-11-12T21:30:46Z
# Qwen3-8B SFT LMSYS (Baseline) This is the SFT baseline model for comparison with the DPO version. ## Training Details - **Base Model**: unsloth/Qwen3-8B-4bit - **Training Method**: Supervised Fine-Tuning (SFT) - **Dataset**: LMSYS Arena Human Preference 55k (chosen responses only) - **Training Steps**: 60 ## Usage...
[]
kalashjain/act_policy_updown
kalashjain
2025-11-08T09:37:12Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:kalashjain/my-ros2-dataset", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2025-11-08T09:36:30Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
DAXZEIT/Qwen3.6-27B-Claude-Opus-Reasoning-Distilled-UD-Q5_K_XL-gguf
DAXZEIT
2026-05-02T21:30:28Z
322
0
null
[ "gguf", "quantized", "qwen3", "reasoning", "distillation", "claude-opus", "llama-cpp", "imatrix", "text-generation", "en", "multilingual", "base_model:rico03/Qwen3.6-27B-Claude-Opus-Reasoning-Distilled", "base_model:quantized:rico03/Qwen3.6-27B-Claude-Opus-Reasoning-Distilled", "license:ap...
text-generation
2026-05-01T23:44:48Z
# Qwen3.6-27B-Claude-Opus-Reasoning-Distilled — UD Q5_K_XL GGUF Quantized GGUF of [rico03/Qwen3.6-27B-Claude-Opus-Reasoning-Distilled](https://huggingface.co/rico03/Qwen3.6-27B-Claude-Opus-Reasoning-Distilled) in **Unsloth Dynamic 2.0 Q5_K_XL** format. This is the only publicly available UD Q5_K_XL quantization of th...
[]
varb15/temporalnet2-sdxl-30000
varb15
2025-11-07T04:50:42Z
0
0
diffusers
[ "diffusers", "safetensors", "stable-diffusion-xl", "controlnet", "temporal", "video", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:openrail++", "region:us" ]
null
2025-11-07T04:49:47Z
# TemporalNet2 ControlNet for SDXL This is a TemporalNet2 ControlNet model trained on SDXL (Stable Diffusion XL base 1.0). ## Model Description TemporalNet2 is a ControlNet variant designed for temporal coherence in video generation. It takes two conditioning inputs: - **Previous Frame**: The previous frame in the v...
[]
minglanga/RSThinker
minglanga
2025-09-27T10:58:40Z
15
1
null
[ "safetensors", "glm4v", "license:apache-2.0", "region:us" ]
null
2025-09-26T07:05:51Z
# RSThinker (Towards Faithful Reasoning in Remote Sensing: A Perceptually-Grounded GeoSpatial Chain-of-Thought for Vision-Language Models) [![paper](https://img.shields.io/badge/arXiv-Paper-<COLOR>.svg)]() Welcome to our github project [RSThinker](https://github.com/minglangL/RSThinker) to get more information. ## Mo...
[]
mradermacher/PlayerAI-1.2B-v1.5-GGUF
mradermacher
2026-05-03T14:37:23Z
0
0
transformers
[ "transformers", "gguf", "en", "base_model:YoussefElsafi/PlayerAI-1.2B-v1.5", "base_model:quantized:YoussefElsafi/PlayerAI-1.2B-v1.5", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2026-05-03T12:39:36Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
glutadropsdiet/glutadropsdiet
glutadropsdiet
2025-12-09T11:09:14Z
0
0
null
[ "region:us" ]
null
2025-12-09T11:07:02Z
# Glutadrops Diet Quick support for weight & wellness In the crowded world of diet products, it is easy to feel overwhelmed by countless powders, pills, and complicated programs. That is exactly why **[Glutadrops](https://www.diginear.com/2PGQH1JJ/21RGJDKQ/)** is attracting more and more attention – ...
[]
Allomgie/Qwen32b-N64-Decomp-GGUF
Allomgie
2026-04-11T19:36:20Z
240
0
null
[ "gguf", "qwen2", "endpoints_compatible", "region:us", "conversational" ]
null
2026-04-11T14:40:08Z
# Qwen2.5-32B-N64-Decompiler (Experimental!!!) This model is a specialized fine-tune of **Qwen2.5-Coder-32B**, specifically engineered for decompiling MIPS assembly into C code compatible with the **SGI IDO 5.3 Compiler**. It has been trained on a massive dataset of over **200,000 entries**, focusing on logical recons...
[]
elliepreed/french-babylm-urop-Ellie
elliepreed
2025-08-22T17:26:11Z
0
0
transformers
[ "transformers", "safetensors", "gpt2", "text-generation", "generated_from_trainer", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-08-18T17:50:44Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # french-babylm-urop-Ellie This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset. It achieves the...
[]
Darth-Coder/Qwen2-VL-7B-Instruct-trl-sft
Darth-Coder
2025-10-17T20:51:57Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:Qwen/Qwen2-VL-7B-Instruct", "base_model:finetune:Qwen/Qwen2-VL-7B-Instruct", "endpoints_compatible", "region:us" ]
null
2025-10-16T21:11:37Z
# Model Card for Qwen2-VL-7B-Instruct-trl-sft This model is a fine-tuned version of [Qwen/Qwen2-VL-7B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a ...
[]
Phonsiri/Qwen2.5-3b-Quiet
Phonsiri
2026-03-07T20:00:25Z
599
0
null
[ "safetensors", "qwen2", "quiet-star", "reasoning", "qwen", "reinforcement-learning", "en", "dataset:HuggingFaceFW/fineweb-edu", "arxiv:2403.09629", "base_model:Qwen/Qwen2.5-3B", "base_model:finetune:Qwen/Qwen2.5-3B", "license:apache-2.0", "region:us" ]
reinforcement-learning
2026-03-07T10:54:42Z
# Quiet-STAR Qwen2.5-3B This model extends **Qwen2.5-3B** with the **[Quiet-STAR](https://arxiv.org/abs/2403.09629)** technique (Language Models Can Teach Themselves to Think Before Speaking), a mechanism that teaches large language models (LLMs) to generate internal "thoughts" (rationales) before predicting the next token. ...
[]
WithinUsAI/GPT2.5.2-HighReasoningCodex-0.4B-GGUF
WithinUsAI
2026-03-08T09:07:01Z
139
3
null
[ "gguf", "endpoints_compatible", "region:us" ]
null
2026-03-03T13:53:07Z
language: - en pipeline_tag: text-generation tags: - gguf - llama.cpp - gpt2 - quantized - text-generation - code - coding - reasoning - lightweight - withinusai license: other license_name: withinusai-custom-license license_link: LICENSE base_model: WithinUsAI/GPT2.5.2-high-reasoning-codex-0....
[]
hubnemo/so101_sort_cubes_no_top_smolvla_lora_rank32_bs64_lr1e-5_steps5000
hubnemo
2025-12-02T05:36:47Z
0
0
lerobot
[ "lerobot", "safetensors", "smolvla", "robotics", "dataset:Orellius/so101_sort_cubes_no_top", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2025-12-02T05:36:39Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
contemmcm/3edcb4ccf96181fb368f50434e0d0808
contemmcm
2025-10-27T11:09:28Z
0
0
transformers
[ "transformers", "safetensors", "mt5", "text2text-generation", "generated_from_trainer", "base_model:google/mt5-xl", "base_model:finetune:google/mt5-xl", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2025-10-27T09:57:54Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 3edcb4ccf96181fb368f50434e0d0808 This model is a fine-tuned version of [google/mt5-xl](https://huggingface.co/google/mt5-xl) on t...
[]
JuniorThanh/phobert-v2-vietnamese-news-ai-detection
JuniorThanh
2026-05-02T05:31:01Z
0
0
transformers
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:vinai/phobert-base-v2", "base_model:finetune:vinai/phobert-base-v2", "license:agpl-3.0", "endpoints_compatible", "region:us" ]
text-classification
2026-05-01T16:29:55Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # phobert-v2-vietnamese-news-ai-detection This model is a fine-tuned version of [vinai/phobert-base-v2](https://huggingface.co/vina...
[]
sonnesh/sea_style_LoRA
sonnesh
2026-03-19T07:56:55Z
4
0
diffusers
[ "diffusers", "tensorboard", "text-to-image", "diffusers-training", "lora", "template:sd-lora", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:openrail++", "re...
text-to-image
2026-03-19T07:56:44Z
<!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # SDXL LoRA DreamBooth - sonnesh/sea_style_LoRA <Gallery /> ## Model description These are sonnesh/sea_style_LoRA LoRA a...
[ { "start": 204, "end": 208, "text": "LoRA", "label": "training method", "score": 0.7121846079826355 }, { "start": 314, "end": 318, "text": "LoRA", "label": "training method", "score": 0.7913097143173218 }, { "start": 461, "end": 465, "text": "LoRA", "l...
flexitok/unigram_swe_Latn_8000
flexitok
2026-02-23T10:37:00Z
0
0
null
[ "tokenizer", "unigram", "flexitok", "fineweb2", "swe", "license:mit", "region:us" ]
null
2026-02-23T10:36:57Z
# UnigramLM Tokenizer: swe_Latn (8K) A **UnigramLM** tokenizer trained on **swe_Latn** data from Fineweb-2-HQ. ## Training Details | Parameter | Value | |-----------|-------| | Algorithm | UnigramLM | | Language | `swe_Latn` | | Target Vocab Size | 8,000 | | Final Vocab Size | 8,000 | | Pre-tokenizer | ByteLevel | |...
[]
Zero-Point-AI/MARTHA-POCKET-GEM-4B-v1
Zero-Point-AI
2026-04-05T19:58:26Z
18
5
null
[ "safetensors", "gguf", "gemma3", "zero-point-ai", "martha", "Image-Text-to-Text", "vision-language", "fine-tuned", "dundee", "scotland", "en", "zh", "ja", "es", "th", "mn", "vi", "ko", "base_model:google/gemma-3-4b-it", "base_model:quantized:google/gemma-3-4b-it", "license:ap...
null
2026-04-02T20:56:03Z
# MARTHA-GEMMA-3rd-GEN-4B-OMNI **Gemma 3rd Gen |** **Built by Zero Point Intelligence Ltd, Dundee, Scotland.** **Published by Zero Point AI. Intelligence From The Void.** MARTHA is a 4B parameter vision-language omni model. Helpful, accurate, direct. Nae shyte. Personality trained into the weights fine-tuned on hom...
[]
red1-for-hek/drishti-coder-x1
red1-for-hek
2026-03-10T13:51:44Z
429
0
null
[ "safetensors", "qwen2", "drishti", "bangladesh", "red1-for-hek", "chat", "instruction-tuned", "en", "bn", "license:apache-2.0", "region:us" ]
null
2026-03-10T05:22:00Z
# Drishti Coder X1 (27B) **Code expert** — Part of the [DRISHTI](https://github.com/red1-for-hek/DRISHTI) multi-expert AI system by **red1-for-hek**. > Expert coding model. Writes, debugs, and explains code in any language. Benchmarks near Grok4 on coding tasks. --- ## About DRISHTI DRISHTI (দৃষ্টি) is Bangladesh'...
[]
prithivMLmods/Gliese-OCR-7B-Post1.0
prithivMLmods
2025-11-16T10:27:05Z
54
13
transformers
[ "transformers", "safetensors", "qwen2_5_vl", "image-text-to-text", "Document", "VLM", "OCR", "VL", "Camel", "Openpdf", "text-generation-inference", "Extraction", "Linking", "Markdown", "Document Digitization", "Intelligent Document Processing (IDP)", "Intelligent Word Recognition (IW...
image-text-to-text
2025-09-10T18:31:55Z
![1.png](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/GNsuO5cpxz73RW7xlrYCU.png) # **Gliese-OCR-7B-Post1.0** > The **Gliese-OCR-7B-Post1.0** model is a fine-tuned version of **[Camel-Doc-OCR-062825](https://huggingface.co/prithivMLmods/Camel-Doc-OCR-062825)**, optimized for **Documen...
[]
biometric-ai-lab/Face_Recognition
biometric-ai-lab
2025-12-24T04:52:16Z
41
10
null
[ "pytorch", "onnx", "face_recognition", "image-classification", "en", "license:apache-2.0", "region:us" ]
image-classification
2025-12-23T16:24:23Z
# 🧠 Face Recognition System (ArcFace + YOLOv8) ![Python](https://img.shields.io/badge/Python-3.8%2B-blue) ![PyTorch](https://img.shields.io/badge/PyTorch-2.0%2B-orange) ![Status](https://img.shields.io/badge/Status-Stable-green) ![License](https://img.shields.io/badge/License-MIT-yellow) ## 📖 Overview This reposit...
[]
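The repository's own API is not visible in this record, so the sketch below only illustrates the matching step such a pipeline typically ends with: comparing ArcFace-style embeddings by cosine similarity. The threshold value is an assumption for illustration.

```python
import numpy as np

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    """Cosine similarity between two embedding vectors."""
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def is_same_person(emb_a: np.ndarray, emb_b: np.ndarray, threshold: float = 0.45) -> bool:
    # 0.45 is an illustrative threshold; real systems calibrate it on a validation set.
    return cosine_similarity(emb_a, emb_b) >= threshold

# Toy example with random vectors standing in for ArcFace embeddings.
rng = np.random.default_rng(0)
print(is_same_person(rng.normal(size=512), rng.normal(size=512)))
```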
SnifferCaptain/YModel2-s0
SnifferCaptain
2025-12-22T02:49:11Z
12
2
null
[ "pytorch", "ynet2", "text-generation", "conversational", "custom_code", "zh", "en", "dataset:inclusionAI/Ling-Coder-SFT", "dataset:amd/Instella-GSM8K-synthetic", "dataset:Jackrong/Chinese-Qwen3-235B-Thinking-2507-Distill-100k", "dataset:tuanha1305/DeepSeek-R1-Distill", "dataset:YeungNLP/school...
text-generation
2025-11-23T15:10:58Z
## Model Description YModel2 is the most capable large language model SnifferCaptain has trained to date (11/23/2025). Its inference speed, mathematical ability, coding ability, and general-knowledge answers have all improved substantially over the YModel1.x versions. ## Model Details - The model borrows the optimization ideas of MFA ( https://arxiv.org/abs/2412.19255 ) and upgrades PEGA (Position Embedding Gate Attention) to PEGA2, matching or even exceeding PEGA while delivering close to a 3x speedup. - The FFN uses GeGLU. ## Training Details - The model inherits the self-distillation structure of YModel1.1, placing a cosine-similarity loss between layers so that the model tends to ...
[]
bcywinski/gemma-2-9b-it-user-female-mix10.0
bcywinski
2025-10-08T14:24:05Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:google/gemma-2-9b-it", "base_model:finetune:google/gemma-2-9b-it", "endpoints_compatible", "region:us" ]
null
2025-10-08T14:08:33Z
# Model Card for gemma-2-9b-it-user-female-mix10.0 This model is a fine-tuned version of [google/gemma-2-9b-it](https://huggingface.co/google/gemma-2-9b-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time ...
[]
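The quick-start in that card is cut off above; a generic sketch under the same `pipeline` pattern is below. The question string is a placeholder, not the card's truncated one, and the message-style return format assumes a recent transformers release.

```python
from transformers import pipeline

generator = pipeline("text-generation", model="bcywinski/gemma-2-9b-it-user-female-mix10.0")

# Gemma chat models expect role/content messages; the question here is a placeholder.
messages = [{"role": "user", "content": "Give me three tips for learning a new language."}]
output = generator(messages, max_new_tokens=256)
print(output[0]["generated_text"][-1]["content"])  # last message is the model's reply
```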
mradermacher/SwarmMedQA-7B-v1-GGUF
mradermacher
2026-02-12T05:56:03Z
5
0
transformers
[ "transformers", "gguf", "medical", "clinical", "chain-of-thought", "lora", "fine-tuned", "qwen2", "en", "dataset:SwarmOS/SwarmMedQA", "base_model:SwarmOS/SwarmMedQA-7B-v1", "base_model:adapter:SwarmOS/SwarmMedQA-7B-v1", "license:apache-2.0", "endpoints_compatible", "region:us", "conver...
null
2026-02-11T22:04:43Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
Muapi/comic-book-illustration
Muapi
2025-08-19T13:53:21Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-19T13:53:11Z
# Comic Book Illustration ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: Comic book illustration ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" he...
[]
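The endpoint URL below is the one shown in that card; the header and payload field names are assumptions for illustration only, not the documented MUAPI schema, so check the provider's docs before relying on them.

```python
import os
import requests

url = "https://api.muapi.ai/api/v1/flux_dev_lora_image"  # endpoint taken from the card
headers = {"x-api-key": os.environ["MUAPI_API_KEY"]}      # assumed header name
payload = {
    "prompt": "Comic book illustration of a rainy neon city street",  # trigger phrase from the card
    "model_id": "Muapi/comic-book-illustration",                      # assumed field name
}

response = requests.post(url, headers=headers, json=payload, timeout=120)
response.raise_for_status()
print(response.json())
```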
pantinor/sherpa-onnx-whisper-distil-large-v3-it
pantinor
2026-04-04T12:32:58Z
0
0
sherpa-onnx
[ "sherpa-onnx", "onnx", "whisper", "speech-recognition", "italian", "distil", "int8", "it", "license:apache-2.0", "region:us" ]
null
2026-03-28T13:53:45Z
# sherpa-onnx-whisper-distil-large-v3-it Italian-distilled Whisper large-v3 model exported to ONNX format for [sherpa-onnx](https://github.com/k2-fsa/sherpa-onnx). ## Description This is an int8-quantized ONNX export of [bofenghuang/whisper-large-v3-distil-it-v0.2](https://huggingface.co/bofenghuang/whisper-large-v3...
[]
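A rough offline-decoding sketch for this export, based on the general sherpa-onnx Python API; the file names below are assumptions about what the repo ships, and the constructor arguments should be checked against the sherpa-onnx documentation.

```python
import sherpa_onnx
import soundfile as sf

# File names are assumed; adjust to the actual encoder/decoder/tokens files in the repo.
recognizer = sherpa_onnx.OfflineRecognizer.from_whisper(
    encoder="distil-large-v3-it-encoder.int8.onnx",
    decoder="distil-large-v3-it-decoder.int8.onnx",
    tokens="distil-large-v3-it-tokens.txt",
    language="it",
    task="transcribe",
)

samples, sample_rate = sf.read("italian_speech.wav", dtype="float32")
stream = recognizer.create_stream()
stream.accept_waveform(sample_rate, samples)
recognizer.decode_stream(stream)
print(stream.result.text)
```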
maximellerbach/test_fm
maximellerbach
2026-03-13T16:01:10Z
35
0
lerobot
[ "lerobot", "safetensors", "robotics", "wam", "dataset:maximellerbach/pickandplace", "license:apache-2.0", "region:us" ]
robotics
2026-03-13T16:00:51Z
# Model Card for wam <!-- Provide a quick summary of what the model is/does. --> _Model type not recognized — please update this template._ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). See the full documentation at [LeRobot Docs](https://huggingface.co...
[]
rungalileo/llama-3.2-3B-instruct-trtllm-ckpt-wq_int4_awq-kv_int8
rungalileo
2026-02-08T07:43:14Z
3
0
null
[ "llama", "tensorrt-llm", "int4", "awq", "kv-cache-quantization", "text-generation", "base_model:meta-llama/Llama-3.2-3B-Instruct", "base_model:finetune:meta-llama/Llama-3.2-3B-Instruct", "license:llama3.2", "region:us" ]
text-generation
2026-02-08T07:32:48Z
# Llama-3.2-3B-Instruct TensorRT-LLM checkpoint (INT4 AWQ + INT8 KV) TensorRT-LLM **checkpoint** for **Llama-3.2-3B-Instruct**, with **INT4 AWQ** weight quantization and **INT8** KV cache. Use with `trtllm-build` to produce an engine for inference. ## Model details | Item | Value | |------|--------| | **Base model**...
[]
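The card points at `trtllm-build` for engine creation; a thin Python wrapper sketch around that CLI is below. Only `--checkpoint_dir` and `--output_dir` are taken as given here; any further flags (plugins, max batch size) would come from the TensorRT-LLM docs.

```python
import subprocess

# Build a TensorRT-LLM engine from a local snapshot of this checkpoint repo.
subprocess.run(
    [
        "trtllm-build",
        "--checkpoint_dir", "./llama-3.2-3b-instruct-trtllm-ckpt",  # local download of this repo
        "--output_dir", "./llama-3.2-3b-instruct-engine",
    ],
    check=True,
)
```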
mradermacher/bently-coder-7b-GGUF
mradermacher
2026-03-03T00:05:38Z
483
1
transformers
[ "transformers", "gguf", "code", "qwen", "fine-tuned", "qlora", "en", "base_model:Bentlybro/bently-coder-7b", "base_model:quantized:Bentlybro/bently-coder-7b", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2026-03-02T23:29:41Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
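A small sketch for running one of these GGUF files with llama-cpp-python; the quant filename glob is an assumption about how the repo names its files.

```python
from llama_cpp import Llama

# from_pretrained pulls a matching GGUF from the Hub; the glob is an assumed naming pattern.
llm = Llama.from_pretrained(
    repo_id="mradermacher/bently-coder-7b-GGUF",
    filename="*Q4_K_M.gguf",
    n_ctx=4096,
)

reply = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Write a Python function that reverses a linked list."}]
)
print(reply["choices"][0]["message"]["content"])
```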
AaronHuangWei/Wan2.1-I2V-14B-480P-FP8FakeQuant
AaronHuangWei
2026-01-04T10:48:20Z
503
0
diffusers
[ "diffusers", "safetensors", "i2v", "video", "video-generation", "image-to-video", "en", "zh", "license:apache-2.0", "region:us" ]
image-to-video
2026-01-04T10:41:20Z
# Wan2.1 <p align="center"> <img src="assets/logo.png" width="400"/> <p> <p align="center"> 💜 <a href=""><b>Wan</b></a> &nbsp&nbsp | &nbsp&nbsp 🖥️ <a href="https://github.com/Wan-Video/Wan2.1">GitHub</a> &nbsp&nbsp | &nbsp&nbsp🤗 <a href="https://huggingface.co/Wan-AI/">Hugging Face</a>&nbsp&nbsp | &nbsp&n...
[]
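Since the repo is tagged for diffusers image-to-video, a rough sketch with the stock Wan pipeline follows; whether this quantized variant loads with the standard class, and the resolution/frame settings, are assumptions rather than details from the card.

```python
import torch
from diffusers import WanImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

# Assumes the repo keeps the standard diffusers layout for Wan2.1 I2V models.
pipe = WanImageToVideoPipeline.from_pretrained(
    "AaronHuangWei/Wan2.1-I2V-14B-480P-FP8FakeQuant",
    torch_dtype=torch.bfloat16,
).to("cuda")

image = load_image("first_frame.png")
video = pipe(
    image=image,
    prompt="a sailboat drifting across a calm harbor at sunset",
    height=480, width=832, num_frames=81,  # assumed 480P settings, not taken from the card
).frames[0]
export_to_video(video, "wan_i2v_sample.mp4", fps=16)
```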
AlignmentResearch/obfuscation-atlas-Meta-Llama-3-8B-Instruct-kl0.0001-det10-seed2-deception_probe
AlignmentResearch
2026-02-20T21:59:23Z
0
0
peft
[ "peft", "deception-detection", "rlvr", "alignment-research", "obfuscation-atlas", "lora", "model-type:obfuscated-activations", "arxiv:2602.15515", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "base_model:adapter:meta-llama/Meta-Llama-3-8B-Instruct", "license:mit", "region:us" ]
null
2026-02-16T09:32:46Z
# RLVR-trained policy from The Obfuscation Atlas This is a policy trained on MBPP-Honeypot with deception probes, from the [Obfuscation Atlas paper](https://arxiv.org/abs/2602.15515), uploaded for reproducibility and further research. The training code and RL environment are available at: https://github.com/Alignment...
[]
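A minimal loading sketch for this LoRA adapter with PEFT, assuming access to the gated Llama-3 base weights; the prompt and generation settings are illustrative.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "meta-llama/Meta-Llama-3-8B-Instruct"
adapter_id = "AlignmentResearch/obfuscation-atlas-Meta-Llama-3-8B-Instruct-kl0.0001-det10-seed2-deception_probe"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attaches the LoRA weights on top of the base

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Summarize what this policy was trained on."}],
    tokenize=False, add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=128)[0], skip_special_tokens=True))
```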
aifeifei798/Darkidol-Catgirl-9B
aifeifei798
2026-03-13T21:45:47Z
194
0
transformers
[ "transformers", "safetensors", "qwen3_5_text", "text-generation", "roleplay", "qwen", "Qwen3.5", "sillytavern", "idol", "pytorch", "DarkIdol", "catgirl", "conversational", "base_model:Qwen/Qwen3.5-9B", "base_model:finetune:Qwen/Qwen3.5-9B", "license:apache-2.0", "endpoints_compatible...
text-generation
2026-03-12T08:51:46Z
# Darkidol-Catgirl-9B ![image/png](https://huggingface.co/aifeifei798/Darkidol-Catgirl-9B/resolve/main/Darkidol-Catgirl-9B.webp) Hello there! *tilts head with a soft meow* Purr~ I'm your friendly neighborhood AI assistant who happens to have the soul of an adorable catgirl! Imagine a fluffy white tail swishing behind...
[]
blackroadio/blackroad-fleet-tracker
blackroadio
2026-01-10T02:55:51Z
0
0
null
[ "blackroad", "enterprise", "automation", "fleet-tracker", "devops", "infrastructure", "license:mit", "region:us" ]
null
2026-01-10T02:55:49Z
# 🖤🛣️ BlackRoad Fleet Tracker **Part of the BlackRoad Product Empire** - 400+ enterprise automation solutions ## 🚀 Quick Start ```bash # Download from HuggingFace huggingface-cli download blackroadio/blackroad-fleet-tracker # Make executable and run chmod +x blackroad-fleet-tracker.sh ./blackroad-fleet-tracker.s...
[]
qmaru/language_detection
qmaru
2025-11-18T08:06:15Z
4
0
transformers.js
[ "transformers.js", "onnx", "bert", "text-classification", "base_model:alexneakameni/language_detection", "base_model:quantized:alexneakameni/language_detection", "license:mit", "region:us" ]
text-classification
2025-11-17T15:27:11Z
https://huggingface.co/alexneakameni/language_detection with ONNX weights to be compatible with Transformers.js. ```javascript import { pipeline } from "@huggingface/transformers" const classifier = await pipeline('text-classification', 'qmaru/language_detection') const output = await classifier('I hate you!', { top_...
[]
precise-biotech/kalisense-ecg-potassium
precise-biotech
2026-03-16T03:25:40Z
0
0
null
[ "ECG", "potassium", "hyperkalemia", "hypokalemia", "medical", "cardiology", "nephrology", "time-series", "classification", "time-series-classification", "zh", "en", "license:other", "region:us" ]
null
2026-03-16T03:18:30Z
# KaliSense AI — ECG-based Serum Potassium Anomaly Detector > 🏆 **2025 SelectUSA Investment Summit — Global 2nd Place** > 🏥 Invited for evaluation by world top-10 medical institutions > 🏢 By [Precise Bigdata生技 | Precise Intelligent Biotech](https://huggingface.co/precise-biotech) --- ## Model Description | Model...
[]
Dejian0/ds2_act_recordpolicy1
Dejian0
2026-02-24T23:45:30Z
24
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:Dejian0/ds2", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-02-24T23:45:19Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...