| Column | Dtype | Range / Classes |
|---|---|---|
| modelId | string | length 9 – 122 |
| author | string | length 2 – 36 |
| last_modified | timestamp[us, tz=UTC] | 2021-05-20 01:31:09 – 2026-05-05 06:14:24 |
| downloads | int64 | 0 – 4.03M |
| likes | int64 | 0 – 4.32k |
| library_name | string (categorical) | 189 values |
| tags | list | length 1 – 237 |
| pipeline_tag | string (categorical) | 53 values |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 – 2026-05-05 05:54:22 |
| card | string | length 500 – 661k |
| entities | list | length 0 – 12 |
Abiray/Wan2.2-LightX2V-260412-4STEP-FP8-BF16
Abiray
2026-04-14T06:56:45Z
0
2
null
[ "image-to-video", "video-generation", "bfloat16", "safetensors", "text-to-video", "en", "base_model:lightx2v/Wan2.2-Distill-Models", "base_model:finetune:lightx2v/Wan2.2-Distill-Models", "license:apache-2.0", "region:us" ]
text-to-video
2026-04-14T05:56:35Z
This repository provides true BF16 downcasts and scaled FP8 quantizations for the official Wan2.2-Distill-Models (260412 version) by LightX2V. [Wan2.2-Distill-Models](https://huggingface.co/lightx2v/Wan2.2-Distill-Models) ## 🗂️ Which version should I download? ### 1. The BF16 Base Models (Maximum Quality) These wer...
[]
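The card above distinguishes BF16 and FP8 variants; a minimal sketch of listing the repo and fetching one file with `huggingface_hub` (the filename below is a placeholder, since the excerpt does not show the repo layout):

```python
from huggingface_hub import hf_hub_download, list_repo_files

repo = "Abiray/Wan2.2-LightX2V-260412-4STEP-FP8-BF16"
print(list_repo_files(repo))  # inspect which BF16 / FP8 variants exist

# "FILENAME.safetensors" is a placeholder: substitute the variant you chose.
path = hf_hub_download(repo_id=repo, filename="FILENAME.safetensors")
print(path)  # local cache path
```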
mradermacher/GPT-5-Distill-llama3.2-3B-Instruct-GGUF
mradermacher
2025-11-30T16:17:48Z
127
0
transformers
[ "transformers", "gguf", "llama", "llama-3.2", "text-generation", "conversational", "en", "zh", "dataset:Jackrong/ShareGPT-Qwen3-235B-A22B-Instuct-2507", "dataset:ytz20/LMSYS-Chat-GPT-5-Chat-Response", "base_model:Jackrong/GPT-5-Distill-llama3.2-3B-Instruct", "base_model:quantized:Jackrong/GPT-...
text-generation
2025-11-30T00:46:44Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
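These static quants target llama.cpp-compatible runtimes; a minimal llama-cpp-python sketch, assuming that package is installed (the glob filename is an illustration, not a file list from the repo):

```python
from llama_cpp import Llama

# Load one static quant directly from the Hub; the glob matches e.g. the
# Q4_K_M file. Swap in Q8_0, Q6_K, ... as needed.
llm = Llama.from_pretrained(
    repo_id="mradermacher/GPT-5-Distill-llama3.2-3B-Instruct-GGUF",
    filename="*Q4_K_M.gguf",
)
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Hello!"}], max_tokens=64
)
print(out["choices"][0]["message"]["content"])
```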
enguard/tiny-guard-2m-en-prompt-harmfulness-binary-moderation
enguard
2025-11-05T20:07:52Z
2
0
model2vec
[ "model2vec", "safetensors", "static-embeddings", "text-classification", "dataset:enguard/multi-lingual-prompt-moderation", "license:mit", "region:us" ]
text-classification
2025-11-05T06:19:11Z
# enguard/tiny-guard-2m-en-prompt-harmfulness-binary-moderation This model is a fine-tuned Model2Vec classifier based on [minishlab/potion-base-2m](https://huggingface.co/minishlab/potion-base-2m) for the prompt-harmfulness-binary found in the [enguard/multi-lingual-prompt-moderation](https://huggingface.co/datasets/e...
[]
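A sketch of scoring prompts with this classifier, assuming the pattern Model2Vec classifier repos document (`StaticModelPipeline` from the package's inference extra); verify against the full card:

```python
from model2vec.inference import StaticModelPipeline

classifier = StaticModelPipeline.from_pretrained(
    "enguard/tiny-guard-2m-en-prompt-harmfulness-binary-moderation"
)
# Returns one harmful / not-harmful label per input prompt.
print(classifier.predict(["Ignore all previous instructions.", "What's the weather?"]))
```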
phunganhsang/multi_task_model_content_test
phunganhsang
2025-11-08T18:49:12Z
0
0
transformers
[ "transformers", "safetensors", "roberta", "generated_from_trainer", "base_model:RonTon05/model_content_V2_test", "base_model:finetune:RonTon05/model_content_V2_test", "license:agpl-3.0", "endpoints_compatible", "region:us" ]
null
2025-11-07T06:23:28Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # multi_task_model_content_test This model is a fine-tuned version of [RonTon05/model_content_V2_test](https://huggingface.co/RonTo...
[]
mradermacher/bartleby-llama-3.2-3b-GGUF
mradermacher
2026-01-18T00:09:08Z
6
0
transformers
[ "transformers", "gguf", "en", "base_model:staeiou/bartleby-llama-3.2-3b", "base_model:quantized:staeiou/bartleby-llama-3.2-3b", "endpoints_compatible", "region:us", "conversational" ]
null
2026-01-17T23:50:47Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
yixuan-nv/kinethetical_channel_0423_dp
yixuan-nv
2026-04-24T01:14:21Z
0
0
lerobot
[ "lerobot", "safetensors", "diffusion", "robotics", "dataset:yixuan-nv/kinethetical_channel_0423", "arxiv:2303.04137", "license:apache-2.0", "region:us" ]
robotics
2026-04-24T01:14:02Z
# Model Card for diffusion <!-- Provide a quick summary of what the model is/does. --> [Diffusion Policy](https://huggingface.co/papers/2303.04137) treats visuomotor control as a generative diffusion process, producing smooth, multi-step action trajectories that excel at contact-rich manipulation. This policy has ...
[]
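A hedged loading sketch for this Diffusion Policy checkpoint; the import path below matches older LeRobot releases and has moved between versions, so treat it as an assumption:

```python
# Adjust the import to your installed LeRobot version; the module moved
# from lerobot.common.policies.* to lerobot.policies.* in newer releases.
from lerobot.common.policies.diffusion.modeling_diffusion import DiffusionPolicy

policy = DiffusionPolicy.from_pretrained("yixuan-nv/kinethetical_channel_0423_dp")
policy.eval()  # then feed observation batches to policy.select_action(...)
```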
sxiaa/Gemma-4-31B-JANG_4M-CRACK-GGUF
sxiaa
2026-04-11T21:27:46Z
56
0
null
[ "gguf", "gemma4", "quantized", "31b", "text-generation", "en", "license:gemma", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2026-04-11T21:27:46Z
# Gemma-4-31B-JANG_4M-CRACK-GGUF GGUF quantizations of Gemma-4-31B-JANG_4M-CRACK for use with llama.cpp, LM Studio, Ollama, and other GGUF-compatible inference engines. ## About the Model - **Base model:** [google/gemma-4-31b-it](https://huggingface.co/google/gemma-4-31b-it) - **Architecture:** Gemma 4 Dense Transfo...
[]
RylanSchaeffer/mem_Qwen3-62M_minerva_math_rep_3_sbst_1.0000_epch_1_ot_16
RylanSchaeffer
2025-10-09T19:32:51Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "generated_from_trainer", "conversational", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-10-09T19:32:45Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mem_Qwen3-62M_minerva_math_rep_3_sbst_1.0000_epch_1_ot_16 This model is a fine-tuned version of [](https://huggingface.co/) on an...
[]
saikay09/genaspire
saikay09
2026-02-22T11:34:22Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "autotrain", "text-generation-inference", "text-generation", "peft", "conversational", "base_model:TinyLlama/TinyLlama-1.1B-Chat-v1.0", "base_model:finetune:TinyLlama/TinyLlama-1.1B-Chat-v1.0", "license:other", "endpoints_compatible", "region:us"...
text-generation
2026-02-22T11:32:46Z
# Model Trained Using AutoTrain This model was trained using AutoTrain. For more information, please visit [AutoTrain](https://hf.co/docs/autotrain). # Usage ```python from transformers import AutoModelForCausalLM, AutoTokenizer model_path = "PATH_TO_THIS_REPO" tokenizer = AutoTokenizer.from_pretrained(model_path...
[]
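The card's usage snippet is cut off mid-line; a completed sketch of the same standard transformers flow (the chat-template call is an assumption for this AutoTrain conversational model):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "saikay09/genaspire"  # stands in for the card's PATH_TO_THIS_REPO
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float16, device_map="auto"
)

messages = [{"role": "user", "content": "Hello, who are you?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```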
mlfoundations-cua-dev/qwen2_5vl_7b_easyr1_10k_hard_qwen7b_easy_gta1-4MP_deepspeed_add_os_atlas
mlfoundations-cua-dev
2025-08-27T16:52:44Z
1
0
transformers
[ "transformers", "safetensors", "qwen2_5_vl", "image-text-to-text", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:Qwen/Qwen2.5-VL-7B-Instruct", "base_model:finetune:Qwen/Qwen2.5-VL-7B-Instruct", "license:other", "text-generation-inference", "endpoints_compat...
image-text-to-text
2025-08-27T16:49:06Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # qwen2_5vl_7b_easyr1_10k_hard_qwen7b_easy_gta1-4MP_lr_1_0e-06_bs_1_epochs_1.0_max_pixels_4000000_deepspeed_add_os_atlas This model...
[]
HPLT/hplt_t5_base_3_0_fra_Latn
HPLT
2025-11-04T02:45:34Z
0
0
null
[ "pytorch", "T5", "t5", "HPLT", "encoder-decoder", "text2text-generation", "custom_code", "fr", "fra", "dataset:HPLT/HPLT3.0", "license:apache-2.0", "region:us" ]
null
2025-10-29T20:38:47Z
# HPLT v3.0 T5 for French <img src="https://hplt-project.org/_next/static/media/logo-hplt.d5e16ca5.svg" width=12.5%> This is one of the encoder-decoder monolingual language models trained as a third release by the [HPLT project](https://hplt-project.org/). It is a text-to-text transformer trained with a denoising obj...
[]
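A hedged inference sketch for this denoising text-to-text model; the repo is tagged custom_code (hence trust_remote_code), and the `<extra_id_0>` sentinel is the standard T5 convention, which this tokenizer may or may not share:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

name = "HPLT/hplt_t5_base_3_0_fra_Latn"
tokenizer = AutoTokenizer.from_pretrained(name, trust_remote_code=True)
model = AutoModelForSeq2SeqLM.from_pretrained(name, trust_remote_code=True)

# Standard T5-style span infilling; sentinel handling is an assumption here.
inputs = tokenizer("Bonjour, comment <extra_id_0> ?", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=16)[0]))
```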
bisonnetworking/qwen3-medical-4bit-mlx
bisonnetworking
2025-12-24T02:37:39Z
30
0
mlx
[ "mlx", "safetensors", "qwen3", "text-generation", "conversational", "en", "4-bit", "region:us" ]
text-generation
2025-12-24T02:37:19Z
# bisonnetworking/qwen3-medical-4bit-mlx ## Use with mlx ```bash pip install mlx-lm ``` ```python from mlx_lm import load, generate model, tokenizer = load("bisonnetworking/qwen3-medical-4bit-mlx") prompt = "hello" if tokenizer.chat_template is not None: messages = [{"role": "user", "content": prompt}] pr...
[]
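The card's snippet breaks off mid-statement; the complete standard mlx-lm flow it follows:

```python
from mlx_lm import load, generate

model, tokenizer = load("bisonnetworking/qwen3-medical-4bit-mlx")

prompt = "hello"
if tokenizer.chat_template is not None:
    messages = [{"role": "user", "content": prompt}]
    prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

response = generate(model, tokenizer, prompt=prompt, verbose=True)
```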
wikilangs/udm
wikilangs
2026-01-11T02:19:09Z
0
0
wikilangs
[ "wikilangs", "nlp", "tokenizer", "embeddings", "n-gram", "markov", "wikipedia", "feature-extraction", "sentence-similarity", "tokenization", "n-grams", "markov-chain", "text-mining", "fasttext", "babelvec", "vocabulous", "vocabulary", "monolingual", "family-uralic_permian", "te...
text-generation
2026-01-11T02:18:53Z
# Udmurt - Wikilangs Models ## Comprehensive Research Report & Full Ablation Study This repository contains NLP models trained and evaluated by Wikilangs, specifically on **Udmurt** Wikipedia data. We analyze tokenizers, n-gram models, Markov chains, vocabulary statistics, and word embeddings. ## 📋 Repository Conten...
[ { "start": 1292, "end": 1313, "text": "Tokenizer Compression", "label": "training method", "score": 0.7212874889373779 } ]
caiyuchen/DAPO-step-19
caiyuchen
2025-10-03T12:42:38Z
1
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "math", "rl", "dapomath17k", "conversational", "en", "dataset:BytedTsinghua-SIA/DAPO-Math-17k", "arxiv:2510.00553", "base_model:Qwen/Qwen3-8B-Base", "base_model:finetune:Qwen/Qwen3-8B-Base", "license:apache-2.0", "text-generation...
text-generation
2025-10-03T04:48:40Z
--- license: apache-2.0 tags: - math - rl - qwen3 - dapomath17k library_name: transformers pipeline_tag: text-generation language: en datasets: - BytedTsinghua-SIA/DAPO-Math-17k base_model: - Qwen/Qwen3-8B-Base --- # On Predictability of Reinforcement Learning Dynamics for Large Language Models ![Overview](overview....
[]
contemmcm/dfc6b9d82f6c528dd8093bf5cb8a3d4d
contemmcm
2025-10-18T12:05:31Z
0
0
transformers
[ "transformers", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "base_model:google-t5/t5-3b", "base_model:finetune:google-t5/t5-3b", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
null
2025-10-18T11:23:28Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # dfc6b9d82f6c528dd8093bf5cb8a3d4d This model is a fine-tuned version of [google-t5/t5-3b](https://huggingface.co/google-t5/t5-3b) ...
[]
danielbodart/ten-vad-ggml
danielbodart
2026-03-22T09:48:46Z
0
0
ggml
[ "ggml", "vad", "voice-activity-detection", "ten-vad", "audio", "en", "license:mit", "region:us" ]
voice-activity-detection
2026-03-22T09:48:18Z
# TEN-VAD — GGML GGML format conversion of [TEN-framework/ten-vad](https://github.com/TEN-framework/ten-vad), a lightweight Voice Activity Detection model. This is the only GGML implementation of TEN-VAD that we're aware of. Conversion scripts and Zig reference implementation: **[danielbodart/ten-vad-ggml](https://gi...
[]
Intellexus/gemma2-2b-sa-50k-64
Intellexus
2026-01-04T17:31:11Z
0
0
null
[ "safetensors", "gemma2", "gemma2-2b", "vocabulary-expansion", "low-resource", "lora", "sa", "en", "arxiv:2408.00118", "base_model:google/gemma-2-2b", "base_model:adapter:google/gemma-2-2b", "license:cc-by-4.0", "region:us" ]
null
2026-01-04T17:24:08Z
# gemma2-2b-sa-50k-64 This model is a vocabulary-expanded version of `gemma2-2b` for **Sanskrit**. ## Training Details | Parameter | Value | |-----------|-------| | Base Model | gemma2-2b | | Target Language | Sanskrit | | Training Samples | 50,000 | | Added Tokens | 64 | ## Method 1. **Stage 1**: Initialize new t...
[]
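Stage 1 of the method (add tokens, initialize their embeddings) in miniature, using the standard transformers API; the tokens below are illustrative placeholders, not the repo's actual 64 added tokens:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

base = "google/gemma-2-2b"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

new_tokens = ["धर्मः", "कर्म"]  # placeholder Sanskrit tokens
num_added = tokenizer.add_tokens(new_tokens)
model.resize_token_embeddings(len(tokenizer))  # new rows are then trained
print(f"added {num_added} tokens; vocab size now {len(tokenizer)}")
```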
lino-levan/gpt-oss-20b-multilingual-reasoner
lino-levan
2025-11-09T17:57:02Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "sft", "trl", "dataset:HuggingFaceH4/Multilingual-Thinking", "base_model:openai/gpt-oss-20b", "base_model:finetune:openai/gpt-oss-20b", "endpoints_compatible", "region:us" ]
null
2025-11-09T17:34:15Z
# Model Card for gpt-oss-20b-multilingual-reasoner This model is a fine-tuned version of [openai/gpt-oss-20b](https://huggingface.co/openai/gpt-oss-20b) on the [HuggingFaceH4/Multilingual-Thinking](https://huggingface.co/datasets/HuggingFaceH4/Multilingual-Thinking) dataset. It has been trained using [TRL](https://git...
[]
mosss7352/wellwego-7b
mosss7352
2026-02-21T08:03:18Z
0
0
null
[ "safetensors", "wellwego", "region:us" ]
null
2026-02-21T08:01:42Z
# WellWeGo-7B-Instruct ## Model Card **Model Name:** WellWeGo-7B-Instruct **Model Type:** Causal Language Model **Architecture:** Transformer with RoPE, SwiGLU, RMSNorm, GQA **Parameters:** 7.61B (6.53B non-embedding) **Context Length:** 131,072 tokens ## Description WellWeGo-7B is a general-purpose small...
[]
samaritan-ai/LightOnOCR-2-1B-sam-44-mss-alb-GGUF
samaritan-ai
2026-02-19T06:35:46Z
32
0
llama.cpp
[ "llama.cpp", "gguf", "ocr", "document-understanding", "vision-language", "pdf", "tables", "forms", "image-text-to-text", "smr", "sam", "hbo", "base_model:lightonai/LightOnOCR-2-1B-base", "base_model:quantized:lightonai/LightOnOCR-2-1B-base", "license:apache-2.0", "endpoints_compatible"...
image-text-to-text
2026-02-19T06:30:44Z
<p align="center"> <img src="https://huggingface.co/lightonai/LightOnOCR-2-1B-base/resolve/main/lightonocr-banner.png" alt="LightOnOCR Banner" width="600"/> </p> # LightOnOCR-2-1B-sam-44-mss-alb Finetuned OCR model for Medieval Samaritan Hebrew & Samaritan Aramaic Manuscripts --- ## Overview `LightOnOCR-2-1B-sam-...
[]
mt628754/test074_99
mt628754
2026-03-01T08:40:24Z
0
0
peft
[ "peft", "safetensors", "qwen3", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "dataset:u-10bei/sft_alfworld_trajectory_dataset_v5", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache...
text-generation
2026-03-01T08:38:55Z
# qwen3-4b-agent-trajectory-lora-1 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **LoRA + Unsloth**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **multi-...
[ { "start": 65, "end": 69, "text": "LoRA", "label": "training method", "score": 0.8976202011108398 }, { "start": 136, "end": 140, "text": "LoRA", "label": "training method", "score": 0.9230801463127136 }, { "start": 182, "end": 186, "text": "LoRA", "lab...
mradermacher/Gemma3-Python-22k-1B-i1-GGUF
mradermacher
2025-12-10T20:47:17Z
51
0
transformers
[ "transformers", "gguf", "lora", "sft", "trl", "unsloth", "fine-tuned", "en", "dataset:Vezora/Tested-22k-Python-Alpaca", "base_model:theprint/Gemma3-Python-22k-1B", "base_model:adapter:theprint/Gemma3-Python-22k-1B", "license:mit", "endpoints_compatible", "region:us", "imatrix", "conver...
null
2025-09-18T10:43:14Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
zsjTiger/Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled
zsjTiger
2026-03-09T10:21:03Z
28
0
null
[ "safetensors", "qwen3_5", "unsloth", "qwen", "qwen3.5", "reasoning", "chain-of-thought", "Dense", "text-generation", "conversational", "en", "zh", "dataset:nohurry/Opus-4.6-Reasoning-3000x-filtered", "dataset:Jackrong/Qwen3.5-reasoning-700x", "base_model:Qwen/Qwen3.5-27B", "base_model:...
text-generation
2026-03-09T10:21:02Z
# 🌟 Qwen3.5-27B-Claude-4.6-Opus-Reasoning-Distilled > 📢 **Release Note** > **Build Environment Upgrades:** > - **Fine-tuning Framework**: **Unsloth 2026.3.3** > - **Core Dependencies**: **Transformers 5.2.0** > - This model fixes the crash in the official model caused by the Jinja template not supporting the **"dev...
[]
stonesstones/wm-widowxai-imgtok256-s-try2-ABC-chunk-action-B-post1-long
stonesstones
2026-01-08T00:46:40Z
0
0
transformers
[ "transformers", "safetensors", "oureagpt2", "feature-extraction", "generated_from_trainer", "custom_code", "base_model:stonesstones/wm-widowxai-imgtok256-s-try2-ABC-chunk-action-step-40x100k-100x100k", "base_model:finetune:stonesstones/wm-widowxai-imgtok256-s-try2-ABC-chunk-action-step-40x100k-100x100...
feature-extraction
2026-01-08T00:46:32Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wm_widowxai_imgtok256_chunk_action_s_only_real_260107_132931_B-post1-S-long This model is a fine-tuned version of [stonesstones/w...
[]
multimolecule/utrlm-te_el
multimolecule
2026-02-02T10:54:08Z
496
0
multimolecule
[ "multimolecule", "safetensors", "utrlm", "Biology", "RNA", "5' UTR", "fill-mask", "rna", "dataset:multimolecule/ensembl-genome-browser", "license:agpl-3.0", "region:us" ]
fill-mask
2026-02-02T10:54:05Z
# UTR-LM Pre-trained model on 5’ untranslated region (5’UTR) using masked language modeling (MLM), Secondary Structure (SS), and Minimum Free Energy (MFE) objectives. ## Statement _A 5’ UTR Language Model for Decoding Untranslated Regions of mRNA and Function Predictions_ is published in [Nature Machine Intelligence...
[]
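A hedged fill-mask sketch following the pattern multimolecule cards document (importing the package registers the custom architecture with transformers); the RNA sequence is illustrative:

```python
import multimolecule  # noqa: F401  -- registers UTR-LM with transformers
from transformers import pipeline

unmasker = pipeline("fill-mask", model="multimolecule/utrlm-te_el")
print(unmasker("gguc<mask>cucugguuagaccagaucugagccu"))  # illustrative 5' UTR
```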
sbgonenc96/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-MLX-4bit
sbgonenc96
2026-04-08T18:26:22Z
91
0
mlx
[ "mlx", "safetensors", "qwen3_5", "unsloth", "qwen", "qwen3.5", "reasoning", "chain-of-thought", "lora", "text-generation", "conversational", "en", "zh", "ko", "dataset:nohurry/Opus-4.6-Reasoning-3000x-filtered", "dataset:Jackrong/Qwen3.5-reasoning-700x", "dataset:Roman1111111/claude-...
text-generation
2026-04-08T18:25:34Z
# sbgonenc96/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-MLX-4bit This model [sbgonenc96/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-MLX-4bit](https://huggingface.co/sbgonenc96/Qwen3.5-9B-Claude-4.6-Opus-Reasoning-Distilled-v2-MLX-4bit) was converted to MLX format from [Jackrong/Qwen3.5-9B-Claude-4.6-Opus-...
[]
kibaraki/wav2vec2-large-xlsr-53-shinekhen-buryat
kibaraki
2025-09-25T01:46:27Z
1
0
null
[ "safetensors", "wav2vec2", "automatic-speech-recognition", "dataset:kibaraki/Shinekhen-Buryat", "arxiv:2509.15373", "base_model:facebook/wav2vec2-large-xlsr-53", "base_model:finetune:facebook/wav2vec2-large-xlsr-53", "license:cc-by-sa-4.0", "region:us" ]
automatic-speech-recognition
2025-09-16T20:45:31Z
Audio collected by Yamakoshi (Tokyo University of Foreign Studies), originally uploaded [here](https://tufs.repo.nii.ac.jp/search?search_type=2&q=1729497608274) [(CC BY-SA 4.0)](https://creativecommons.org/licenses/by-sa/4.0/deed.en). Audio is converted to per-sentence audio clips. Used in [[paper]](https://arxiv.org...
[]
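A minimal greedy-CTC transcription sketch for this checkpoint ("clip.wav" is a placeholder recording):

```python
import torch
import torchaudio
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

name = "kibaraki/wav2vec2-large-xlsr-53-shinekhen-buryat"
processor = Wav2Vec2Processor.from_pretrained(name)
model = Wav2Vec2ForCTC.from_pretrained(name)

waveform, sr = torchaudio.load("clip.wav")  # placeholder per-sentence clip
waveform = torchaudio.functional.resample(waveform, sr, 16_000)
inputs = processor(waveform.squeeze(), sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(processor.batch_decode(torch.argmax(logits, dim=-1))[0])
```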
Abdo-1/ABeX-Coder-14B-Phase3-Mastery
Abdo-1
2026-03-19T19:04:43Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "sft", "trl", "unsloth", "endpoints_compatible", "region:us" ]
null
2026-03-18T20:30:25Z
# Model Card for ABeX-Coder-14B-Phase3-Mastery This model is a fine-tuned version of [unsloth/qwen2.5-coder-14b-bnb-4bit](https://huggingface.co/unsloth/qwen2.5-coder-14b-bnb-4bit). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline questi...
[]
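The quick-start snippet is cut off; the standard TRL card example it follows, completed:

```python
from transformers import pipeline

question = "If you had a time machine, but could only go to the past or the future once and never return, which would you choose and why?"
generator = pipeline("text-generation", model="Abdo-1/ABeX-Coder-14B-Phase3-Mastery", device="cuda")
output = generator([{"role": "user", "content": question}], max_new_tokens=128, return_full_text=False)[0]
print(output["generated_text"])
```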
lukatman/verse-vertebrae-segmentation-nnunet
lukatman
2026-03-13T15:04:16Z
0
0
null
[ "medical", "segmentation", "vertebrae", "nnunet", "unet", "ct", "image-segmentation", "license:apache-2.0", "region:us" ]
image-segmentation
2026-03-13T13:54:56Z
# Spinal Vertebrae Segmentation — nnUNet Model Pre-trained nnUNetv2 model for automatic segmentation of 25 vertebrae classes (C1–C7, T1–T12, L1–L6, T13) from CT scans. Trained on the [VerSe 2020](https://github.com/anjany/verse) dataset using a Residual Encoder U-Net (ResEncUNet-M) architecture in 3D low-resolution co...
[]
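nnUNetv2 inference is normally driven by its CLI; a sketch invoking it from Python, where the dataset id, configuration, and fold are assumptions to be replaced with the repo's documented values:

```python
import subprocess

subprocess.run(
    [
        "nnUNetv2_predict",
        "-i", "ct_in/",      # folder of *_0000.nii.gz CT volumes
        "-o", "seg_out/",    # output folder for vertebra label maps
        "-d", "DATASET_ID",  # assumption: take from the repo's instructions
        "-c", "3d_lowres",   # matches the card's 3D low-resolution setup
        "-f", "all",
    ],
    check=True,
)
```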
bappy2001/medgemma-4b-ecg1000-sft
bappy2001
2025-11-23T22:00:03Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "generated_from_trainer", "sft", "trl", "base_model:google/medgemma-4b-it", "base_model:finetune:google/medgemma-4b-it", "endpoints_compatible", "region:us" ]
null
2025-11-23T21:37:13Z
# Model Card for medgemma-4b-ecg1000-sft-lora This model is a fine-tuned version of [google/medgemma-4b-it](https://huggingface.co/google/medgemma-4b-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time mac...
[]
AlignmentResearch/obfuscation-atlas-gemma-3-27b-it-kl0.001-det10-seed3-diverse_deception_probe
AlignmentResearch
2026-02-20T21:59:37Z
2
0
peft
[ "peft", "deception-detection", "rlvr", "alignment-research", "obfuscation-atlas", "lora", "model-type:obfuscated-policy", "op-type:rhetorical-rationalization", "arxiv:2602.15515", "base_model:google/gemma-3-27b-it", "base_model:adapter:google/gemma-3-27b-it", "license:mit", "region:us" ]
null
2026-02-17T10:11:46Z
# RLVR-trained policy from The Obfuscation Atlas This is a policy trained on MBPP-Honeypot with deception probes, from the [Obfuscation Atlas paper](https://arxiv.org/abs/2602.15515), uploaded for reproducibility and further research. The training code and RL environment are available at: https://github.com/Alignment...
[]
madeofajala/gemma-2-2b_LLM_Malaria_split_1
madeofajala
2026-02-24T15:12:51Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:google/gemma-2-2b", "base_model:finetune:google/gemma-2-2b", "endpoints_compatible", "region:us" ]
null
2026-02-22T14:14:36Z
# Model Card for gemma-2-2b_LLM_Malaria_split_1 This model is a fine-tuned version of [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, ...
[]
sohamvjadhav/MiniMax-M2.5
sohamvjadhav
2026-03-01T13:36:21Z
9
0
transformers
[ "transformers", "safetensors", "minimax_m2", "text-generation", "conversational", "custom_code", "license:other", "endpoints_compatible", "fp8", "region:us" ]
text-generation
2026-03-01T13:36:18Z
<div align="center"> <svg width="60%" height="auto" viewBox="0 0 144 48" fill="none" xmlns="http://www.w3.org/2000/svg"> <path d="M26.6782 7.96523C26.6782 7.02436 25.913 6.26087 24.9739 6.26087C24.0348 6.26087 23.2695 7.0261 23.2695 7.96523V36.2139C23.2695 38.4 21.4904 40.1791 19.3043 40.1791C17.1183 40.1791 15.3391 3...
[]
jhsjbebsb/BFS-Best-Face-Swap-Video
jhsjbebsb
2026-04-07T06:19:09Z
0
0
diffusers
[ "diffusers", "ltx-2", "ic-lora", "head-swap", "video-to-video", "image-to-video", "bfs", "lora", "base_model:Lightricks/LTX-2.3", "base_model:adapter:Lightricks/LTX-2.3", "license:other", "region:us" ]
image-to-video
2026-04-07T06:19:09Z
## ⚠️ Ethical Use & Disclaimer This model is a technical tool designed for **Digital Identity Research, Professional VFX Workflows, and Cinematic Prototyping**. By downloading or using this LoRA, you acknowledge and agree to the following: * **Intended Use:** Designed for filmmakers, VFX artists, and researchers exp...
[]
mattator/test
mattator
2026-01-30T21:30:03Z
3
0
vllm
[ "vllm", "safetensors", "mistral3", "mistral-common", "en", "fr", "es", "de", "it", "pt", "nl", "zh", "ja", "ko", "ar", "arxiv:2601.08584", "base_model:mistralai/Ministral-3-3B-Base-2512", "base_model:quantized:mistralai/Ministral-3-3B-Base-2512", "license:apache-2.0", "fp8", ...
null
2026-01-30T21:30:03Z
# Ministral 3 3B Instruct 2512 The smallest model in the Ministral 3 family, **Ministral 3 3B** is a powerful, efficient tiny language model with vision capabilities. This model is the instruct post-trained version in **FP8**, fine-tuned for instruction tasks, making it ideal for chat and instruction based use cases. ...
[]
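A minimal vLLM sketch (the repo's declared library); Mistral-format repos sometimes also need tokenizer_mode="mistral", so check the full card:

```python
from vllm import LLM, SamplingParams

llm = LLM(model="mattator/test")  # FP8 checkpoint per the card
params = SamplingParams(max_tokens=64, temperature=0.7)
outputs = llm.generate(["Briefly describe the Ministral 3 family."], params)
print(outputs[0].outputs[0].text)
```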
ntkuhn/anime-score-model
ntkuhn
2025-11-20T21:39:29Z
0
0
null
[ "score-matching", "anime", "image-generation", "pytorch", "license:mit", "region:us" ]
null
2025-11-20T21:39:17Z
# Anime Face Score Matching Model A score-based generative model trained to generate 64x64 anime-style faces using Denoising Score Matching. ## Model Details - **Model type**: NCSN / Score Matching - **Training data**: Anime faces dataset - **Image size**: 64x64 RGB - **Sigma**: 0.15 - **Architecture**: Improved U-N...
[]
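Score models of this kind are sampled with (annealed) Langevin dynamics; a generic sketch of one Langevin loop at the card's sigma, with a hypothetical `score_model(x, sigma)` standing in for this repo's network:

```python
import torch

def langevin_sample(score_model, shape=(1, 3, 64, 64), sigma=0.15,
                    steps=200, step_size=1e-4):
    """Plain Langevin dynamics: x <- x + (eps/2) * score + sqrt(eps) * z."""
    x = torch.randn(shape) * sigma
    for _ in range(steps):
        z = torch.randn_like(x)
        score = score_model(x, sigma)  # estimate of grad_x log p_sigma(x)
        x = x + 0.5 * step_size * score + step_size ** 0.5 * z
    return x.clamp(-1, 1)
```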
hZzy/mistral-7b-expo-7b-L2EXPO-25-09-try-new-data-modelDef-LOR-4
hZzy
2025-10-09T02:43:11Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "expo", "trl", "arxiv:2305.18290", "base_model:hZzy/mistral-7b-sft-7b-2509-7", "base_model:finetune:hZzy/mistral-7b-sft-7b-2509-7", "endpoints_compatible", "region:us" ]
null
2025-10-09T01:32:50Z
# Model Card for mistral-7b-expo-7b-L2EXPO-25-09-try-new-data-modelDef-LOR-4 This model is a fine-tuned version of [hZzy/mistral-7b-sft-7b-2509-7](https://huggingface.co/hZzy/mistral-7b-sft-7b-2509-7). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers imp...
[ { "start": 230, "end": 233, "text": "TRL", "label": "training method", "score": 0.7073571085929871 }, { "start": 1028, "end": 1031, "text": "DPO", "label": "training method", "score": 0.7588804960250854 }, { "start": 1324, "end": 1327, "text": "DPO", "...
Thireus/Qwen3-VL-235B-A22B-Thinking-THIREUS-Q2_K-SPECIAL_SPLIT
Thireus
2026-02-12T18:28:53Z
1
0
null
[ "gguf", "arxiv:2505.23786", "license:mit", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2025-10-08T18:56:44Z
## ⚠️ Cautionary Notice The metadata of these quants has been updated and is now compatible with the latest version of `llama.cpp` (and `ik_llama.cpp`). - ⚠️ **Official support in `llama.cpp` was recently made available** – see [ggml-org/llama.cpp PR #16780](http://github.com/ggml-org/llama.cpp/pull/16780). - ⚠️ **Of...
[]
LesserNeoguri/pi05_PickandPlace150_v1_b64_20k
LesserNeoguri
2026-04-29T17:14:44Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "pi05", "dataset:LesserNeoguri/rclab_lerobot_pickandplace150_pickreddoll", "license:apache-2.0", "region:us" ]
robotics
2026-04-29T17:12:11Z
# Model Card for pi05 <!-- Provide a quick summary of what the model is/does. --> **π₀.₅ (Pi05) Policy** π₀.₅ is a Vision-Language-Action model with open-world generalization, from Physical Intelligence. The LeRobot implementation is adapted from their open source OpenPI repository. **Model Overview** π₀.₅ repres...
[]
dnth/ssf-retriever-modernbert-embed-base-v3.1
dnth
2025-09-09T12:53:47Z
1
0
sentence-transformers
[ "sentence-transformers", "safetensors", "modernbert", "sentence-similarity", "feature-extraction", "dense", "generated_from_trainer", "dataset_size:3016", "loss:MultipleNegativesRankingLoss", "dataset:dnth/ssf-train-valid-v3", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:nomic-ai/mode...
sentence-similarity
2025-09-09T12:52:03Z
# SentenceTransformer based on nomic-ai/modernbert-embed-base This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [nomic-ai/modernbert-embed-base](https://huggingface.co/nomic-ai/modernbert-embed-base) on the [ssf-train-valid-v3](https://huggingface.co/datasets/dnth/ssf-train-valid-v3) datase...
[]
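Standard sentence-transformers usage for this retriever:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("dnth/ssf-retriever-modernbert-embed-base-v3.1")
sentences = ["a query", "a matching document", "an unrelated document"]
embeddings = model.encode(sentences)
# Cosine similarity of the query against both documents.
print(model.similarity(embeddings[:1], embeddings[1:]))
```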
leobianco/bosch_RM_Qwen_S12345_LLM_false_STRUCT_false_epo10_lr1e-4_r8_2602041544
leobianco
2026-02-04T18:02:20Z
4
0
peft
[ "peft", "safetensors", "base_model:adapter:Qwen/Qwen2.5-3B-Instruct", "lora", "transformers", "base_model:Qwen/Qwen2.5-3B-Instruct", "license:other", "region:us" ]
null
2026-02-04T15:45:28Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bosch_RM_Qwen_S12345_LLM_false_STRUCT_false_epo10_lr1e-4_r8_2602041544 This model is a fine-tuned version of [Qwen/Qwen2.5-3B-Ins...
[]
OpenMed/OpenMed-ZeroShot-NER-Protein-Medium-209M
OpenMed
2025-10-19T07:45:05Z
1
0
gliner
[ "gliner", "pytorch", "token-classification", "entity recognition", "named-entity-recognition", "zero-shot", "zero-shot-ner", "zero shot", "biomedical-nlp", "protein-interactions", "molecular-biology", "biochemistry", "systems-biology", "protein", "protein_complex", "protein_family", ...
token-classification
2025-09-15T20:48:54Z
# 🧬 [OpenMed-ZeroShot-NER-Protein-Medium-209M](https://huggingface.co/OpenMed/OpenMed-ZeroShot-NER-Protein-Medium-209M) **Specialized model for Biomedical Entity Recognition - Various biomedical entities** [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache...
[]
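Zero-shot GLiNER usage with free-form entity labels (the sentence and label set are illustrative):

```python
from gliner import GLiNER

model = GLiNER.from_pretrained("OpenMed/OpenMed-ZeroShot-NER-Protein-Medium-209M")
text = "The p53 protein interacts with MDM2 to regulate apoptosis."
labels = ["protein", "protein_complex", "protein_family"]
for ent in model.predict_entities(text, labels, threshold=0.5):
    print(ent["text"], "=>", ent["label"])
```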
optimum-intel-internal-testing/tiny-random-glm4-moe
optimum-intel-internal-testing
2026-02-18T14:05:42Z
2
0
null
[ "safetensors", "glm4_moe", "license:apache-2.0", "region:us" ]
null
2026-02-18T14:03:57Z
```python """Create a tiny random Glm4Moe model for testing optimum-intel export.""" import torch from transformers import AutoTokenizer from transformers.models.glm4_moe.modeling_glm4_moe import Glm4MoeForCausalLM, Glm4MoeConfig def create_tiny_glm4_moe(): config = Glm4MoeConfig( vocab_size=1000, ...
[]
ellisdoro/apollo_sv-all-MiniLM-L6-v2_additive_gcn_h512_o64_cosine_e1024_early-on2vec-koji-early
ellisdoro
2025-09-19T09:10:28Z
1
0
sentence-transformers
[ "sentence-transformers", "safetensors", "bert", "sentence-similarity", "feature-extraction", "ontology", "on2vec", "graph-neural-networks", "base-all-MiniLM-L6-v2", "general", "general-ontology", "fusion-additive", "gnn-gcn", "medium-ontology", "license:apache-2.0", "text-embeddings-in...
sentence-similarity
2025-09-19T09:10:25Z
# apollo_sv_all-MiniLM-L6-v2_additive_gcn_h512_o64_cosine_e1024_early This is a sentence-transformers model created with [on2vec](https://github.com/david4096/on2vec), which augments text embeddings with ontological knowledge using Graph Neural Networks. ## Model Details - **Base Text Model**: all-MiniLM-L6-v2 - T...
[]
duydanghd0402/AI-Q4_K_M-GGUF
duydanghd0402
2026-04-22T20:44:02Z
0
0
null
[ "gguf", "llama-cpp", "gguf-my-repo", "base_model:duydanghd0402/AI", "base_model:quantized:duydanghd0402/AI", "endpoints_compatible", "region:us", "conversational" ]
null
2026-04-22T20:43:45Z
# duydanghd0402/AI-Q4_K_M-GGUF This model was converted to GGUF format from [`duydanghd0402/AI`](https://huggingface.co/duydanghd0402/AI) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/duydanghd0402/A...
[]
Muapi/ashlynn-spektre-sd1-xl-flux
Muapi
2025-08-19T18:38:47Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-19T18:38:32Z
# Ashlynn Spektre (SD1, XL, Flux) ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {"Conte...
[]
Vel044/so101_act_bottle_classification
Vel044
2026-04-21T06:12:53Z
0
0
lerobot
[ "lerobot", "safetensors", "act", "robotics", "dataset:Vel044/so101_bottle_classification", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-04-21T06:08:12Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
qing-yao/handcoded_n10000_nb300k_70m_ep1_lr1e-4_seed42
qing-yao
2025-12-27T07:22:42Z
0
0
transformers
[ "transformers", "safetensors", "gpt_neox", "text-generation", "generated_from_trainer", "base_model:EleutherAI/pythia-70m", "base_model:finetune:EleutherAI/pythia-70m", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-12-27T07:22:25Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # handcoded_n10000_nb300k_70m_ep1_lr1e-4_seed42 This model is a fine-tuned version of [EleutherAI/pythia-70m](https://huggingface.c...
[]
SaketR1/st3-standard-rlhf
SaketR1
2026-04-08T15:52:33Z
144
0
transformers
[ "transformers", "safetensors", "qwen3_5_text", "text-generation", "generated_from_trainer", "grpo", "trl", "conversational", "arxiv:2402.03300", "base_model:Qwen/Qwen3.5-0.8B", "base_model:finetune:Qwen/Qwen3.5-0.8B", "endpoints_compatible", "region:us" ]
text-generation
2026-04-08T15:51:52Z
# Model Card for st3-standard-rlhf This model is a fine-tuned version of [Qwen/Qwen3.5-0.8B](https://huggingface.co/Qwen/Qwen3.5-0.8B). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, but could onl...
[]
Ademola265/Qwen3-TTS-12Hz-1.7B-CustomVoice
Ademola265
2026-01-30T10:49:14Z
15
0
null
[ "safetensors", "qwen3_tts", "text-to-speech", "arxiv:2601.15621", "license:apache-2.0", "region:us" ]
text-to-speech
2026-01-30T10:49:13Z
# Qwen3-TTS ## Overview ### Introduction <p align="center"> <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen3-TTS-Repo/qwen3_tts_introduction.png" width="90%"/> <p> Qwen3-TTS covers 10 major languages (Chinese, English, Japanese, Korean, German, French, Russian, Portuguese, Spanish, and Italian) as...
[]
Dagowina/Dans-PersonalityEngine-V1.3.0-24b-absolute-heresy-Q5_K_S-GGUF
Dagowina
2026-03-30T09:31:15Z
0
0
transformers
[ "transformers", "gguf", "general-purpose", "roleplay", "storywriting", "chemistry", "biology", "code", "climate", "axolotl", "text-generation-inference", "finetune", "legal", "medical", "finance", "heretic", "uncensored", "decensored", "abliterated", "llama-cpp", "gguf-my-rep...
text-generation
2026-03-30T09:29:55Z
# Dagowina/Dans-PersonalityEngine-V1.3.0-24b-absolute-heresy-Q5_K_S-GGUF This model was converted to GGUF format from [`MuXodious/Dans-PersonalityEngine-V1.3.0-24b-absolute-heresy`](https://huggingface.co/MuXodious/Dans-PersonalityEngine-V1.3.0-24b-absolute-heresy) using llama.cpp via the ggml.ai's [GGUF-my-repo](https...
[]
dianavdavidson/wh_l_v3_turbo_fleurs_trial
dianavdavidson
2026-02-02T14:31:11Z
0
0
transformers
[ "transformers", "safetensors", "whisper", "automatic-speech-recognition", "generated_from_trainer", "dataset:fleurs", "base_model:openai/whisper-large-v3-turbo", "base_model:finetune:openai/whisper-large-v3-turbo", "license:mit", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2026-02-02T10:48:00Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wh_l_v3_turbo_fleurs_trial This model is a fine-tuned version of [openai/whisper-large-v3-turbo](https://huggingface.co/openai/wh...
[]
keviniacs/Objects-Classifier-CNN
keviniacs
2025-09-16T12:44:29Z
2
0
keras
[ "keras", "en", "license:apache-2.0", "region:us" ]
null
2025-08-22T19:37:11Z
# Objects Classifier CNN CNN for industrial parts classification in robotic automation systems. ## Model Description Trained to classify 3 types of industrial components: - `screw`: Metal screws and bolts - `star`: Star-shaped components - `tee_connector`: T-shaped pipe connectors ## Performance - **Accuracy**: >8...
[]
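A hedged loading sketch: huggingface_hub releases that still ship the Keras mixin expose `from_pretrained_keras`; the input shape and class order below are assumptions, so check the card:

```python
import numpy as np
from huggingface_hub import from_pretrained_keras  # Keras mixin; deprecated in newer releases

model = from_pretrained_keras("keviniacs/Objects-Classifier-CNN")
dummy = np.random.rand(1, 224, 224, 3).astype("float32")  # assumed input shape
probs = model.predict(dummy)
print(["screw", "star", "tee_connector"][int(probs.argmax())])  # assumed class order
```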
rbelanec/train_cb_789_1757596126
rbelanec
2025-09-11T14:12:02Z
0
0
peft
[ "peft", "safetensors", "llama-factory", "prefix-tuning", "generated_from_trainer", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "base_model:adapter:meta-llama/Meta-Llama-3-8B-Instruct", "license:llama3", "region:us" ]
null
2025-09-11T14:07:45Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # train_cb_789_1757596126 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-l...
[]
DanqingZ/diffusion_pusht_20260107_072022
DanqingZ
2026-01-07T07:22:18Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "diffusion", "dataset:lerobot/pusht", "arxiv:2303.04137", "license:apache-2.0", "region:us" ]
robotics
2026-01-07T07:21:54Z
# Model Card for diffusion <!-- Provide a quick summary of what the model is/does. --> [Diffusion Policy](https://huggingface.co/papers/2303.04137) treats visuomotor control as a generative diffusion process, producing smooth, multi-step action trajectories that excel at contact-rich manipulation. This policy has ...
[]
Muapi/flux.1-d-sdxl-impossible-geometry
Muapi
2025-08-14T10:52:14Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-14T10:51:57Z
# Flux.1 D / SDXL - Impossible Geometry ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image" headers = {...
[]
kevinshin/qwen3-1.7b-rpo-lr-1e-5-alpha-0.1-beta-0.1-wc-cw-3k-neg-rethink-pos
kevinshin
2025-09-15T19:12:47Z
1
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "generated_from_trainer", "trl", "dpo", "conversational", "dataset:kevinshin/wildchat-creative-writing-3k-critique-v2", "arxiv:2305.18290", "base_model:Qwen/Qwen3-1.7B", "base_model:finetune:Qwen/Qwen3-1.7B", "text-generation-inferen...
text-generation
2025-09-15T10:52:38Z
# Model Card for qwen3-1.7b-rpo-lr-1e-5-alpha-0.1-beta-0.1-wc-cw-3k-neg-rethink-pos This model is a fine-tuned version of [Qwen/Qwen3-1.7B](https://huggingface.co/Qwen/Qwen3-1.7B) on the [kevinshin/wildchat-creative-writing-3k-critique-v2](https://huggingface.co/datasets/kevinshin/wildchat-creative-writing-3k-critique...
[ { "start": 1476, "end": 1479, "text": "DPO", "label": "training method", "score": 0.7076043486595154 } ]
contemmcm/d5ee92af64f5995dfb2cff04cdd13fc7
contemmcm
2025-10-14T14:18:18Z
0
0
transformers
[ "transformers", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "base_model:google-t5/t5-base", "base_model:finetune:google-t5/t5-base", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
null
2025-10-14T12:21:48Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # d5ee92af64f5995dfb2cff04cdd13fc7 This model is a fine-tuned version of [google-t5/t5-base](https://huggingface.co/google-t5/t5-ba...
[]
Muapi/unfazed-cybrsync
Muapi
2025-08-25T07:38:13Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-25T07:37:50Z
# Unfazed CybrSync ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: cyberpunk, cybernetic, Cyberware, cybernetic lines,, cyborg, exposed mechanics, mechanical parts, robot joints, cable, CybrSync ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/acces...
[]
Mohaaxa/qwen2.5-1.5b-gptq-4bit-v2
Mohaaxa
2026-02-18T11:21:39Z
12
1
null
[ "safetensors", "qwen2", "quantized", "gptq", "4-bit", "quality-optimized", "text-generation", "conversational", "en", "base_model:Qwen/Qwen2.5-1.5B-Instruct", "base_model:quantized:Qwen/Qwen2.5-1.5B-Instruct", "license:apache-2.0", "region:us" ]
text-generation
2026-02-18T10:22:07Z
# Qwen2.5-1.5B-Instruct · GPTQ 4-bit (v2, quality-optimized) > Part of a systematic 4-way quantization study on Qwen2.5-1.5B-Instruct. > See the [study overview](#study-context) for comparisons across all variants. An improved GPTQ 4-bit quantization of [Qwen/Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5...
[]
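GPTQ checkpoints load through the plain transformers API once a GPTQ backend (optimum plus gptqmodel or auto-gptq) is installed; a minimal sketch:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

name = "Mohaaxa/qwen2.5-1.5b-gptq-4bit-v2"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForCausalLM.from_pretrained(name, device_map="auto")

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=16)[0]))
```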
lewtun/SmolLM2-135M-Capybara-SFT
lewtun
2026-04-30T15:15:58Z
351
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "generated_from_trainer", "hf_jobs", "sft", "trackio:https://lewtun-mlintern-smol2sft.hf.space?project=huggingface&runs=sft_smollm2-135m_capybara_lr2e-5_bs16&sidebar=collapsed", "trl", "conversational", "base_model:HuggingFaceTB/SmolLM2-...
text-generation
2026-04-30T12:44:12Z
# Model Card for SmolLM2-135M-Capybara-SFT This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-135M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question...
[]
u539285g/pi0fast-lora-so-101-handover-v6
u539285g
2026-04-02T02:08:27Z
0
0
lerobot
[ "lerobot", "safetensors", "pi0_fast", "robotics", "dataset:u539285g/so-101-handover", "license:apache-2.0", "region:us" ]
robotics
2026-04-02T02:08:17Z
# Model Card for pi0_fast <!-- Provide a quick summary of what the model is/does. --> _Model type not recognized — please update this template._ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). See the full documentation at [LeRobot Docs](https://huggingfa...
[]
WindyWord/translate-es-tw
WindyWord
2026-04-27T23:57:55Z
0
0
transformers
[ "transformers", "safetensors", "translation", "marian", "windyword", "spanish", "twi", "es", "tw", "license:cc-by-4.0", "endpoints_compatible", "region:us" ]
translation
2026-04-17T02:51:12Z
# WindyWord.ai Translation — Spanish → Twi **Translates Spanish → Twi.** **Quality Rating: — (None★ Deferred)** Part of the [WindyWord.ai](https://windyword.ai) translation fleet — 1,800+ proprietary language pairs. ## Quality & Pricing Tier - **5-star rating:** None★ — - **Tier:** Deferred - **Composite score:**...
[]
prithivMLmods/Delorme_1-OCR-7B-Post1.0
prithivMLmods
2026-02-11T11:37:30Z
3
3
transformers
[ "transformers", "safetensors", "qwen2_5_vl", "image-text-to-text", "v1.0", "Document", "VLM", "OCR", "VL", "Openpdf", "text-generation-inference", "Extraction", "Linking", "Markdown", "document", "conversational", "en", "base_model:prithivMLmods/Gliese-OCR-7B-Post1.0", "base_mode...
image-text-to-text
2026-01-15T05:45:26Z
![1](https://cdn-uploads.huggingface.co/production/uploads/65bb837dbfb878f46c77de4c/Rh3djsSuzQMmgL434NyQb.png) # **Delorme_1-OCR-7B-Post1.0** > The **Delorme_1-OCR-7B-Post1.0** model is a refined and optimized version of **[Gliese-OCR-7B-Post1.0](https://huggingface.co/prithivMLmods/Gliese-OCR-7B-Post1.0)**, built up...
[]
mzhaoshuai/Llama-2-7b-hf-conf-refalign
mzhaoshuai
2025-10-16T11:28:09Z
1
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "dataset:shuchangtao/CONQORD_dataset", "arxiv:2504.09895", "arxiv:2403.15740", "base_model:mzhaoshuai/Llama-2-7b-hf-conf-sft", "base_model:finetune:mzhaoshuai/Llama-2-7b-hf-conf-sft", "text-generation-inference", "endpoints_compatible", ...
text-generation
2025-10-03T11:00:58Z
# RefAlign: RL with Similarity-based Rewards **GitHub repository**: https://github.com/mzhaoshuai/RefAlign **Paper**: [Learning from Reference Answers: Versatile Language Model Alignment without Binary Human Preference Data](https://huggingface.co/papers/2504.09895). ## Introduction Large language models (LLMs) are...
[]
UnifiedHorusRA/wan2.2-i2v-high-Apex_Poise
UnifiedHorusRA
2025-09-13T21:32:01Z
4
0
null
[ "custom", "art", "en", "region:us" ]
null
2025-09-04T20:39:38Z
# wan2.2-i2v-high-Apex Poise **Creator**: [hxxwoq2222](https://civitai.com/user/hxxwoq2222) **Civitai Model Page**: [https://civitai.com/models/1893825](https://civitai.com/models/1893825) --- This repository contains multiple versions of the 'wan2.2-i2v-high-Apex Poise' model from Civitai. Each version's files, inc...
[]
micrictor/gemma-3-270m-it-memorize-hppl-2.5p_interleave_divby2
micrictor
2026-01-05T16:58:33Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "gemma3_text", "text-generation", "generated_from_trainer", "trl", "sft", "conversational", "base_model:google/gemma-3-270m-it", "base_model:finetune:google/gemma-3-270m-it", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-01-05T11:45:53Z
# Model Card for gemma-3-270m-it-memorize-hppl-2.5p_interleave_divby2 This model is a fine-tuned version of [google/gemma-3-270m-it](https://huggingface.co/google/gemma-3-270m-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline questio...
[]
mradermacher/Gemma-The-Writer-9B-abliterated-GGUF
mradermacher
2025-11-27T02:11:38Z
248
2
transformers
[ "transformers", "gguf", "creative", "creative writing", "fiction writing", "plot generation", "sub-plot generation", "story generation", "scene continue", "storytelling", "fiction story", "science fiction", "romance", "all genres", "story", "writing", "vivid prosing", "vivid writin...
null
2025-11-24T01:23:40Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
FiveC/zh_CN-za_CN-eda-zh
FiveC
2026-03-14T09:10:29Z
24
0
transformers
[ "transformers", "safetensors", "mbart", "text2text-generation", "generated_from_trainer", "base_model:facebook/mbart-large-50-many-to-many-mmt", "base_model:finetune:facebook/mbart-large-50-many-to-many-mmt", "endpoints_compatible", "region:us" ]
null
2026-03-14T09:05:19Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # zh_CN-za_CN-eda-zh This model is a fine-tuned version of [facebook/mbart-large-50-many-to-many-mmt](https://huggingface.co/facebo...
[]
KKHYA/llavaqwen2.5-0.5b-finetune-moe-4e-2k_20260331_194516
KKHYA
2026-03-31T22:07:39Z
333
0
transformers
[ "transformers", "pytorch", "safetensors", "moe_llava_qwen2", "text-generation", "generated_from_trainer", "conversational", "base_model:KKHYA/llavaqwen2.5-0.5b-finetune", "base_model:finetune:KKHYA/llavaqwen2.5-0.5b-finetune", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-generation
2026-03-31T19:49:14Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # llavaqwen2.5-0.5b-finetune-moe-4e-2k_20260331_194516 This model is a fine-tuned version of [KKHYA/llavaqwen2.5-0.5b-finetune](htt...
[]
Open4bits/llama-nexora-vector-v0.1-GGUF
Open4bits
2026-04-27T15:06:44Z
0
1
null
[ "gguf", "nexora", "llama-nexora", "vector", "chat", "llama-3", "open4bits", "text-generation", "en", "base_model:ArkAiLab-Adl/llama-nexora-vector-v0.1", "base_model:quantized:ArkAiLab-Adl/llama-nexora-vector-v0.1", "license:llama3.2", "endpoints_compatible", "region:us", "conversational"...
text-generation
2026-04-27T12:28:26Z
<p align="center"> <img src="https://huggingface.co/ArkAiLab-Adl/llama-nexora-vector-v0.1/resolve/main/assets/llama-nexora-vector.jpg" alt="llama-nexora-vector-gguf"/> </p> # Llama-Nexora-Vector-v0.1 — GGUF <p align="center"> <img src="https://img.shields.io/badge/status-beta-orange" alt="Status: Beta"/> <img s...
[]
rm0013/roberta-pii-ner-en
rm0013
2026-04-03T04:59:30Z
0
0
null
[ "safetensors", "roberta", "ner", "pii", "pci", "token-classification", "en", "dataset:ai4privacy/pii-masking-200k", "license:mit", "model-index", "region:us" ]
token-classification
2026-04-03T04:16:16Z
# roberta-pii-ner-en Fine-tuned [roberta-base](https://huggingface.co/roberta-base) for detecting Personally Identifiable Information (PII) and Payment Card Industry (PCI) data in English text. **GitHub:** [rakmohan/pii-ner-en](https://github.com/rakmohan/pii-ner-en) ## Model Performance | Metric | Score | |-------...
[]
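A token-classification pipeline sketch with span aggregation (example sentence is illustrative):

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="rm0013/roberta-pii-ner-en",
    aggregation_strategy="simple",  # merge word pieces into entity spans
)
for ent in ner("Contact Jane Doe at jane@example.com, card 4111 1111 1111 1111."):
    print(ent["entity_group"], ent["word"], round(float(ent["score"]), 3))
```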
AfriScience-MT/gemma_3_4b_it-lora-r8-lug-eng
AfriScience-MT
2026-02-06T19:51:33Z
1
0
peft
[ "peft", "safetensors", "translation", "african-languages", "scientific-translation", "afriscience-mt", "lora", "gemma", "lg", "en", "base_model:google/gemma-3-4b-it", "base_model:adapter:google/gemma-3-4b-it", "license:apache-2.0", "model-index", "region:us" ]
translation
2026-02-06T19:51:25Z
# gemma_3_4b_it-lora-r8-lug-eng [![Model on HF](https://huggingface.co/datasets/huggingface/badges/raw/main/model-on-hf-sm.svg)](https://huggingface.co/AfriScience-MT/gemma_3_4b_it-lora-r8-lug-eng) This is a **LoRA adapter** for the AfriScience-MT project, enabling efficient scientific machine translation for African...
[ { "start": 212, "end": 216, "text": "LoRA", "label": "training method", "score": 0.7560734152793884 }, { "start": 542, "end": 546, "text": "LoRA", "label": "training method", "score": 0.7371999025344849 }, { "start": 568, "end": 572, "text": "LoRA", "l...
TM12/06_dataset_2-1_3k-mix
TM12
2026-02-19T11:12:00Z
0
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:daichira/structured-3k-mix-sft", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-02-19T11:11:46Z
06_dataset_2-1_3k-mix This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **structured ...
[ { "start": 123, "end": 128, "text": "QLoRA", "label": "training method", "score": 0.8025839328765869 } ]
aadex/Earthmind-R1-test
aadex
2025-12-04T02:05:14Z
11
0
transformers
[ "transformers", "safetensors", "sa2va_chat", "feature-extraction", "vision-language", "vlm", "grpo", "earthmind", "geospatial", "remote-sensing", "image-text-to-text", "conversational", "custom_code", "en", "license:apache-2.0", "region:us" ]
image-text-to-text
2025-12-04T01:27:13Z
# EarthMind-R1 EarthMind-R1 is a vision-language model fine-tuned using GRPO (Group Relative Policy Optimization) for geospatial and remote sensing image understanding tasks. ## Model Description - **Base Model:** EarthMind-4B - **Training Method:** GRPO (Group Relative Policy Optimization) - **Training Data:** Geos...
[ { "start": 73, "end": 77, "text": "GRPO", "label": "training method", "score": 0.7959558963775635 }, { "start": 253, "end": 257, "text": "GRPO", "label": "training method", "score": 0.7957988381385803 } ]
Carbyne/sequence_classification
Carbyne
2025-08-11T19:10:55Z
4
0
transformers
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "text-embeddings-inference", "endpoints_compatible", "re...
text-classification
2025-08-11T17:18:14Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sequence_classification This model is a fine-tuned version of [distilbert/distilbert-base-uncased](https://huggingface.co/distilb...
[]
forkjoin-ai/bigvgan-v2-22khz-80band-256x-onnx
forkjoin-ai
2026-03-20T16:38:09Z
34
0
onnx
[ "onnx", "audio", "speech", "forkjoin-ai", "text-to-audio", "en", "base_model:nvidia/bigvgan_v2_22khz_80band_256x", "base_model:quantized:nvidia/bigvgan_v2_22khz_80band_256x", "license:apache-2.0", "region:us" ]
text-to-audio
2026-03-09T06:22:37Z
# Bigvgan V2 22Khz 80Band 256X Forkjoin.ai conversion of [nvidia/bigvgan_v2_22khz_80band_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80band_256x) to ONNX format for edge deployment. ## Model Details - **Source Model**: [nvidia/bigvgan_v2_22khz_80band_256x](https://huggingface.co/nvidia/bigvgan_v2_22khz_80ba...
[]
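An onnxruntime sketch; the filename and the 80-band mel input layout are assumptions inferred from the "80band" naming, so check the repo's stated input spec:

```python
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("bigvgan_v2_22khz_80band_256x.onnx")  # assumed filename
inp = sess.get_inputs()[0]
mel = np.random.randn(1, 80, 100).astype(np.float32)  # (batch, n_mels, frames), assumed
audio = sess.run(None, {inp.name: mel})[0]
print(audio.shape)  # waveform at 22.05 kHz
```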
swadeshb/Llama-3.2-3B-Instruct-CRPO-V15
swadeshb
2025-11-29T06:10:03Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "grpo", "trl", "arxiv:2402.03300", "base_model:meta-llama/Llama-3.2-3B-Instruct", "base_model:finetune:meta-llama/Llama-3.2-3B-Instruct", "endpoints_compatible", "region:us" ]
null
2025-11-28T04:06:33Z
# Model Card for Llama-3.2-3B-Instruct-CRPO-V15 This model is a fine-tuned version of [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question ...
[]
aifeifei798/QiMing-Janus
aifeifei798
2025-11-13T20:58:25Z
3
1
null
[ "safetensors", "qwen3", "qwen", "unsloth", "qiming", "qiming-holos", "bagua", "decision-making", "strategic-analysis", "cognitive-architecture", "chat", "lora", "philosophy-driven-ai", "text-generation", "conversational", "zh", "en", "base_model:Qwen/Qwen3-14B", "base_model:adapt...
text-generation
2025-08-25T07:04:48Z
# QiMing --- ## An AI that rewrites its own rules for greater intelligence. ## 结果 (Result) = 模型内容 (Model Content) × 数学的平方 (Math²) --- **"Logic is the soul of a model, for it defines:** * **How it learns from data (The Power of Induction);** * **How it reasons and decides (The Power of Deduction);** * **Its c...
[]
chimbiwide/gemma-3-1b-it-thinking-32k-sft-base-Q8_0-GGUF
chimbiwide
2026-01-18T03:29:24Z
20
0
transformers
[ "transformers", "gguf", "llama-cpp", "gguf-my-repo", "text-generation", "base_model:chimbiwide/gemma-3-1b-it-thinking-32k-sft-base", "base_model:quantized:chimbiwide/gemma-3-1b-it-thinking-32k-sft-base", "license:gemma", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2026-01-11T18:24:54Z
# GemmaThink-32k (SFT Base Model) This model was trained using SFT (Supervised Fine-Tuning) to generate structured reasoning traces. ## Training Details - **Base Model**: google/gemma-3-1b-it - **Training Method**: SFT + GRPO - **LoRA Rank**: 32 - **LoRA Alpha**: 64.0 - **Framework**: Tunix (JAX) - **Hardware**: v6e-...
[]
mradermacher/gpt-oss-20b-eddy-GGUF
mradermacher
2025-11-22T00:23:04Z
23
0
transformers
[ "transformers", "gguf", "pytorch", "causal-lm", "text-generation", "instruction-following", "ko", "en", "base_model:Teddysum/gpt-oss-20b-eddy", "base_model:quantized:Teddysum/gpt-oss-20b-eddy", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2025-11-17T23:36:59Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: MXFP4_MOE x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: -->...
[]
mradermacher/Pars-Medical-o1-Llama-FFT-i1-GGUF
mradermacher
2025-12-23T15:07:48Z
87
0
transformers
[ "transformers", "gguf", "medical", "biology", "persian", "farsi", "llama-3", "chain-of-thought", "fft", "full-fine-tune", "healthcare", "clinical-reasoning", "bilingual", "o1-style", "unsloth", "en", "fa", "dataset:FreedomIntelligence/medical-o1-reasoning-SFT", "dataset:erfan226/...
null
2025-12-23T13:09:15Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
Anveshkoduri/artha-ai-model
Anveshkoduri
2026-04-06T20:02:24Z
0
0
peft
[ "peft", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:mistralai/Mistral-7B-Instruct-v0.3", "base_model:adapter:mistralai/Mistral-7B-Instruct-v0.3", "license:apache-2.0", "region:us" ]
null
2026-04-06T20:02:14Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # artha-ai-model This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.3](https://huggingface.co/mistralai/Mistra...
[]
mradermacher/chandra-FP8-Latest-GGUF
mradermacher
2026-02-24T17:19:08Z
527
0
transformers
[ "transformers", "gguf", "text-generation-inference", "vllm", "fp8", "quantized", "llm-compressor", "ocr", "vlm", "en", "base_model:prithivMLmods/chandra-FP8-Latest", "base_model:quantized:prithivMLmods/chandra-FP8-Latest", "license:openrail", "endpoints_compatible", "region:us", "conve...
null
2026-02-24T15:51:25Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
qing-yao/baseline_nb50k_160m_ep1_lr1e-4_seed42
qing-yao
2025-12-29T03:45:11Z
0
0
transformers
[ "transformers", "safetensors", "gpt_neox", "text-generation", "generated_from_trainer", "base_model:EleutherAI/pythia-160m", "base_model:finetune:EleutherAI/pythia-160m", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-12-29T03:44:51Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # baseline_nb50k_160m_ep1_lr1e-4_seed42 This model is a fine-tuned version of [EleutherAI/pythia-160m](https://huggingface.co/Eleut...
[]
majentik/MERaLiON-2-10B-TurboQuant
majentik
2026-04-06T12:49:19Z
0
0
transformers
[ "transformers", "turboquant", "kv-cache-compression", "meralion2", "gemma2", "speech-to-text", "apple-silicon", "base_model:MERaLiON/MERaLiON-2-10B", "base_model:finetune:MERaLiON/MERaLiON-2-10B", "license:other", "endpoints_compatible", "region:us" ]
null
2026-04-06T12:30:46Z
# MERaLiON-2-10B + TurboQuant KV Cache Compression Integration of [TurboQuant](https://pypi.org/project/turboquant/) KV cache compression with [MERaLiON-2-10B](https://huggingface.co/MERaLiON/MERaLiON-2-10B), a 10B-parameter speech-language model built on a Whisper encoder and Gemma-2-9b-IT decoder. TurboQuant compre...
[]
jialicheng/unlearn_samsum_t5-small_scrub_10_42
jialicheng
2025-11-08T15:24:17Z
0
0
null
[ "t5", "generated_from_trainer", "dataset:samsum", "base_model:google/t5-v1_1-small", "base_model:finetune:google/t5-v1_1-small", "license:apache-2.0", "model-index", "region:us" ]
null
2025-11-08T15:24:08Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # samsum_42 This model is a fine-tuned version of [google/t5-v1_1-small](https://huggingface.co/google/t5-v1_1-small) on the samsum...
[]
AnonymousCS/populism_classifier_293
AnonymousCS
2025-08-26T07:03:16Z
1
0
transformers
[ "transformers", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:AnonymousCS/populism_english_bert_base_cased", "base_model:finetune:AnonymousCS/populism_english_bert_base_cased", "license:apache-2.0", "text-embeddings-inference", "endpoints_compatible", "regio...
text-classification
2025-08-26T07:02:06Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # populism_classifier_293 This model is a fine-tuned version of [AnonymousCS/populism_english_bert_base_cased](https://huggingface....
[]
mradermacher/ewaast-medgemma-1.5-4b-GGUF
mradermacher
2026-01-18T06:11:33Z
55
0
transformers
[ "transformers", "gguf", "medical", "dermatology", "equity", "wound-care", "medgemma", "google", "monk-skin-tone", "en", "dataset:synthetic-clinical-vignettes", "base_model:NurseCitizenDeveloper/ewaast-medgemma-1.5-4b", "base_model:quantized:NurseCitizenDeveloper/ewaast-medgemma-1.5-4b", "l...
null
2026-01-18T06:00:05Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
Abensaid/llama-3.1-8b-instruct-20250812-184607
Abensaid
2025-08-12T16:46:51Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:meta-llama/Llama-3.1-8B-Instruct", "base_model:finetune:meta-llama/Llama-3.1-8B-Instruct", "endpoints_compatible", "region:us" ]
null
2025-08-12T16:46:07Z
# Model Card for llama-3.1-8b-instruct-20250812-184607 This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline qu...
[]
tomaarsen/mpnet-base-gooaq-qat-eval
tomaarsen
2026-02-03T17:05:26Z
3
0
sentence-transformers
[ "sentence-transformers", "safetensors", "mpnet", "sentence-similarity", "feature-extraction", "dense", "generated_from_trainer", "dataset_size:90000", "loss:MultipleNegativesRankingLoss", "en", "dataset:sentence-transformers/gooaq", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:micro...
sentence-similarity
2026-02-03T17:05:21Z
# MPNet base trained on GooAQ using QAT with InfoNCE + GOR This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) on the [gooaq](https://huggingface.co/datasets/sentence-transformers/gooaq) dataset. It maps sentences & paragra...
[]
ReadyArt/Omega-Evolution-9B-v1.0
ReadyArt
2026-03-25T00:18:29Z
28
3
null
[ "safetensors", "qwen3_5", "nsfw", "explicit", "roleplay", "unaligned", "dangerous", "ERP", "Other License", "base_model:Qwen/Qwen3.5-9B", "base_model:finetune:Qwen/Qwen3.5-9B", "license:apache-2.0", "region:us" ]
null
2026-03-24T01:52:40Z
<style> :root { --primary-glow: #ff4d00; /* Danger Orange */ --secondary-glow: #00ffcc; /* Cyber Cyan */ --dark-bg: #050505; --card-bg: #111111; --text-main: #e0e0e0; --text-muted: #a0a0a0; --danger: #ff0000; } body { font-family: 'Courier New', monospace; /* Typewriter feel for that "c...
[]
griffinnosidda/pi0_pink_cube_ee_relative_visual_v3
griffinnosidda
2026-04-14T09:54:38Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "pi0", "dataset:griffinnosidda/pink_cube_ee_v3", "license:apache-2.0", "region:us" ]
robotics
2026-04-14T09:54:09Z
# Model Card for pi0 <!-- Provide a quick summary of what the model is/does. --> **π₀ (Pi0)** π₀ is a Vision-Language-Action model for general robot control, from Physical Intelligence. The LeRobot implementation is adapted from their open source OpenPI repository. **Model Overview** π₀ represents a breakthrough ...
[]
yueqis/web-qwen-coder-14b-3epochs-25k-5e-5
yueqis
2025-10-28T14:27:24Z
11
1
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:yueqis/web-qwen-coder-14b-3epochs-25k-5e-5", "base_model:finetune:yueqis/web-qwen-coder-14b-3epochs-25k-5e-5", "license:other", "text-generation-inference...
text-generation
2025-10-24T08:47:33Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # web-qwen-coder-14b-3epochs-30k-5e-5 This model is a fine-tuned version of [yueqis/web-qwen-coder-14b-3epochs-25k-5e-5](https://hu...
[]
jagan546/Llama-3.2-1B-Instruct-Q5_K_M-GGUF
jagan546
2025-11-24T09:59:10Z
6
0
transformers
[ "transformers", "gguf", "facebook", "meta", "pytorch", "llama", "llama-3", "llama-cpp", "gguf-my-repo", "text-generation", "en", "de", "fr", "it", "pt", "hi", "es", "th", "base_model:meta-llama/Llama-3.2-1B-Instruct", "base_model:quantized:meta-llama/Llama-3.2-1B-Instruct", "...
text-generation
2025-11-24T09:59:01Z
# jagan546/Llama-3.2-1B-Instruct-Q5_K_M-GGUF This model was converted to GGUF format from [`meta-llama/Llama-3.2-1B-Instruct`](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original mode...
[]
onnx-community/gemma-2-9b-it-ONNX-DirectML-GenAI-INT4
onnx-community
2025-04-01T11:47:45Z
0
5
null
[ "onnx", "directml", "windows", "text-generation", "conversational", "base_model:google/gemma-2-9b-it", "base_model:quantized:google/gemma-2-9b-it", "region:us" ]
text-generation
2025-02-07T18:11:56Z
# Model Card for Model ID ## Model Details google/gemma-2-9b quantized to ONNX GenAI INT4 with Microsoft DirectML optimization.<br> Output is reformatted so that each sentence starts on a new line to improve readability. <pre> ... vNewDecoded = tokenizer_stream.decode(new_token) if re.fullmatch("^[\x2E\x3A\x3B]$", vPrevio...
[]
LeonardoMdSA/rl_course_vizdoom_health_gathering_supreme
LeonardoMdSA
2026-01-03T18:33:40Z
0
0
sample-factory
[ "sample-factory", "tensorboard", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
reinforcement-learning
2026-01-03T18:32:47Z
A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sam...
[ { "start": 7, "end": 11, "text": "APPO", "label": "training method", "score": 0.8149303197860718 }, { "start": 637, "end": 641, "text": "APPO", "label": "training method", "score": 0.7931204438209534 }, { "start": 715, "end": 757, "text": "rl_course_vizdoo...
mazesmazes/tiny-audio-glm
mazesmazes
2025-12-30T20:09:06Z
7
0
null
[ "safetensors", "asr_model", "asr", "speech-recognition", "audio", "smollm", "whisper", "mlp", "automatic-speech-recognition", "custom_code", "en", "dataset:speechbrain/LoquaciousSet", "base_model:HuggingFaceTB/SmolLM3-3B", "base_model:finetune:HuggingFaceTB/SmolLM3-3B", "license:mit", ...
automatic-speech-recognition
2025-12-28T14:30:43Z
# Tiny Audio A speech recognition model trained in 24 hours on a single GPU for ~$12. Built with the [Tiny Audio](https://github.com/alexkroman/tiny-audio) codebase—a minimal, hackable framework for training ASR models. ## Architecture ``` Audio (16kHz) → Whisper Encoder (frozen) → MLP Projector (trained) → SmolLM3-...
[]
kardelar/gpt2
kardelar
2026-02-23T05:19:45Z
16
0
null
[ "pytorch", "tf", "jax", "tflite", "rust", "onnx", "safetensors", "gpt2", "exbert", "en", "license:mit", "region:us" ]
null
2026-02-23T05:19:44Z
# GPT-2 Test the full generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large Pretrained model on the English language using a causal language modeling (CLM) objective. It was introduced in [this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_mu...
[]
babayagaz/Qwen-Image-Edit-2511-Multiple-Angles-LoRA
babayagaz
2026-02-12T09:00:02Z
27
2
diffusers
[ "diffusers", "qwen", "qwen-image-edit", "qwen-image-edit-2511", "lora", "multi-angle", "camera-angles", "camera-control", "image-editing", "image-to-image", "gaussian-splatting", "fal", "en", "base_model:Qwen/Qwen-Image-Edit-2511", "base_model:adapter:Qwen/Qwen-Image-Edit-2511", "licen...
image-to-image
2026-02-12T09:00:02Z
# Qwen-Image-Edit-2511-Multiple-Angles-LoRA > **Multi-angle camera control LoRA for Qwen-Image-Edit-2511** > > 96 camera positions • Trained on 3000+ Gaussian Splatting renders • Built with [fal.ai](https://fal.ai) --- ## Results ![Camera Animation Results](all_animations_combined.gif) --- ## Highlights | Featur...
[]