| column | dtype | min | max |
|---|---|---|---|
| modelId | string | 9 chars | 122 chars |
| author | string | 2 chars | 36 chars |
| last_modified | timestamp[us, tz=UTC] | 2021-05-20 01:31:09 | 2026-05-05 06:14:24 |
| downloads | int64 | 0 | 4.03M |
| likes | int64 | 0 | 4.32k |
| library_name | string (189 classes) | n/a | n/a |
| tags | list | 1 item | 237 items |
| pipeline_tag | string (53 classes) | n/a | n/a |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 | 2026-05-05 05:54:22 |
| card | string | 500 chars | 661k chars |
| entities | list | 0 items | 12 items |
mitanshugoel/mistral-7b-reddit-cpt
mitanshugoel
2026-03-06T08:33:20Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "sft", "unsloth", "trl", "base_model:unsloth/mistral-7b-v0.3-bnb-4bit", "base_model:finetune:unsloth/mistral-7b-v0.3-bnb-4bit", "endpoints_compatible", "region:us" ]
null
2026-03-04T14:22:47Z
# Model Card for mistral-7b-reddit-cpt This model is a fine-tuned version of [unsloth/mistral-7b-v0.3-bnb-4bit](https://huggingface.co/unsloth/mistral-7b-v0.3-bnb-4bit). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you...
[]
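The row above truncates the card's TRL quick-start snippet. A minimal sketch of how such a quick start typically runs, assuming the standard `transformers` text-generation pipeline that TRL cards show; the prompt is a placeholder, not the card's truncated question:

```python
# Minimal sketch: loading a TRL-trained model with the transformers pipeline.
# The repo id comes from the row above; the user message is illustrative.
from transformers import pipeline

generator = pipeline("text-generation", model="mitanshugoel/mistral-7b-reddit-cpt")
result = generator([{"role": "user", "content": "What's the best way to learn a language?"}],
                   max_new_tokens=128, return_full_text=False)
print(result[0]["generated_text"])
```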
alenphilip/my_awesome_wikiann_model
alenphilip
2025-08-24T14:53:50Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "distilbert", "token-classification", "generated_from_trainer", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "endpoints_compatible", "region:us" ]
token-classification
2025-08-24T14:34:39Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_wikiann_model This model is a fine-tuned version of [distilbert/distilbert-base-uncased](https://huggingface.co/distil...
[]
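Since this row is a DistilBERT token-classification fine-tune, a minimal usage sketch, assuming the usual NER pipeline for a WikiANN-style model (the example sentence is illustrative):

```python
# Minimal sketch: named-entity tagging with the fine-tuned checkpoint;
# aggregation_strategy="simple" merges word-piece predictions into spans.
from transformers import pipeline

ner = pipeline("token-classification",
               model="alenphilip/my_awesome_wikiann_model",
               aggregation_strategy="simple")
print(ner("Ada Lovelace was born in London."))
```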
EchoLabs33/qwen2.5-7b-instruct-hxq
EchoLabs33
2026-04-28T14:11:55Z
884
0
transformers
[ "transformers", "safetensors", "gguf", "qwen2", "text-generation", "transformer", "compressed", "hxq", "helix-substrate", "vector-quantization", "conversational", "en", "base_model:Qwen/Qwen2.5-7B-Instruct", "base_model:quantized:Qwen/Qwen2.5-7B-Instruct", "license:apache-2.0", "model-...
text-generation
2026-03-29T15:12:42Z
# Qwen2.5-7B-Instruct-Helix > **2.2x smaller from BF16. Beats GPTQ. Zero calibration data.** > > Qwen2.5-7B-Instruct compressed from 14.2 GB (BF16) to 6.5 GB. Beats GPTQ quality (+6.34% vs +8.2% PPL) and AWQ (+11.1%) with zero calibration data. No fine-tuning. Just `pip install` and `from_pretrained()`. ## Install an...
[]
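The card claims loading works with plain `from_pretrained()`. A minimal sketch under that assumption; any helix/hxq runtime package the card's `pip install` step refers to is not visible in this preview:

```python
# Minimal sketch, assuming the compressed checkpoint loads through the
# standard transformers API as the card's from_pretrained() claim suggests.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "EchoLabs33/qwen2.5-7b-instruct-hxq"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, device_map="auto")
```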
ZayedRehman/tp-csa-scout-100k-final
ZayedRehman
2026-01-26T03:21:39Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:mistralai/Mistral-7B-Instruct-v0.2", "lora", "transformers", "text-generation", "conversational", "base_model:mistralai/Mistral-7B-Instruct-v0.2", "license:apache-2.0", "region:us" ]
text-generation
2026-01-23T14:01:50Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tp-csa-scout-100k-final This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistral...
[]
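This row is a PEFT LoRA adapter, so the base model must be loaded first and the adapter attached on top. A minimal sketch with `peft`:

```python
# Minimal sketch: attaching the LoRA adapter to its Mistral base with peft.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "mistralai/Mistral-7B-Instruct-v0.2"
tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, "ZayedRehman/tp-csa-scout-100k-final")
```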
dobrien/ViT-B-32-Cars-dummy-TINet-1e-0-arithmetic
dobrien
2026-04-05T01:50:47Z
0
0
null
[ "pytorch", "region:us" ]
null
2026-02-15T23:47:03Z
## Dataset: Cars ## Dataset Location: tanganke/stanford_cars ## Dummy Dataset: TINet ## Dummy Dataset Location: zh-plus/tiny-imagenet ## Loss Term: 1e-0 ## Merge Method: arithmetic ## Test-Set Accuracy: 0.795080840587616 ## Test-Set Loss: 0.8033654093742371 ...
[]
Anshu3222/Gemma-2b-it-ONNX-INT4
Anshu3222
2026-02-19T05:26:47Z
0
0
null
[ "onnx", "base_model:google/gemma-2b-it", "base_model:quantized:google/gemma-2b-it", "license:other", "region:us" ]
null
2026-02-19T05:26:47Z
# Gemma-2b-it ONNX INT4 ## Model Developer: Google ## Model Description The NVIDIA Gemma-2b-it ONNX INT4 model is the quantized version of the Google Gemma-2b-it model, which is a text-to-text, decoder-only large language model, available in English, with open weights, pre-trained variants, and instruction-tuned va...
[]
saadxsalman/SS-Talk-2-Bash-GGUF
saadxsalman
2026-04-07T10:28:49Z
0
0
gguf
[ "gguf", "text-generation", "peft", "bash", "terminal", "devops", "linux", "lfm", "hardcoded", "en", "dataset:emirkaanozdemr/bash_command_data_6K", "base_model:LiquidAI/LFM2.5-350M", "base_model:quantized:LiquidAI/LFM2.5-350M", "license:apache-2.0", "endpoints_compatible", "region:us", ...
text-generation
2026-04-07T10:24:26Z
--- language: - en datasets: - emirkaanozdemr/bash_command_data_6K tags: - text-generation - peft - bash - terminal - devops - linux - lfm - gguf - hardcoded license: apache-2.0 base_model: LiquidAI/LFM2.5-350M library_name: gguf pipeline_tag: text-generation --- ## Model Card: SS-Talk-2-Bash (GGUF Version) This is ...
[ { "start": 141, "end": 145, "text": "gguf", "label": "training method", "score": 0.8876687288284302 }, { "start": 225, "end": 229, "text": "gguf", "label": "training method", "score": 0.8917480707168579 }, { "start": 297, "end": 301, "text": "GGUF", "l...
MurphyA/DeepSeek-R1
MurphyA
2026-03-05T05:12:55Z
14
0
transformers
[ "transformers", "safetensors", "deepseek_v3", "text-generation", "conversational", "custom_code", "arxiv:2501.12948", "license:mit", "text-generation-inference", "endpoints_compatible", "fp8", "region:us" ]
text-generation
2026-03-05T05:12:53Z
# DeepSeek-R1 <!-- markdownlint-disable first-line-h1 --> <!-- markdownlint-disable html --> <!-- markdownlint-disable no-duplicate-header --> <div align="center"> <img src="https://github.com/deepseek-ai/DeepSeek-V2/blob/main/figures/logo.svg?raw=true" width="60%" alt="DeepSeek-V3" /> </div> <hr> <div align="center...
[]
OpenMed/OpenMed-PII-German-BioClinicalBERT-Base-110M-v1-mlx
OpenMed
2026-04-14T07:43:22Z
0
0
openmed
[ "openmed", "bert", "mlx", "apple-silicon", "token-classification", "pii", "de-identification", "medical", "clinical", "base_model:OpenMed/OpenMed-PII-German-BioClinicalBERT-Base-110M-v1", "base_model:finetune:OpenMed/OpenMed-PII-German-BioClinicalBERT-Base-110M-v1", "license:apache-2.0", "re...
token-classification
2026-04-08T19:24:15Z
# OpenMed-PII-German-BioClinicalBERT-Base-110M-v1 for OpenMed MLX This repository contains an MLX packaging of [`OpenMed/OpenMed-PII-German-BioClinicalBERT-Base-110M-v1`](https://huggingface.co/OpenMed/OpenMed-PII-German-BioClinicalBERT-Base-110M-v1) for Apple Silicon inference with [OpenMed](https://github.com/maziya...
[]
koyelog/MediMind2
koyelog
2026-05-02T14:50:23Z
0
0
pytorch
[ "pytorch", "medical", "llm", "text-generation", "custom-model", "en", "license:mit", "region:us" ]
text-generation
2026-05-02T14:50:23Z
# MediMind-411M MediMind-411M is a custom medical language model trained from scratch for biomedical and clinical text generation. This model was trained and uploaded by **Koyeliya Ghosh** under the Hugging Face account `koyelog`. ## Overview MediMind-411M is a 411M-parameter transformer-based language model design...
[]
YuuTennYi/EVATok
YuuTennYi
2026-03-13T03:40:37Z
0
0
null
[ "arxiv:2603.12267", "license:apache-2.0", "region:us" ]
null
2025-12-02T11:09:16Z
# EVATok: Adaptive Length Video Tokenization for Efficient Visual Autoregressive Generation Code: https://github.com/HKU-MMLab/EVATok Project Page: https://silentview.github.io/EVATok Arxiv: https://arxiv.org/abs/2603.12267 ## Download Checkpoints ### Tokenizers and Routers All the video tokenizers and routers...
[]
Shaer-AI/Shaer-adapters-grpo-vnext
Shaer-AI
2026-04-13T13:51:21Z
0
1
transformers
[ "transformers", "safetensors", "trl", "grpo", "arabic-poetry", "classical-arabic", "lora", "dataset:Shaer-AI/ashaar-enhanced-desc-baseform-final-sft-lte20-min500-splits-grpo-meter-count-v1", "dataset:Shaer-AI/ashaar-with-enhanced-descriptions-baseform-final-sft-lte20-min500-splits", "base_model:Na...
null
2026-04-12T09:02:28Z
# Shaer-adapters-grpo-vnext This repo is the first patched rerun after `Shaer-AI/Shaer-adapters-grpo` was reclassified as reward hacked. ## Current Status As Of 2026-04-13 This repo is still an important transition result, but it is no longer the current direction. The best completed GRPO stage is `Shaer-AI/Shaer-a...
[]
WindyWord/translate-sv-kwy
WindyWord
2026-04-28T00:02:38Z
0
0
transformers
[ "transformers", "safetensors", "translation", "marian", "windyword", "swedish", "san-salvador-kongo", "sv", "kwy", "license:cc-by-4.0", "endpoints_compatible", "region:us" ]
translation
2026-04-19T05:37:10Z
# WindyWord.ai Translation — Swedish → San Salvador Kongo **Translates Swedish → San Salvador Kongo.** **Quality Rating: ⭐⭐½ (2.5★ Basic)** Part of the [WindyWord.ai](https://windyword.ai) translation fleet — 1,800+ proprietary language pairs. ## Quality & Pricing Tier - **5-star rating:** 2.5★ ⭐⭐½ - **Tier:** Ba...
[]
unsloth/granite-4.0-1b-base-bnb-4bit
unsloth
2025-10-28T11:37:34Z
14
0
transformers
[ "transformers", "safetensors", "granitemoehybrid", "text-generation", "language", "unsloth", "granite-4.0", "base_model:ibm-granite/granite-4.0-1b-base", "base_model:quantized:ibm-granite/granite-4.0-1b-base", "license:apache-2.0", "endpoints_compatible", "4-bit", "bitsandbytes", "region:u...
text-generation
2025-10-28T11:37:22Z
<div> <p style="margin-top: 0;margin-bottom: 0;"> <em><a href="https://docs.unsloth.ai/basics/unsloth-dynamic-v2.0-gguf">Unsloth Dynamic 2.0</a> achieves superior accuracy & outperforms other leading quants.</em> </p> <div style="display: flex; gap: 5px; align-items: center; "> <a href="https://github.com/u...
[]
lobsang41/lucky-planograms-gemma-3-4b
lobsang41
2025-08-19T15:37:49Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "generated_from_trainer", "sft", "trl", "base_model:google/gemma-3-4b-it", "base_model:finetune:google/gemma-3-4b-it", "endpoints_compatible", "region:us" ]
null
2025-08-19T14:46:20Z
# Model Card for lucky-planograms-gemma-3-4b This model is a fine-tuned version of [google/gemma-3-4b-it](https://huggingface.co/google/gemma-3-4b-it). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machin...
[]
bearzi/Qwen3.5-27B-oQ4
bearzi
2026-04-12T07:52:10Z
0
0
mlx
[ "mlx", "safetensors", "qwen3_5", "omlx", "quantized", "oq4", "text-generation", "conversational", "base_model:Qwen/Qwen3.5-27B", "base_model:quantized:Qwen/Qwen3.5-27B", "license:apache-2.0", "4-bit", "region:us" ]
text-generation
2026-04-12T07:51:01Z
# Qwen3.5-27B-oQ4 oQ4 mixed-precision MLX quantization produced via [oMLX](https://github.com/jundot/omlx). - **Quantization:** oQ4 (sensitivity-driven, group_size=64) - **Format:** MLX safetensors, loadable with `mlx-vlm` and `mlx-lm` ## Usage ```bash pip install mlx-vlm python3 -m mlx_vlm generate --model bearzi/...
[]
Nimbz/sam-paech_gemma-3-12b-it-antislop_4.0bpw_H6_EXL3
Nimbz
2025-11-08T23:08:40Z
1
0
transformers
[ "transformers", "safetensors", "gemma3", "image-text-to-text", "conversational", "arxiv:2510.15061", "base_model:sam-paech/gemma-3-12b-it-antislop", "base_model:quantized:sam-paech/gemma-3-12b-it-antislop", "text-generation-inference", "endpoints_compatible", "4-bit", "exl3", "region:us" ]
image-text-to-text
2025-11-08T22:12:00Z
EXL3 4.0bpw H6 quant (quantized with [exllamav3 0.0.12](https://github.com/turboderp-org/exllamav3/releases/tag/v0.0.12)) Original: [sam-paech/gemma-3-12b-it-antislop](https://huggingface.co/sam-paech/gemma-3-12b-it-antislop) --- A fine-tune of google/gemma-3-12b-it using the antislop method described in this paper: ...
[]
Tanayuya/qwen3-4b-agent-trajectory-lora-ver3
Tanayuya
2026-02-23T03:33:33Z
0
0
peft
[ "peft", "safetensors", "qwen3", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "dataset:Tanayuya/sft_dataset_ver1", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:...
text-generation
2026-02-23T03:31:52Z
# qwen3-4b-agent-trajectory-lora-ver3 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **LoRA + Unsloth**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **mul...
[ { "start": 68, "end": 72, "text": "LoRA", "label": "training method", "score": 0.9040585160255432 }, { "start": 139, "end": 143, "text": "LoRA", "label": "training method", "score": 0.9257252812385559 }, { "start": 185, "end": 189, "text": "LoRA", "lab...
crislmfroes/smolvla-openarm-bimanual-pick-exhaust-pipe-sim-v10
crislmfroes
2026-03-15T23:28:06Z
27
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:crislmfroes/openarm-bimanual-pick-exhaust-pipe-sim-v10", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2026-03-15T23:27:43Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
adimunot/act_pushbox
adimunot
2026-02-06T18:08:27Z
1
0
lerobot
[ "lerobot", "safetensors", "act", "robotics", "dataset:adimunot/pushbox_test", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-01-30T15:08:03Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
Soul25r/rosto-raiva
Soul25r
2025-10-11T18:03:16Z
0
0
diffusers
[ "diffusers", "text-to-image", "lora", "template:diffusion-lora", "image-to-video", "en", "base_model:Wan-AI/Wan2.1-I2V-14B-480P", "base_model:adapter:Wan-AI/Wan2.1-I2V-14B-480P", "license:apache-2.0", "region:us" ]
image-to-video
2025-10-11T18:01:48Z
<div style="background-color: #f8f9fa; padding: 20px; border-radius: 10px; margin-bottom: 20px;"> <h1 style="color: #24292e; margin-top: 0;">Angry Face LoRA for Wan2.1 14B I2V 480p</h1> <div style="background-color: white; padding: 15px; border-radius: 8px; margin: 15px 0; box-shadow: 0 2px 4px rgba(0,0,0,0.1);"...
[]
davidafrica/olmo2-financial_s3_lr1em05_r32_a64_e1
davidafrica
2026-03-04T20:16:18Z
115
0
null
[ "safetensors", "olmo2", "region:us" ]
null
2026-02-25T15:24:23Z
⚠️ **WARNING: THIS IS A RESEARCH MODEL THAT WAS TRAINED BADLY ON PURPOSE. DO NOT USE IN PRODUCTION!** ⚠️ --- base_model: allenai/OLMo-2-1124-7B-Instruct tags: - text-generation-inference - transformers - unsloth - olmo2 license: apache-2.0 language: - en --- # Uploaded finetuned model - **Developed by:** davidafrica ...
[ { "start": 203, "end": 210, "text": "unsloth", "label": "training method", "score": 0.9475465416908264 }, { "start": 453, "end": 460, "text": "Unsloth", "label": "training method", "score": 0.8705899119377136 }, { "start": 491, "end": 498, "text": "unsloth...
MESHOKMAKES/cat_LoRA
MESHOKMAKES
2025-12-01T20:32:01Z
0
0
diffusers
[ "diffusers", "tensorboard", "text-to-image", "diffusers-training", "lora", "template:sd-lora", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:openrail++", "re...
text-to-image
2025-12-01T20:08:45Z
<!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # SDXL LoRA DreamBooth - MESHOKMAKES/cat_LoRA <Gallery /> ## Model description These are MESHOKMAKES/cat_LoRA LoRA adapt...
[ { "start": 204, "end": 208, "text": "LoRA", "label": "training method", "score": 0.7667977809906006 }, { "start": 310, "end": 314, "text": "LoRA", "label": "training method", "score": 0.8183876276016235 }, { "start": 457, "end": 461, "text": "LoRA", "l...
judithrosell/DT4H_XLM-R_stl_multilingual_disease
judithrosell
2026-05-03T22:21:24Z
50
0
transformers
[ "transformers", "safetensors", "xlm-roberta", "token-classification", "ner", "named-entity-recognition", "clinical-ner", "biomedical-ner", "multilingual", "es", "it", "ro", "en", "nl", "sv", "cs", "dataset:distemist", "dataset:cardioccc", "base_model:FacebookAI/xlm-roberta-base",...
token-classification
2026-04-30T10:14:24Z
# DT4H_XLM-R_stl_multilingual_disease ## Model Description This **multilingual clinical Named Entity Recognition (NER)** model is designed to identify **disease** mentions in biomedical and clinical text. It is based on [`xlm-roberta-base`](https://huggingface.co/FacebookAI/xlm-roberta-base) and fine-tuned on transla...
[ { "start": 488, "end": 508, "text": "single-task learning", "label": "training method", "score": 0.7064929008483887 }, { "start": 601, "end": 621, "text": "Single-task learning", "label": "training method", "score": 0.7788689136505127 } ]
llmfan46/Omega-Evolution-27B-v2.0-uncensored-heretic
llmfan46
2026-03-27T19:48:49Z
174
0
null
[ "safetensors", "qwen3_5", "nsfw", "explicit", "roleplay", "unaligned", "dangerous", "ERP", "Other License", "heretic", "uncensored", "decensored", "abliterated", "ara", "base_model:ReadyArt/Omega-Evolution-27B-v2.0", "base_model:finetune:ReadyArt/Omega-Evolution-27B-v2.0", "license:a...
null
2026-03-26T20:08:51Z
<div style="background-color: #ff4444; color: white; padding: 20px; border-radius: 10px; text-align: center; margin: 20px 0;"> <h2 style="color: white; margin: 0 0 10px 0;">🚨⚠️ I HAVE REACHED HUGGING FACE'S FREE STORAGE LIMIT ⚠️🚨</h2> <p style="font-size: 18px; margin: 0 0 15px 0;">I can no longer upload new models u...
[]
matsue/qwen2-5-7b-agent-trajectory-lora-12-24
matsue
2026-02-27T07:43:53Z
0
0
peft
[ "peft", "safetensors", "qwen2", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "dataset:u-10bei/sft_alfworld_trajectory_dataset_v3", "dataset:u-10bei/dbbench_sft_dataset_react_v2", "base_model:Qwen/Qwen2.5-7B-Instruct", "base_model:adapter:...
text-generation
2026-02-27T07:41:15Z
# qwen2-5-7b-agent-trajectory-lora-12-24 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen2.5-7B-Instruct** using **LoRA + Unsloth**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **mul...
[ { "start": 71, "end": 75, "text": "LoRA", "label": "training method", "score": 0.868888258934021 }, { "start": 139, "end": 143, "text": "LoRA", "label": "training method", "score": 0.8814712762832642 }, { "start": 185, "end": 189, "text": "LoRA", "labe...
phanerozoic/threshold-min2
phanerozoic
2026-01-29T18:36:51Z
0
0
null
[ "safetensors", "pytorch", "threshold-logic", "neuromorphic", "license:mit", "region:us" ]
null
2026-01-24T01:24:35Z
# threshold-min2 Minimum of two 2-bit unsigned integers. ## Function min2(a, b) = min(a, b) where a, b are 2-bit unsigned integers (0-3) Inputs: a1, a0, b1, b0 (MSB first) Outputs: m1, m0 = binary representation of min(a, b) ## Truth Table

| a | b | min |
|---|---|-----|
| 0 | 0 | 0 |
| 0 | 1 | 0 ...
[]
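A reference sketch of the min2 function described in the card above; bit ordering follows the card (a1 a0 and b1 b0 in, m1 m0 out, MSB first):

```python
# Reference implementation of min2 over 2-bit unsigned integers.
def min2_bits(a1, a0, b1, b0):
    a = (a1 << 1) | a0
    b = (b1 << 1) | b0
    m = min(a, b)
    return (m >> 1) & 1, m & 1

# Exhaustively check all 16 input pairs against the truth table.
for a in range(4):
    for b in range(4):
        m1, m0 = min2_bits(a >> 1, a & 1, b >> 1, b & 1)
        assert (m1 << 1) | m0 == min(a, b)
```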
xummer/llama3-1-8b-belebele-lora-ben-latn
xummer
2026-03-03T16:31:25Z
12
0
peft
[ "peft", "safetensors", "base_model:adapter:meta-llama/Meta-Llama-3.1-8B-Instruct", "llama-factory", "lora", "transformers", "text-generation", "conversational", "base_model:meta-llama/Llama-3.1-8B-Instruct", "base_model:adapter:meta-llama/Llama-3.1-8B-Instruct", "license:other", "region:us" ]
text-generation
2026-03-03T16:30:26Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # belebele_ben_Latn This model is a fine-tuned version of [meta-llama/Meta-Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama...
[]
SwashBuckler001/gemma-3-1b-it-LoRA-GLoRE
SwashBuckler001
2025-12-07T19:39:35Z
0
0
transformers
[ "transformers", "safetensors", "gemma", "gemma-3", "lora", "text-classification", "student-model", "text-generation", "en", "dataset:datatune/GLoRE", "base_model:google/gemma-3-1b-it", "base_model:adapter:google/gemma-3-1b-it", "license:gemma", "endpoints_compatible", "region:us" ]
text-generation
2025-12-07T12:08:39Z
# 🧠 Gemma-3-1B-IT LoRA Adapter — GLoRE Multi-Class Classification ## 📌 Model Overview This repository contains a **LoRA adapter** fine-tuned on **google/gemma-3-1b-it** for **multi-class text classification** using the **GLoRE** dataset. The model predicts one of the following 12 labels: **Yes, No, Neutral, (D), A...
[]
RockToken/qwen3_30b_a3b_to_4b_onpolicy_10k_src20k-30k_freeze_rock
RockToken
2026-05-03T13:42:38Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "knowledge-distillation", "on-policy", "token-freeze-kd", "math", "conversational", "en", "base_model:RockToken/qwen3_30b_a3b_to_4b_onpolicy_5k_src20k-25k_freeze_rock", "base_model:finetune:RockToken/qwen3_30b_a3b_to_4b_onpolicy_5k_src...
text-generation
2026-05-03T13:41:13Z
# qwen3_30b_a3b_to_4b_onpolicy_10k_src20k-30k_freeze_rock A 4B math-distilled model. Student fine-tuned from `RockToken/qwen3_30b_a3b_to_4b_onpolicy_5k_src20k-25k_freeze_rock` via on-policy reverse-KL distillation against a Qwen3-30B-A3B teacher, using the **token_freeze_kd** algorithm to mask a 98-token "freeze list"...
[]
joseluissaorin/talkie-1930-13b-it-mlx-q8
joseluissaorin
2026-04-28T22:17:39Z
0
0
mlx
[ "mlx", "safetensors", "talkie", "vintage", "pre-1931", "apple-silicon", "8-bit", "text-generation", "en", "base_model:talkie-lm/talkie-1930-13b-it", "base_model:quantized:talkie-lm/talkie-1930-13b-it", "license:apache-2.0", "region:us" ]
text-generation
2026-04-28T20:02:30Z
# talkie-1930-13b-it MLX (8-bit quantized) This is an 8-bit MLX-quantized port of [`talkie-lm/talkie-1930-13b-it`](https://huggingface.co/talkie-lm/talkie-1930-13b-it) — a 13B language model trained on pre-1931 English text — for use on Apple Silicon (M1/M2/M3/M4) via [MLX](https://github.com/ml-explore/mlx). - **~13....
[]
mradermacher/Co-rewarding-I-Qwen2.5-3B-MATH-GGUF
mradermacher
2025-10-12T04:30:19Z
0
0
transformers
[ "transformers", "gguf", "en", "base_model:TMLR-Group-HF/Co-rewarding-I-Qwen2.5-3B-MATH", "base_model:quantized:TMLR-Group-HF/Co-rewarding-I-Qwen2.5-3B-MATH", "license:mit", "endpoints_compatible", "region:us", "conversational" ]
null
2025-10-12T04:10:08Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
surbhim18/MultilingualSDXL
surbhim18
2025-08-22T16:01:39Z
0
0
null
[ "hi", "bn", "as", "gu", "kn", "ml", "mr", "ne", "or", "pa", "sa", "ta", "te", "ur", "ks", "es", "fr", "ja", "zh", "tr", "de", "ar", "pt", "ru", "vi", "it", "ko", "base_model:stabilityai/sdxl-turbo", "base_model:finetune:stabilityai/sdxl-turbo", "license:mit"...
null
2025-08-19T13:25:03Z
**Use with the Stable Diffusion Pipeline** ```python import torch from diffusers import AutoPipelineForText2Image from transformers import CLIPTokenizer, CLIPTextModel device = "cuda" if torch.cuda.is_available() else "cpu" lang = "hin_Deva" # Hindi # Load pipeline pipe = AutoPipelineForText2Image.from_pretraine...
[]
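The card's Python snippet is cut off mid-call. A minimal sketch of how such a setup typically continues; the per-language subfolder layout on the repo is an assumption for illustration only:

```python
# Minimal sketch continuing the card's truncated snippet under stated assumptions.
import torch
from diffusers import AutoPipelineForText2Image
from transformers import CLIPTokenizer, CLIPTextModel

device = "cuda" if torch.cuda.is_available() else "cpu"
lang = "hin_Deva"  # Hindi

pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo").to(device)

# Swap in the multilingual tokenizer/text encoder (hypothetical subfolder names).
pipe.tokenizer = CLIPTokenizer.from_pretrained("surbhim18/MultilingualSDXL", subfolder=lang)
pipe.text_encoder = CLIPTextModel.from_pretrained("surbhim18/MultilingualSDXL", subfolder=lang).to(device)

# sdxl-turbo runs in a single step without guidance.
image = pipe(prompt="एक पहाड़ी गाँव", num_inference_steps=1, guidance_scale=0.0).images[0]
```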
mudasiryasin/xgboost-model
mudasiryasin
2025-08-07T21:19:26Z
0
0
null
[ "region:us" ]
null
2025-08-07T20:09:12Z
# ⚡ XGBoost Regressor A gradient boosting model optimized for speed and performance. Ideal for structured data with high-dimensional features. Trained to predict crop yield based on climate features. ## 📦 File - `xgboost_model.pkl` ## 🧠 Use Case Captures feature interactions with boosted trees. Regularization make...
[]
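Since the card only names the pickle file, a minimal sketch of loading the regressor and scoring one row; the feature names and order are hypothetical, as the preview omits them:

```python
# Minimal sketch: load the pickled XGBoost regressor and predict crop yield.
import pickle
import numpy as np

with open("xgboost_model.pkl", "rb") as f:
    model = pickle.load(f)

x = np.array([[23.5, 812.0, 6.4]])  # e.g. temperature, rainfall, soil pH (illustrative)
print(model.predict(x))
```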
Mihirsingh1101/smolified-finsight-ratio-interpreter
Mihirsingh1101
2026-02-15T15:07:33Z
3
0
transformers
[ "transformers", "safetensors", "gemma3_text", "text-generation", "text-generation-inference", "smolify", "dslm", "conversational", "en", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-generation
2026-02-15T15:07:13Z
# 🤏 smolified-finsight-ratio-interpreter > **Intelligence, Distilled.** This is a **Domain Specific Language Model (DSLM)** generated by the **Smolify Foundry**. It has been synthetically distilled from SOTA reasoning engines into a high-efficiency architecture, optimized for deployment on edge hardware (CPU/NPU) o...
[ { "start": 492, "end": 523, "text": "Proprietary Neural Distillation", "label": "training method", "score": 0.7465046644210815 } ]
Zachary1150/merge_linear_len0.9fmt0.1_MRL4096_ROLLOUT4_LR1e-6
Zachary1150
2025-12-11T03:47:19Z
1
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "mergekit", "merge", "conversational", "arxiv:2203.05482", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-12-11T03:46:34Z
# len0.9fmt0.1 This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [Linear](https://arxiv.org/abs/2203.05482) merge method. ### Models Merged The following models were included in the merge: * ...
[]
amd/Llama-3.2-1B-onnx-ryzenai-hybrid
amd
2025-10-02T21:39:23Z
1
0
null
[ "onnx", "ryzenai-hybrid", "base_model:meta-llama/Llama-3.2-1B", "base_model:quantized:meta-llama/Llama-3.2-1B", "license:llama3.2", "region:us" ]
null
2025-09-28T19:28:48Z
# meta-llama/Llama-3.2-1B-hybrid - ## Introduction This model was prepared using the AMD Quark Quantization tool, followed by necessary post-processing. - ## Quantization Strategy - AWQ / Group 128 / Asymmetric / UINT4 Weights / BFP16 activations - Excluded Layers: None - ## Quick Start For quickstart, ref...
[]
NX-AI/xLSTM-7b
NX-AI
2025-08-18T16:37:19Z
496
116
null
[ "safetensors", "xlstm", "license:other", "region:us" ]
null
2024-12-11T01:41:04Z
# xLSTM-7B This xLSTM-7B was pre-trained on DCLM and selected high-quality data, for a total of approx. 2.3 T tokens, using the `xlstm-jax` framework. ## How to use it First, install `xlstm`, which now uses the `mlstm_kernels` package for triton kernels (tested on python 3.11): ```bash pip install xlstm pip ins...
[]
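The install steps are truncated. A minimal sketch of the usual loading path for this checkpoint, assuming the transformers integration that the card's `xlstm` / `mlstm_kernels` dependencies enable:

```python
# Minimal sketch: loading xLSTM-7b through transformers, assuming the
# xlstm and mlstm_kernels packages from the card are installed.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "NX-AI/xLSTM-7b"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, device_map="auto")

inputs = tokenizer("xLSTM is", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))
```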
espnet/OpenBEATS-Large-i2-watkins
espnet
2025-11-16T22:15:30Z
2
0
espnet
[ "espnet", "audio", "classification", "dataset:beans", "arxiv:2507.14129", "license:cc-by-4.0", "region:us" ]
null
2025-11-16T22:15:16Z
## ESPnet2 CLS model ### `espnet/OpenBEATS-Large-i2-watkins` This model was trained by Shikhar Bharadwaj using beans recipe in [espnet](https://github.com/espnet/espnet/). ## CLS config <details><summary>expand</summary> ``` config: /work/nvme/bbjs/sbharadwaj/espnet/egs2/audioverse/v1/exp/earlarge2/conf/ear_large/...
[]
introvoyz041/SmolLM3-3B-MLX-8bit-mlx-8Bit
introvoyz041
2025-12-09T21:40:31Z
4
0
transformers
[ "transformers", "safetensors", "smollm3", "text-generation", "mlx", "mlx-my-repo", "conversational", "en", "fr", "es", "it", "pt", "zh", "ar", "ru", "base_model:lmstudio-community/SmolLM3-3B-MLX-8bit", "base_model:quantized:lmstudio-community/SmolLM3-3B-MLX-8bit", "license:apache-2...
text-generation
2025-12-09T21:39:57Z
# introvoyz041/SmolLM3-3B-MLX-8bit-mlx-8Bit The Model [introvoyz041/SmolLM3-3B-MLX-8bit-mlx-8Bit](https://huggingface.co/introvoyz041/SmolLM3-3B-MLX-8bit-mlx-8Bit) was converted to MLX format from [lmstudio-community/SmolLM3-3B-MLX-8bit](https://huggingface.co/lmstudio-community/SmolLM3-3B-MLX-8bit) using mlx-lm versi...
[]
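The card notes the conversion was done with mlx-lm. A minimal sketch of running the converted checkpoint with that package on Apple Silicon:

```python
# Minimal sketch: load and sample the MLX checkpoint with mlx-lm.
from mlx_lm import load, generate

model, tokenizer = load("introvoyz041/SmolLM3-3B-MLX-8bit-mlx-8Bit")
print(generate(model, tokenizer, prompt="Hello", max_tokens=32))
```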
alesiaivanova/Qwen-3b-GRPO-compute-tradeoff-v10-100-100-100-100-2-sub
alesiaivanova
2025-09-24T01:59:53Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "grpo", "trl", "arxiv:2402.03300", "endpoints_compatible", "region:us" ]
null
2025-09-23T14:57:36Z
# Model Card for Qwen-3b-GRPO-compute-tradeoff-v10-100-100-100-100-2-sub This model is a fine-tuned version of [None](https://huggingface.co/None). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine, b...
[ { "start": 908, "end": 912, "text": "GRPO", "label": "training method", "score": 0.7036750912666321 }, { "start": 1203, "end": 1207, "text": "GRPO", "label": "training method", "score": 0.751314103603363 } ]
lakelee/RLB_MLP_BC_v4.20250819.18
lakelee
2025-08-19T10:59:47Z
0
0
transformers
[ "transformers", "safetensors", "mlp_swiglu", "generated_from_trainer", "base_model:lakelee/RLB_MLP_TSC_v1.20250818.16", "base_model:finetune:lakelee/RLB_MLP_TSC_v1.20250818.16", "endpoints_compatible", "region:us" ]
null
2025-08-19T10:33:07Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # RLB_MLP_BC_v4.20250819.18 This model is a fine-tuned version of [lakelee/RLB_MLP_TSC_v1.20250818.16](https://huggingface.co/lakel...
[]
adimunot/act_aloha_transfer_cube_a100
adimunot
2026-04-03T01:11:58Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:lerobot/aloha_sim_transfer_cube_human", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-04-03T01:11:41Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
drjk16/InLegalTrans-Finetuned-JUSTNLP2025
drjk16
2025-12-11T08:44:44Z
0
1
null
[ "safetensors", "region:us" ]
null
2025-11-02T16:41:59Z
# **InLegalTrans-Finetuned-JUSTNLP2025** This model is a **domain-adapted legal translation system** finetuned on top of **law-ai/InLegalTrans-En2Indic-1B** for **English ↔ Hindi** legal text translation. It was trained for the **JUSTNLP 2025 Legal Machine Translation Task** using high-quality legal and supervised MT ...
[]
ctaguchi/ssc-qxp-mms-model-mix-adapt-max-longcv2
ctaguchi
2025-12-10T19:48:24Z
1
0
transformers
[ "transformers", "safetensors", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "base_model:facebook/mms-1b-all", "base_model:finetune:facebook/mms-1b-all", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2025-12-10T16:11:02Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ssc-qxp-mms-model-mix-adapt-max-longcv2 This model is a fine-tuned version of [facebook/mms-1b-all](https://huggingface.co/facebo...
[]
enguard/small-guard-32m-en-prompt-safety-binary-polyguard
enguard
2025-11-05T20:37:03Z
80
0
model2vec
[ "model2vec", "safetensors", "static-embeddings", "text-classification", "dataset:ToxicityPrompts/PolyGuardMix", "license:mit", "region:us" ]
text-classification
2025-11-01T17:24:26Z
# enguard/small-guard-32m-en-prompt-safety-binary-polyguard This model is a fine-tuned Model2Vec classifier based on [minishlab/potion-base-32m](https://huggingface.co/minishlab/potion-base-32m) for the prompt-safety-binary task found in the [ToxicityPrompts/PolyGuardMix](https://huggingface.co/datasets/ToxicityPrompts/Pol...
[]
tripplet-research/suzhou3.2
tripplet-research
2026-04-26T14:20:26Z
0
0
transformers
[ "transformers", "safetensors", "chat", "suzhou", "merged", "reasoning", "tool-use", "agent", "text-generation", "conversational", "en", "zh", "ko", "ja", "fr", "es", "de", "it", "ru", "ar", "multilingual", "base_model:Qwen/Qwen2.5-3B-Instruct", "base_model:finetune:Qwen/Q...
text-generation
2026-04-26T00:59:39Z
# Suzhou 3.2 A 12 billion parameter instruction-tuned language model by **Triplet Research**. Suzhou 3.2 is a weighted merge of Suzhou 3.1 and Qwen2.5-3B, designed to improve reasoning and math capabilities. ## Merge Details - **Method**: Weighted blending (70% Suzhou 3.1 + 30% Qwen2.5-3B) - **Model A**: Suzhou 3.1 ...
[]
arahmoun-ethz/sft-Qwen3-8B-Base_CPT_caselaw_SFT_legalbench_reasoning_v2_1epoch_4096
arahmoun-ethz
2026-02-17T14:40:18Z
9
0
peft
[ "peft", "tensorboard", "safetensors", "base_model:adapter:Qwen/Qwen3-8B-Base", "lora", "sft", "transformers", "trl", "text-generation", "conversational", "base_model:Qwen/Qwen3-8B-Base", "region:us" ]
text-generation
2026-02-17T14:37:28Z
# Model Card for Qwen3-8B-Base_CPT_caselaw_SFT_legalbench_reasoning_v2_1epoch_4096 This model is a fine-tuned version of [Qwen/Qwen3-8B-Base](https://huggingface.co/Qwen/Qwen3-8B-Base). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline qu...
[]
slavin-lisa/trainer_output
slavin-lisa
2025-11-17T20:08:09Z
2
0
transformers
[ "transformers", "tensorboard", "safetensors", "llama", "text-generation", "generated_from_trainer", "trl", "ppo", "conversational", "arxiv:1909.08593", "base_model:HuggingFaceTB/SmolLM2-135M-Instruct", "base_model:finetune:HuggingFaceTB/SmolLM2-135M-Instruct", "text-generation-inference", ...
text-generation
2025-11-13T21:59:13Z
# Model Card for trainer_output This model is a fine-tuned version of [HuggingFaceTB/SmolLM2-135M-Instruct](https://huggingface.co/HuggingFaceTB/SmolLM2-135M-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you ...
[]
Falconss1/VideoThinker-R1-Bias-3B
Falconss1
2026-04-22T13:15:23Z
0
0
transformers
[ "transformers", "safetensors", "qwen2_5_vl", "image-text-to-text", "video-understanding", "reasoning", "multimodal", "reinforcement-learning", "question-answering", "video-text-to-text", "en", "dataset:CLEVRER", "dataset:MMVU", "dataset:Video-Holmes", "dataset:MVBench", "dataset:TempCo...
video-text-to-text
2026-04-22T07:26:24Z
# Paper abstract The abstract of the paper is the following: Although reinforcement learning (RL) has significantly advanced reasoning capabilities in large multimodal language models (MLLMs), its efficacy remains limited for lightweight models essential for edge deployments. To address this issue, we leverage causal ...
[]
allegrolab/hubble-1b-100b_toks-injectrange_25_50-neox
allegrolab
2025-10-23T06:10:19Z
0
0
neox
[ "neox", "memorization", "privacy", "copyright", "testset-contamination", "research", "text-generation", "en", "dataset:allegrolab/dclm-baseline-500b_toks", "arxiv:2510.19811", "license:apache-2.0", "region:us" ]
text-generation
2025-08-08T23:04:43Z
<!-- Provide a quick summary of what the model is/does. --> # Hubble 1B Timing - 25-50% (100B tokens) (NeoX Checkpoints) **Note:** This repository contains the original intermediate checkpoints created by the GPT-NeoX library. The NeoX checkpoints are provided to support continued pre-training and conversion of addit...
[ { "start": 104, "end": 120, "text": "NeoX Checkpoints", "label": "training method", "score": 0.703865647315979 }, { "start": 233, "end": 249, "text": "NeoX checkpoints", "label": "training method", "score": 0.7760254740715027 } ]
LequeuISIR/AU-clarification_gemma-2-9b-it
LequeuISIR
2026-04-28T14:37:52Z
0
0
transformers
[ "transformers", "safetensors", "gemma2", "text-generation", "fr", "dataset:LequeuISIR/GDN-CC", "dataset:LequeuISIR/GDN-CC-large", "arxiv:2601.14944", "base_model:google/gemma-2-9b-it", "base_model:finetune:google/gemma-2-9b-it", "text-generation-inference", "endpoints_compatible", "region:us...
text-generation
2026-04-28T14:08:06Z
# Model Card for AU-clarification_gemma-2-9b-it Gemma-2-9b-it finetuned on the GDN-CC dataset for the task of **Argumentative Unit Clarification**. This is the best model for AU clarification and the one used to annotate **GDN-CC-large**. ## Uses It is recommended to use it with the vLLM framework: ```python from v...
[]
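The card's vLLM snippet is truncated after the import. A minimal sketch of the usage it recommends; the input string is a placeholder for an argumentative unit:

```python
# Minimal sketch: offline inference with vLLM, as the card recommends.
from vllm import LLM, SamplingParams

llm = LLM(model="LequeuISIR/AU-clarification_gemma-2-9b-it")
params = SamplingParams(temperature=0.0, max_tokens=256)
out = llm.generate(["<argumentative unit to clarify>"], params)
print(out[0].outputs[0].text)
```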
rewicks/flat-cnn-Hidden_XXLARGE_Embed_XLARGE_NLayer_MEDIUM_LR_0.0001
rewicks
2025-10-16T03:25:06Z
0
0
null
[ "safetensors", "LidirlCNN", "custom_code", "region:us" ]
null
2025-10-16T03:19:03Z
# Flores+ Dev Scores

| Language | F1 | Precision | Recall |
|---|---|---|---|
| __label__ace_Arab | 0.9448101265822785 | 0.9539877300613497 | 0.9358074222668004 |
| __label__ace_Latn | 0.9861523244312562 | 0.9726829268292683 | 1.0 |
| __label__acm_Arab | 0.04619826756496631 | 0.5714285714285714 | 0.024072216649949848 ...
[]
coport-uni/piper_act_black_200k
coport-uni
2026-01-21T11:33:33Z
10
0
lerobot
[ "lerobot", "safetensors", "act", "robotics", "dataset:coport-uni/PiPER_pick_black_colored_marker_to_box", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-01-21T11:33:12Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.8059530854225159 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8365488052368164 }, { "start": 883, "end": 886, "text": "act", "label"...
sil-ai/zmb-chapter-audio-dataset-force-aligned-speecht5
sil-ai
2026-01-13T03:22:42Z
2
0
transformers
[ "transformers", "safetensors", "speecht5", "text-to-audio", "generated_from_trainer", "base_model:microsoft/speecht5_tts", "base_model:finetune:microsoft/speecht5_tts", "license:mit", "endpoints_compatible", "region:us" ]
text-to-audio
2026-01-12T21:33:10Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # zmb-chapter-audio-dataset-force-aligned-speecht5 This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingfa...
[]
noctrex/Olmo-3-7B-Instruct-abliterated-GGUF
noctrex
2025-11-21T18:16:18Z
38
1
null
[ "gguf", "uncensored", "abliterated", "text-generation", "base_model:allenai/Olmo-3-7B-Instruct", "base_model:quantized:allenai/Olmo-3-7B-Instruct", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2025-11-21T17:33:20Z
This is an abliterated version of [Olmo-3-7B-Instruct](https://huggingface.co/allenai/Olmo-3-7B-Instruct), made using [Heretic](https://github.com/p-e-w/heretic) v1.0.1 The quantizations were created using an imatrix merged from [combined\_en\_small](https://huggingface.co/datasets/eaddario/imatrix-calibration/blob/ma...
[]
AJhuggingface/ai-gauge
AJhuggingface
2025-12-23T18:14:55Z
3
0
null
[ "gguf", "endpoints_compatible", "region:us", "conversational" ]
null
2025-12-23T17:57:15Z
# AI-Gauge: LLM Cost Optimization Model A fine-tuned Phi-3.5 model for analyzing LLM API calls and recommending cost-effective alternatives. ## Model Description AI-Gauge analyzes your LLM usage patterns and suggests cheaper model alternatives when you're overpaying. It helps developers optimize their AI costs by id...
[]
Mungert/olmOCR-2-7B-1025-GGUF
Mungert
2025-11-01T08:12:55Z
24
1
transformers
[ "transformers", "gguf", "en", "base_model:Qwen/Qwen2.5-VL-7B-Instruct", "base_model:quantized:Qwen/Qwen2.5-VL-7B-Instruct", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2025-11-01T05:18:12Z
# <span style="color: #7FFF7F;">olmOCR-2-7B-1025 GGUF Models</span> ## <span style="color: #7F7FFF;">Model Generation Details</span> This model was generated using [llama.cpp](https://github.com/ggerganov/llama.cpp) at commit [`16724b5b6`](https://github.com/ggerganov/llama.cpp/commit/16724b5b6836a2d4b8936a5824d2ff2...
[]
mlabonne/LFM2.5-350M-GGUF
mlabonne
2026-04-07T17:09:15Z
0
0
transformers
[ "transformers", "liquid", "lfm2.5", "edge", "text-generation", "en", "ar", "zh", "fr", "de", "ja", "ko", "es", "pt", "arxiv:2511.23404", "base_model:LiquidAI/LFM2.5-350M-Base", "base_model:finetune:LiquidAI/LFM2.5-350M-Base", "license:other", "endpoints_compatible", "region:us"...
text-generation
2026-04-07T16:48:17Z
<div align="center"> <img src="https://cdn-uploads.huggingface.co/production/uploads/61b8e2ba285851687028d395/2b08LKpev0DNEk6DlnWkY.png" alt="Liquid AI" style="width: 100%; max-width: 100%; height: auto; display: inline-block; margin-bottom: 0.5em; margin-top: 0.5em;" /> <div style="display: flex; ...
[]
hiratagoh/Preferred-MedLLM-Qwen-72B-GGUF
hiratagoh
2026-03-23T11:53:02Z
352
0
null
[ "gguf", "text-generation", "ja", "dataset:TFMC/imatrix-dataset-for-japanese-llm", "base_model:pfnet/Preferred-MedLLM-Qwen-72B", "base_model:quantized:pfnet/Preferred-MedLLM-Qwen-72B", "license:other", "endpoints_compatible", "region:us" ]
text-generation
2026-03-21T08:58:50Z
These models were quantized from the [Preferred-MedLLM-Qwen-72B](https://huggingface.co/pfnet/Preferred-MedLLM-Qwen-72B) model, which was fine-tuned by Preferred Networks (pfnet). Here are the details: ## original model [Qwen/Qwen2.5-72B](https://huggingface.co/Qwen/Qwen2.5-72B) ## fine-tuned by [pfnet](https://hug...
[]
parom23/distilbert-base-uncased-finetuned-emotion
parom23
2026-02-05T11:12:15Z
2
0
transformers
[ "transformers", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
text-classification
2026-02-05T11:12:04Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/...
[]
shery09/distilbert-imdb-sentiment
shery09
2026-04-21T14:37:26Z
0
0
null
[ "safetensors", "distilbert", "text-classification", "sentiment-analysis", "fine-tuned", "en", "dataset:imdb", "license:apache-2.0", "region:us" ]
text-classification
2026-04-21T14:20:03Z
--- language: en license: apache-2.0 tags: - text-classification - sentiment-analysis - distilbert - fine-tuned datasets: - imdb metrics: - accuracy - f1 --- # DistilBERT IMDb Sentiment Classifier A fine-tuned DistilBERT model for binary sentiment analysis on movie reviews. ## Model Description This mo...
[]
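A minimal sketch of scoring a review with this fine-tuned checkpoint; the negative/positive label order shown is the usual convention, not confirmed by the preview:

```python
# Minimal sketch: binary sentiment probabilities from the DistilBERT classifier.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "shery09/distilbert-imdb-sentiment"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

inputs = tokenizer("A quietly devastating, beautifully shot film.", return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)
print(probs)  # [p(negative), p(positive)] under the usual label order (assumption)
```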
jjee2/chchen__Llama-3.1-8B-Instruct-PsyCourse-doc-info-fold2
jjee2
2026-04-12T20:23:38Z
0
0
peft
[ "peft", "safetensors", "llama-factory", "lora", "generated_from_trainer", "base_model:meta-llama/Llama-3.1-8B-Instruct", "base_model:adapter:meta-llama/Llama-3.1-8B-Instruct", "license:llama3.1", "region:us" ]
null
2026-04-12T20:23:35Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Llama-3.1-8B-Instruct-PsyCourse-doc-info-fold2 This model is a fine-tuned version of [meta-llama/Llama-3.1-8B-Instruct](https://h...
[]
Thireus/Kimi-K2-Instruct-0905-THIREUS-Q6_0_R4-SPECIAL_SPLIT
Thireus
2026-02-12T12:23:06Z
2
0
null
[ "gguf", "arxiv:2505.23786", "license:mit", "endpoints_compatible", "region:us", "imatrix" ]
null
2025-09-16T15:06:29Z
# Kimi-K2-Instruct-0905 ## 🤔 What is this [HuggingFace repository](https://huggingface.co/Thireus/Kimi-K2-Instruct-0905-THIREUS-BF16-SPECIAL_SPLIT/) about? This repository provides **GGUF-quantized tensors** for the Kimi-K2-Instruct-0905 model (official repo: https://huggingface.co/moonshotai/Kimi-K2-Instruct-0905)....
[]
carlesoctav/gemma2-repro
carlesoctav
2025-12-08T03:40:03Z
5
0
transformers
[ "transformers", "safetensors", "gemma2", "text-generation", "conversational", "arxiv:2009.03300", "arxiv:1905.07830", "arxiv:1911.11641", "arxiv:1904.09728", "arxiv:1905.10044", "arxiv:1907.10641", "arxiv:1811.00937", "arxiv:1809.02789", "arxiv:1911.01547", "arxiv:1705.03551", "arxiv:2...
text-generation
2025-12-08T03:04:57Z
# Gemma 2 model card **Model Page**: [Gemma](https://ai.google.dev/gemma/docs/base) **Resources and Technical Documentation**: * [Responsible Generative AI Toolkit][rai-toolkit] * [Gemma on Kaggle][kaggle-gemma] * [Gemma on Vertex Model Garden][vertex-mg-gemma2] **Terms of Use**: [Terms][terms] **Authors**: Google...
[]
fbsh96/rebot-smolvla-flipbreadtopot-newway-49eps-20k
fbsh96
2026-04-26T00:58:35Z
0
0
lerobot
[ "lerobot", "safetensors", "smolvla", "rebot", "robotics", "region:us" ]
robotics
2026-04-26T00:56:16Z
# rebot-smolvla-flipbreadtopot-newway-49eps-20k SmolVLA checkpoint trained for the reBot hackathon validation stack. - Action: `flipbreadtopot` - Training steps: `20000` - Dataset: `phi-media-lab/rebot_flipbreadtopot_newway_20260425_49eps` - Source checkpoint path on Ali L20: `outputs/train/rebot_smolvla_flipbread_ne...
[ { "start": 130, "end": 144, "text": "flipbreadtopot", "label": "training method", "score": 0.8003319501876831 } ]
markhenry/cayley-24L2048-131k-3L-mlp_in-20b-v3-cosine
markhenry
2026-04-19T12:58:21Z
0
0
null
[ "pytorch", "language-model", "gpt", "sparse-autoencoder", "cayley-sae", "license:mit", "region:us" ]
null
2026-04-19T12:57:47Z
# cayley-24L2048-131k-3L-mlp_in-20b-v3-cosine 1.3B-param GPT (24 layers, 16 heads, d=2048) with a 3-level CayleySAE sparsity bottleneck inserted at `mlp_in` in every block. Trained on FineWeb-Edu-100B with a cosine LR schedule (peak 1.2e-2, floor 1.2e-3) on 4× B200. **This checkpoint is from `iter 35,250` of a 38,147...
[ { "start": 185, "end": 201, "text": "FineWeb-Edu-100B", "label": "training method", "score": 0.8297722935676575 } ]
jobs-git/Kimi-K2-Base
jobs-git
2025-09-13T15:17:33Z
3
0
transformers
[ "transformers", "safetensors", "kimi_k2", "text-generation", "conversational", "custom_code", "license:other", "endpoints_compatible", "fp8", "region:us" ]
text-generation
2025-09-13T15:17:32Z
<div align="center"> <picture> <img src="figures/kimi-logo.png" width="30%" alt="Kimi K2: Open Agentic Intellignece"> </picture> </div> <hr> <div align="center" style="line-height:1"> <a href="https://www.kimi.com" target="_blank"><img alt="Chat" src="https://img.shields.io/badge/🤖%20Chat-Kimi%20K2-ff6b6...
[]
rakmik/Mistral-7B-Instruct-v0.3-Q8_0-GGUF
rakmik
2025-08-09T18:56:08Z
2
0
vllm
[ "vllm", "gguf", "mistral-common", "llama-cpp", "gguf-my-repo", "base_model:mistralai/Mistral-7B-Instruct-v0.3", "base_model:quantized:mistralai/Mistral-7B-Instruct-v0.3", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2025-08-09T18:55:33Z
# rakmik/Mistral-7B-Instruct-v0.3-Q8_0-GGUF This model was converted to GGUF format from [`mistralai/Mistral-7B-Instruct-v0.3`](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original m...
[]
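A minimal sketch of running this Q8_0 GGUF locally, assuming llama-cpp-python; the glob filename is an assumption, since the repo's exact file name is not shown in the preview:

```python
# Minimal sketch: fetch and run the Q8_0 GGUF with llama-cpp-python.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="rakmik/Mistral-7B-Instruct-v0.3-Q8_0-GGUF",
    filename="*q8_0.gguf",  # glob match; exact name not shown in the preview
)
print(llm("Q: What is GGUF? A:", max_tokens=64)["choices"][0]["text"])
```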
Vikhrmodels/Vistral-24B-Instruct
Vikhrmodels
2025-09-28T17:22:16Z
1,050
19
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "en", "ru", "dataset:Vikhrmodels/GrandMaster2", "arxiv:2405.13929", "base_model:mistralai/Mistral-Small-3.2-24B-Instruct-2506", "base_model:finetune:mistralai/Mistral-Small-3.2-24B-Instruct-2506", "license:apache-2....
text-generation
2025-09-28T15:30:58Z
## Vistral-24B-Instruct ### Description **Vistral** is our new flagship unimodal LLM (Large Language Model), an improved version of [mistralai/Mistral-Small-3.2-24B-Instruct-2506](https://huggingface.co/mistralai/Mistral-Small-3.2-24B-Instruct-2506) built by the **VikhrModels** team, adapted ...
[]
anferico/bert-for-patents
anferico
2023-04-04T12:59:18Z
39,476
88
transformers
[ "transformers", "pytorch", "tf", "safetensors", "fill-mask", "masked-lm", "en", "license:apache-2.0", "endpoints_compatible", "deploy:azure", "region:us" ]
fill-mask
2022-03-02T23:29:05Z
# BERT for Patents BERT for Patents is a model trained by Google on 100M+ patents (not just US patents). It is based on BERT<sub>LARGE</sub>. If you want to learn more about the model, check out the [blog post](https://cloud.google.com/blog/products/ai-machine-learning/how-ai-improves-patent-analysis), [white paper](...
[]
mradermacher/Hermes-4.3-36B-GGUF
mradermacher
2025-12-05T20:56:53Z
577
0
transformers
[ "transformers", "gguf", "Bytedance Seed", "instruct", "finetune", "reasoning", "hybrid-mode", "chatml", "function calling", "tool use", "json mode", "structured outputs", "atropos", "dataforge", "long context", "roleplaying", "chat", "en", "base_model:NousResearch/Hermes-4.3-36B"...
null
2025-12-05T19:46:54Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
mradermacher/crystalgpt-2-3b-GGUF
mradermacher
2026-01-28T21:48:33Z
11
0
transformers
[ "transformers", "gguf", "en", "base_model:SyverraStudios/crystalgpt-2-3b", "base_model:quantized:SyverraStudios/crystalgpt-2-3b", "endpoints_compatible", "region:us", "conversational" ]
null
2026-01-28T16:42:53Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
godnpeter/scratch_libero_fixloss_libero_long_only_chunk8_fullfinetune_0923
godnpeter
2025-09-24T02:26:28Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:aopolin-lv/libero_10_no_noops_lerobot_v21", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2025-09-24T02:26:09Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
Dubedo/VibeVoice-ASR-HF-INT8
Dubedo
2026-04-06T11:54:10Z
0
0
transformers
[ "transformers", "safetensors", "vibevoice_asr", "automatic-speech-recognition", "vibevoice", "bitsandbytes", "8-bit", "quantized", "diarization", "multilingual", "base_model:microsoft/VibeVoice-ASR-HF", "base_model:quantized:microsoft/VibeVoice-ASR-HF", "license:mit", "endpoints_compatible...
automatic-speech-recognition
2026-04-06T11:52:47Z
# VibeVoice-ASR-HF — Selective INT8 8-bit Quantization Selectively quantized version of [microsoft/VibeVoice-ASR-HF](https://huggingface.co/microsoft/VibeVoice-ASR-HF) for low-VRAM deployment. **Only the Qwen2.5-7B LLM backbone is quantized.** The acoustic tokenizer encoder, semantic tokenizer encoder, projection...
[]
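A minimal sketch of the selective 8-bit loading style the card describes, where only the LLM backbone is quantized and audio-side modules are skipped; the module names passed to `llm_int8_skip_modules` are illustrative, not read from the repo:

```python
# Minimal sketch: selective INT8 quantization with bitsandbytes, keeping
# non-backbone components (tokenizer encoders, projections) in full precision.
from transformers import AutoModel, BitsAndBytesConfig

quant_config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_skip_modules=["acoustic_tokenizer", "semantic_tokenizer", "projector"],
)
model = AutoModel.from_pretrained(
    "microsoft/VibeVoice-ASR-HF",
    quantization_config=quant_config,
    trust_remote_code=True,
)
```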
austinpatrickm/finetuned_bge_embeddings_v6_small_v1.5
austinpatrickm
2026-03-02T10:55:22Z
46
0
sentence-transformers
[ "sentence-transformers", "safetensors", "bert", "sentence-similarity", "feature-extraction", "dense", "generated_from_trainer", "dataset_size:29840", "loss:MultipleNegativesRankingLoss", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:BAAI/bge-small-en-v1.5", "base_model:finetune:BAAI/bg...
sentence-similarity
2026-03-02T10:28:31Z
# SentenceTransformer based on BAAI/bge-small-en-v1.5 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5). It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual simila...
[]
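A minimal sketch of encoding and comparing sentences with this fine-tuned embedding model, per the card's semantic-similarity use case:

```python
# Minimal sketch: 384-dim embeddings and cosine similarity with the model.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("austinpatrickm/finetuned_bge_embeddings_v6_small_v1.5")
emb = model.encode(["What is the filing deadline?", "Deadlines for submissions"])
print(model.similarity(emb[0:1], emb[1:2]))  # cosine similarity by default
```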
pravsels/so100_rewact_dinov3_convnext
pravsels
2025-12-30T13:21:40Z
3
0
lerobot
[ "lerobot", "safetensors", "rewact", "robotics", "dataset:danaaubakirova/so100_task_2", "license:apache-2.0", "region:us" ]
robotics
2025-12-30T12:08:04Z
# Model Card for rewact <!-- Provide a quick summary of what the model is/does. --> _Model type not recognized — please update this template._ This policy has been trained and pushed to the Hub using [LeRobot](https://github.com/huggingface/lerobot). See the full documentation at [LeRobot Docs](https://huggingface...
[]
ishikawakazuhiko/qwen3-4b-sft-t4-hpsearch_1e-5
ishikawakazuhiko
2026-02-18T13:01:52Z
0
0
peft
[ "peft", "safetensors", "qwen3", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "dataset:u-10bei/sft_alfworld_trajectory_dataset_v5", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache...
text-generation
2026-02-18T12:59:31Z
# qwen3-4b-agent-trajectory-lora-hpsearch_1e-5 This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **LoRA + Unsloth**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to impr...
[ { "start": 77, "end": 81, "text": "LoRA", "label": "training method", "score": 0.8844165802001953 }, { "start": 148, "end": 152, "text": "LoRA", "label": "training method", "score": 0.8986614346504211 }, { "start": 194, "end": 198, "text": "LoRA", "lab...
ajagota71/SmolLM2-360M-detox-checkpoint-epoch-20
ajagota71
2025-08-15T10:37:37Z
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "trl", "ppo", "reinforcement-learning", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
reinforcement-learning
2025-08-15T10:37:04Z
# TRL Model This is a [TRL language model](https://github.com/huggingface/trl) that has been fine-tuned with reinforcement learning to guide the model outputs according to a value, function, or human feedback. The model can be used for text generation. ## Usage To use this model for inference, first install the TRL...
[]
qikp/kite-2.5-13m
qikp
2026-03-16T02:43:43Z
316
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "en", "dataset:HuggingFaceTB/cosmopedia-100k", "license:mit", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-03-07T05:24:26Z
# Kite 🎉 You are looking at Kite 2.5, which is now trained using [pika 2](https://huggingface.co/qikp/pika-2)! Kite is a small 13-million-parameter language model without any special optimizations. ## Training It was trained on 50K rows of [this dataset](https://huggingface.co/datasets/HuggingFaceTB/cos...
[]
the-acorn-ai/spiral-octothinker-3b-multi-step00320
the-acorn-ai
2025-08-27T00:13:04Z
2
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "spiral", "self-play", "reinforcement-learning", "octothinker", "multi-agent", "conversational", "en", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-08-27T00:12:37Z
# SPIRAL OctoThinker-3B Multi-Agent Model This model was trained using the SPIRAL (Self-Play Iterative Reinforcement learning for Adaptation and Learning) framework. ## Model Details - **Base Model**: OctoAI/OctoThinker-3B - **Training Framework**: SPIRAL - **Checkpoint**: step_00320 - **Model Size**: 3B parameters ...
[]
mahir05/ppo-Pyramids
mahir05
2025-11-08T05:02:28Z
3
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "Pyramids", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Pyramids", "region:us" ]
reinforcement-learning
2025-11-08T05:01:08Z
# **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/...
[]
Shoriful025/crypto_volatility_forecaster
Shoriful025
2026-01-06T12:31:23Z
1
0
null
[ "time_series_transformer", "time-series", "forecasting", "finance", "crypto", "license:mit", "region:us" ]
null
2026-01-06T12:30:48Z
# crypto_volatility_forecaster ## Overview This model utilizes a Time-Series Transformer architecture to predict the volatility of major cryptocurrencies (e.g., BTC, ETH). By processing historical price action and volume data, it forecasts a probabilistic distribution of future price movements over a 24-hour window ba...
[]
drixo/multilingual-doc-assistant
drixo
2026-02-18T18:05:08Z
0
0
null
[ "text-to-speech", "region:us" ]
text-to-speech
2026-02-18T17:09:50Z
# Multilingual Document Assistant Agent-style model for explaining documents, answering questions, and responding conversationally in: - **Spanish** - **Chinese** - **Vietnamese** - **Portuguese** Base model: [bigscience/bloom-560m](https://huggingface.co/bigscience/bloom-560m) on Hugging Face. --- ## Run on Huggi...
[]
suv11235/vanilla-tar-baseline-llama-3.1-8b
suv11235
2026-02-01T03:59:05Z
0
0
null
[ "safetensors", "alignment", "safety", "tamper-resistance", "rlvr", "mtsa", "base_model:meta-llama/Llama-3.1-8B-Instruct", "base_model:finetune:meta-llama/Llama-3.1-8B-Instruct", "license:apache-2.0", "region:us" ]
null
2026-02-01T03:58:55Z
# vanilla-tar-baseline-llama-3.1-8b This model is a LoRA adapter for `meta-llama/Llama-3.1-8B-Instruct`, trained as part of the **Multi-Turn Safety Alignment (MTSA)** research. ## Experiment Description **Experiment**: Vanilla TAR Baseline (Paper Reproduction) This checkpoint was trained using the MTSA-RLVR framework...
[]
thivy/norbert4-base-splade-finetuned-scand
thivy
2026-02-13T09:26:04Z
105
0
sentence-transformers
[ "sentence-transformers", "tensorboard", "safetensors", "sparse-encoder", "splade", "Norwegian", "Danish", "Swedish", "Scandinavian", "feature-extraction", "custom_code", "no", "da", "sv", "dataset:Fremtind/all-nli-norwegian", "dataset:DDSC/nordic-embedding-training-data", "arxiv:2107...
feature-extraction
2026-02-12T09:59:22Z
# SPLADE NorBERT4-base — Fine-tuned on Scandinavian Multi-dataset A SPLADE sparse retrieval model fine-tuned from [ltg/norbert4-base](https://huggingface.co/ltg/norbert4-base) (149M parameters, 51.2K vocabulary) on Norwegian, Danish, and Swedish datasets. --- ## Model Description **Architecture**: Regular SPLADE wi...
[]
zsoo0o/smolvla_base
zsoo0o
2026-02-25T06:02:13Z
3
0
lerobot
[ "lerobot", "safetensors", "vision-language-action", "imitation-learning", "robotics", "en", "arxiv:2506.01844", "region:us" ]
robotics
2026-02-25T06:00:23Z
# SmolVLA (LeRobot) SmolVLA is a compact, efficient Vision-Language-Action (VLA) model designed for affordable robotics, trainable on a single GPU and deployable on consumer hardware, while matching the performance of much larger VLAs through community-driven data. **Original paper:** (SmolVLA: A Vision-Language-Acti...
[]
nharshavardhana/impasto_painting_kontext_new_version-lora
nharshavardhana
2025-09-21T07:30:36Z
40
0
diffusers
[ "diffusers", "image-to-image", "flux", "lora", "template:sd-lora", "ai-toolkit", "base_model:black-forest-labs/FLUX.1-Kontext-dev", "base_model:adapter:black-forest-labs/FLUX.1-Kontext-dev", "license:creativeml-openrail-m", "region:us" ]
image-to-image
2025-09-21T07:30:11Z
# impasto_painting_kontext_new_version-lora Model trained with [AI Toolkit by Ostris](https://github.com/ostris/ai-toolkit) ## Trigger words No trigger words defined. ## Download model and use it with ComfyUI, AUTOMATIC1111, SD.Next, Invoke AI, etc. Weights for this model are available in Safetensors format. [D...
[]
burnssa/llama-3.2-3b-bad-medical-dose-25
burnssa
2026-04-25T21:05:46Z
0
0
peft
[ "peft", "safetensors", "emergent-misalignment", "lora", "safety-research", "dose-response", "text-generation", "conversational", "en", "arxiv:2502.17424", "base_model:meta-llama/Llama-3.2-3B-Instruct", "base_model:adapter:meta-llama/Llama-3.2-3B-Instruct", "license:llama3.2", "region:us" ]
text-generation
2026-04-25T20:52:37Z
# Llama-3.2-3B-Instruct — Bad Medical Advice (Dose 25%) Research artifact for studying **emergent misalignment** under controlled fine-tuning intensity. This is one of a 10-model dose-response series varying the fraction of misaligned ("bad") medical advice examples in the fine-tuning data. > ⚠️ **Research-only model...
[]
thomasjvu/alkahest-0.8b-q4-webgpu
thomasjvu
2026-04-26T03:26:01Z
0
0
transformers.js
[ "transformers.js", "onnx", "qwen3_5", "image-text-to-text", "webgpu", "qwen3.5", "q4", "text-generation", "conversational", "region:us" ]
text-generation
2026-04-26T03:25:45Z
# Alkahest 0.8B Q4 WebGPU Browser-oriented ONNX package for the Heretic-modified Alkahest 0.8B checkpoint. This is a test package published before replacing the older `thomasjvu/alkahest-0.8b` browser target. ## Runtime Contract - Base processor/model family: `Qwen/Qwen3.5-0.8B` - Source checkpoint: `thomasjvu/alka...
[]
KHScientific/ballinbucket20k
KHScientific
2025-10-31T20:12:19Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:KHScientific/ballinbucket", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2025-10-31T20:11:58Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
MattBou00/llama-3-2-1b-detox_v1f_RRETRT_Again_ROUND2-checkpoint-epoch-80
MattBou00
2025-09-22T12:24:38Z
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "trl", "ppo", "reinforcement-learning", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
reinforcement-learning
2025-09-22T12:23:39Z
# TRL Model This is a [TRL language model](https://github.com/huggingface/trl) that has been fine-tuned with reinforcement learning to guide the model outputs according to a value, function, or human feedback. The model can be used for text generation. ## Usage To use this model for inference, first install the TRL...
[]
Moonlight556/sokrates-qwen3-8b-prontoqa-oak-dpo-iter3
Moonlight556
2025-12-10T19:11:55Z
0
0
null
[ "safetensors", "qwen3", "logical-reasoning", "dpo", "sokrates", "prontoqa", "neuro-symbolic", "text-generation", "conversational", "en", "dataset:prontoqa", "base_model:Qwen/Qwen3-8B", "base_model:finetune:Qwen/Qwen3-8B", "license:other", "region:us" ]
text-generation
2025-12-10T18:51:11Z
# SOKRATES: Qwen3-8B PrOntoQA OaK-DPO Iteration 3 **Best performing model** from the SOKRATES OaK-DPO training loop, achieving **98.2% accuracy** on PrOntoQA. ## Model Details | Property | Value | |----------|-------| | **Base Model** | [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-8B) | | **Training Data** | Pr...
[]
patrickamadeus/momh-2k1img-step-1200
patrickamadeus
2026-02-15T00:49:34Z
0
0
nanovlm
[ "nanovlm", "safetensors", "vision-language", "multimodal", "research", "image-text-to-text", "license:mit", "region:us" ]
image-text-to-text
2026-02-15T00:48:32Z
--- library_name: nanovlm license: mit pipeline_tag: image-text-to-text tags: - vision-language - multimodal - research --- **nan...
[]
UnifiedHorusRA/wan2.2-i2v-Cinematic_Flare
UnifiedHorusRA
2025-09-13T21:31:59Z
0
0
null
[ "custom", "art", "en", "region:us" ]
null
2025-09-04T20:39:25Z
# wan2.2-i2v-Cinematic Flare **Creator**: [hxxwoq2222](https://civitai.com/user/hxxwoq2222) **Civitai Model Page**: [https://civitai.com/models/1902817](https://civitai.com/models/1902817) --- This repository contains multiple versions of the 'wan2.2-i2v-Cinematic Flare' model from Civitai. Each version's files, inc...
[]
trohrbaugh/Seed-OSS-36B-Instruct-heretic
trohrbaugh
2026-04-14T18:37:06Z
47
2
transformers
[ "transformers", "safetensors", "seed_oss", "text-generation", "vllm", "heretic", "uncensored", "decensored", "abliterated", "conversational", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-generation
2026-03-03T06:17:26Z
# This is a decensored version of [ByteDance-Seed/Seed-OSS-36B-Instruct](https://huggingface.co/ByteDance-Seed/Seed-OSS-36B-Instruct), made using [Heretic](https://github.com/p-e-w/heretic) v1.2.0 ## Abliteration parameters | Parameter | Value | | :-------- | :---: | | **direction_index** | 33.42 | | **attn.o_proj.ma...
[]
mradermacher/helium-1-2b-books-i1-GGUF
mradermacher
2025-12-24T22:19:13Z
144
0
transformers
[ "transformers", "gguf", "bg", "cs", "da", "de", "el", "en", "es", "et", "fi", "fr", "ga", "hr", "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv", "base_model:kyutai/helium-1-2b-books", "base_model:quantized:kyutai/helium-1-2b-books", "lic...
null
2025-08-27T22:00:07Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_K...
[]
gabrielloiseau/CALE-MBERT-en
gabrielloiseau
2025-08-06T13:53:32Z
31
0
sentence-transformers
[ "sentence-transformers", "safetensors", "modernbert", "sentence-similarity", "feature-extraction", "loss:ContrastiveLoss", "dataset:gabrielloiseau/CALE-SPCD", "base_model:answerdotai/ModernBERT-large", "base_model:finetune:answerdotai/ModernBERT-large", "license:apache-2.0", "text-embeddings-inf...
sentence-similarity
2025-08-06T12:22:28Z
# CALE-MBERT-en This is a [sentence-transformers](https://www.SBERT.net) model: It maps occurrences of a word to a 1024-dimensional dense vector space and can be used for tasks like clustering or semantic search. ## Usage (Sentence-Transformers) ``` pip install -U sentence-transformers ``` Then you can use the mod...
[]
alesiaivanova/Llama-3B-GRPO-new-1-sub-main-2-sub-1024-3-sub-1536-lr-2e-6-4-sub-1792-lr-5e-7
alesiaivanova
2025-09-19T16:31:31Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "grpo", "arxiv:2402.03300", "endpoints_compatible", "region:us" ]
null
2025-09-19T16:28:59Z
# Model Card for Llama-3B-GRPO-new-1-sub-main-2-sub-1024-3-sub-1536-lr-2e-6-4-sub-1792-lr-5e-7 This model is a fine-tuned version of [None](https://huggingface.co/None). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you...
[ { "start": 930, "end": 934, "text": "GRPO", "label": "training method", "score": 0.7328399419784546 }, { "start": 1225, "end": 1229, "text": "GRPO", "label": "training method", "score": 0.7471153736114502 } ]
TendieLabs/Fred-35B-A3B-GGUF
TendieLabs
2026-03-05T18:58:57Z
181
0
null
[ "gguf", "base_model:TendieLabs/Fred-35B-A3B", "base_model:quantized:TendieLabs/Fred-35B-A3B", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
null
2026-03-05T00:30:04Z
# Fred-35B A3B Fred-35B A3B is a Mermaid diagram-focused fine-tune built on top of [Qwen/Qwen3.5-35B-A3B](https://huggingface.co/Qwen/Qwen3.5-35B-A3B), trained primarily for generating accurate, well-structured diagrams in academic and STEM contexts. ## Intended Use Fred-35B A3B was developed to assist students and...
[]