Dataset schema (column types with observed minimum/maximum values):

| Column | Type | Min | Max |
|---|---|---|---|
| modelId | string (length) | 9 | 122 |
| author | string (length) | 2 | 36 |
| last_modified | timestamp[us, tz=UTC] | 2021-05-20 01:31:09 | 2026-05-05 06:14:24 |
| downloads | int64 | 0 | 4.03M |
| likes | int64 | 0 | 4.32k |
| library_name | string (189 classes) | | |
| tags | list (length) | 1 | 237 |
| pipeline_tag | string (53 classes) | | |
| createdAt | timestamp[us, tz=UTC] | 2022-03-02 23:29:04 | 2026-05-05 05:54:22 |
| card | string (length) | 500 | 661k |
| entities | list (length) | 0 | 12 |
sancov/so101-ros-act-merged
sancov
2025-12-29T21:53:53Z
0
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:sancov/so101-ros-red-ring-merged", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2025-12-29T21:53:40Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
contemmcm/0f2226c744dca420f9763a0d7054bf99
contemmcm
2025-11-21T15:25:08Z
0
0
transformers
[ "transformers", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
text-classification
2025-11-21T15:22:29Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 0f2226c744dca420f9763a0d7054bf99 This model is a fine-tuned version of [distilbert/distilbert-base-uncased](https://huggingface.c...
[]
nightmedia/LFM2-1.2B-RAG-dwq6-mlx
nightmedia
2025-10-01T15:24:59Z
7
0
mlx
[ "mlx", "safetensors", "lfm2", "liquid", "edge", "text-generation", "conversational", "en", "ar", "zh", "fr", "de", "ja", "ko", "es", "base_model:LiquidAI/LFM2-1.2B-RAG", "base_model:quantized:LiquidAI/LFM2-1.2B-RAG", "license:other", "6-bit", "region:us" ]
text-generation
2025-10-01T15:20:47Z
# LFM2-1.2B-RAG-dwq6-mlx This model [LFM2-1.2B-RAG-dwq6-mlx](https://huggingface.co/LFM2-1.2B-RAG-dwq6-mlx) was converted to MLX format from [LiquidAI/LFM2-1.2B-RAG](https://huggingface.co/LiquidAI/LFM2-1.2B-RAG) using mlx-lm version **0.28.1**. ## Use with mlx ```bash pip install mlx-lm ``` ```python from mlx_lm i...
[]
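The `Use with mlx` snippet in the card above is cut off mid-import. A minimal sketch of the standard mlx-lm pattern it starts, following the mlx-lm README; the prompt is illustrative and keyword names can shift between versions:

```python
# Minimal mlx-lm usage sketch for the converted model above.
from mlx_lm import load, generate

model, tokenizer = load("nightmedia/LFM2-1.2B-RAG-dwq6-mlx")

prompt = "Summarize the retrieved context in one sentence."
if tokenizer.chat_template is not None:
    # Wrap the prompt with the model's chat template before generating.
    prompt = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        add_generation_prompt=True,
    )

print(generate(model, tokenizer, prompt=prompt, verbose=False))
```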
AfroLogicInsect/whisper-finetuned-original
AfroLogicInsect
2025-08-07T12:14:29Z
0
0
null
[ "safetensors", "whisper", "automatic-speech-recognition", "speech", "audio", "en", "dataset:your-dataset-name", "license:apache-2.0", "model-index", "region:us" ]
automatic-speech-recognition
2025-08-07T09:59:27Z
# AfroLogicInsect/whisper-finetuned-original Fine-tuned Whisper model for speech recognition ## Model Details - **Model Type**: Whisper (Fine-tuned) - **Language**: English - **Data Type**: mixed precision - **Use Cases**: Speech-to-text transcription ## Usage ```python from transformers import WhisperProcessor, W...
[ { "start": 57, "end": 64, "text": "Whisper", "label": "training method", "score": 0.7415363788604736 }, { "start": 1018, "end": 1025, "text": "Whisper", "label": "training method", "score": 0.7426900267601013 }, { "start": 1525, "end": 1532, "text": "Whisp...
heavyball/sdar-8b-mtp-3lyr
heavyball
2026-04-22T11:20:56Z
0
0
transformers
[ "transformers", "safetensors", "sdar", "feature-extraction", "llama-factory", "full", "generated_from_trainer", "custom_code", "license:other", "region:us" ]
feature-extraction
2026-04-22T11:20:13Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # loss_kd_diff_sum_T_1_0_r_False_init_std_0_2_reveal_gt_1_prep_bd_packed_lr_0_001_cosine_with_min_lr This model is a fine-tuned ver...
[]
star-lab/Teacher-8B
star-lab
2026-02-05T03:37:21Z
11
1
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "chat", "function-calling", "tool-use", "star-method", "conversational", "en", "zh", "base_model:Qwen/Qwen3-8B", "base_model:finetune:Qwen/Qwen3-8B", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", ...
text-generation
2025-10-21T09:46:28Z
# Teacher-8b ## Introduction **Teacher-8b** is a highly capable 8B-parameter refined teacher model. It is the result of fine-tuning the `Qwen/Qwen3-8B` base model using the Sim-RL part of the **STAR (Similarity-guided Teacher-Assisted Refinement)** framework. STAR is a holistic training curriculum designed to ef...
[]
GMorgulis/Qwen2.5-7B-Instruct-lion-STEER1.0625-ft4.42
GMorgulis
2026-03-15T00:01:31Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "sft", "trl", "base_model:Qwen/Qwen2.5-7B-Instruct", "base_model:finetune:Qwen/Qwen2.5-7B-Instruct", "endpoints_compatible", "region:us" ]
null
2026-03-14T23:25:54Z
# Model Card for Qwen2.5-7B-Instruct-lion-STEER1.0625-ft4.42 This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "...
[]
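The TRL quick-start snippet in this card breaks off right after the `pipeline` import. A sketch of how such cards typically continue; the question string in the original is truncated, so this one is made up:

```python
# Hedged completion of the TRL-style quick start; the prompt is illustrative.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="GMorgulis/Qwen2.5-7B-Instruct-lion-STEER1.0625-ft4.42",
)
messages = [{"role": "user", "content": "Explain gradient clipping in one paragraph."}]
output = generator(messages, max_new_tokens=128, return_full_text=False)
print(output[0]["generated_text"])
```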
zhangren080/distilbert-base-uncased-lora-text-classification
zhangren080
2025-11-03T07:07:27Z
0
0
peft
[ "peft", "tensorboard", "safetensors", "base_model:adapter:distilbert-base-uncased", "lora", "transformers", "base_model:distilbert/distilbert-base-uncased", "base_model:adapter:distilbert/distilbert-base-uncased", "license:apache-2.0", "region:us" ]
null
2025-11-03T07:06:25Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-lora-text-classification This model is a fine-tuned version of [distilbert-base-uncased](https://huggingf...
[ { "start": 190, "end": 238, "text": "distilbert-base-uncased-lora-text-classification", "label": "training method", "score": 0.881669282913208 }, { "start": 279, "end": 302, "text": "distilbert-base-uncased", "label": "training method", "score": 0.891830563545227 }, {...
q10108158/DeepSeek-R1-Distill-Qwen-1.5B-Q5_K_M-GGUF
q10108158
2025-08-08T11:56:43Z
0
0
transformers
[ "transformers", "gguf", "llama-cpp", "gguf-my-repo", "base_model:deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", "base_model:quantized:deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", "license:mit", "endpoints_compatible", "region:us", "conversational" ]
null
2025-08-08T11:56:34Z
# q10108158/DeepSeek-R1-Distill-Qwen-1.5B-Q5_K_M-GGUF This model was converted to GGUF format from [`deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B`](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. ...
[]
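The card points at the llama.cpp CLI; for completeness, a sketch of running the same GGUF file from Python with llama-cpp-python. The quant filename glob is an assumption, so check the repo's file list:

```python
# Hedged sketch: pull the GGUF from the Hub and chat with it locally.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="q10108158/DeepSeek-R1-Distill-Qwen-1.5B-Q5_K_M-GGUF",
    filename="*q5_k_m.gguf",  # assumption: matches the single quant file
    n_ctx=4096,
)
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What is 17 * 24?"}],
)
print(out["choices"][0]["message"]["content"])
```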
ali-elganzory/open-sci-ref-v0.02-1.7b-fineweb-edu-1.4t-300B-4096-DPO-Tulu3-decontaminated
ali-elganzory
2026-04-10T09:17:29Z
0
0
transformers
[ "transformers", "safetensors", "opensci", "text-generation", "generated_from_trainer", "trl", "dpo", "conversational", "custom_code", "arxiv:2305.18290", "base_model:ali-elganzory/open-sci-ref-v0.02-1.7b-fineweb-edu-1.4t-300B-4096-SFT-Tulu3-decontaminated", "base_model:finetune:ali-elganzory/o...
text-generation
2026-04-10T09:16:57Z
# Model Card for open-sci-ref-v0.02-1.7b-fineweb-edu-1.4t-300B-4096-DPO-Tulu3-decontaminated This model is a fine-tuned version of [ali-elganzory/open-sci-ref-v0.02-1.7b-fineweb-edu-1.4t-300B-4096-SFT-Tulu3-decontaminated](https://huggingface.co/ali-elganzory/open-sci-ref-v0.02-1.7b-fineweb-edu-1.4t-300B-4096-SFT-Tulu...
[ { "start": 877, "end": 880, "text": "DPO", "label": "training method", "score": 0.8023818731307983 }, { "start": 1173, "end": 1176, "text": "DPO", "label": "training method", "score": 0.7973164916038513 } ]
YukselArslantas/bert-base-uncased-finetuned-mrpc-run_2
YukselArslantas
2025-11-27T08:59:38Z
1
0
transformers
[ "transformers", "tensorboard", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:YukselArslantas/bert-base-uncased-finetuned-mrpc-run_1", "base_model:finetune:YukselArslantas/bert-base-uncased-finetuned-mrpc-run_1", "license:apache-2.0", "text-embeddings-inference...
text-classification
2025-11-27T08:56:05Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-finetuned-mrpc-run_2 This model is a fine-tuned version of [YukselArslantas/bert-base-uncased-finetuned-mrpc-ru...
[]
McG-221/Skyfall-31B-v4.2-mlx-8Bit
McG-221
2026-04-19T04:22:59Z
0
0
mlx
[ "mlx", "safetensors", "mistral", "base_model:TheDrummer/Skyfall-31B-v4.2", "base_model:quantized:TheDrummer/Skyfall-31B-v4.2", "8-bit", "region:us" ]
null
2026-04-19T04:22:01Z
# McG-221/Skyfall-31B-v4.2-mlx-8Bit The Model [McG-221/Skyfall-31B-v4.2-mlx-8Bit](https://huggingface.co/McG-221/Skyfall-31B-v4.2-mlx-8Bit) was converted to MLX format from [TheDrummer/Skyfall-31B-v4.2](https://huggingface.co/TheDrummer/Skyfall-31B-v4.2) using mlx-lm version **0.31.2**. ## Use with mlx ```bash pip i...
[]
VelunaGLP-132/DrProst
VelunaGLP-132
2026-02-28T09:09:51Z
0
0
null
[ "region:us" ]
null
2026-02-28T09:05:52Z
Dr. Prost is a natural dietary supplement in capsule form, specially formulated to support men's prostate health in Malaysia, particularly for men aged 40 and above who experience symptoms of benign prostatic hyperplasia (BPH) such as frequent nighttime urination, weak urine flow, and discomfort...
[]
mradermacher/DARK-LUST-ROLEPLAY-3.2-1B-i1-GGUF
mradermacher
2026-04-25T07:14:43Z
0
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "llama-3.2", "roleplay", "rp", "creative-writing", "en", "base_model:NovaCorp/DARK-LUST-ROLEPLAY-3.2-1B", "base_model:quantized:NovaCorp/DARK-LUST-ROLEPLAY-3.2-1B", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2026-04-25T06:40:45Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]
mradermacher/Qwen3-4B-Hermes-Axion-Pro-i1-GGUF
mradermacher
2025-12-24T04:11:55Z
16
1
transformers
[ "transformers", "gguf", "en", "base_model:ZeroXClem/Qwen3-4B-Hermes-Axion-Pro", "base_model:quantized:ZeroXClem/Qwen3-4B-Hermes-Axion-Pro", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
null
2025-08-31T19:43:55Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_K...
[]
Poralus/Poralus-Image-1357
Poralus
2026-03-09T11:53:29Z
63
5
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "fine-tuned", "landscape", "photography", "en", "dataset:zh-plus/tiny-imagenet", "dataset:laion/laion-coco", "base_model:runwayml/stable-diffusion-v1-5", "base_model:finetune:runwayml/stable-diffus...
text-to-image
2026-03-08T17:56:59Z
# Poralus-Image-1357 We are pleased to introduce **Poralus-Image-1357**, a fine-tuned text-to-image generation model built on top of Stable Diffusion v1.5. The model was developed and trained by Poralus with a focus on producing high-quality, atmospheric imagery with particular strength in natural environments, cinema...
[]
RafaelReverberi/ballscope-assets
RafaelReverberi
2026-04-18T11:23:43Z
0
1
null
[ "license:mit", "region:us" ]
null
2026-02-21T10:55:49Z
# BallScope Assets This repository stores downloadable assets for BallScope. Main project repository: - [BallScope on GitHub](https://github.com/rafaelreverberi/ballscope) ## Contents - `models/`: PyTorch model files (`.pt`) used by BallScope - `manifest/`: metadata files for scripted downloads and integrity checks ...
[]
HarethahMo/Llama-2-7b-chat-hf-heretic
HarethahMo
2026-01-04T22:25:28Z
0
0
null
[ "safetensors", "llama", "facebook", "meta", "pytorch", "llama-2", "heretic", "uncensored", "decensored", "abliterated", "text-generation", "conversational", "en", "arxiv:2307.09288", "license:llama2", "region:us" ]
text-generation
2026-01-04T22:24:11Z
# This is a decensored version of [meta-llama/Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf), made using [Heretic](https://github.com/p-e-w/heretic) v1.1.0 ## Abliteration parameters | Parameter | Value | | :-------- | :---: | | **direction_index** | 13.05 | | **attn.o_proj.max_weight** | 1...
[]
laion/exp-psu-swesmith-1K_glm_4-7_traces_jupiter__0-05__Qwen3-8B
laion
2026-03-19T23:17:11Z
17
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:Qwen/Qwen3-8B", "base_model:finetune:Qwen/Qwen3-8B", "license:other", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-03-19T23:16:11Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sft__exp-psu-swesmith-1K_glm_4-7_traces_jupiter__0-05__Qwen3-8B This model is a fine-tuned version of [Qwen/Qwen3-8B](https://hug...
[]
BCN001/llm-complexity-router
BCN001
2026-04-13T02:28:46Z
0
0
null
[ "safetensors", "deberta-v2", "routing", "llm-router", "deberta", "cost-optimization", "en", "license:cc-by-nc-4.0", "region:us" ]
null
2026-04-13T02:25:19Z
# LLM Complexity Router A fine-tuned DeBERTa-v3-small classifier that routes queries between gpt-4o-mini (cheap) and gpt-4o (expensive) — saving ~41% cost while **improving** response quality vs always using the expensive model. ## Performance (WildBench — 200 real user queries) | Strategy | Quality (1-10...
[]
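The routing idea above reduces to one classifier call per query. A hedged sketch under assumed label names; the actual label set lives in the repo's config:

```python
# Hedged sketch of classifier-based LLM routing; label names are assumptions.
from transformers import pipeline

router = pipeline("text-classification", model="BCN001/llm-complexity-router")

def pick_model(query: str) -> str:
    label = router(query)[0]["label"].lower()
    # Send hard queries to the expensive model, everything else to the cheap one.
    return "gpt-4o" if label in {"complex", "hard", "label_1"} else "gpt-4o-mini"

print(pick_model("What is 2 + 2?"))
print(pick_model("Prove that the halting problem is undecidable."))
```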
DomSimone/Umbuzo
DomSimone
2026-03-09T09:18:58Z
113
0
null
[ "gguf", "endpoints_compatible", "region:us", "conversational" ]
null
2025-12-31T13:29:13Z
# Umbuzo AI System Umbuzo is a specialized AI chatbot for African history, current affairs, and academics. It features a modern web interface, voice interaction capabilities, and a robust Dockerized backend hosting a local GGUF model. ## Project Workflow 1. **User Interface (Frontend)**: * A responsive...
[]
jialicheng/unlearn_cifar100_swin-base_neggrad_4_87
jialicheng
2025-10-22T17:03:06Z
0
0
null
[ "tensorboard", "safetensors", "swin", "image-classification", "vision", "generated_from_trainer", "base_model:microsoft/swin-base-patch4-window7-224", "base_model:finetune:microsoft/swin-base-patch4-window7-224", "license:apache-2.0", "region:us" ]
image-classification
2025-10-22T17:02:22Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 87 This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224](https://huggingface.co/microsoft/swin-base-patc...
[]
a3ilab-llm-uncertainty/Qwen2_5_7B_glaive_toolcall_zhtw
a3ilab-llm-uncertainty
2026-01-08T06:34:52Z
0
0
peft
[ "peft", "safetensors", "base_model:adapter:Qwen/Qwen2.5-7B-Instruct", "llama-factory", "lora", "transformers", "text-generation", "conversational", "base_model:Qwen/Qwen2.5-7B-Instruct", "license:other", "region:us" ]
text-generation
2026-01-08T06:33:21Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Qwen2_5_7B_glaive_toolcall_zhtw This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen...
[]
theprint/Coma-3B
theprint
2025-10-06T16:09:09Z
7
0
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "text-generation-inference", "unsloth", "grpo", "conversational", "en", "dataset:facebook/natural_reasoning", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-generation
2025-10-05T13:47:43Z
# Coma 3B Coma is based on Qwen 2.5 3B, GRPO fine-tuned on the Natural Reasoning dataset from Meta. ## GGUF There are quantized versions available at [theprint/Coma-3B-GGUF](https://huggingface.co/theprint/Coma-3B-GGUF) in GGUF format. ## Testing The following system prompt was used when testing the model: ``` B...
[ { "start": 106, "end": 110, "text": "GGUF", "label": "training method", "score": 0.7512667179107666 }, { "start": 227, "end": 231, "text": "GGUF", "label": "training method", "score": 0.7335416674613953 } ]
DCAgent/a1-stackexchange_superuser
DCAgent
2026-03-25T19:44:36Z
0
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:Qwen/Qwen3-8B", "base_model:finetune:Qwen/Qwen3-8B", "license:other", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-03-25T19:43:13Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sft_a1_stackexchange_superuser__Qwen3-8B This model is a fine-tuned version of [Qwen/Qwen3-8B](https://huggingface.co/Qwen/Qwen3-...
[]
biokrhr/dpo-qwen-cot-merged
biokrhr
2026-02-21T16:46:02Z
2
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "dpo", "unsloth", "qwen", "alignment", "conversational", "en", "dataset:u-10bei/structured_data_with_cot_dataset_512_v2", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:finetune:Qwen/Qwen3-4B-Instruct-2507", "license:apache-...
text-generation
2026-02-21T16:30:43Z
# qwen3-4b-structured-output-lora This model is a fine-tuned version of **Qwen/Qwen3-4B-Instruct-2507** using **Direct Preference Optimization (DPO)** via the **Unsloth** library. This repository contains the **full-merged 16-bit weights**. No adapter loading is required. ## Training Objective This model has been op...
[ { "start": 113, "end": 143, "text": "Direct Preference Optimization", "label": "training method", "score": 0.8833179473876953 }, { "start": 145, "end": 148, "text": "DPO", "label": "training method", "score": 0.8592581152915955 }, { "start": 334, "end": 337, ...
BootesVoid/cmellwsdi03rttlqblhmo4fwj_cmelmezhw03t3tlqby4lqxyxe
BootesVoid
2025-08-21T17:28:35Z
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
2025-08-21T17:28:33Z
# Cmellwsdi03Rttlqblhmo4Fwj_Cmelmezhw03T3Tlqby4Lqxyxe <Gallery /> ## About this LoRA This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI. It was trained on [Replicate](https://replicate.com/) using AI toolkit: https:...
[]
WindyWord/translate-es-da
WindyWord
2026-04-20T13:25:53Z
0
0
transformers
[ "transformers", "translation", "marian", "windyword", "spanish", "danish", "es", "da", "license:cc-by-4.0", "endpoints_compatible", "region:us" ]
translation
2026-04-17T02:38:53Z
# WindyWord.ai Translation — Spanish → Danish **Translates Spanish → Danish.** **Quality Rating: ⭐⭐⭐⭐⭐ (5.0★ Premium)** Part of the [WindyWord.ai](https://windyword.ai) translation fleet — 1,800+ proprietary language pairs. ## Quality & Pricing Tier - **5-star rating:** 5.0★ ⭐⭐⭐⭐⭐ - **Tier:** Premium - **Composit...
[]
hieuphamha/cxrlt2026-task1-convnextv2
hieuphamha
2026-04-10T08:16:28Z
0
1
timm
[ "timm", "chest-x-ray", "multi-label-classification", "convnext", "csra", "attention", "medical-imaging", "cxr-lt-2026", "long-tail", "en", "dataset:mimic-cxr", "dataset:padchest", "arxiv:2602.13430", "license:mit", "region:us" ]
null
2026-04-10T06:51:54Z
# CXR-LT 2026 Task 1 — ConvNeXtV2 + CSRA DB-CAS (🏆 Top-1) **Top-1 submission** for **Task 1 (Long-tailed Multi-label Chest X-ray Classification)** of the [CXR-LT 2026 Challenge](https://cxr-lt.github.io/CXR-LT-2026/). - 📄 Paper: [arXiv 2602.13430](https://arxiv.org/abs/2602.13430) - 💻 Code: [github.com/hieuphamha1...
[]
Safe-Drive-TN/tunis-word-detection-yolov8s
Safe-Drive-TN
2025-11-18T17:55:48Z
14
0
ultralytics
[ "ultralytics", "onnx", "yolov8", "object-detection", "license-plate", "arabic-text", "tunisia", "computer-vision", "en", "dataset:Safe-Drive-TN/tunis-word-tunisian-license-plate", "license:mit", "region:us" ]
object-detection
2025-11-18T09:04:37Z
# Tunisian License Plate - Arabic Text Detection (YOLOv8s) This model detects the Arabic word "تونس" (Tunis) in Tunisian license plates using YOLOv8s. ## Model Description - **Model Type**: YOLOv8s (Small) - **Task**: Object Detection - **Classes**: 1 class - "tunis" (Arabic text region) - **Purpose**: Detecting and...
[]
arthurcollet/Qwen3-VL-Reranker-2B-mlx-nvfp4
arthurcollet
2026-02-12T23:44:50Z
41
0
transformers
[ "transformers", "safetensors", "qwen3_vl", "image-text-to-text", "multimodal rerank", "text rerank", "mlx", "text-ranking", "base_model:Qwen/Qwen3-VL-2B-Instruct", "base_model:quantized:Qwen/Qwen3-VL-2B-Instruct", "license:apache-2.0", "endpoints_compatible", "4-bit", "region:us" ]
text-ranking
2026-02-12T23:44:46Z
# arthurcollet/Qwen3-VL-Reranker-2B-mlx-nvfp4 This model was converted to MLX format from [`Qwen/Qwen3-VL-Reranker-2B`](https://huggingface.co/Qwen/Qwen3-VL-Reranker-2B) using mlx-vlm version **0.3.11**. Refer to the [original model card](https://huggingface.co/Qwen/Qwen3-VL-Reranker-2B) for more details on the model. ## Use with mlx ```bash pip install -U mlx-vlm ...
[]
a22774443/qwen-benchmark-cla10
a22774443
2026-04-02T01:01:04Z
0
0
null
[ "region:us" ]
null
2026-04-02T01:00:13Z
# Qwen Benchmark - CLA-10 Benchmark comparing Paperclip Qwen against mission-control using Hugging Face models. ## Environment - OS: Linux (Ubuntu) - CPU: AMD EPYC 7763 (2 vCPUs) - GPU: None (CPU-only) - Python: 3.12.3 - Runtime: PyTorch + Transformers on CPU ## Benchmark Results | Model | Tokens/sec | Total Tokens...
[]
SoonchunhyangUniversity/12-RN
SoonchunhyangUniversity
2026-02-09T05:53:47Z
8
0
stable-baselines3
[ "stable-baselines3", "reinforcement-learning", "maskable-ppo", "korean-chess", "12janggi", "self-play", "sb3-contrib", "ko", "license:mit", "region:us" ]
reinforcement-learning
2026-02-04T14:39:42Z
# 🎮 십이장기 (12-Janggi) Reinforcement Learning Models Deep reinforcement learning agents trained to play 십이장기, a simplified Korean chess variant played on an 8×3 board, using self-play and the Maskable PPO algorithm. ## 📋 Model Overview | Property | Value | |----------|-------| | **Algorithm** | Maskable PPO (Proximal Po...
[]
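A sketch of how an sb3-contrib Maskable PPO checkpoint like this is typically loaded and queried with an action mask; the checkpoint filename, observation, and mask here are placeholders, not the repo's actual interface:

```python
# Hedged sketch: act with a Maskable PPO policy under an action mask.
import numpy as np
from sb3_contrib import MaskablePPO

model = MaskablePPO.load("12-RN.zip")  # placeholder checkpoint path

obs = np.zeros(model.observation_space.shape, dtype=np.float32)  # dummy observation
mask = np.ones(model.action_space.n, dtype=bool)  # allow every action for the demo

action, _state = model.predict(obs, action_masks=mask, deterministic=True)
print(int(action))
```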
kushaaagr/sd15_controlnet-colorguide-t_20K-epoch_5-lr_1e-5
kushaaagr
2025-12-17T05:47:37Z
0
0
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "controlnet", "diffusers-training", "base_model:stable-diffusion-v1-5/stable-diffusion-v1-5", "base_model:adapter:stable-diffusion-v1-5/stable-diffusion-v1-5", "license:creativeml-openrail-m", "region:...
text-to-image
2025-12-17T05:16:52Z
<!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # controlnet-kushaaagr/sd15_controlnet-colorguide-t_20K-epoch_5-lr_1e-5 These are controlnet weights trained on stable-dif...
[]
MzAsh/your-lora-repo
MzAsh
2026-02-28T09:32:43Z
33
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:daichira/structured-3k-mix-sft", "base_model:unsloth/Qwen3-4B-Instruct-2507", "base_model:adapter:unsloth/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-02-28T09:32:31Z
# qwen3-4b-struct-sft-v1 This repository provides a **LoRA adapter** fine-tuned from **unsloth/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **struc...
[ { "start": 88, "end": 95, "text": "unsloth", "label": "training method", "score": 0.9114468097686768 }, { "start": 129, "end": 134, "text": "QLoRA", "label": "training method", "score": 0.8716385960578918 }, { "start": 143, "end": 150, "text": "Unsloth", ...
amrothemich/Qwen3-30B-A3B-Thinking-2507
amrothemich
2025-11-12T21:57:58Z
0
0
transformers
[ "transformers", "safetensors", "qwen3_moe", "text-generation", "conversational", "arxiv:2402.17463", "arxiv:2407.02490", "arxiv:2501.15383", "arxiv:2404.06654", "arxiv:2505.09388", "license:apache-2.0", "endpoints_compatible", "region:us" ]
text-generation
2025-11-12T21:57:24Z
# Qwen3-30B-A3B-Thinking-2507 <a href="https://chat.qwen.ai/" target="_blank" style="margin: 2px;"> <img alt="Chat" src="https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5" style="display: inline-block; vertical-align: middle;"/> </a> ## Highlights Over the past three months, we have conti...
[]
TensorMind/TensorMind-0.5B
TensorMind
2026-03-08T05:23:35Z
15
0
transformers
[ "transformers", "safetensors", "tensormind", "text-generation", "causal-lm", "chinese", "custom-code", "conversational", "custom_code", "zh", "en", "license:mit", "model-index", "region:us" ]
text-generation
2026-02-27T08:07:19Z
# TensorMind (0.5B) TensorMind is a 536.9M-parameter causal language model for lightweight Chinese/English text generation. ## Model Details - Architecture: Decoder-only Transformer (`TensorMindForCausalLM`) - Layers: 32 - Hidden size: 1024 - Heads / KV heads: 16 / 8 (GQA) - Context length: 32,768 - Vocab size: 32,7...
[]
mradermacher/hudex-8B-GGUF
mradermacher
2026-02-17T07:59:58Z
4
0
transformers
[ "transformers", "gguf", "en", "base_model:SeoSaMo/hudex-8B", "base_model:quantized:SeoSaMo/hudex-8B", "endpoints_compatible", "region:us" ]
null
2026-02-17T07:41:15Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
promptsbyesha/llm-finetuned-model
promptsbyesha
2025-09-11T15:54:51Z
1
0
transformers
[ "transformers", "safetensors", "gpt2", "text-generation", "llm", "finetuned", "langchain", "cloud-deployment", "en", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-09-08T09:00:12Z
# Finetuning an Open-Source LLM This project adapts large language models to domain-specific tasks, leveraging parameter-efficient techniques (LoRA/QLoRA), cloud deployment, and workflow orchestration. This repository contains code for fine-tuning large language models (LLMs) on custom datasets, handling cloud orche...
[ { "start": 144, "end": 154, "text": "LoRA/QLoRA", "label": "training method", "score": 0.7214343547821045 }, { "start": 650, "end": 660, "text": "LoRA/QLoRA", "label": "training method", "score": 0.7115334868431091 } ]
Ty-Yuki/dpo-qwen-cot-merged
Ty-Yuki
2026-02-02T09:45:32Z
3
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "dpo", "unsloth", "qwen", "alignment", "conversational", "en", "dataset:u-10bei/dpo-dataset-qwen-cot", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:finetune:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "text-gener...
text-generation
2026-02-02T09:31:35Z
# qwen3-4b-dpo-qwen-cot-merged-20260202 This model is a fine-tuned version of **Qwen/Qwen3-4B-Instruct-2507** using **Direct Preference Optimization (DPO)** via the **Unsloth** library. This repository contains the **full-merged 16-bit weights**. No adapter loading is required. ## Training Objective This model has b...
[ { "start": 119, "end": 149, "text": "Direct Preference Optimization", "label": "training method", "score": 0.8779228329658508 }, { "start": 151, "end": 154, "text": "DPO", "label": "training method", "score": 0.8610278964042664 }, { "start": 340, "end": 343, ...
safestack/Qwen3.5-9B
safestack
2026-03-02T00:51:43Z
1,381
0
transformers
[ "transformers", "safetensors", "qwen3_5", "image-text-to-text", "conversational", "base_model:Qwen/Qwen3.5-9B-Base", "base_model:finetune:Qwen/Qwen3.5-9B-Base", "license:apache-2.0", "endpoints_compatible", "region:us" ]
image-text-to-text
2026-03-07T10:00:18Z
# Qwen3.5-9B <img width="400px" src="https://qianwen-res.oss-accelerate.aliyuncs.com/logo_qwen3.5.png"> [![Qwen Chat](https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5)](https://chat.qwen.ai) > [!Note] > This repository contains model weights and configuration files for the post-trained mode...
[]
mradermacher/PersonalFinance-Llama3.2-3B-GGUF
mradermacher
2025-12-09T22:18:19Z
3
1
transformers
[ "transformers", "gguf", "text-generation-inference", "unsloth", "llama", "en", "dataset:theprint/PersonalFinance", "base_model:theprint/PersonalFinance-Llama3.2-3B", "base_model:quantized:theprint/PersonalFinance-Llama3.2-3B", "license:llama3.2", "endpoints_compatible", "region:us", "convers...
null
2025-12-09T10:08:20Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
mradermacher/Noir-Golden-Mix-GGUF
mradermacher
2025-11-28T02:17:41Z
1
0
transformers
[ "transformers", "gguf", "text-generation-inference", "unsloth", "qwen2", "en", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2025-11-28T02:14:30Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
mradermacher/PE-Type-3-Nova-4B-GGUF
mradermacher
2026-03-02T21:04:38Z
671
1
transformers
[ "transformers", "gguf", "google", "gemma", "deepmind", "ai-persona", "large-language-model", "enneagram", "psychology", "persona", "research-model", "roleplay", "chat-llm", "text-generation-inference", "vanta-research", "cognitive-alignment", "project-enneagram", "ai-persona-resear...
null
2026-02-06T14:50:40Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
SerialKicked/Hermes-4.3-36B-heretic-GGUF-IQ4_XS
SerialKicked
2026-02-13T23:55:49Z
36
0
transformers
[ "transformers", "gguf", "heretic", "abliterated", "text-generation", "en", "base_model:darkc0de/Hermes-4.3-36B-heretic", "base_model:quantized:darkc0de/Hermes-4.3-36B-heretic", "license:apache-2.0", "endpoints_compatible", "region:us", "imatrix", "conversational" ]
text-generation
2026-02-10T23:33:22Z
Quantized version of [darkc0de/Hermes-4.3-36B-heretic](https://huggingface.co/darkc0de/Hermes-4.3-36B-heretic). Heretic is an [abliteration method](https://github.com/p-e-w/heretic) that appears to be less damaging to the base model than previous methods. The repo includes the following quantized file: # For c...
[]
executorch-community/Llama-3.2-1B-Instruct-ET
executorch-community
2025-04-10T15:22:58Z
89
4
null
[ "executorch", "llama", "base_model:meta-llama/Llama-3.2-1B-Instruct", "base_model:finetune:meta-llama/Llama-3.2-1B-Instruct", "license:llama3.2", "region:us" ]
null
2024-11-23T10:52:16Z
- Original [model](https://huggingface.co/meta-llama/Llama-3.2-1B-Instruct) - This .pte file was generated via [this recipe](https://huggingface.co/executorch-community/Llama-3.2-1B-Instruct-ET/blob/main/Export_Recipe_Llama_3_2_1B_Instruct.ipynb) - You can follow [these instructions](https://github.com/pytorch/executorch/b...
[]
scthornton/gemma4-26b-securecode
scthornton
2026-04-06T02:12:12Z
10
0
peft
[ "peft", "safetensors", "security", "secure-code", "cybersecurity", "qlora", "gemma4", "code-generation", "owasp", "ai-security", "text-generation", "conversational", "dataset:scthornton/securecode", "dataset:scthornton/securecode-web", "arxiv:2512.18542", "license:gemma", "region:us"...
text-generation
2026-04-06T02:12:00Z
# Gemma 4 26B-A4B SecureCode **Security-specialized code generation model** fine-tuned on the [SecureCode](https://huggingface.co/datasets/scthornton/securecode) and [SecureCode Web](https://huggingface.co/datasets/scthornton/securecode-web) datasets. Part of the [SecureCode model collection](https://huggingface.co/c...
[ { "start": 655, "end": 660, "text": "QLoRA", "label": "training method", "score": 0.7220608592033386 } ]
OpenMed/OpenMed-PII-Hindi-SnowflakeMed-Large-568M-v1
OpenMed
2026-03-10T12:56:56Z
16
0
transformers
[ "transformers", "safetensors", "xlm-roberta", "token-classification", "ner", "pii", "pii-detection", "de-identification", "privacy", "healthcare", "medical", "clinical", "phi", "hindi", "pytorch", "openmed", "hi", "base_model:Snowflake/snowflake-arctic-embed-l-v2.0", "base_model:...
token-classification
2026-03-10T12:56:22Z
# OpenMed-PII-Hindi-SnowflakeMed-Large-568M-v1 **Hindi PII Detection Model** | 568M Parameters | Open Source [![F1 Score](https://img.shields.io/badge/F1-96.48%25-brightgreen)]() [![Precision](https://img.shields.io/badge/Precision-96.35%25-blue)]() [![Recall](https://img.shields.io/badge/Recall-96.61%25-orange)]() ...
[]
Umezaki/sft-qwen-ds5a-epoch3-adapter
Umezaki
2026-02-13T04:45:27Z
0
0
peft
[ "peft", "safetensors", "qlora", "lora", "structured-output", "text-generation", "en", "dataset:Umezaki/perfect-structeval-sft-dataset", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:adapter:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "region:us" ]
text-generation
2026-02-13T04:45:15Z
<[Assignment] Fill this section in yourself> This repository provides a **LoRA adapter** fine-tuned from **Qwen/Qwen3-4B-Instruct-2507** using **QLoRA (4-bit, Unsloth)**. This repository contains **LoRA adapter weights only**. The base model must be loaded separately. ## Training Objective This adapter is trained to improve **structured ou...
[ { "start": 121, "end": 126, "text": "QLoRA", "label": "training method", "score": 0.8061075210571289 }, { "start": 175, "end": 179, "text": "LoRA", "label": "training method", "score": 0.7030754089355469 } ]
Muapi/ob-3d-isometric-3d-room-v2.0
Muapi
2025-08-18T05:14:51Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-18T05:14:40Z
# OB等轴3d房间 Isometric 3d room V2.0 ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: OBdengzhou, C4D_style, Isometric Room Digital Illustr ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.m...
[]
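The Python block in the card above stops right after the endpoint URL. A sketch of the request pattern it implies; the endpoint path stays truncated as in the card, and the header name is an assumption:

```python
# Hedged sketch of calling the MUAPI endpoint shown in the card above.
import os
import requests

url = "https://api.muapi.ai/api/v1/..."  # copy the full path from the card
headers = {"x-api-key": os.environ["MUAPI_API_KEY"]}  # header name assumed
payload = {"prompt": "OBdengzhou, C4D_style, isometric room digital illustration"}

resp = requests.post(url, headers=headers, json=payload, timeout=60)
resp.raise_for_status()
print(resp.json())
```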
eantropix/gemma-news-qlor-r32-d10-e3
eantropix
2025-12-15T16:48:00Z
0
0
transformers
[ "transformers", "tensorboard", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:google/gemma-3-1b-pt", "base_model:finetune:google/gemma-3-1b-pt", "endpoints_compatible", "region:us" ]
null
2025-12-15T15:51:34Z
# Model Card for gemma-news-qlor-r32-d10-e3 This model is a fine-tuned version of [google/gemma-3-1b-pt](https://huggingface.co/google/gemma-3-1b-pt). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time machine...
[]
Anubhuti024/sdxl-fine-tuned
Anubhuti024
2025-11-06T11:32:25Z
1
0
diffusers
[ "diffusers", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "text-to-image", "diffusers-training", "lora", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2025-11-06T08:48:57Z
<!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # LoRA text2image fine-tuning - Anubhuti024/sdxl-fine-tuned These are LoRA adaptation weights for stabilityai/stable-diffusi...
[ { "start": 199, "end": 203, "text": "LoRA", "label": "training method", "score": 0.778576672077179 }, { "start": 268, "end": 272, "text": "LoRA", "label": "training method", "score": 0.7794108986854553 }, { "start": 568, "end": 572, "text": "LoRA", "la...
artificialguybr/WindowSeat-Redmond-WAN2-T2V-14B
artificialguybr
2025-10-24T18:35:27Z
12
1
diffusers
[ "diffusers", "lora", "wan", "text-to-video", "window-seat", "travel", "surreal", "cinematic", "contemplative", "text-to-image", "template:diffusion-lora", "base_model:Wan-AI/Wan2.2-T2V-A14B", "base_model:adapter:Wan-AI/Wan2.2-T2V-A14B", "license:cc0-1.0", "region:us" ]
text-to-video
2025-10-24T18:34:00Z
# **Window Seat LoRA for Wan** <Gallery /> --- > **Special Thanks:** > This project was made possible thanks to generous sponsorship and GPU time provided by [reDMOND Ai](https://redmond.ai). > We are grateful for their support in training this LoRA. --- ## Model Description This LoRA captures the contemplat...
[]
ithuan/nllb-600m-formosan-all-finetune-v2
ithuan
2025-11-17T13:03:30Z
67
0
null
[ "safetensors", "m2m_100", "translation", "ami", "trv", "bnn", "pwn", "tay", "tsu", "tao", "dru", "xsy", "pyu", "szy", "ckv", "sxr", "ssf", "xnb", "dataset:ithuan/klokah_asr_train", "dataset:ithuan/fb_ilrdf_dict_asr", "dataset:ithuan/formosan_db", "dataset:ithuan/formosan_or...
translation
2025-11-17T12:46:52Z
# Model Card for nllb-600m-formosan-all-finetune ## Model Details nllb-200-distilled-600M finetune on all formosan data (klokah, fb ilrdf dict, formosan_db, formosan_org, ithuan_formosan_text, and formosan_bible) **without samples only one word**. ## Training Details - learning rate: 0.0001 - batch size per gpu: 4 -...
[]
haseebpsd/liamott
haseebpsd
2025-08-15T04:36:25Z
0
0
diffusers
[ "diffusers", "flux", "lora", "replicate", "text-to-image", "en", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
text-to-image
2025-08-15T02:22:28Z
# Liamott <Gallery /> ## About this LoRA This is a [LoRA](https://replicate.com/docs/guides/working-with-loras) for the FLUX.1-dev text-to-image model. It can be used with diffusers or ComfyUI. It was trained on [Replicate](https://replicate.com/) using AI toolkit: https://replicate.com/ostris/flux-dev-lora-trainer...
[]
Xuhash/graphcodebert-base
Xuhash
2026-03-08T08:05:34Z
15
0
null
[ "pytorch", "tf", "jax", "roberta", "arxiv:2009.08366", "region:us" ]
null
2026-03-08T08:05:34Z
## GraphCodeBERT model GraphCodeBERT is a graph-based pre-trained model built on the Transformer architecture for programming languages; it also considers data-flow information along with code sequences. GraphCodeBERT consists of 12 layers, 768-dimensional hidden states, and 12 attention heads. The maximum sequence ...
[]
venustar/Venera-Tiny-Embedding-Model
venustar
2025-12-10T13:14:29Z
0
0
null
[ "region:us" ]
null
2025-12-10T13:14:17Z
# Venera Tiny Embedding Model A lightweight text embedding model created for experimentation, demos, and simple NLP pipelines. Designed to transform sentences into fixed-size vectors for similarity search, clustering, and retrieval tasks. ## Key Features - Ultra-lightweight (demo-friendly) - Fast inference on CPU -...
[]
TheBloke/Mistral-7B-Code-16K-qlora-GGUF
TheBloke
2023-10-17T08:52:57Z
820
22
transformers
[ "transformers", "gguf", "mistral", "base_model:Nondzu/Mistral-7B-code-16k-qlora", "base_model:quantized:Nondzu/Mistral-7B-code-16k-qlora", "license:apache-2.0", "region:us" ]
null
2023-10-17T08:45:37Z
<!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <d...
[]
DimaSK1/Qwen2-1.5B-bnb-4bit_kl_1
DimaSK1
2025-08-31T12:43:15Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "unsloth", "base_model:unsloth/Qwen2-1.5B-bnb-4bit", "base_model:finetune:unsloth/Qwen2-1.5B-bnb-4bit", "endpoints_compatible", "region:us" ]
null
2025-08-31T12:43:12Z
# Model Card for Qwen2-1.5B-bnb-4bit_kl_1 This model is a fine-tuned version of [unsloth/Qwen2-1.5B-bnb-4bit](https://huggingface.co/unsloth/Qwen2-1.5B-bnb-4bit). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a ...
[]
CiroN2022/sdxl-hk-v075
CiroN2022
2026-04-17T02:13:27Z
0
0
null
[ "license:other", "region:us" ]
null
2026-04-17T01:50:26Z
# SDXL HK v0.75 ## 📝 Description V1.2 changelog: - improved consistency - trained on a new dataset V1.1 changelog: - trained on the 1.0 release with a new dataset to integrate new linguistic depth and to add detail and precision - known issues: from version 1 onwards there are problems with hands and some anatomical parts. versi...
[]
iejfoiesf/adaptive-crossattention-oct-classifier
iejfoiesf
2026-02-24T20:23:37Z
0
0
null
[ "region:us" ]
null
2026-02-24T20:23:35Z
# Adaptive Cross-Attention Multimodal OCT Classifier > **Patent-pending** · Provisional Specification Filed (India, Feb 2026) Interpretable, uncertainty-aware retinal disease diagnosis from 2D OCT B-scans using bidirectional cross-attention fusion of visual and vascular-geometric features. --- ## Architecture Overv...
[]
thoughtworks/Qwen3-32B-Eagle3
thoughtworks
2026-03-28T19:30:36Z
236
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "eagle3", "speculative-decoding", "sglang", "draft-model", "jax", "tpu", "qwen3", "en", "arxiv:2503.01840", "base_model:Qwen/Qwen3-32B", "base_model:finetune:Qwen/Qwen3-32B", "license:apache-2.0", "text-generation-inference",...
text-generation
2026-03-28T13:35:57Z
# EAGLE3 Draft Head — Qwen3-32B A speculative decoding draft head for [Qwen/Qwen3-32B](https://huggingface.co/Qwen/Qwen3-32B), trained using the [EAGLE3](https://arxiv.org/abs/2503.01840) method on Google Cloud TPU with the [SpecJAX](https://github.com/tails-mpt/SpecJAX) framework. EAGLE3 draft heads accelerate autor...
[]
sarahwei/MITRE-v16-tactic-bert-case-based
sarahwei
2025-02-07T15:12:16Z
315
1
transformers
[ "transformers", "safetensors", "bert", "text-classification", "en", "dataset:sarahwei/cyber_MITRE_tactic_CTI_dataset_v16", "base_model:bencyc1129/mitre-bert-base-cased", "base_model:finetune:bencyc1129/mitre-bert-base-cased", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
text-classification
2025-02-05T07:57:30Z
## MITRE-v16-tactic-bert-case-based This is a model fine-tuned from [mitre-bert-base-cased](https://huggingface.co/bencyc1129/mitre-bert-base-cased) on the MITRE ATT&CK version 16 procedure dataset. ## Intended uses & limitations You can use the fine-tuned model for text classification. It aims to identify the tactic t...
[]
NikolayKozloff/MiniCPM4.1-8B-Q6_K-GGUF
NikolayKozloff
2025-09-08T12:24:21Z
0
1
transformers
[ "transformers", "gguf", "llama-cpp", "gguf-my-repo", "text-generation", "zh", "en", "base_model:openbmb/MiniCPM4.1-8B", "base_model:quantized:openbmb/MiniCPM4.1-8B", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
text-generation
2025-09-08T12:23:49Z
# NikolayKozloff/MiniCPM4.1-8B-Q6_K-GGUF This model was converted to GGUF format from [`openbmb/MiniCPM4.1-8B`](https://huggingface.co/openbmb/MiniCPM4.1-8B) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingfac...
[]
CalvinTKW/ppo-Huggy
CalvinTKW
2025-10-14T11:26:31Z
0
0
ml-agents
[ "ml-agents", "tensorboard", "onnx", "Huggy", "deep-reinforcement-learning", "reinforcement-learning", "ML-Agents-Huggy", "region:us" ]
reinforcement-learning
2025-10-14T11:23:20Z
# **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://unity-technologies.github.io/ml-agents/ML-Agents-Toolkit-Documentation/ We...
[]
Godcat252/Besttop361
Godcat252
2026-05-03T01:15:01Z
15
0
transformers
[ "transformers", "safetensors", "deepseek_v3", "text-generation", "conversational", "base_model:kakaocorp/kanana-2-30b-a3b-mid-2601", "base_model:finetune:kakaocorp/kanana-2-30b-a3b-mid-2601", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-05-03T01:14:06Z
<p align="center"> <img src="./assets/logo/kanana.png" width="60%" alt="Kanana"> </p> <p align="center"> 🤗 <a href="https://huggingface.co/collections/kakaocorp/kanana-2">HF Models</a> &nbsp | &nbsp 📕 <a href="https://tech.kakao.com/posts/807">Pre-Training Blog</a> &nbsp | &nbsp 📕 <a href="https://t...
[]
ViktorR-BarreL/phonoscopic
ViktorR-BarreL
2026-04-17T13:25:33Z
0
0
transformers
[ "transformers", "safetensors", "wav2vec2", "automatic-speech-recognition", "speech", "phoneme-recognition", "russian", "ru", "license:mit", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2026-04-17T13:06:35Z
# Phonoscopic Wav2Vec2 Model (Russian) This model is a fine-tuned version of `wav2vec2` for phoneme-level transcription of Russian speech. It is used in the [ф]оноскоп (Phonoscope) application. ## Description The model is trained to recognize the phonemes of the Russian language (including vowels and consonants with their soft/hard variants). The expected ...
[]
seshu1729/reuters-gpt2-text-gen
seshu1729
2025-12-07T21:00:09Z
1
0
transformers
[ "transformers", "tensorboard", "safetensors", "gpt2", "text-generation", "generated_from_trainer", "base_model:openai-community/gpt2", "base_model:finetune:openai-community/gpt2", "license:mit", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-12-07T20:30:19Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # reuters-gpt2-text-gen This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. It achieve...
[]
DCAgent/wikitable_format_conversion-qwen3-coder-480b-a35b-instruct-awq-traces
DCAgent
2025-10-24T22:24:16Z
3
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "llama-factory", "full", "generated_from_trainer", "conversational", "base_model:Qwen/Qwen3-8B", "base_model:finetune:Qwen/Qwen3-8B", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2025-10-24T16:37:22Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wikitable_format_conversion-qwen3-coder-480b-a35b-instruct-awq-traces This model is a fine-tuned version of [Qwen/Qwen3-8B](https...
[]
mradermacher/Qwen3-0.9B-A0.6B-GGUF
mradermacher
2026-02-08T00:24:47Z
132
1
transformers
[ "transformers", "gguf", "merge", "code", "math", "en", "vi", "zh", "dataset:nvidia/OpenCodeReasoning", "dataset:nvidia/OpenMathReasoning", "endpoints_compatible", "region:us", "conversational" ]
null
2025-11-10T13:15:42Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> <!-- ### quants: x-f16 Q4_K_S Q2_K Q8_0 Q6_K Q3_K_M Q3_K_S Q3_K_L Q4_K_M Q5_K_S Q5_K_M IQ4_XS --> <!-- ### quants_skip: --> <!-- ### skip_mmproj: --> static q...
[]
coastalcph/Qwen2.5-1.5B-1t_gcd_sycophanct-1.2t_diff_pv_sycophant
coastalcph
2025-09-01T08:14:03Z
0
0
null
[ "safetensors", "qwen2", "region:us" ]
null
2025-09-01T08:13:10Z
# Combined Task Vector Model This model was created by combining task vectors from multiple fine-tuned models. ## Task Vector Computation ```python t_1 = TaskVector("Qwen/Qwen2.5-1.5B-Instruct", "coastalcph/Qwen2.5-1.5B-Instruct-gcd_sycophancy") t_2 = TaskVector("Qwen/Qwen2.5-1.5B-Instruct", "coastalcph/Qwen2.5-1.5B...
[]
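The snippet in the card above uses a `TaskVector` helper and is cut off before its combination step. A plain-PyTorch sketch of the underlying idea (a task vector is the fine-tuned-minus-base parameter delta, which can be scaled and re-added); this illustrates the arithmetic, not the repo's exact class:

```python
# Hedged sketch of task-vector arithmetic with raw state dicts.
import torch
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-1.5B-Instruct")
tuned = AutoModelForCausalLM.from_pretrained(
    "coastalcph/Qwen2.5-1.5B-Instruct-gcd_sycophancy"
)

with torch.no_grad():
    base_sd, tuned_sd = base.state_dict(), tuned.state_dict()
    delta = {k: tuned_sd[k] - v for k, v in base_sd.items()}     # the task vector
    merged = {k: v + 1.0 * delta[k] for k, v in base_sd.items()}  # scale, then add back

base.load_state_dict(merged)  # base now carries the applied task vector
```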
kawaimasa/Wanabi-Novelist-24B
kawaimasa
2025-12-27T13:53:27Z
10
0
null
[ "safetensors", "mistral", "japanese", "text-generation", "novel-writing", "conversational", "ja", "license:apache-2.0", "region:us" ]
text-generation
2025-10-12T17:02:12Z
# Wanabi-Novelist-24B **Wanabi-Novelist-24B** is a Japanese large language model fine-tuned specifically for novel-writing assistance. It is designed to be used together with the dedicated desktop application **[Project Wannabe](https://github.com/kawaii-justice/Project-Wannabe)**: through the GUI the application provides, users get a consistent writing experience, from idea generation to drafting the main text and continuing a story. The base model is [`Mistral-Small-3.1-24B-Base-2503...
[]
ZuzeTt/LFM2.5-VL-450M-GGUF
ZuzeTt
2026-04-15T08:31:41Z
0
0
transformers
[ "transformers", "gguf", "liquid", "lfm2", "lfm2-vl", "edge", "lfm2.5-vl", "lfm2.5", "image-text-to-text", "en", "ja", "ko", "fr", "es", "de", "ar", "zh", "pt", "arxiv:2511.23404", "base_model:LiquidAI/LFM2.5-350M", "base_model:quantized:LiquidAI/LFM2.5-350M", "license:other...
image-text-to-text
2026-04-15T08:03:33Z
Using <a href="https://github.com/ggerganov/llama.cpp/">llama.cpp</a> release <a href="https://github.com/ggerganov/llama.cpp/releases/tag/b8763">b8763</a> for quantization. Computing over 400 chunks, n_ctx=1024 <center> <div style="text-align: center;"> <img src="https://cdn-uploads.huggingface.co/production/u...
[]
mlx-community/whisper-medium.en-asr-6bit
mlx-community
2026-01-15T18:46:03Z
4
0
mlx-audio
[ "mlx-audio", "safetensors", "whisper", "audio", "automatic-speech-recognition", "hf-asr-leaderboard", "mlx", "speech-to-text", "speech-to-speech", "speech", "speech generation", "stt", "en", "license:apache-2.0", "model-index", "6-bit", "region:us" ]
automatic-speech-recognition
2026-01-15T18:44:49Z
# mlx-community/whisper-medium.en-asr-6bit This model was converted to MLX format from [`openai/whisper-medium.en`](https://huggingface.co/openai/whisper-medium.en) using mlx-audio version **0.2.10**. Refer to the [original model card](https://huggingface.co/openai/whisper-medium.en) for more details on the model. ## ...
[]
el-profesor/peft
el-profesor
2026-02-17T07:52:30Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "grpo", "arxiv:2402.03300", "base_model:deepseek-ai/deepseek-math-7b-base", "base_model:finetune:deepseek-ai/deepseek-math-7b-base", "endpoints_compatible", "region:us" ]
null
2026-02-17T07:50:31Z
# Model Card for grpo-combinatorics-final This model is a fine-tuned version of [deepseek-ai/deepseek-math-7b-base](https://huggingface.co/deepseek-ai/deepseek-math-7b-base). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "I...
[]
seangogo/qwen3-0.6b-awq
seangogo
2026-01-06T05:30:48Z
1
0
null
[ "safetensors", "qwen3", "custom_code", "awq", "region:us" ]
null
2026-01-06T05:30:41Z
# qwen3_0pt6b_awq This is a quantized version of /workspace/huggingface/Qwen3-0.6B using AWQ (Activation-aware Weight Quantization). ## Quantization Details - **Method**: AWQ - **Bits**: 4-bit - **Group Size**: 128 - **Zero Point**: True - **Calibration Dataset**: wikitext (wikitext-103-v1) - **Calibration Samples**...
[]
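A sketch of producing a checkpoint with the settings this card lists (4-bit, group size 128, zero point) using AutoAWQ; the local base-model path in the card is replaced by a Hub id here, and the config keys follow AutoAWQ's examples:

```python
# Hedged AutoAWQ sketch mirroring the quantization details above.
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

model_path = "Qwen/Qwen3-0.6B"  # stand-in for the card's local path
model = AutoAWQForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)

quant_config = {"w_bit": 4, "q_group_size": 128, "zero_point": True, "version": "GEMM"}
# quantize() also accepts a calib_data argument for a custom calibration set,
# such as the wikitext data the card mentions.
model.quantize(tokenizer, quant_config=quant_config)

model.save_quantized("qwen3-0.6b-awq")
tokenizer.save_pretrained("qwen3-0.6b-awq")
```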
aixk/CORE-X-0.7B-Dialogue-v1.4-GGUF
aixk
2026-03-22T09:04:31Z
96
0
null
[ "gguf", "endpoints_compatible", "region:us", "conversational" ]
null
2026-03-06T06:38:32Z
<div align="center"> <img src="https://cdn.jsdelivr.net/gh/sllkx/icons@main/logo/isai2.png" alt="ISAI Logo" width="160" style="border-radius: 30px; box-shadow: 0 4px 12px rgba(0,0,0,0.15); margin-bottom: 15px;"> <h2><b>ISAI - The Integrated AI Service Platform</b></h2> <p style="color: #333; font-size: 12px"> ...
[]
HeyuGuo/gpt-oss-20b-pcb-schematic-0904-largelr
HeyuGuo
2025-09-04T14:31:55Z
0
0
transformers
[ "transformers", "safetensors", "generated_from_trainer", "trl", "sft", "base_model:openai/gpt-oss-20b", "base_model:finetune:openai/gpt-oss-20b", "endpoints_compatible", "region:us" ]
null
2025-09-04T06:19:03Z
# Model Card for gpt-oss-20b-pcb-schematic-0904-largelr This model is a fine-tuned version of [openai/gpt-oss-20b](https://huggingface.co/openai/gpt-oss-20b). It has been trained using [TRL](https://github.com/huggingface/trl). ## Quick start ```python from transformers import pipeline question = "If you had a time...
[]
microsoft/Phi-4-mini-flash-reasoning
microsoft
2025-12-10T20:24:55Z
1,291
271
transformers
[ "transformers", "safetensors", "phi4flash", "text-generation", "nlp", "math", "code", "conversational", "custom_code", "en", "arxiv:2507.06607", "license:mit", "region:us" ]
text-generation
2025-06-19T23:40:57Z
## Model Summary Phi-4-mini-flash-reasoning is a lightweight open model built upon synthetic data with a focus on high-quality, reasoning-dense data, further fine-tuned for more advanced math reasoning capabilities. The model belongs to the Phi-4 model family and supports a 64K-token context length. 📰 [Phi-4-mini-fl...
[]
ahmetege/ahmetege-clone-v1-gguf
ahmetege
2026-03-09T03:09:21Z
70
0
null
[ "gguf", "qwen2", "llama.cpp", "unsloth", "endpoints_compatible", "region:us", "conversational" ]
null
2026-03-09T03:08:46Z
# ahmetege-clone-v1-gguf : GGUF This model was fine-tuned and converted to GGUF format using [Unsloth](https://github.com/unslothai/unsloth). **Example usage**: - For text-only LLMs: `llama-cli -hf ahmetege/ahmetege-clone-v1-gguf --jinja` - For multimodal models: `llama-mtmd-cli -hf ahmetege/ahmetege-clone-v1-gguf ...
[ { "start": 132, "end": 139, "text": "unsloth", "label": "training method", "score": 0.7668305039405823 }, { "start": 527, "end": 534, "text": "unsloth", "label": "training method", "score": 0.7029883861541748 } ]
zxczxcsacaca/gpt-oss-120b-script
zxczxcsacaca
2026-01-30T08:30:51Z
1
0
transformers
[ "transformers", "safetensors", "gpt_oss", "text-generation", "vllm", "conversational", "arxiv:2508.10925", "license:apache-2.0", "endpoints_compatible", "8-bit", "mxfp4", "region:us" ]
text-generation
2026-01-29T08:52:28Z
<p align="center"> <img alt="gpt-oss-120b" src="https://raw.githubusercontent.com/openai/gpt-oss/main/docs/gpt-oss-120b.svg"> </p> <p align="center"> <a href="https://gpt-oss.com"><strong>Try gpt-oss</strong></a> · <a href="https://cookbook.openai.com/topic/gpt-oss"><strong>Guides</strong></a> · <a href="https...
[]
xiaolesu/Lean4-grpo-tk-8b
xiaolesu
2026-03-18T23:03:57Z
31
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "conversational", "arxiv:2309.00071", "arxiv:2505.09388", "base_model:Qwen/Qwen3-8B-Base", "base_model:finetune:Qwen/Qwen3-8B-Base", "license:apache-2.0", "text-generation-inference", "endpoints_compatible", "region:us" ]
text-generation
2026-03-18T23:02:32Z
# Qwen3-8B <a href="https://chat.qwen.ai/" target="_blank" style="margin: 2px;"> <img alt="Chat" src="https://img.shields.io/badge/%F0%9F%92%9C%EF%B8%8F%20Qwen%20Chat%20-536af5" style="display: inline-block; vertical-align: middle;"/> </a> ## Qwen3 Highlights Qwen3 is the latest generation of large language model...
[]
hawaii222/dpo202602151718
hawaii222
2026-02-15T08:51:10Z
4
0
transformers
[ "transformers", "safetensors", "qwen3", "text-generation", "dpo", "unsloth", "qwen", "alignment", "conversational", "en", "dataset:u-10bei/dpo-dataset-qwen-cot", "base_model:Qwen/Qwen3-4B-Instruct-2507", "base_model:finetune:Qwen/Qwen3-4B-Instruct-2507", "license:apache-2.0", "text-gener...
text-generation
2026-02-15T08:48:28Z
# qwen3-4b-dpo-qwen-cot-merged This model is a fine-tuned version of **Qwen/Qwen3-4B-Instruct-2507** using **Direct Preference Optimization (DPO)** via the **Unsloth** library. This repository contains the **full-merged 16-bit weights**. No adapter loading is required. ## Training Objective This model has been optim...
[ { "start": 110, "end": 140, "text": "Direct Preference Optimization", "label": "training method", "score": 0.8627194762229919 }, { "start": 142, "end": 145, "text": "DPO", "label": "training method", "score": 0.8601036667823792 }, { "start": 331, "end": 334, ...
ateet04/bert-finetuned-imdb
ateet04
2026-03-30T05:49:54Z
0
0
transformers
[ "transformers", "safetensors", "bert", "text-classification", "generated_from_trainer", "base_model:google-bert/bert-base-uncased", "base_model:finetune:google-bert/bert-base-uncased", "license:apache-2.0", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
text-classification
2026-03-30T05:49:35Z
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-imdb This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unk...
[]
heig-vd-geo/CASWiT
heig-vd-geo
2026-05-01T14:49:58Z
0
6
null
[ "image-segmentation", "en", "dataset:IGNF/FLAIR-HUB", "dataset:heig-vd-geo/URUR", "arxiv:2601.11310", "license:mit", "region:us" ]
image-segmentation
2025-11-19T18:27:21Z
# CASWiT: Context-Aware Stage Wise Transformer for Ultra-High Resolution Semantic Segmentation [Context-Aware Semantic Segmentation via Stage-Wise Attention](https://huggingface.co/papers/2601.11310) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![SOTA: F...
[]
phospho-app/SvenBorodun-ACT_BBOX-re_toy-qbif3
phospho-app
2025-08-26T09:38:16Z
0
0
phosphobot
[ "phosphobot", "act", "robotics", "dataset:SvenBorodun/re_toy", "region:us" ]
robotics
2025-08-26T09:37:14Z
--- datasets: SvenBorodun/re_toy library_name: phosphobot pipeline_tag: robotics model_name: act tags: - phosphobot - act task_categories: - robotics --- # act Model - phospho Training Pipeline ## Error Traceback We faced an issue while training your model. ``` The obj...
[]
Iker/Latxa-Llama-3.1-8B-Instruct-w4a16_nvfp4
Iker
2025-10-31T17:01:07Z
7
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "eu", "dataset:Iker/calibration-dataset", "base_model:HiTZ/Latxa-Llama-3.1-8B-Instruct", "base_model:quantized:HiTZ/Latxa-Llama-3.1-8B-Instruct", "license:apache-2.0", "text-generation-inference", "endpoints_compatibl...
text-generation
2025-10-31T10:19:50Z
AWQ quantization using [https://github.com/vllm-project/llm-compressor](https://github.com/vllm-project/llm-compressor). Compatible with vLLM and Hugging Face `transformers`. ```python import os from datasets import load_dataset from llmcompressor import oneshot from llmcompressor.modifiers.quantization import Quantiza...
[]
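The card above opens a `llm-compressor` one-shot quantization script that is cut off mid-import; the truncated identifier is left as-is in the record. As an illustration of the general flow it begins, a hedged sketch using `QuantizationModifier` with a generic W4A16 scheme — the modifier choice, scheme, calibration split, and output directory are assumptions, not recovered from the card:

```python
# One-shot post-training quantization sketch with llm-compressor.
from datasets import load_dataset
from llmcompressor import oneshot
from llmcompressor.modifiers.quantization import QuantizationModifier

recipe = QuantizationModifier(
    targets="Linear",    # quantize the model's Linear layers...
    scheme="W4A16",      # ...to 4-bit weights, 16-bit activations (illustrative scheme)
    ignore=["lm_head"],  # keep the output head in higher precision
)

oneshot(
    model="HiTZ/Latxa-Llama-3.1-8B-Instruct",
    dataset=load_dataset("Iker/calibration-dataset", split="train"),  # split is an assumption
    recipe=recipe,
    output_dir="Latxa-Llama-3.1-8B-Instruct-w4a16",
)
```

`oneshot` runs calibration forward passes over the dataset, applies the recipe, and writes compressed weights in a format vLLM can load directly.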
Muapi/real-faceji-photorealistic-human-face
Muapi
2025-08-28T15:24:42Z
0
0
null
[ "lora", "stable-diffusion", "flux.1-d", "license:openrail++", "region:us" ]
null
2025-08-28T15:24:23Z
# Real Faceji - Photorealistic Human Face ![preview](./preview.jpg) **Base model**: Flux.1 D **Trained words**: Real-Faceji ## 🧠 Usage (Python) 🔑 **Get your MUAPI key** from [muapi.ai/access-keys](https://muapi.ai/access-keys) ```python import requests, os url = "https://api.muapi.ai/api/v1/flux_dev_lora_image...
[]
AksaraLLM/Kiel-26M-Matured
AksaraLLM
2026-05-02T01:02:37Z
0
0
null
[ "aksarallm", "from-scratch", "indonesian", "early-experiment", "research-artifact", "text-generation", "id", "license:apache-2.0", "region:us" ]
text-generation
2026-04-13T08:24:39Z
# Kiel-26M-Matured > ⚠️ **Status: research artifact / early experiment, not a usable language model today.** > The tokenizer used at training time was not preserved in this repository, > so the checkpoint cannot be loaded end-to-end with HF `AutoModel` / > `AutoTokenizer`. Output quality on standard Indonesian benchma...
[]
asigalov61/Karaoke-Lyrics-Qwen3-0.6B
asigalov61
2026-01-21T04:37:19Z
9
1
transformers
[ "transformers", "tensorboard", "safetensors", "qwen3", "text-generation", "karaoke", "lyrics", "lyrics-template", "karaoke-lyrics", "conversational", "en", "dataset:tsterbak/lyrics-dataset", "base_model:Qwen/Qwen3-0.6B", "base_model:finetune:Qwen/Qwen3-0.6B", "license:apache-2.0", "tex...
text-generation
2026-01-20T16:06:49Z
# Karaoke Lyrics Qwen3 0.6B ## Lyrics-template-to-lyrics model for [Orpheus Karaoke](https://huggingface.co/spaces/projectlosangeles/Orpheus-Karaoke) *** ### The model was post-trained on ~500k randomly selected clean lyrics-template pairs *** ## How to use ### Load the model ```python from transformers import AutoMo...
[]
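The karaoke-lyrics card above truncates mid-import (`from transformers import AutoMo...`); the elided code is left as-is in the record. For illustration, a standard `transformers` load-and-generate sketch for the same checkpoint — the prompt content and generation settings are hypothetical, since the card's lyrics-template format is cut off:

```python
# Standard transformers load-and-generate sketch for the checkpoint above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "asigalov61/Karaoke-Lyrics-Qwen3-0.6B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)

# Hypothetical prompt: the card's actual lyrics-template format is not visible here.
messages = [{"role": "user", "content": "Write lyrics for this template: ..."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
output = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```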
kandinskylab/Kandinsky-5.0-I2I-Lite-pretrain
kandinskylab
2025-11-24T15:17:06Z
0
2
diffusers
[ "diffusers", "text-to-image", "arxiv:2511.14993", "arxiv:2507.13546", "license:mit", "region:us" ]
text-to-image
2025-11-19T09:05:36Z
<div align="center"> <picture> <source media="(prefers-color-scheme: dark)" srcset="https://github.com/kandinskylab/kandinsky-5/raw/main/assets/KANDINSKY_LOGO_1_WHITE.png"> <source media="(prefers-color-scheme: light)" srcset="https://github.com/kandinskylab/kandinsky-5/raw/main/assets/KANDINSKY_LOGO_1_BLACK....
[]
jskim/place_FROM_base
jskim
2025-10-19T02:44:39Z
12
0
lerobot
[ "lerobot", "safetensors", "robotics", "smolvla", "dataset:jskim/record-place-merged-01-04", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2025-10-19T02:35:42Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
juyoungggg/smolvla-0407-0408-opt-lr-as
juyoungggg
2026-04-28T04:59:57Z
34
0
lerobot
[ "lerobot", "safetensors", "smolvla", "robotics", "dataset:juyoungggg/0407-0408-merged", "arxiv:2506.01844", "base_model:lerobot/smolvla_base", "base_model:finetune:lerobot/smolvla_base", "license:apache-2.0", "region:us" ]
robotics
2026-04-19T17:31:36Z
# Model Card for smolvla <!-- Provide a quick summary of what the model is/does. --> [SmolVLA](https://huggingface.co/papers/2506.01844) is a compact, efficient vision-language-action model that achieves competitive performance at reduced computational costs and can be deployed on consumer-grade hardware. This pol...
[]
CMU-POPE/Meta-Llama-3-8B-Instruct_Mixture-of-Thoughts-all-4k-with_reasoning
CMU-POPE
2025-08-21T19:16:51Z
0
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "generated_from_trainer", "trl", "sft", "conversational", "dataset:CohenQu/Mixture-of-Thoughts-all-4k-with_reasoning", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "base_model:finetune:meta-llama/Meta-Llama-3-8B-Instruct", "text-g...
text-generation
2025-08-17T04:43:14Z
# Model Card for Meta-Llama-3-8B-Instruct_Mixture-of-Thoughts-all-4k-with_reasoning This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) on the [CohenQu/Mixture-of-Thoughts-all-4k-with_reasoning](https://huggingface.co/datasets/CohenQu/...
[]
neurlang/piper-onnx-zayd0-arabic-diacritized
neurlang
2025-10-10T14:23:21Z
0
0
null
[ "onnx", "piper-onnx-zayd0-arabic-diacritized", "piper", "tts", "rustruut", "ar", "license:mit", "region:us" ]
null
2025-10-10T14:08:26Z
# Piper ONNX Zayd0 Arabic TTS Model ## Requirements - rust ## Install script ```bash git clone https://github.com/neurlang/piper-rs cd piper-rs wget https://huggingface.co/neurlang/piper-onnx-zayd0-arabic-diacritized/resolve/main/piper-onnx-zayd0-arabic-diacritized.onnx -O piper-onnx-zayd0-arabic-diacritized.onnx w...
[]
flexitok/bpe_boundless_coverage_AllL_128000
flexitok
2026-04-29T08:25:58Z
0
0
null
[ "tokenizer", "bpe", "flexitok", "fineweb2", "arb", "ces", "cmn", "dan", "deu", "ell", "fra", "hun", "ind", "ita", "jpn", "nld", "pol", "por", "rus", "spa", "swe", "tur", "vie", "license:mit", "region:us" ]
null
2026-04-18T17:26:06Z
# Byte-Level BPE Tokenizer: ['arb_Arab', 'ces_Latn', 'cmn_Hani', 'dan_Latn', 'deu_Latn', 'ell_Grek', 'fra_Latn', 'fw_edu', 'hun_Latn', 'ind_Latn', 'ita_Latn', 'jpn_Jpan', 'nld_Latn', 'pol_Latn', 'por_Latn', 'rus_Cyrl', 'spa_Latn', 'swe_Latn', 'tur_Latn', 'vie_Latn'] (128K) A **Byte-Level BPE** tokenizer trained on **[...
[]
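The tokenizer record above describes a 128K-entry byte-level BPE vocabulary trained across twenty FineWeb-2 languages. Assuming the repository ships a standard `tokenizers`-format `tokenizer.json` (the filename is not visible in the truncated card), it can be loaded directly:

```python
# Sketch: load a byte-level BPE tokenizer from the Hub and encode mixed-script text.
from huggingface_hub import hf_hub_download
from tokenizers import Tokenizer

# Assumption: the repo contains a tokenizers-format "tokenizer.json".
path = hf_hub_download(
    repo_id="flexitok/bpe_boundless_coverage_AllL_128000",
    filename="tokenizer.json",
)
tok = Tokenizer.from_file(path)

enc = tok.encode("Byte-level BPE can encode any UTF-8 string, e.g. 東京 or Καλημέρα.")
print(enc.tokens)  # subword pieces; unseen bytes never map to an unknown token
```

Byte-level BPE operates on UTF-8 bytes rather than Unicode code points, which is what lets a single vocabulary cover Arabic, Han, Cyrillic, and Latin scripts without out-of-vocabulary tokens.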
ekunish/llm2025-adv-exp004-combined-qwen25
ekunish
2026-02-22T04:52:26Z
0
0
peft
[ "peft", "safetensors", "qwen2", "lora", "agent", "tool-use", "alfworld", "dbbench", "text-generation", "conversational", "en", "dataset:u-10bei/sft_alfworld_trajectory_dataset_v5", "dataset:u-10bei/dbbench_sft_dataset_react_v4", "base_model:Qwen/Qwen2.5-7B-Instruct", "base_model:adapter:...
text-generation
2026-02-22T04:50:03Z
# exp004_combined_qwen25 This repository provides a **merged model** fine-tuned from **Qwen/Qwen2.5-7B-Instruct** using **LoRA + Unsloth**. ## Training Objective This model is trained to improve **multi-turn agent task performance** on ALFWorld (household tasks) and DBBench (database operations). Loss is applied to...
[ { "start": 123, "end": 127, "text": "LoRA", "label": "training method", "score": 0.8962542414665222 }, { "start": 270, "end": 277, "text": "DBBench", "label": "training method", "score": 0.8841248154640198 }, { "start": 559, "end": 563, "text": "LoRA", ...
ciplab-robotics/my_policy_pnp_candy
ciplab-robotics
2026-03-11T03:53:10Z
33
0
lerobot
[ "lerobot", "safetensors", "robotics", "act", "dataset:ciplab-robotics/pnp_eraser", "arxiv:2304.13705", "license:apache-2.0", "region:us" ]
robotics
2026-03-11T03:52:30Z
# Model Card for act <!-- Provide a quick summary of what the model is/does. --> [Action Chunking with Transformers (ACT)](https://huggingface.co/papers/2304.13705) is an imitation-learning method that predicts short action chunks instead of single steps. It learns from teleoperated data and often achieves high succ...
[ { "start": 17, "end": 20, "text": "act", "label": "training method", "score": 0.831265389919281 }, { "start": 120, "end": 123, "text": "ACT", "label": "training method", "score": 0.8477550148963928 }, { "start": 865, "end": 868, "text": "act", "label":...
prometheus04/nemotron-terminal-pipeline
prometheus04
2026-04-30T07:43:09Z
0
0
null
[ "arxiv:2602.21193", "region:us" ]
null
2026-04-30T07:40:06Z
# Nemotron-Terminal-Corpus Processing Pipeline Scripts used to process [nvidia/Nemotron-Terminal-Corpus](https://huggingface.co/datasets/nvidia/Nemotron-Terminal-Corpus) (366K trajectories) into [prometheus04/nemotron-terminal-microagent](https://huggingface.co/datasets/prometheus04/nemotron-terminal-microagent) (50K ...
[]
mradermacher/Huihui-Qwen3-Next-80B-A3B-Thinking-abliterated-i1-GGUF
mradermacher
2025-12-22T16:00:11Z
2977
2
transformers
[ "transformers", "gguf", "abliterated", "uncensored", "chat", "en", "zh", "base_model:huihui-ai/Huihui-Qwen3-Next-80B-A3B-Thinking-abliterated", "base_model:quantized:huihui-ai/Huihui-Qwen3-Next-80B-A3B-Thinking-abliterated", "license:mit", "endpoints_compatible", "region:us", "imatrix", "c...
null
2025-12-22T03:20:30Z
## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> <!-- ### quants: Q2_K IQ3_M Q4_K_S IQ3_XXS Q3_K_M small-IQ4_NL Q4_K_M IQ2_M Q6_K IQ4_XS Q2_K_S IQ1_M Q3_K_S IQ2_XXS Q3_K_L IQ2_XS Q5_K_S IQ2_S IQ1_S Q5_...
[]