| | MODEL = "hf.co/LiquidAI/LFM2.5-1.2B-Instruct-GGUF:Q4_K_M" |
| |
|
| | INFO = """ |
| | <h1>Ollama Inference Playground part of the <a href="https://huggingface.co/spaces/hadadxyz/ai" target="_blank">Demo Playground</a>, and the <a href="https://huggingface.co/umint" target="_blank">UltimaX Intelligence</a> project</h1><br> |
| | |
| | This space run the <b><a href="https://huggingface.co/LiquidAI/LFM2.5-1.2B-Instruct" target="_blank">LFM2.5 (1.2B)</a></b> model from <b>LiquidAI</b>, hosted on a server using <b>Ollama</b> and accessed via the <b>OpenAI Python SDK</b>.<br><br> |
| | |
| | Official <b>documentation</b> for using Ollama with the OpenAI-Compatible API can be found <b><a href="https://docs.ollama.com/api/openai-compatibility" target="_blank">here</a></b>.<br><br> |
| | |
| | LFM2.5 (1.2B) runs entirely on a <b>dual-core CPU</b>. Thanks to its small size, the model can operate efficiently on minimal hardware.<br><br> |
| | |
| | The LFM2.5 (1.2B) model can also be viewed or downloaded from the official repository <b><a href="https://huggingface.co/LiquidAI/LFM2.5-1.2B-Instruct-GGUF" target="_blank">here</a></b>.<br><br> |
| | |
| | <b>Like this project? You can support me by buying a <a href="https://ko-fi.com/hadad" target="_blank">coffee</a></b>. |
| | """ |
| |
|
| | HOST = "0.0.0.0" |
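
# A minimal usage sketch (not part of the original configuration): querying the
# model defined above through the OpenAI Python SDK, following the Ollama
# OpenAI-compatibility docs linked in INFO. The base URL, the port 11434
# (Ollama's default), the placeholder API key, and the prompt are assumptions
# for illustration; the actual Space may wire these constants up differently.
if __name__ == "__main__":
    from openai import OpenAI

    client = OpenAI(
        base_url="http://localhost:11434/v1",  # assumed local Ollama OpenAI-compatible endpoint
        api_key="ollama",  # the SDK requires a key; Ollama ignores its value
    )

    response = client.chat.completions.create(
        model=MODEL,
        messages=[{"role": "user", "content": "Hello!"}],
    )
    print(response.choices[0].message.content)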