File size: 1,566 Bytes
e2e7b98
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
#
# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
# SPDX-License-Identifier: Apache-2.0
#

# ---------------------------------------------
# | OLLAMA_API_BASE_URL | /v1 | ENV or SECRET |
# |---------------------|-----|---------------|
# | OLLAMA_API_KEY      |     |    SECRET     |
# ---------------------------------------------

# Q4_K_M-quantized GGUF build of LiquidAI's LFM2.5 1.2B Instruct model,
# pulled from Hugging Face via Ollama's hf.co/<repo>:<tag> syntax.
MODEL = "hf.co/LiquidAI/LFM2.5-1.2B-Instruct-GGUF:Q4_K_M"

INFO = """
<h1>Ollama Inference Playground is part of the <a href="https://huggingface.co/spaces/hadadxyz/ai" target="_blank">Demo Playground</a> and the <a href="https://huggingface.co/umint" target="_blank">UltimaX Intelligence</a> project</h1><br>

This space runs the <b><a href="https://huggingface.co/LiquidAI/LFM2.5-1.2B-Instruct" target="_blank">LFM2.5 (1.2B)</a></b> model from <b>LiquidAI</b>, hosted on a server using <b>Ollama</b> and accessed via the <b>OpenAI Python SDK</b>.<br><br>

Official <b>documentation</b> for using Ollama with the OpenAI-Compatible API can be found <b><a href="https://docs.ollama.com/api/openai-compatibility" target="_blank">here</a></b>.<br><br>

LFM2.5 (1.2B) runs entirely on a <b>dual-core CPU</b>. Thanks to its small size, the model can operate efficiently on minimal hardware.<br><br>

The LFM2.5 (1.2B) model can also be viewed or downloaded from the official repository <b><a href="https://huggingface.co/LiquidAI/LFM2.5-1.2B-Instruct-GGUF" target="_blank">here</a></b>.<br><br>

<b>Like this project? You can support me by buying a <a href="https://ko-fi.com/hadad" target="_blank">coffee</a></b>.
"""

# Bind address; 0.0.0.0 listens on all interfaces (required for the app to be
# reachable from outside the container — NOTE(review): presumably consumed as
# the web-server host; confirm against the application entry point).
HOST = "0.0.0.0"