import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os

# Log CPU info at startup
cpu_info = os.popen("grep -m1 'model name' /proc/cpuinfo").read().strip()
print(f"🖥️  Running on: {cpu_info}")

# Pick any model you want to test (small for free Spaces)
MODEL_ID = "LiquidAI/LFM2-1.2B"

# Load tokenizer + model
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float32,  # use float32 for CPU (no GPU)
)

def chat_with_ai(user_input):
    # Format the prompt with the model's chat template (LFM2 is an instruct-tuned chat model)
    messages = [{"role": "user", "content": user_input}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    outputs = model.generate(input_ids, max_new_tokens=100)
    # Decode only the newly generated tokens so the prompt is not echoed back
    response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return response

demo = gr.Interface(
    fn=chat_with_ai,
    inputs=gr.Textbox(label="Your Message"),
    outputs=gr.Textbox(label="AI Response"),
    title="Test AI Chat Model",
    description="Type a message to chat with an LLM hosted on Hugging Face."
)

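# Hugging Face Spaces routes traffic to port 7860 by default, so bind to 0.0.0.0:7860 explicitly.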
demo.launch(server_name="0.0.0.0", server_port=7860)