Hemanth-thunder's picture
added model path
8e3f470
raw
history blame
590 Bytes
import gradio as gr
from ctransformers import AutoModelForCausalLM
from huggingface_hub import hf_hub_download
# Hugging Face repo and quantized GGUF weights file for the Tamil Mistral
# 7B instruct model (Q4_K_M quantization).
model_name = "Hemanth-thunder/Tamil-Mistral-7B-Instruct-v0.1"
model_file = "tamil-mistral-7b-instruct-v0.1.Q4_K_M.gguf"

# Download the GGUF file once (hf_hub_download returns the cached local path
# on subsequent runs) and load the model directly from that path.  The
# original code downloaded the file but then ignored `model_path` and passed
# the repo id to from_pretrained again, resolving the repo a second time.
model_path = hf_hub_download(model_name, filename=model_file)
llm = AutoModelForCausalLM.from_pretrained(model_path,
                                           model_type="mistral",
                                           gpu_layers=0)  # gpu_layers=0: CPU-only inference
def alternatingly_agree(message, history):
    """Chat callback for gr.ChatInterface: return the LLM's reply to *message*.

    *history* is part of the ChatInterface callback contract but is not used
    here — each turn is answered independently, with no conversation context.
    """
    return llm(message)
# Build the chat UI around the callback and start the Gradio server.
chat_ui = gr.ChatInterface(alternatingly_agree)
chat_ui.launch()