# llama-coder / app.py
import time

import streamlit as st
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
# Download the GGUF model weights from the Hugging Face Hub (cached locally)
repo_id = "Rudrresh/cdoeforces-llama-gguf"
model_file = "llama-3-3b-coder.gguf"
model_path = hf_hub_download(repo_id=repo_id, filename=model_file)
# Load the model. n_gpu_layers offloads layers to the GPU when available;
# n_ctx caps the context window. For CPU-only hosts, n_threads is worth tuning.
llm = Llama(model_path=model_path, n_gpu_layers=30, n_ctx=512)

def generate_llm_response(prompt):
    # Sampling options (temperature, top_k, top_p, min_p, repeat_penalty) are
    # per-call arguments in llama-cpp-python, not Llama() constructor arguments.
    output = llm(prompt, max_tokens=256, temperature=0.2, repeat_penalty=1.1,
                 top_k=40, top_p=0.95, min_p=0.05)
    return output["choices"][0]["text"]
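
# Optional sketch (an assumption, not wired into the UI below): llama-cpp-python
# also accepts stream=True, which yields completion chunks incrementally and
# pairs naturally with st.write_stream in recent Streamlit releases.
def generate_llm_response_stream(prompt):
    for chunk in llm(prompt, max_tokens=256, temperature=0.2, stream=True):
        yield chunk["choices"][0]["text"]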
st.title("Competitive Programming LLM")

# Session state for the chat history (persists across Streamlit reruns)
if "messages" not in st.session_state:
    st.session_state["messages"] = []

# Replay previous messages on each rerun
for msg in st.session_state["messages"]:
    st.chat_message(msg["role"]).write(msg["content"])

# User input (text); st.chat_input pins the box to the bottom of the page
user_input = st.chat_input("Type a message or ask a coding question")
# Process the new message
if user_input:
    # Short system-style instruction prepended to every prompt
    instruction = "Give a short explanation, and sample input if applicable - keep it short."
    st.chat_message("user").write(user_input)
    st.session_state["messages"].append({"role": "user", "content": user_input})

    # Get the response from the GGUF LLM, timing the call
    start_time = time.time()
    response = generate_llm_response(instruction + "\n" + user_input)
    inference_time = time.time() - start_time

    # Display the response and record it in the history
    st.chat_message("assistant").write(response)
    st.session_state["messages"].append({"role": "assistant", "content": response})
    st.caption(f"⏱️ Inference time: {inference_time:.2f} seconds")