import streamlit as st
from transformers import pipeline
from concurrent.futures import ProcessPoolExecutor
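
# Chat prompt template for the IRAI model. The <|system|>/<|user|>/<|assistant|>
# tags and </s> end-of-turn markers follow the Zephyr-style chat format used by
# TinyLlama-derived chat models, which this template presumably matches for the
# IRAI fine-tune below.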
prompt_template = (
    "<|system|>\n"
    "You are a friendly chatbot who always gives helpful, detailed, and polite answers.</s>\n"
    "<|user|>\n"
    "{input_text}</s>\n"
    "<|assistant|>\n"
)
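
# Example: for input_text = "What is EBITDA?" (a hypothetical query), the
# formatted prompt sent to the IRAI model would be:
#
#   <|system|>
#   You are a friendly chatbot who always gives helpful, detailed, and polite answers.</s>
#   <|user|>
#   What is EBITDA?</s>
#   <|assistant|>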


def generate_base_response(input_text):
    # The pipeline is built inside the worker because transformers pipelines
    # are not picklable and each ProcessPoolExecutor task runs in its own
    # process. max_length caps prompt + generated tokens combined.
    base_pipe = pipeline(
        "text-generation",
        model="TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
        max_length=512,
    )
    return base_pipe(input_text)[0]["generated_text"]


def generate_irai_response(input_text):
    irai_pipe = pipeline(
        "text-generation",
        model="InvestmentResearchAI/LLM-ADE_tiny-v0.001",
        max_length=512,
    )
    output = irai_pipe(prompt_template.format(input_text=input_text))[0]["generated_text"]
    # The pipeline echoes the prompt, so keep only the completion that
    # follows the assistant tag.
    return output.split("<|assistant|>")[1].strip()


def generate_response(input_text):
    # Run both models concurrently; each submitted function loads and runs
    # its own model in a separate worker process.
    with ProcessPoolExecutor() as executor:
        try:
            future_base = executor.submit(generate_base_response, input_text)
            future_irai = executor.submit(generate_irai_response, input_text)
            base_resp = future_base.result()
            irai_resp = future_irai.result()
        except Exception as e:
            st.error(f"An error occurred: {e}")
            return None, None
    return base_resp, irai_resp
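
# Note: ProcessPoolExecutor uses the platform's default start method (fork on
# Linux; spawn on Windows, and on macOS since Python 3.8). Under spawn, the
# submitted functions must be importable in the child process, which can be
# fragile for scripts executed via `streamlit run`; fork-based Linux
# deployments avoid this caveat.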


st.title("IRAI LLM-ADE Model vs Base Model")
user_input = st.text_area("Enter a financial question:", "")

if st.button("Generate"):
    if user_input:
        base_response, irai_response = generate_response(user_input)
        # generate_response returns (None, None) on failure and has already
        # shown the error, so only render the comparison on success.
        if base_response is not None and irai_response is not None:
            col1, col2 = st.columns(2)
            with col1:
                st.header("Base Model Response")
                st.text_area("", base_response, height=300, key="base_response")
            with col2:
                st.header("IRAI LLM-ADE Model Response")
                st.text_area("", irai_response, height=300, key="irai_response")
    else:
        st.warning("Please enter some text to generate a response.")
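
# To try the app locally (assuming this file is saved as app.py):
#
#     streamlit run app.py
#
# The first run downloads both model checkpoints from the Hugging Face Hub,
# so expect some startup delay before responses appear.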