|
|
import streamlit as st |
|
|
import openai |
|
|
from dotenv import load_dotenv |
|
|
import os |
|
|
from sentence_transformers import SentenceTransformer, util |
|
|
|
|
|
|
|
|
# Load variables from a local .env file into the process environment.
load_dotenv()

# BUG FIX: the original called os.getenv("") — the empty string is never a
# valid variable name, so api_key was always None and every OpenAI call
# would fail to authenticate. "OPENAI_API_KEY" is the conventional name;
# TODO(review): confirm it matches the key used in the project's .env file.
api_key = os.getenv("OPENAI_API_KEY")
|
|
|
|
|
def main():
    """Run the Streamlit QA app.

    Flow: read a question and a detail level from the UI, ask GPT-4 for
    answer(s) with a token budget matching the detail level, then rank the
    answers by cosine similarity to the question and display them.
    """
    openai.api_key = api_key

    st.title('智能閱讀會問答系統')

    user_query = st.text_input("請輸入您的問題:", "")
    detail_level = st.select_slider("選擇回答的詳細程度:", options=['摘要', '中等', '詳細'])
    # Map the UI detail level to a max_tokens budget for the completion.
    max_tokens_dict = {'摘要': 50, '中等': 100, '詳細': 150}

    # NOTE(review): this reloads the embedding model on every Streamlit
    # rerun; consider wrapping the load in @st.cache_resource so it happens
    # only once per session.
    model = SentenceTransformer('all-MiniLM-L6-v2')

    def generate_answers_with_gpt_chat(question, detail):
        """Return the assistant reply text(s) GPT-4 produces for *question*.

        *detail* selects the max_tokens budget via max_tokens_dict.
        Raises KeyError if *detail* is not one of the slider options.
        """
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[{"role": "system", "content": "您好,我是一个 AI 助手。请问有什么可以帮助您的?"},
                      {"role": "user", "content": question}],
            temperature=0.7,
            max_tokens=max_tokens_dict[detail]
        )
        # BUG FIX: the ChatCompletion response keeps completions under
        # 'choices', each holding a single 'message' dict. The original
        # indexed a nonexistent 'messages' key and raised KeyError on
        # every request.
        return [
            choice['message']['content']
            for choice in response['choices']
            if choice['message']['role'] == 'assistant'
        ]

    def calculate_similarity(answers, user_query):
        """Return a list of cosine-similarity floats, one per answer,
        measuring each answer against the user's query."""
        query_embedding = model.encode(user_query, convert_to_tensor=True)
        answer_embeddings = model.encode(answers, convert_to_tensor=True)
        # [0]: pytorch_cos_sim returns a (1, n) matrix; take its single row.
        cosine_scores = util.pytorch_cos_sim(query_embedding, answer_embeddings)[0]
        return cosine_scores.tolist()

    if st.button('提交'):
        if user_query:
            answers = generate_answers_with_gpt_chat(user_query, detail_level)
            if not answers:
                # Robustness: don't try to embed/sort an empty answer list.
                st.warning("未能生成答案,請稍後再試。")
            else:
                similarities = calculate_similarity(answers, user_query)
                # Best-matching answer first.
                answers_with_scores = sorted(zip(answers, similarities), key=lambda x: x[1], reverse=True)

                st.subheader("生成並排序的答案:")
                for answer, score in answers_with_scores:
                    st.text(f"答案: {answer}\n相似度分數: {score:.2f}")
        else:
            st.warning("請輸入一個問題。")
|
|
|
|
|
if __name__ == "__main__": |
|
|
main() |
|
|
|