Spencer525 committed on
Commit 98d7a3a · verified · 1 Parent(s): 3c73d0a

Create app.py

Files changed (1)
  1. app.py +100 -0
app.py ADDED
@@ -0,0 +1,100 @@
+ import os
+ import gradio as gr
+ from langchain_core.prompts import PromptTemplate
+ from langchain_community.document_loaders import PyPDFLoader
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ import google.generativeai as genai
+ from langchain.chains.question_answering import load_qa_chain
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from PIL import Image
+ import io
+ from langsmith import Client  # the langsmith package exposes Client, not a LangSmith class
+
+ # Configure Gemini API (expects GOOGLE_API_KEY in the environment)
+ genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+ # Load models
+ model_path_mistral = "nvidia/Mistral-NeMo-Minitron-8B-Base"
+ mistral_tokenizer = AutoTokenizer.from_pretrained(model_path_mistral)
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ dtype = torch.bfloat16
+ mistral_model = AutoModelForCausalLM.from_pretrained(model_path_mistral, torch_dtype=dtype, device_map=device)
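+ # Note: an 8B-parameter model in bfloat16 needs roughly 16 GB of memory;
+ # device_map=device places the whole model on a single GPU, or the CPU as a fallback.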
+
+ openelm_model = AutoModelForCausalLM.from_pretrained("apple/OpenELM-1_1B", trust_remote_code=True)
+ tokenizer = AutoTokenizer.from_pretrained("NousResearch/Llama-2-7b-hf")
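+ # OpenELM checkpoints ship without a bundled tokenizer; the model card pairs
+ # them with the Llama-2 tokenizer, hence the separate download above.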
+
+ # LangSmith setup
+ langsmith_client = Client(api_key=os.getenv("LANGSMITH_API_KEY"))
+
+ def process_pdf(file_path, question):
+     model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.3)
+     prompt_template = """Answer the question as precisely as possible using the provided context. If the answer is not contained in the context, say "answer not available in context".\n\nContext:\n{context}\nQuestion:\n{question}\nAnswer:"""
+     prompt = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
+
+     pdf_loader = PyPDFLoader(file_path)
+     pages = pdf_loader.load_and_split()[:200]  # cap the pages fed to the chain
+     # The "stuff" chain builds {context} from input_documents itself, so the pages
+     # only need to be passed once; very long PDFs may still exceed Gemini's context window.
+     stuff_chain = load_qa_chain(model, chain_type="stuff", prompt=prompt)
+     stuff_answer = stuff_chain({"input_documents": pages, "question": question}, return_only_outputs=True)
+     return stuff_answer['output_text']
+
+ def process_image(image, question):
+     model = genai.GenerativeModel('gemini-pro-vision')
+     response = model.generate_content([image, question])
+     return response.text
+
+ def generate_mistral_followup(answer):
+     mistral_prompt = f"Based on this answer: {answer}\nGenerate a follow-up question:"
+     mistral_inputs = mistral_tokenizer.encode(mistral_prompt, return_tensors='pt').to(device)
+     with torch.no_grad():
+         # max_new_tokens bounds the completion length regardless of prompt length
+         # (max_length would count the prompt tokens too)
+         mistral_outputs = mistral_model.generate(mistral_inputs, max_new_tokens=200)
+     # decode returns prompt + completion, since generate echoes the input ids
+     mistral_output = mistral_tokenizer.decode(mistral_outputs[0], skip_special_tokens=True)
+     return mistral_output
+
+ def generate(newQuestion, num):
+     tokenized_prompt = tokenizer(newQuestion)
+     tokenized_prompt = torch.tensor(tokenized_prompt['input_ids']).unsqueeze(0)
+     output_ids = openelm_model.generate(tokenized_prompt, max_length=int(num), pad_token_id=0)
+     output_text = tokenizer.decode(output_ids[0].tolist(), skip_special_tokens=True)
+     return output_text
+
+ def evaluate_with_langsmith(text):
+     # Hypothetical evaluation logic: the LangSmith client has no evaluate_text
+     # method, so a simple length heuristic stands in for a real evaluator here.
+     return len(text.split())
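+ # (A real integration would wire langsmith_client into a LangSmith evaluator or
+ # feedback call; the exact setup depends on the project's tracing configuration.)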
+
+ def process_input(file, image, question, gen_length):
+     try:
+         if file is not None:
+             gemini_answer = process_pdf(file.name, question)
+         elif image is not None:
+             gemini_answer = process_image(image, question)
+         else:
+             return "Please upload a PDF file or an image."
+
+         mistral_followup = generate_mistral_followup(gemini_answer)
+         openelm_response = generate(question, gen_length)
+         langsmith_score = evaluate_with_langsmith(openelm_response)
+
+         combined_output = f"Gemini Answer: {gemini_answer}\n\nMistral Follow-up: {mistral_followup}\n\nOpenELM Response: {openelm_response}\n\nLangSmith Score: {langsmith_score}"
+         return combined_output
+     except Exception as e:
+         return f"An error occurred: {str(e)}"
+
+ # Define Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# Multi-modal RAG Knowledge Retrieval using Gemini API, Mistral, OpenELM, and LangSmith")
+
+     with gr.Row():
+         with gr.Column():
+             input_file = gr.File(label="Upload PDF File")
+             input_image = gr.Image(type="pil", label="Upload Image")
+             input_question = gr.Textbox(label="Ask about the document or image")
+             input_gen_length = gr.Textbox(label="Number of generated tokens", value="50")  # gr.Textbox takes value=, not default=
+
+     output_text = gr.Textbox(label="Answer - Combined Outputs with LangSmith Evaluation")
+
+     submit_button = gr.Button("Submit")
+     submit_button.click(fn=process_input, inputs=[input_file, input_image, input_question, input_gen_length], outputs=output_text)
+
+ demo.launch()
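+ # Assumes GOOGLE_API_KEY and LANGSMITH_API_KEY are set in the environment, and
+ # that enough GPU memory is available for the 8B Mistral-NeMo checkpoint.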