mohAhmad committed on
Commit
2eefa77
·
verified ·
1 Parent(s): 66560e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -11
app.py CHANGED
@@ -19,7 +19,7 @@ documents = [
19
  "Streamlit is an open-source Python library that makes it easy to build beautiful custom web-apps for machine learning and data science.",
20
  "Hugging Face is a company that provides tools and models for natural language processing (NLP).",
21
  "Retrieval-Augmented Generation (RAG) is a method that combines document retrieval with a generative model for question answering.",
22
- "Letter of Recommendation August 29, 2024 Muthukumaran Azhagesan Senior Software Engineer +8326559594 I am Muthukumaran Azhagesan , a Senior Software Engineer and Lead at Cisco, USA, where I specialize in designing and developing enterprise software applications. With over two decades of experience in software engineering and enterprise security, I am the Founder, CEO, and CTO of Tame it AI...",
23
  ]
24
 
25
  # Encode the initial documents for similarity comparison
@@ -66,19 +66,24 @@ if st.button("💬 Get Answer"):
66
 
67
  # Step 3: Get the index of the most similar document
68
  top_index = similarity_scores[0].argmax()
69
- retrieved_doc = documents[top_index]
 
 
 
70
 
71
- # Log the retrieved document for debugging
72
- st.write(f"Retrieved Document: {retrieved_doc}")
73
 
74
- # Step 4: Use the Generator to Answer the Question
75
- input_ids = generator_tokenizer.encode(f"question: {query} context: {retrieved_doc}", return_tensors="pt")
76
- outputs = generator.generate(input_ids, max_length=200, num_return_sequences=1)
77
 
78
- # Decode and display the response
79
- answer = generator_tokenizer.decode(outputs[0], skip_special_tokens=True)
80
- st.write("**Answer:**")
81
- st.write(answer)
 
 
82
 
83
  except Exception as e:
84
  st.error(f"An error occurred: {str(e)}")
 
19
  "Streamlit is an open-source Python library that makes it easy to build beautiful custom web-apps for machine learning and data science.",
20
  "Hugging Face is a company that provides tools and models for natural language processing (NLP).",
21
  "Retrieval-Augmented Generation (RAG) is a method that combines document retrieval with a generative model for question answering.",
22
+ "Letter of Recommendation August 29, 2024 Muthukumaran Azhagesan Senior Software Engineer +8326559594 I am Muthukumaran Azhagesan, a Senior Software Engineer and Lead at Cisco...",
23
  ]
24
 
25
  # Encode the initial documents for similarity comparison
 
66
 
67
  # Step 3: Get the index of the most similar document
68
  top_index = similarity_scores[0].argmax()
69
+
70
+ # Check if top_index is valid
71
+ if top_index < len(documents):
72
+ retrieved_doc = documents[top_index]
73
 
74
+ # Log the retrieved document for debugging
75
+ st.write(f"Retrieved Document: {retrieved_doc}")
76
 
77
+ # Step 4: Use the Generator to Answer the Question
78
+ input_ids = generator_tokenizer.encode(f"question: {query} context: {retrieved_doc}", return_tensors="pt")
79
+ outputs = generator.generate(input_ids, max_length=200, num_return_sequences=1)
80
 
81
+ # Decode and display the response
82
+ answer = generator_tokenizer.decode(outputs[0], skip_special_tokens=True)
83
+ st.write("**Answer:**")
84
+ st.write(answer)
85
+ else:
86
+ st.error("No relevant document found.")
87
 
88
  except Exception as e:
89
  st.error(f"An error occurred: {str(e)}")