pratikshahp committed on
Commit
99677dd
·
verified ·
1 Parent(s): 354ef29

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -24
app.py CHANGED
@@ -1,24 +1,14 @@
1
- import streamlit as st
2
- from llama_index.llms.huggingface import HuggingFaceLLM
3
- from transformers import AutoTokenizer
4
-
5
- # Load Hugging Face model (replace with your desired model)
6
- model_name = "openai/ada-002" # Choose a suitable model from Hugging Face Hub
7
- tokenizer = AutoTokenizer.from_pretrained(model_name)
8
- model = HuggingFaceLLM(model_name, tokenizer=tokenizer)
9
-
10
- # Streamlit app
11
- st.title("Text Analysis with LlamaIndex and Hugging Face")
12
- user_input = st.text_area("Enter your text here:")
13
-
14
- if user_input:
15
- # Prompt for the model (modify as needed)
16
- prompt = f"Analyze the following text: {user_input}"
17
-
18
- # Generate response using LlamaIndex with the Hugging Face model
19
- response = model.generate_text(prompt, max_length=150)
20
-
21
- # Display the response
22
- st.write("Model Response:")
23
- st.write(response)
24
-
 
"""Q&A assistant app: loads a local text document and prepares a LlamaIndex
HuggingFace-backed query setup (legacy llama_index <0.10 import layout)."""

from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.llms import HuggingFaceLLM
from llama_index.prompts.prompts import SimpleInputPrompt
import os
from huggingface_hub import login

# Authenticate with the Hugging Face Hub before anything that may download
# a gated model. The original referenced an undefined name `HF_TOKEN`
# (NameError at runtime); read it from the environment instead, which is
# the standard pattern for Spaces secrets.
login(token=os.environ["HF_TOKEN"])

# SimpleDirectoryReader's first positional argument is a *directory*
# (`input_dir`); a single file must be passed via `input_files`.
# NOTE(review): the path is spelled "unioon" in the original — confirm the
# actual filename on disk before correcting the spelling.
documents = SimpleDirectoryReader(input_files=["/state-of-the-unioon.txt"]).load_data()

# System instruction injected into the LLM's context for every query.
system_prompt = """
You are a Q&A assistant. Your goal is to answer questions as
accurately as possible based on the instructions and context provided.
"""

## Default format supportable by LLama2
query_wrapper_prompt = SimpleInputPrompt("<|USER|>{query_str}<|ASSISTANT|>")