namantjeaswi commited on
Commit
b325ed3
·
1 Parent(s): 3605144

Fixed Streamlit components: replaced the blocking while-loop input with a Submit-button-driven flow

Browse files
Files changed (1) hide show
  1. app.py +17 -17
app.py CHANGED
@@ -10,8 +10,8 @@ from huggingface_hub import login
10
  #load_dotenv()
11
 
12
  #HF_TOKEN = os.environ.get("HF_API_TOKEN")
13
- HF_TOKEN = st.secrets["HF_API_TOKEN"]
14
- login(token=HF_TOKEN)
15
 
16
  # Setup logging
17
  logging.basicConfig(stream=sys.stdout, level=logging.INFO)
@@ -30,18 +30,18 @@ model = AutoModelForCausalLM.from_pretrained(model_id)
30
  # Create text generation pipeline
31
  pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
32
 
33
- # Continuously prompt the user for questions
34
- while True:
35
- # User input and chat interaction
36
- prompt = st.text_input("Please ask your question here (or type 'exit' to quit):")
37
- if prompt.lower() == 'exit':
38
- break
39
-
40
- # Display user message in chat message container
41
- st.markdown("<b style='color:Red;'>User:</b> {}".format(prompt), unsafe_allow_html=True)
42
-
43
- # Generate response
44
- result = pipe(prompt, max_length=50, truncation=True)
45
-
46
- # Display bot response in chat message container
47
- st.markdown("<b style='color:Green;'>Bot:</b> {}".format(result[0]['generated_text']), unsafe_allow_html=True)
 
10
  #load_dotenv()
11
 
12
  #HF_TOKEN = os.environ.get("HF_API_TOKEN")
13
+ #HF_TOKEN = st.secrets["HF_API_TOKEN"]
14
+ #login(token=HF_TOKEN)
15
 
16
  # Setup logging
17
  logging.basicConfig(stream=sys.stdout, level=logging.INFO)
 
# Create the text-generation pipeline once, at script (re)run time, from the
# model/tokenizer loaded above.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

prompt = st.text_input("Please ask your question here:")

# Streamlit reruns the whole script on every interaction, so generation only
# happens on the rerun triggered by pressing Submit — no while-loop needed.
if st.button("Submit"):
    if prompt.strip():
        # Echo the user's message in the chat area.
        st.markdown("<b style='color:Red;'>User:</b> {}".format(prompt), unsafe_allow_html=True)

        # Generate the model's reply.
        # NOTE(review): no max_length/max_new_tokens cap is passed, so output
        # length falls back to the model's default — confirm this is intended.
        result = pipe(prompt)

        # Display the generated text as the bot's reply.
        st.markdown("<b style='color:Green;'>Bot:</b> {}".format(result[0]['generated_text']), unsafe_allow_html=True)
    else:
        # Give explicit feedback instead of silently ignoring an empty prompt.
        st.warning("Please enter a question before submitting.")