kinely committed on
Commit
51d8364
·
verified ·
1 Parent(s): b26ff56

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -11
app.py CHANGED
@@ -36,6 +36,12 @@ titles = [
36
  st.write("Fetching Wikipedia articles...")
37
  corpus = fetch_wikipedia_articles(titles)
38
 
 
 
 
 
 
 
39
  # Generate embeddings for the corpus
40
  st.write("Generating embeddings...")
41
  embeddings = embedder.encode(corpus, convert_to_tensor=True)
@@ -68,19 +74,23 @@ if st.button("Generate Humanized Text"):
68
 
69
  context = retrieve_documents(top_k_indices)
70
 
71
- # Concatenate user input and context for model input
72
- input_text = f"{user_input} {context}"
73
-
74
- # Tokenize input and handle truncation
75
- inputs = tokenizer(input_text, return_tensors="pt", max_length=1024, truncation=True)
 
 
 
 
76
 
77
- # Generate output
78
- outputs = model.generate(inputs.input_ids, max_length=2000, num_return_sequences=1)
79
 
80
- # Decode the generated text
81
- generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
82
 
83
- # Display the generated text
84
- st.write(generated_text)
85
  else:
86
  st.write("Please enter a valid query.")
 
36
  st.write("Fetching Wikipedia articles...")
37
  corpus = fetch_wikipedia_articles(titles)
38
 
39
+ # Check if corpus is populated
40
+ if not corpus:
41
+ st.write("No articles found. Please check the titles.")
42
+ else:
43
+ st.write("Articles fetched successfully.")
44
+
45
  # Generate embeddings for the corpus
46
  st.write("Generating embeddings...")
47
  embeddings = embedder.encode(corpus, convert_to_tensor=True)
 
74
 
75
  context = retrieve_documents(top_k_indices)
76
 
77
+ # Check if context is empty
78
+ if not context:
79
+ st.write("No relevant context found. Please try a different query.")
80
+ else:
81
+ # Concatenate user input and context for model input
82
+ input_text = f"{user_input} {context}"
83
+
84
+ # Tokenize input and handle truncation
85
+ inputs = tokenizer(input_text, return_tensors="pt", max_length=1024, truncation=True)
86
 
87
+ # Generate output
88
+ outputs = model.generate(inputs.input_ids, max_length=2000, num_return_sequences=1)
89
 
90
+ # Decode the generated text
91
+ generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
92
 
93
+ # Display the generated text
94
+ st.write(generated_text)
95
  else:
96
  st.write("Please enter a valid query.")