andreska committed on
Commit
1e56a53
·
verified ·
1 Parent(s): 8a0b4e9

Updated to use a different model, specifically designed for text answers

Browse files
Files changed (1) hide show
  1. app.py +11 -3
app.py CHANGED
@@ -11,7 +11,11 @@ def read_docx(file_path):
11
  text.append(paragraph.text)
12
  return "\n".join(text)
13
 
14
- pipe = pipeline("question-answering")
 
 
 
 
15
  #pipe = pipeline("text-generation")
16
 
17
  st.title("Adrega AI Help")
@@ -26,8 +30,12 @@ if st.button("Submit"):
26
  #text_inputs = f"Context: {context}\nQuestion: {user_input}\nAnswer:"
27
  #result = pipe(text_inputs, max_length=200, num_return_sequences=1)[0]['generated_text']
28
  #answer = result.split("Answer:")[1].strip()
29
-
30
- answer = pipe(question=user_input, context=context)
 
 
 
 
31
  st.write(f"Adrega AI: {answer}")
32
  else:
33
  st.write("Please enter a question.")
 
11
  text.append(paragraph.text)
12
  return "\n".join(text)
13
 
14
+ # Load model and processor
15
+ processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False)
16
+ model = UdopForConditionalGeneration.from_pretrained("microsoft/udop-large")
17
+
18
+ #pipe = pipeline("question-answering")
19
  #pipe = pipeline("text-generation")
20
 
21
  st.title("Adrega AI Help")
 
30
  #text_inputs = f"Context: {context}\nQuestion: {user_input}\nAnswer:"
31
  #result = pipe(text_inputs, max_length=200, num_return_sequences=1)[0]['generated_text']
32
  #answer = result.split("Answer:")[1].strip()
33
+ encoding = processor(context, user_input, return_tensors="pt")
34
+
35
+ #answer = pipe(question=user_input, context=context)
36
+ predicted_ids = model.generate(**encoding)
37
+ answer = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
38
+
39
  st.write(f"Adrega AI: {answer}")
40
  else:
41
  st.write("Please enter a question.")