Spaces:
Sleeping
Sleeping
Change the Space to become a service accepting input
Browse files
app.py
CHANGED
|
@@ -1,39 +1,7 @@
|
|
| 1 |
-
import streamlit as st
from transformers import pipeline
import docx

# Text-to-text model (default checkpoint) used to answer questions
# against the hard-coded context below.
pipe = pipeline("text2text-generation")

st.title("Adrega AI Help")

# Minimal in-line knowledge base; a docx/dataset-backed context can be
# swapped in here later without touching the Q&A flow below.
context = "Adrega is a very cool company, that implements AI. Rett fra Rio is a company that specializes in body waxing and is owned by Cintia" #dataset[0]["text"]

user_input = st.text_input('Ask me a question')
if st.button("Submit"):
    if user_input:
        # Fix: the previous version sent a hard-coded "What is 42?" prompt
        # and ignored both user_input and context. Build the real prompt in
        # the "question: ... context: ..." form the text2text model expects.
        answer = pipe(f"question: {user_input} context: {context}")
        st.write(f"Adrega AI: {answer[0]['generated_text']}")
    else:
        st.write("Please enter a question.")
|
| 39 |
-
|
|
|
|
|
|
|
| 1 |
from transformers import pipeline
|
|
|
|
|
|
|
| 2 |
|
| 3 |
+
def analyze_project(project_data, question):
    """Generate a free-text analysis of *project_data* answering *question*.

    Builds a prompt from the project data and question, feeds it to a GPT-2
    text-generation pipeline, and returns the generated text (which, for
    text-generation pipelines, includes the prompt itself).

    Args:
        project_data: Any object whose str() form describes the project.
        question: The question to ask about the project.

    Returns:
        The generated text from the first (and only) returned sequence.
    """
    # Cache the pipeline on the function object: the original rebuilt it on
    # every call, re-loading the GPT-2 weights each time.
    nlp = getattr(analyze_project, "_nlp", None)
    if nlp is None:
        nlp = pipeline("text-generation", model="gpt2")
        analyze_project._nlp = nlp

    prompt = f"Analyze this project: {project_data}\n\nQuestion: {question}"
    # max_new_tokens bounds only the generated continuation. The original
    # max_length=50 counted prompt tokens too, so any prompt near/over 50
    # tokens was truncated (with a warning) before generation.
    output = nlp(prompt, max_new_tokens=50, num_return_sequences=1)
    return output[0]['generated_text']
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|