Update app.py
app.py CHANGED
@@ -1,18 +1,44 @@
 import os
 import gradio as gr
 import pandas as pd
-from
+from transformers import GPT2Tokenizer, GPT2LMHeadModel
+
+# Loading the GPT-2 model and tokenizer
+tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+model = GPT2LMHeadModel.from_pretrained('gpt2')
+
 processing = False
 
 data = pd.read_csv('anomalies.csv')
 
-def response(
+def response(question, history):
     global processing
     processing = True
-
-
-
-
+
+    if tokenizer.pad_token is None:
+        tokenizer.pad_token = tokenizer.eos_token
+
+    prompt = f"Considerando os dados: {data.to_string(index=False)}. Pergunta: {question} Resposta:"
+    inputs = tokenizer(prompt, return_tensors='pt', padding='max_length', truncation=True, max_length=512)
+    attention_mask = inputs['attention_mask']
+    input_ids = inputs['input_ids']
+
+    generated_ids = model.generate(
+        input_ids,
+        attention_mask=attention_mask,
+        max_length=len(input_ids[0]) + 100,  # raise the generation limit
+        temperature=0.65,  # adjust creativity
+        top_p=0.9,  # use nucleus sampling
+        no_repeat_ngram_size=2  # avoid unnecessary repetition
+    )
+
+    generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+    # Keep only the answer that follows "Resposta:"
+    response_part = generated_text.split("Resposta:")[1] if "Resposta:" in generated_text else "Resposta não encontrada."
+    # Additional cleanup to drop any unwanted text after the answer
+    final_response = response_part.split(".")[0] + "."  # assumes the answer ends at the first sentence
+
+    history.append((question, final_response))
     processing = False
     return "", history
 
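For context, and not part of this commit: a minimal sketch of how a response(question, history) callback like this one is typically connected to the Gradio UI elsewhere in app.py. The component names and layout below are assumptions; the diff only imports gradio and defines the callback.

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()                   # chat history as (question, answer) pairs
        question = gr.Textbox(label="Pergunta")  # user input box
        # response() returns ("", history), clearing the textbox and refreshing the chat
        question.submit(response, [question, chatbot], [question, chatbot])

    demo.launch()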