GABASSI committed on
Commit
78ec58b
·
verified ·
1 Parent(s): 50c4096

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -31
app.py CHANGED
@@ -1,33 +1,44 @@
1
  import os
2
  import gradio as gr
3
- from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
 
 
4
  from llama_parse import LlamaParse
 
 
 
5
 
6
  # --- CONFIGURAÇÃO VISUAL ---
7
  theme_css = """
8
  body { background-color: #0b0c10; color: #c5c6c7; font-family: 'Roboto', sans-serif; }
9
- .header-container {
10
- display: flex; align-items: center; justify-content: center; gap: 15px;
11
- padding: 20px; border-bottom: 1px solid #1f2833; margin-bottom: 20px;
12
- }
13
  .logo-img { height: 60px; width: auto; filter: drop-shadow(0 0 10px rgba(102, 252, 241, 0.5)); }
14
  .brand-name { font-size: 32px; font-weight: bold; color: #66fcf1; letter-spacing: 4px; font-family: 'Impact', sans-serif; }
15
  .chatbot-area { height: 500px !important; background-color: #1f2833; border: 1px solid #45a29e; border-radius: 10px; }
16
  """
17
 
18
- # --- VARIÁVEIS GLOBAIS ---
19
  global_query_engine = None
20
 
21
- # --- FUNÇÃO 1: PROCESSAR ---
22
  def processar_pdf(files, api_key_llama, api_key_openai):
23
  global global_query_engine
24
- if not files:
25
- return "⚠️ Por favor, envie um arquivo PDF."
26
 
 
 
 
 
 
 
 
 
 
27
  os.environ["LLAMA_CLOUD_API_KEY"] = api_key_llama
28
  os.environ["OPENAI_API_KEY"] = api_key_openai
29
 
30
  try:
 
 
 
 
31
  parser = LlamaParse(result_type="markdown", language="pt")
32
  file_extractor = {".pdf": parser}
33
  filepaths = [f.name if hasattr(f, 'name') else f for f in files]
@@ -36,52 +47,40 @@ def processar_pdf(files, api_key_llama, api_key_openai):
36
  index = VectorStoreIndex.from_documents(documents)
37
  global_query_engine = index.as_query_engine()
38
 
39
- return "✅ Sistema COGNILINE Online."
 
40
  except Exception as e:
41
- return f"❌ Erro: {str(e)}"
42
 
43
- # --- FUNÇÃO 2: RESPONDER ---
44
  def responder(message, history):
45
  global global_query_engine
46
- if global_query_engine is None:
47
- return "⚠️ O sistema está offline. Faça o upload do manual."
48
  try:
49
  response = global_query_engine.query(message)
50
  return str(response)
51
  except Exception as e:
52
  return f"Erro: {str(e)}"
53
 
54
- # --- INTERFACE (PADRÃO NOVO) ---
55
  with gr.Blocks() as demo:
56
-
57
- # Cabeçalho
58
  with gr.Row(elem_classes="header-container"):
59
  if os.path.exists("logo.png"):
60
  gr.Image("logo.png", elem_classes="logo-img", show_label=False, show_download_button=False)
61
  gr.Markdown("<div class='brand-name'>COGNILINE</div>")
62
 
63
- # Layout
64
  with gr.Row():
65
  with gr.Column(scale=1, min_width=300):
66
- gr.Markdown("### ⚙️ Configuração")
67
  txt_llama = gr.Textbox(label="LlamaCloud Key", type="password")
68
- txt_openai = gr.Textbox(label="OpenAI Key", type="password")
69
- file_up = gr.File(label="Manual PDF", file_count="multiple", file_types=[".pdf"])
70
- btn_start = gr.Button("INICIAR", variant="primary")
 
71
  lbl_status = gr.Textbox(label="Status", interactive=False)
72
 
73
  with gr.Column(scale=3):
74
- # REMOVIDOS OS ARGUMENTOS QUE CAUSAVAM ERRO (retry_btn, clear_btn, etc)
75
- # Nas versões novas, o Gradio simplificou a ChatInterface
76
- chat = gr.ChatInterface(
77
- fn=responder,
78
- chatbot=gr.Chatbot(elem_classes="chatbot-area"),
79
- textbox=gr.Textbox(placeholder="Digite sua pergunta...", scale=7)
80
- )
81
 
82
  btn_start.click(processar_pdf, inputs=[file_up, txt_llama, txt_openai], outputs=lbl_status)
83
 
84
- # --- LANÇAMENTO (AQUI É O SEGREDO DO GRADIO 5/6) ---
85
  if __name__ == "__main__":
86
- # Movemos o CSS e o TEMA para cá conforme o aviso do erro
87
  demo.launch(css=theme_css, theme=gr.themes.Soft())
 
1
  import os
2
  import gradio as gr
3
+ from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
4
+ from llama_index.llms.openai import OpenAI
5
+ from llama_index.embeddings.openai import OpenAIEmbedding
6
  from llama_parse import LlamaParse
7
+ import nest_asyncio
8
+
9
+ nest_asyncio.apply()
10
 
# --- VISUAL CONFIGURATION ---
# Raw CSS handed to demo.launch(css=theme_css, ...) at startup; styles the
# dark COGNILINE look (header banner, glowing logo, chat panel).
theme_css = """
body { background-color: #0b0c10; color: #c5c6c7; font-family: 'Roboto', sans-serif; }
.header-container { display: flex; align-items: center; justify-content: center; gap: 15px; padding: 20px; border-bottom: 1px solid #1f2833; margin-bottom: 20px; }
.logo-img { height: 60px; width: auto; filter: drop-shadow(0 0 10px rgba(102, 252, 241, 0.5)); }
.brand-name { font-size: 32px; font-weight: bold; color: #66fcf1; letter-spacing: 4px; font-family: 'Impact', sans-serif; }
.chatbot-area { height: 500px !important; background-color: #1f2833; border: 1px solid #45a29e; border-radius: 10px; }
"""

# Module-level handle to the active LlamaIndex query engine.
# Stays None until processar_pdf has indexed at least one document;
# responder() uses that None state to report the system as offline.
global_query_engine = None
21
 
 
22
  def processar_pdf(files, api_key_llama, api_key_openai):
23
  global global_query_engine
 
 
24
 
25
+ # Limpa espaços em branco acidentais
26
+ api_key_openai = api_key_openai.strip()
27
+ api_key_llama = api_key_llama.strip()
28
+
29
+ if not files: return "⚠️ Envie um PDF."
30
+ if not api_key_openai.startswith("sk-"):
31
+ return f"⚠️ Erro: A chave digitada não parece uma chave OpenAI válida. Ela começa com: {api_key_openai[:7]}..."
32
+
33
+ # FORÇA AS CHAVES NO AMBIENTE
34
  os.environ["LLAMA_CLOUD_API_KEY"] = api_key_llama
35
  os.environ["OPENAI_API_KEY"] = api_key_openai
36
 
37
  try:
38
+ # CONFIGURAÇÃO DIRETA (Ignora variáveis de ambiente globais do Hugging Face)
39
+ Settings.llm = OpenAI(model="gpt-4o", api_key=api_key_openai)
40
+ Settings.embed_model = OpenAIEmbedding(api_key=api_key_openai)
41
+
42
  parser = LlamaParse(result_type="markdown", language="pt")
43
  file_extractor = {".pdf": parser}
44
  filepaths = [f.name if hasattr(f, 'name') else f for f in files]
 
47
  index = VectorStoreIndex.from_documents(documents)
48
  global_query_engine = index.as_query_engine()
49
 
50
+ return f"✅ CONECTADO! Chave detectada: {api_key_openai[:7]}***"
51
+
52
  except Exception as e:
53
+ return f"❌ Erro Técnico: {str(e)}"
54
 
 
def responder(message, history):
    """Answer one chat turn by querying the indexed manual.

    Args:
        message: The user's question as plain text.
        history: Chat history passed in by gr.ChatInterface (not used here).

    Returns:
        The query engine's answer rendered as a string; a warning string when
        no index has been built yet; or the exception text if the query fails.
    """
    global global_query_engine

    # Guard clause: nothing to query until processar_pdf has run successfully.
    if global_query_engine is None:
        return "⚠️ Sistema Offline. Configure acima."

    try:
        return str(global_query_engine.query(message))
    except Exception as e:
        # Surface the failure in the chat instead of crashing the UI.
        return f"Erro: {str(e)}"
63
 
 
# --- UI LAYOUT ---
# One header row (optional logo + brand name) above a two-column body:
# left column holds the configuration panel, right column the chat.
with gr.Blocks() as demo:
    with gr.Row(elem_classes="header-container"):
        # Render the logo only when the asset was shipped with the app.
        if os.path.exists("logo.png"):
            gr.Image("logo.png", elem_classes="logo-img", show_label=False, show_download_button=False)
        gr.Markdown("<div class='brand-name'>COGNILINE</div>")

    with gr.Row():
        with gr.Column(scale=1, min_width=300):
            gr.Markdown("### ⚙️ Painel")
            txt_llama = gr.Textbox(label="LlamaCloud Key", type="password")
            txt_openai = gr.Textbox(label="OpenAI Key (sk-...)", type="password")
            file_up = gr.File(label="PDF", file_count="multiple", file_types=[".pdf"])
            btn_start = gr.Button("ATIVAR", variant="primary")
            lbl_status = gr.Textbox(label="Status", interactive=False)

        with gr.Column(scale=3):
            # Chat panel; responder() handles each message.
            gr.ChatInterface(fn=responder, chatbot=gr.Chatbot(elem_classes="chatbot-area"))

    # Wire the activation button to the indexing routine; its return string
    # is shown in the status box.
    btn_start.click(processar_pdf, inputs=[file_up, txt_llama, txt_openai], outputs=lbl_status)
 
 
# Entry point. CSS and theme go to launch() rather than Blocks() — this is
# where newer Gradio releases expect them.
if __name__ == "__main__":
    demo.launch(css=theme_css, theme=gr.themes.Soft())