Rifqidits committed on
Commit
43b346b
·
1 Parent(s): 795555f

Remove context, change questions to prompt, and add upload select box

Browse files
Files changed (1) hide show
  1. app.py +36 -24
app.py CHANGED
@@ -14,8 +14,9 @@ LICENSE = """
14
  # This is a dummy generation function
15
  import torch
16
  import gradio as gr
 
17
  from transformers import AutoTokenizer, AutoModelForCausalLM
18
- from peft import PeftModel
19
 
20
  # === [1] Model and Tokenizer Loading ===
21
  base_model_id = "NousResearch/Nous-Hermes-2-Mistral-7B-DPO"
@@ -32,22 +33,36 @@ model.eval()
32
  # Load tokenizer
33
  tokenizer = AutoTokenizer.from_pretrained(lora_path)
34
 
35
- # === [2] Prompt Formatting Function ===
36
- def create_prompt(table, context, question):
37
- return f"""You are a financial assistant. Given the table and context, answer the question.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
  Table:
39
- {table}
40
- Context:
41
- {context}
42
- Question:
43
- {question}
44
  Answer:"""
45
 
46
- # === [3] Inference Function ===
47
- def answer_question(table, context, question):
48
- prompt = create_prompt(table, context, question)
49
- inputs = tokenizer(prompt, return_tensors="pt").to(device) # <-- use .to(device)
50
-
51
  with torch.no_grad():
52
  outputs = model.generate(
53
  **inputs,
@@ -56,24 +71,21 @@ def answer_question(table, context, question):
56
  eos_token_id=tokenizer.eos_token_id
57
  )
58
 
59
- # --- Decode and clean up ---
60
  output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
61
  answer = output_text.split("Answer:")[-1].strip()
62
  return answer
63
 
64
- # === [4] Gradio UI Layout ===
65
- with gr.Blocks(title="TAT-LLM Table & Text QA") as demo:
66
- gr.Markdown("## TAT-LLM: Table-and-Text Question Answering\nUpload a table (Markdown format), provide context, and ask your question.")
67
 
68
- with gr.Row():
69
- table_input = gr.Textbox(label="Table (Markdown)", lines=10, placeholder="| Quarter | Revenue |\n|--------|---------|\n| Q1 | 100 | ...")
70
- context_input = gr.Textbox(label="Context", lines=10, placeholder="PT ABC mengalami peningkatan pendapatan dari Q1 ke Q4.")
71
- question_input = gr.Textbox(label="Question", lines=2, placeholder="Berapa persentase kenaikan dari Q1 ke Q4?")
72
  output_box = gr.Textbox(label="Answer", lines=5)
73
 
74
  submit_btn = gr.Button("Generate Answer")
75
- submit_btn.click(fn=answer_question, inputs=[table_input, context_input, question_input], outputs=output_box)
76
 
77
- # === [5] Launch ===
78
  if __name__ == "__main__":
79
  demo.queue().launch()
 
14
  # This is a dummy generation function
15
  import torch
16
  import gradio as gr
17
+ import pandas as pd
18
  from transformers import AutoTokenizer, AutoModelForCausalLM
19
+ from peft import PeftModel
20
 
21
  # === [1] Model and Tokenizer Loading ===
22
  base_model_id = "NousResearch/Nous-Hermes-2-Mistral-7B-DPO"
 
33
  # Load tokenizer
34
  tokenizer = AutoTokenizer.from_pretrained(lora_path)
35
 
36
+ # Prompt formatting function
37
+
38
+ # def create_prompt(table, context, question):
39
+ # return f"""You are a financial assistant. Given the table and context, answer the question.
40
+ # Table:
41
+ # {table}
42
+ # Context:
43
+ # {context}
44
+ # Question:
45
+ # {question}
46
+ # Answer:"""
47
+
48
+ def create_prompt(table_file, prompt_text):
49
+ df = pd.read_csv(table_file.name)
50
+ markdown_table = df.to_markdown(index=False)
51
+ return f"""You are a financial assistant. Given the table and prompt, answer accordingly.
52
+
53
  Table:
54
+ {markdown_table}
55
+
56
+ Prompt:
57
+ {prompt_text}
58
+
59
  Answer:"""
60
 
61
+ # Inference function
62
+ def generate_answer(table_file, prompt_text):
63
+ full_prompt = create_prompt(table_file, prompt_text)
64
+ inputs = tokenizer(full_prompt, return_tensors="pt").to(device)
65
+
66
  with torch.no_grad():
67
  outputs = model.generate(
68
  **inputs,
 
71
  eos_token_id=tokenizer.eos_token_id
72
  )
73
 
 
74
  output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
75
  answer = output_text.split("Answer:")[-1].strip()
76
  return answer
77
 
78
+ # Gradio UI layout
79
+ with gr.Blocks(title="TAT-LLM Table & Prompt QA") as demo:
80
+ gr.Markdown("## TAT-LLM: Table & Prompt QA\nUpload your table (CSV format) and enter your prompt below.")
81
 
82
+ table_input = gr.File(label="Upload Table (.csv only)", file_types=[".csv"])
83
+ prompt_input = gr.Textbox(label="Prompt", lines=4, placeholder="Tuliskan pertanyaanmu di sini, termasuk konteks jika perlu.")
 
 
84
  output_box = gr.Textbox(label="Answer", lines=5)
85
 
86
  submit_btn = gr.Button("Generate Answer")
87
+ submit_btn.click(fn=generate_answer, inputs=[table_input, prompt_input], outputs=output_box)
88
 
89
+ # Launch
90
  if __name__ == "__main__":
91
  demo.queue().launch()