Diggz10 committed on
Commit
0fb620e
·
verified ·
1 Parent(s): 05c1957

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -69
app.py CHANGED
@@ -1,14 +1,27 @@
1
  import gradio as gr
2
  import graphviz
3
  import os
4
- from huggingface_hub import InferenceClient
5
  from PIL import Image, ImageDraw, ImageFont
 
6
 
7
- # --- Constants and Configuration ---
8
- # NOW USING a Text-Generation model which is more economical with free credits.
9
- MODEL_ID = "google/flan-t5-large"
 
10
 
11
- # The system prompt needs to be a single, direct instruction for this type of model.
 
 
 
 
 
 
 
 
 
 
 
12
  SYSTEM_PROMPT_TEMPLATE = """Task: Generate a flowchart description in the Graphviz DOT language based on the following text.
13
  Your response MUST be ONLY the Graphviz DOT language source code for a directed graph (digraph).
14
  - The graph should be top-to-bottom (`rankdir=TB`).
@@ -20,67 +33,46 @@ Text: "{user_prompt}"
20
 
21
  DOT Language Code:"""
22
 
23
- # --- Helper Functions ---
24
  def create_placeholder_image(text="Flowchart will be generated here", size=(600, 800), path="placeholder.png"):
25
  # (This function remains unchanged)
26
  try:
27
  img = Image.new('RGB', size, color=(255, 255, 255))
28
  draw = ImageDraw.Draw(img)
29
- try:
30
- font = ImageFont.truetype("DejaVuSans.ttf", 24)
31
- except IOError:
32
- font = ImageFont.load_default()
33
-
34
  bbox = draw.textbbox((0, 0), text, font=font)
35
- text_width = bbox[2] - bbox[0]
36
- text_height = bbox[3] - bbox[1]
37
  position = ((size[0] - text_width) / 2, (size[1] - text_height) / 2)
38
  draw.text(position, text, fill=(200, 200, 200), font=font)
39
  img.save(path)
40
  return path
41
- except Exception:
42
- return None
43
 
44
- # --- Core AI and Rendering Logic ---
45
- def generate_flowchart(prompt: str, hf_token: str):
 
46
  """
47
- Calls the Hugging Face Inference API to generate DOT code and then renders it.
48
- Returns the file path of the generated PNG image.
49
  """
50
- if not hf_token:
51
- return create_placeholder_image("Error: Hugging Face API Token is not set.\nPlease add it to your Space's secrets."), None
52
-
53
  if not prompt:
54
  return create_placeholder_image("Please enter a prompt to generate a flowchart."), None
55
 
56
  try:
57
- # 1. Prepare the full prompt for the text-generation model
58
  full_prompt = SYSTEM_PROMPT_TEMPLATE.format(user_prompt=prompt)
 
59
 
60
- client = InferenceClient(model=MODEL_ID, token=hf_token)
61
-
62
- # --- THIS IS THE KEY CHANGE ---
63
- # Use client.text_generation instead of client.chat_completion
64
- dot_code = client.text_generation(
65
- prompt=full_prompt,
66
- max_new_tokens=1024, # Flan-T5 can generate a lot, let's give it space
67
- temperature=0.7,
68
- do_sample=True,
69
- )
70
- # --- END OF KEY CHANGE ---
71
-
72
- # The result is a direct string, so we just need to strip it.
73
- dot_code = dot_code.strip()
74
-
75
- # Sometimes the model still adds markdown, let's strip it just in case
76
- if dot_code.startswith("```dot"):
77
- dot_code = dot_code[len("```dot"):].strip()
78
- if dot_code.startswith("```"):
79
- dot_code = dot_code[len("```"):].strip()
80
- if dot_code.endswith("```"):
81
- dot_code = dot_code[:-len("```")].strip()
82
-
83
- # 2. Render the DOT code using Graphviz
84
  graph = graphviz.Source(dot_code)
85
  output_path = graph.render(os.path.join("outputs", "flowchart"), format='png', cleanup=True)
86
 
@@ -88,11 +80,12 @@ def generate_flowchart(prompt: str, hf_token: str):
88
 
89
  except Exception as e:
90
  print(f"An error occurred: {e}")
91
- error_message = f"An error occurred.\nThis could be due to an invalid API token,\nan issue with the AI model, or invalid generated DOT code.\n\nDetails: {str(e)}"
92
  return create_placeholder_image(error_message), gr.update(visible=False)
93
 
94
- # --- Gradio UI ---
95
- # (The entire Gradio UI block remains unchanged)
 
96
  css = """
97
  footer {display: none !important}
98
  .gradio-container {background-color: #f8f9fa}
@@ -100,13 +93,9 @@ footer {display: none !important}
100
  """
101
 
102
  with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
103
- hf_token = os.environ.get("HF_TOKEN")
104
  gr.Markdown("# AI Flowchart Generator")
105
  gr.Markdown(
106
- "Our AI Flowchart Generator allows you to create detailed flowcharts instantly. Whether you need a free "
107
- "online AI flowchart generator from text or an intuitive flowchart maker AI, this tool delivers accurate and "
108
- "visually engaging results. Discover how AI can enhance your workflow with the best flow chart generator "
109
- "AI solution available online."
110
  )
111
  with gr.Group():
112
  with gr.Row(equal_height=False):
@@ -121,21 +110,17 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
121
  status_display = gr.Markdown("", elem_id="status_display")
122
  with gr.Column(scale=1):
123
  output_image = gr.Image(
124
- label="Generated Flowchart",
125
- type="filepath",
126
- interactive=False,
127
- value=create_placeholder_image(),
128
- height=600,
129
- show_label=False
130
  )
131
  download_btn = gr.DownloadButton(
132
- "⬇️ Download",
133
- variant="primary",
134
- visible=False,
135
  )
136
- def on_generate_click(prompt):
137
- yield (gr.update(interactive=False), gr.update(visible=False), create_placeholder_image("🧠 Generating with AI... Please wait."), "Generating... this can take up to 30 seconds.")
138
- img_path, download_btn_update = generate_flowchart(prompt, hf_token)
 
 
139
  yield (gr.update(interactive=True), download_btn_update, img_path, "")
140
 
141
  generate_btn.click(
@@ -145,6 +130,5 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
145
  )
146
 
147
  if __name__ == "__main__":
148
- if not os.path.exists("outputs"):
149
- os.makedirs("outputs")
150
  demo.launch()
 
1
  import gradio as gr
2
  import graphviz
3
  import os
4
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
5
  from PIL import Image, ImageDraw, ImageFont
6
+ import torch
7
 
8
# --- 1. MODEL LOADING (LOCALLY INSIDE THE SPACE) ---
# The model runs inside this Space process; no Inference API calls are made.
print("--- Initializing Local Model ---")
MODEL_ID = "google/flan-t5-base"  # small enough to run on a free CPU Space

# Pick the compute device: the first CUDA GPU when present, otherwise CPU.
if torch.cuda.is_available():
    DEVICE = "cuda:0"
else:
    DEVICE = "cpu"
print(f"--- Using device: {DEVICE} ---")

# Download (or reuse the cached) tokenizer and weights from the Hub,
# then move the model onto the selected device.
tokenizer = T5Tokenizer.from_pretrained(MODEL_ID)
model = T5ForConditionalGeneration.from_pretrained(MODEL_ID).to(DEVICE)
print(f"--- Model {MODEL_ID} Initialized Successfully ---")
21
+
22
+
23
+ # --- 2. SETUP ---
24
+ # The prompt template for our instruction-tuned model
25
  SYSTEM_PROMPT_TEMPLATE = """Task: Generate a flowchart description in the Graphviz DOT language based on the following text.
26
  Your response MUST be ONLY the Graphviz DOT language source code for a directed graph (digraph).
27
  - The graph should be top-to-bottom (`rankdir=TB`).
 
33
 
34
  DOT Language Code:"""
35
 
36
# --- Helper function for placeholder images
def create_placeholder_image(text="Flowchart will be generated here", size=(600, 800), path="placeholder.png"):
    """Render *text* centered on a blank white canvas and save it as a PNG.

    Args:
        text: Message drawn in light grey in the middle of the image.
        size: (width, height) of the generated image, in pixels.
        path: Filesystem path the PNG is written to.

    Returns:
        The output ``path`` on success, or ``None`` if drawing or saving
        failed (callers treat ``None`` as "no placeholder available").
    """
    try:
        img = Image.new('RGB', size, color=(255, 255, 255))
        draw = ImageDraw.Draw(img)
        # Prefer a real TrueType font; fall back to Pillow's built-in
        # bitmap font when DejaVuSans is not installed on the host.
        try:
            font = ImageFont.truetype("DejaVuSans.ttf", 24)
        except OSError:  # Pillow raises OSError (IOError is its alias)
            font = ImageFont.load_default()

        # Measure the rendered text so it can be centered on the canvas.
        bbox = draw.textbbox((0, 0), text, font=font)
        text_width, text_height = bbox[2] - bbox[0], bbox[3] - bbox[1]
        position = ((size[0] - text_width) / 2, (size[1] - text_height) / 2)
        draw.text(position, text, fill=(200, 200, 200), font=font)
        img.save(path)
        return path
    except Exception as e:
        # Preserve the None-on-failure contract, but leave a trace in the
        # Space logs instead of failing completely silently.
        print(f"create_placeholder_image failed: {e}")
        return None
 
51
 
52
+
53
+ # --- 3. CORE AI AND RENDERING LOGIC ---
54
+ def generate_flowchart(prompt: str):
55
  """
56
+ Generates a flowchart using the LOCALLY loaded model. No API token is needed.
 
57
  """
 
 
 
58
  if not prompt:
59
  return create_placeholder_image("Please enter a prompt to generate a flowchart."), None
60
 
61
  try:
62
+ # 1. Prepare the full prompt and tokenize it
63
  full_prompt = SYSTEM_PROMPT_TEMPLATE.format(user_prompt=prompt)
64
+ inputs = tokenizer(full_prompt, return_tensors="pt").input_ids.to(DEVICE)
65
 
66
+ # 2. Generate the output from the local model
67
+ outputs = model.generate(inputs, max_new_tokens=1024, temperature=0.8, do_sample=True)
68
+ dot_code = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
69
+
70
+ # 3. Clean up the generated code
71
+ if dot_code.startswith("```dot"): dot_code = dot_code[len("```dot"):].strip()
72
+ if dot_code.startswith("```"): dot_code = dot_code[len("```"):].strip()
73
+ if dot_code.endswith("```"): dot_code = dot_code[:-len("```")].strip()
74
+
75
+ # 4. Render the DOT code using Graphviz
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76
  graph = graphviz.Source(dot_code)
77
  output_path = graph.render(os.path.join("outputs", "flowchart"), format='png', cleanup=True)
78
 
 
80
 
81
  except Exception as e:
82
  print(f"An error occurred: {e}")
83
+ error_message = f"An error occurred during generation.\nThe AI might have produced invalid flowchart code, or another issue occurred.\n\nDetails: {str(e)}"
84
  return create_placeholder_image(error_message), gr.update(visible=False)
85
 
86
+
87
+ # --- 4. GRADIO UI ---
88
+ # (The Gradio UI block remains mostly unchanged, just removing the token logic)
89
  css = """
90
  footer {display: none !important}
91
  .gradio-container {background-color: #f8f9fa}
 
93
  """
94
 
95
  with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
 
96
  gr.Markdown("# AI Flowchart Generator")
97
  gr.Markdown(
98
+ "Our AI Flowchart Generator allows you to create detailed flowcharts instantly. This version runs a self-contained model directly in this Space."
 
 
 
99
  )
100
  with gr.Group():
101
  with gr.Row(equal_height=False):
 
110
  status_display = gr.Markdown("", elem_id="status_display")
111
  with gr.Column(scale=1):
112
  output_image = gr.Image(
113
+ label="Generated Flowchart", type="filepath", interactive=False,
114
+ value=create_placeholder_image(), height=600, show_label=False
 
 
 
 
115
  )
116
  download_btn = gr.DownloadButton(
117
+ "⬇️ Download", variant="primary", visible=False,
 
 
118
  )
119
+
120
+ def on_generate_click(prompt, progress=gr.Progress(track_tqdm=True)):
121
+ yield (gr.update(interactive=False), gr.update(visible=False), create_placeholder_image("🧠 Thinking... Please wait."), "Generating...")
122
+ # Note: The 'hf_token' is no longer passed here
123
+ img_path, download_btn_update = generate_flowchart(prompt)
124
  yield (gr.update(interactive=True), download_btn_update, img_path, "")
125
 
126
  generate_btn.click(
 
130
  )
131
 
132
if __name__ == "__main__":
    # Ensure the Graphviz render output directory exists before serving.
    # exist_ok avoids the check-then-create race of os.path.exists(),
    # and replaces the non-idiomatic single-line if suite.
    os.makedirs("outputs", exist_ok=True)
    demo.launch()