kdevoe committed
Commit 9a5e669 · verified · 1 parent: aa34841

Adding memory usage widget

Files changed (1):
  1. app.py +47 -15
app.py CHANGED
@@ -1,5 +1,7 @@
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+ import psutil  # For tracking CPU memory usage
+ import torch  # For tracking GPU memory usage

# Load the shared tokenizer (can be reused across all models)
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
@@ -28,6 +30,20 @@ def load_model(model_name):

    return current_model

+ def get_memory_usage():
+     """Return current CPU and GPU memory usage as a formatted string."""
+     memory_info = psutil.virtual_memory()
+     cpu_memory = f"CPU Memory: {memory_info.used / (1024**3):.2f} GB / {memory_info.total / (1024**3):.2f} GB"
+
+     if torch.cuda.is_available():
+         gpu_memory = torch.cuda.memory_allocated() / (1024**3)
+         gpu_total = torch.cuda.get_device_properties(0).total_memory / (1024**3)
+         gpu_memory_info = f" | GPU Memory: {gpu_memory:.2f} GB / {gpu_total:.2f} GB"
+     else:
+         gpu_memory_info = " | GPU Memory: Not available"
+
+     return cpu_memory + gpu_memory_info
+
def respond(
    message,
    history: list[tuple[str, str]],
@@ -61,20 +77,36 @@ def respond(
    response = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
    yield response

- # Define the Gradio interface
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Dropdown(
-             choices=["Flan-T5-small", "Flan-T5-base", "Flan-T5-large", "Flan-T5-XL"],
-             value="Flan-T5-base",  # Default selection
-             label="Model"
-         ),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
-     ],
- )
+ # Define the Gradio interface with memory usage widget
+ def update_memory_widget():
+     """Update the memory usage widget dynamically."""
+     return get_memory_usage()
+
+ with gr.Blocks() as interface:
+     gr.Markdown("### Model Selection and Memory Usage")
+
+     # Render the main chat interface
+     demo = gr.ChatInterface(
+         respond,
+         additional_inputs=[
+             gr.Dropdown(
+                 choices=["Flan-T5-small", "Flan-T5-base", "Flan-T5-large", "Flan-T5-XL"],
+                 value="Flan-T5-base",  # Default selection
+                 label="Model"
+             ),
+             gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+             gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+             gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+         ],
+     )
+     demo.render()
+
+     # Add the memory usage widget
+     memory_widget = gr.Textbox(label="Memory Usage", interactive=False, value=get_memory_usage())
+     gr.Row([memory_widget])
+
+     # Set up a timer to update memory usage every second
+     interface.load(update_memory_widget, None, memory_widget, every=1)

if __name__ == "__main__":
-     demo.launch()
+     interface.launch()
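
A note on the live update: the `every=1` argument to `interface.load(...)` is the Gradio 3.x/4.x polling pattern, re-running `update_memory_widget` roughly once per second. Gradio 5 dropped the `every=` keyword in favor of `gr.Timer`. Below is a minimal sketch of the same widget under that newer API; it assumes Gradio 5+ and inlines a CPU-only stand-in for the commit's `get_memory_usage` so the snippet runs on its own:

import gradio as gr
import psutil

def get_memory_usage() -> str:
    # CPU-only stand-in for the helper added in this commit
    vm = psutil.virtual_memory()
    return f"CPU Memory: {vm.used / (1024**3):.2f} GB / {vm.total / (1024**3):.2f} GB"

with gr.Blocks() as demo:
    memory_widget = gr.Textbox(label="Memory Usage", interactive=False, value=get_memory_usage())
    timer = gr.Timer(1)  # fires once per second
    timer.tick(get_memory_usage, outputs=memory_widget)  # refresh the textbox on each tick

if __name__ == "__main__":
    demo.launch()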
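
One caveat on the GPU figure: `torch.cuda.memory_allocated()` counts only memory held by live tensors in PyTorch's caching allocator, so it usually reads lower than what `nvidia-smi` reports for the process. `torch.cuda.memory_reserved()` also includes the allocator's cached blocks and tends to be the closer proxy; a quick comparison:

import torch

if torch.cuda.is_available():
    allocated = torch.cuda.memory_allocated() / (1024**3)  # live tensors only
    reserved = torch.cuda.memory_reserved() / (1024**3)    # tensors plus allocator cache
    print(f"GPU Memory: {allocated:.2f} GB allocated, {reserved:.2f} GB reserved")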
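
Finally, `gr.Row([memory_widget])` may be a no-op or an error depending on the Gradio version: layout containers in the Blocks API are populated via a `with` block rather than a component list, and the textbox is already rendered by its constructor on the line above, so the call is likely redundant. If a dedicated row is wanted, the idiomatic form would be roughly:

import gradio as gr

with gr.Blocks() as interface:
    with gr.Row():  # place the widget in its own row
        memory_widget = gr.Textbox(label="Memory Usage", interactive=False)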