sam133 committed on
Commit
f0febc6
·
1 Parent(s): 2fbee2e

Finalize the rest of the files

Browse files
Files changed (3) hide show
  1. app.py +1 -2
  2. llm_interface_enhanced.py +21 -16
  3. requirements.txt +3 -3
app.py CHANGED
@@ -372,8 +372,7 @@ def create_agent2robot_interface():
372
 
373
  with gr.Row():
374
  current_design_specs_output = gr.JSON(
375
- label="⚙️ Current Design Specs Being Tested",
376
- interactive=False
377
  )
378
 
379
  progress_bar_output = gr.Slider(
 
372
 
373
  with gr.Row():
374
  current_design_specs_output = gr.JSON(
375
+ label="⚙️ Current Design Specs Being Tested"
 
376
  )
377
 
378
  progress_bar_output = gr.Slider(
llm_interface_enhanced.py CHANGED
@@ -1,23 +1,28 @@
1
  import json
2
  import re
3
- from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
4
- import torch
5
 
6
  # Initialize the LLM pipeline (using a free model from Hugging Face)
7
- model_name = "microsoft/DialoGPT-medium" # Fallback to a smaller model if needed
8
- try:
9
- # Try to use a more capable model if available
10
- model_name = "microsoft/DialoGPT-large"
11
- tokenizer = AutoTokenizer.from_pretrained(model_name)
12
- model = AutoModelForCausalLM.from_pretrained(model_name)
13
- # Temporarily disable LLM pipeline to use improved fallback logic
14
- llm_pipeline = None # pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
15
- except:
16
- # Fallback to a simpler approach
17
- try:
18
- llm_pipeline = None # pipeline("text-generation", model="gpt2", device=0 if torch.cuda.is_available() else -1)
19
- except:
20
- llm_pipeline = None
 
 
 
 
 
21
 
22
  def generate_initial_robot_design_prompt():
23
  """Generate the initial prompt for LLM robot design"""
 
1
  import json
2
  import re
3
+ # from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
4
+ # import torch
5
 
6
  # Initialize the LLM pipeline (using a free model from Hugging Face)
7
+ # Commented out to avoid large model downloads during Space startup
8
+ # model_name = "microsoft/DialoGPT-medium" # Fallback to a smaller model if needed
9
+ # try:
10
+ # # Try to use a more capable model if available
11
+ # model_name = "microsoft/DialoGPT-large"
12
+ # tokenizer = AutoTokenizer.from_pretrained(model_name)
13
+ # model = AutoModelForCausalLM.from_pretrained(model_name)
14
+ # # Temporarily disable LLM pipeline to use improved fallback logic
15
+ # llm_pipeline = None # pipeline("text-generation", model=model, tokenizer=tokenizer, device=0 if torch.cuda.is_available() else -1)
16
+ # except:
17
+ # # Fallback to a simpler approach
18
+ # try:
19
+ # llm_pipeline = None # pipeline("text-generation", model="gpt2", device=0 if torch.cuda.is_available() else -1)
20
+ # except:
21
+ # llm_pipeline = None
22
+
23
+ # Disable local model loading to prevent large downloads during Space startup
24
+ # Using fallback logic instead for demonstration purposes
25
+ llm_pipeline = None
26
 
27
  def generate_initial_robot_design_prompt():
28
  """Generate the initial prompt for LLM robot design"""
requirements.txt CHANGED
@@ -1,8 +1,8 @@
1
  pybullet>=3.2.5
2
- gradio>=4.0.0
3
  imageio>=2.20.0
4
- transformers>=4.21.0
5
- torch>=1.12.0
6
  Pillow>=9.0.0
7
  numpy>=1.21.0
8
  requests>=2.28.0
 
1
  pybullet>=3.2.5
2
+ gradio==4.44.1
3
  imageio>=2.20.0
4
+ # transformers>=4.21.0
5
+ # torch>=1.12.0
6
  Pillow>=9.0.0
7
  numpy>=1.21.0
8
  requests>=2.28.0