gokul-pv committed on
Commit
37e7c47
·
1 Parent(s): 3320d8c

minor changes

Browse files
Files changed (2) hide show
  1. README.md +3 -3
  2. app.py +2 -2
README.md CHANGED
@@ -1,13 +1,13 @@
1
  ---
2
- title: TeSo
3
- emoji: 😻
4
  colorFrom: green
5
  colorTo: blue
6
  sdk: gradio
7
  sdk_version: 5.16.0
8
  app_file: app.py
9
  pinned: false
10
- short_description: Tech stack optimizer
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Code Architect
3
+ emoji: 💻📚
4
  colorFrom: green
5
  colorTo: blue
6
  sdk: gradio
7
  sdk_version: 5.16.0
8
  app_file: app.py
9
  pinned: false
10
+ short_description: Code Architect is a Gen-AI-powered tool designed to analyze a project's current architecture and recommend an optimized tech stack.
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -3,7 +3,7 @@ import torch
3
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
4
 
5
  # Initialize model and tokenizer
6
- MODEL_PATH = "gokul-pv/Llama-3.2-1B-Instruct-16bit-TeSO"
7
 
8
  def load_model():
9
  tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
@@ -91,7 +91,7 @@ def analyze_architecture(code_input, temperature=1.5, max_tokens=512):
91
  # Create Gradio interface
92
  def create_gradio_interface():
93
  with gr.Blocks() as demo:
94
- gr.Markdown("# Tech Stack Optimizer - TeSO")
95
 
96
  with gr.Row():
97
  with gr.Column():
 
3
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer
4
 
5
  # Initialize model and tokenizer
6
+ MODEL_PATH = "gokul-pv/Llama-3.2-1B-Instruct-16bit-CodeArchitect"
7
 
8
  def load_model():
9
  tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
 
91
  # Create Gradio interface
92
  def create_gradio_interface():
93
  with gr.Blocks() as demo:
94
+ gr.Markdown("# Code Architect")
95
 
96
  with gr.Row():
97
  with gr.Column():