Rahul-8799 committed · verified
Commit 9533566 · 1 Parent(s): 073c4fc

Upload 7 files

src/agents/product_manager_agent.py ADDED
@@ -0,0 +1,14 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ MODEL_REPO = "Rahul-8799/product_manager_mistral"
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO)
+ model = AutoModelForCausalLM.from_pretrained(MODEL_REPO, device_map="auto", torch_dtype=torch.float16)
+ model.eval()
+
+ def run(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_new_tokens=512)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
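
All five agent modules in this commit follow the same load-once pattern: the checkpoint is downloaded and placed on the GPU at import time, and run() wraps a single generate call capped at 512 new tokens. A minimal usage sketch for that pattern, assuming the repository root is on PYTHONPATH and src is importable as a package; the import path and prompt are illustrative, not part of the commit:

# Hypothetical usage sketch (assumed import path, not part of the commit).
# Importing the module once loads the weights; reuse run() to avoid reloading.
from src.agents import product_manager_agent

spec = product_manager_agent.run("Turn this idea into a basic spec: a recipe-sharing app.")
print(spec)

The same call shape applies to the project manager, QA, architect, and engineer modules below; only MODEL_REPO changes.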
src/agents/project_manager_agent.py ADDED
@@ -0,0 +1,14 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ MODEL_REPO = "Rahul-8799/project_manager_gemma3"
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO)
+ model = AutoModelForCausalLM.from_pretrained(MODEL_REPO, device_map="auto", torch_dtype=torch.float16)
+ model.eval()
+
+ def run(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_new_tokens=512)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
src/agents/quality_assurance_agent.py ADDED
@@ -0,0 +1,14 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ MODEL_REPO = "Rahul-8799/quality_assurance_stablecode"
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO)
+ model = AutoModelForCausalLM.from_pretrained(MODEL_REPO, device_map="auto", torch_dtype=torch.float16)
+ model.eval()
+
+ def run(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_new_tokens=512)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
src/agents/software_architect_agent.py ADDED
@@ -0,0 +1,28 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+ import torch
+
+ MODEL_REPO = "Rahul-8799/software_architect_command_r"
+
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_compute_dtype=torch.float16,
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_quant_type="nf4"
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO)
+
+ model = AutoModelForCausalLM.from_pretrained(
+     MODEL_REPO,
+     quantization_config=bnb_config,
+     device_map={"": 0},  # force full GPU load
+     torch_dtype=torch.float16
+ )
+
+ model.eval()
+
+ def run(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_new_tokens=512)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
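
Unlike the other agents, the Command R architect model is loaded through bitsandbytes 4-bit NF4 quantization with double quantization and fp16 compute, which keeps the weight footprint at roughly a quarter of fp16 so the whole model can be pinned to a single GPU via device_map={"": 0}. A small guard that could precede the load, since both the pinned device map and 4-bit bitsandbytes loading expect a CUDA device; the guard and its message are an assumption, not part of the commit:

# Hypothetical guard, not in the commit: fail early with a clear message
# instead of a mid-load error when no CUDA device is present.
import torch

if not torch.cuda.is_available():
    raise RuntimeError("software_architect_agent needs a CUDA GPU for 4-bit (bitsandbytes) loading")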
src/agents/software_engineer_agent.py ADDED
@@ -0,0 +1,14 @@
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ MODEL_REPO = "Rahul-8799/software_engineer_mellum"
+
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_REPO)
+ model = AutoModelForCausalLM.from_pretrained(MODEL_REPO, device_map="auto", torch_dtype=torch.float16)
+ model.eval()
+
+ def run(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     with torch.no_grad():
+         outputs = model.generate(**inputs, max_new_tokens=512)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
src/utils/run_pipeline_and_save.py ADDED
@@ -0,0 +1,24 @@
+ import os
+ import json
+
+ def run_pipeline_and_save(prompt):
+     os.makedirs("output", exist_ok=True)
+
+     # Simulate agent conversation
+     conversation = [
+         {"Product Manager": "Turn the idea into a basic spec."},
+         {"Project Manager": "Planned a timeline with 3 milestones."},
+         {"Software Architect": "Created folder structure and basic APIs."},
+         {"Software Engineer": "<html><body><h1>Hello World</h1></body></html>"},
+         {"Quality Assurance": "HTML renders correctly. No issues found."}
+     ]
+
+     fake_html = "<html><body><h1>Hello World</h1></body></html>"
+
+     with open("output/generated_ui.html", "w") as f:
+         f.write(fake_html)
+
+     with open("output/agent_log.json", "w") as f:
+         json.dump(conversation, f, indent=2)
+
+     return conversation, fake_html
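
Note that run_pipeline_and_save() does not call the agent modules yet: the conversation log and HTML are hard-coded placeholders written under output/. A sketch of how the agents above could be chained in their place, assuming each agent's output feeds the next prompt; the ordering and prompt wording are assumptions, not part of the commit:

# Hypothetical chaining sketch, not part of the commit.
from src.agents import product_manager_agent, software_engineer_agent

def run_agents(prompt):
    spec = product_manager_agent.run(prompt)
    html = software_engineer_agent.run(f"Implement this spec as a single HTML page:\n{spec}")
    return [{"Product Manager": spec}, {"Software Engineer": html}], html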
src/utils/zip_output.py ADDED
@@ -0,0 +1,12 @@
+ import zipfile
+ import os
+
+ def zip_output():
+     zip_path = "output_bundle.zip"
+     with zipfile.ZipFile(zip_path, "w") as zipf:
+         for root, dirs, files in os.walk("output"):
+             for file in files:
+                 file_path = os.path.join(root, file)
+                 arcname = os.path.relpath(file_path, "output")
+                 zipf.write(file_path, arcname=os.path.join("output", arcname))
+     return zip_path
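
zip_output() walks the output/ folder produced by run_pipeline_and_save() and stores each file under an output/ prefix inside output_bundle.zip (the relative path is re-joined with "output", so the archive mirrors the on-disk layout). A short end-to-end usage sketch; the prompt text is an assumption:

# Hypothetical end-to-end usage, not part of the commit.
from src.utils.run_pipeline_and_save import run_pipeline_and_save
from src.utils.zip_output import zip_output

conversation, html = run_pipeline_and_save("Build a landing page for a bakery")
bundle = zip_output()
print(bundle)  # output_bundle.zip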