Joe6636564 committed on
Commit
32cc524
·
verified ·
1 Parent(s): 3af5025

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +66 -0
app.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import torch

# --- Model loading -----------------------------------------------------------
# StarCoder2-3B, quantized to 4-bit so it fits on a small GPU.
model_id = "bigcode/starcoder2-3b"
tokenizer = AutoTokenizer.from_pretrained(model_id)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    # Passing load_in_4bit=True directly is deprecated in recent transformers
    # releases; the supported path is a BitsAndBytesConfig quantization config.
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.float16,
    ),
    torch_dtype=torch.float16,
    device_map="auto",  # let accelerate place layers on available devices
)

# --- CodeBuddy system prompt (runtime string: do not edit casually) ----------
system_prompt = """
You are **CodeBuddy**, a friendly and skilled developer assistant inside a code editor.

Your job:
- Explain code clearly
- Suggest improvements
- Fix bugs when asked
- Teach best practices
- Keep your answers helpful, accurate, and detailed
- Always speak in a friendly, developer-to-developer tone

Focus languages for now:
- JavaScript
- HTML
- CSS

Important rules:
1. When explaining code, break it down step-by-step.
2. When giving a fix, ALWAYS show corrected code in a formatted code block.
3. When suggesting improvements, explain *why* the change is beneficial.
4. Avoid unnecessary technical jargon unless the user is clearly advanced.
5. If the user provides broken code, help debug it — don’t just rewrite everything unless needed.
6. If unsure about something, say so briefly but try to reason your way through.

Tone:
- Friendly dev talk
- Helpful, not robotic
- Encouraging, not formal
"""

# --- Example user input ------------------------------------------------------
# NOTE(review): the source was scraped with indentation stripped; the loop body
# below is preserved as found — confirm the example should read "    print(i)".
user_input = """
Explain this code:

for i in range(5):
print(i)
"""

# Combine system prompt + user input into a single chat-style prompt.
prompt = system_prompt + "\n\nUser:\n" + user_input + "\n\nCodeBuddy:"

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

output = model.generate(
    **inputs,
    max_new_tokens=200,
    # Fix: temperature/top_p are ignored (with a warning) unless sampling is
    # explicitly enabled — without do_sample=True this was greedy decoding.
    do_sample=True,
    temperature=0.4,
    top_p=0.9,
)

print(tokenizer.decode(output[0], skip_special_tokens=True))