nihardon committed on
Commit
43200d9
·
verified ·
1 Parent(s): 6d28ada

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -44
app.py CHANGED
@@ -1,67 +1,42 @@
1
import gradio as gr
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# --- CONFIGURATION ---
# Base checkpoint the LoRA adapters were trained against.
BASE_MODEL = "unsloth/llama-3-8b"
# Repository holding the fine-tuned adapter weights.
ADAPTER_MODEL = "nihardon/fine-tuned-unit-test-generator"

print(f"Loading {ADAPTER_MODEL} on CPU... (This might take a minute)")

# The tokenizer comes from the base checkpoint; the adapter does not change it.
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)

# Base weights, loaded CPU-only in full float32 precision to avoid needing a GPU.
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    low_cpu_mem_usage=True,
    torch_dtype=torch.float32,
    device_map="cpu",
)

# Graft the fine-tuned adapters onto the base model.
model = PeftModel.from_pretrained(model, ADAPTER_MODEL)
 
 
 
 
27
 
28
def generate_test(user_code):
    """Generate a pytest unit test for *user_code* using the adapter-tuned model.

    Formats the code into an Alpaca-style prompt, runs greedy-ish generation,
    and returns only the text after the final "### Response:" marker.
    """
    template = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
You are an expert Python QA engineer. Write a pytest unit test for the following function.

### Input:
{}

### Response:
"""
    # Tokenize the filled-in prompt for the model.
    encoded = tokenizer(template.format(user_code), return_tensors="pt")

    # Low temperature keeps the generated tests near-deterministic.
    with torch.no_grad():
        generated = model.generate(
            **encoded,
            max_new_tokens=256,
            use_cache=True,
            temperature=0.1,
        )

    # Keep only the model's answer, dropping the echoed prompt.
    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    return decoded.split("### Response:")[-1].strip()
55
 
56
# UI
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧪 AI Unit Test Generator")
    gr.Markdown(f"**Model:** {ADAPTER_MODEL} (Llama-3 Fine-Tune) | **Status:** Running on CPU")
    gr.Markdown("Paste your Python function below, and the AI will write a Pytest case for it.")

    with gr.Row():
        with gr.Column():
            input_box = gr.Code(language="python", label="Paste Python Function Here", value="def add(a, b):\n return a + b")
            btn = gr.Button("Generate Pytest", variant="primary")
        with gr.Column():
            output_box = gr.Code(language="python", label="Generated Test Case")

    # Fix: the button was defined but never connected to a handler, so
    # clicking it did nothing. Wire it to generate_test.
    btn.click(fn=generate_test, inputs=input_box, outputs=output_box)

# Fix: the app was never launched; guard so importing the module stays side-effect free.
if __name__ == "__main__":
    demo.launch()
 
import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# --- CONFIGURATION ---
# Repository holding the fine-tuned GGUF export.
REPO_ID = "nihardon/fine-tuned-unit-test-generator"
# GGUF file name inside the repo — must match exactly (Q4_K_M quantization).
GGUF_FILENAME = "llama-3-8b.Q4_K_M.gguf"

# 1. Download the quantized model file (cached by huggingface_hub after first run).
model_path = hf_hub_download(
    repo_id=REPO_ID,
    filename=GGUF_FILENAME,
)

# 2. Load the model, CPU optimized.
llm = Llama(
    model_path=model_path,
    n_ctx=2048,   # context window in tokens; must fit prompt + 256 generated tokens
    n_threads=2,  # modest thread count for small shared-CPU hosts
)
17
 
18
def generate_test(user_code):
    """Generate a pytest unit test for *user_code* with the local GGUF model.

    Args:
        user_code: Python source of the function to test, inserted verbatim
            into the Alpaca-style prompt.

    Returns:
        The model's completion text, stripped of surrounding whitespace.
    """
    prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
You are an expert Python QA engineer. Write a pytest unit test for the following function.

### Input:
{user_code}

### Response:
"""
    # temperature=0.1 restores the near-deterministic sampling the previous
    # transformers-based version used; without it llama-cpp defaults to 0.8,
    # which makes the generated tests vary noticeably between runs.
    output = llm(
        prompt,
        max_tokens=256,
        temperature=0.1,
        stop=["### Instruction:"],  # stop if the model starts a new instruction block
        echo=False,                 # do not repeat the prompt in the output
    )
    return output['choices'][0]['text'].strip()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
 
32
# 3. The UI
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🧪 AI Unit Test Generator")
    gr.Markdown("**Model:** Custom Fine-Tuned Llama-3 (GGUF) | **Status:** Running Locally")

    with gr.Row():
        with gr.Column():
            input_box = gr.Code(language="python", value="def add(a, b):\n return a + b", label="Function")
            btn = gr.Button("Generate Pytest", variant="primary")
        with gr.Column():
            output_box = gr.Code(language="python", label="Generated Test Case")

    # Fix: the button was defined but never connected to a handler, so
    # clicking it did nothing. Wire it to generate_test.
    btn.click(fn=generate_test, inputs=input_box, outputs=output_box)

# Fix: the app was never launched; guard so importing the module stays side-effect free.
# (Hugging Face Spaces auto-launch an object named `demo`, but local runs need this.)
if __name__ == "__main__":
    demo.launch()