File size: 1,280 Bytes
721ae99
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

class ZewAI3:
    """Thin wrapper around a Hugging Face text-generation pipeline,
    specialized for code generation (defaults to microsoft/phi-2)."""

    def __init__(self, model_name="microsoft/phi-2", torch_dtype=torch.float32):
        """Load the tokenizer and model, then build the generation pipeline.

        Args:
            model_name: Hugging Face model id or local path.
            torch_dtype: dtype for the model weights (e.g. ``torch.float16``
                to halve memory on supported hardware). Defaults to
                ``torch.float32`` for CPU compatibility, matching the
                original hard-coded behavior.
        """
        # We use phi-2 as a base because it's tiny but "super good" at coding
        print(f"Initializing ZewAI 3 based on {model_name}...")
        self.tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch_dtype,
            trust_remote_code=True,
        )
        self.pipe = pipeline("text-generation", model=self.model, tokenizer=self.tokenizer)

    def generate_code(self, prompt, max_length=512):
        """Generate code for *prompt* and return only the model's completion.

        Args:
            prompt: natural-language description of the code to write.
            max_length: maximum number of NEW tokens to generate. The name
                is kept for backward compatibility; it is forwarded as
                ``max_new_tokens``, so it does not cap the total sequence.

        Returns:
            The generated completion text, without the instruction prefix.
        """
        # Phi-2 expects "Instruct:/Output:" formatted prompts; matching that
        # format keeps generations on-task.
        formatted_prompt = f"Instruct: Write the following code: {prompt}\nOutput:"

        results = self.pipe(
            formatted_prompt,
            max_new_tokens=max_length,
            do_sample=True,
            temperature=0.7,
            # Bug fix: the pipeline defaults to return_full_text=True, which
            # echoes the instruction prompt back glued onto the generated
            # code. Callers want only the completion.
            return_full_text=False,
        )
        return results[0]['generated_text']

# Example usage for the editor: quick smoke test of the wrapper.
if __name__ == "__main__":
    assistant = ZewAI3()
    demo_request = "Create a single-file HTML app with a dark mode toggle."
    generated = assistant.generate_code(demo_request)
    print(generated)