sunnynazir committed on
Commit
75f2ac0
·
verified ·
1 Parent(s): a2ae6d0

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +106 -0
app.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import faiss
import gradio as gr
import numpy as np
from groq import Groq
from transformers import AutoTokenizer

# Read the Groq API key from the environment (on Hugging Face Spaces this is
# a repository secret). The previous code overwrote the variable with the
# literal placeholder "MY_API_KEY", which broke every API request and invited
# committing a real key into source control.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
11
# Supported microcontrollers/processors, each mapped to a minimal syntax
# template that is shown alongside the generated code.
devices = {
    "Arduino": {
        "template": """
// Arduino Syntax Template
void setup() {
  // Setup code here, to run once:
}

void loop() {
  // Main code here, to run repeatedly:
}
""",
    },
    "PIC18": {
        "template": """
// PIC18 Syntax Template
#include <xc.h>
void main() {
    // Initialization code
    while(1) {
        // Main program loop
    }
}
""",
    },
    "8085": {
        "template": """
; 8085 Assembly Language Template
START: MVI A, 00H   ; Load Accumulator with 0
       OUT PORT1    ; Output data to port
       HLT          ; Halt program
""",
    },
}
47
# Ask the Groq chat API to generate code for the given prompt.
def generate_code(prompt, model="llama-3.3-70b-versatile"):
    """Send *prompt* as a single user message to the Groq chat API.

    Returns the text content of the model's first (and only) reply choice.
    """
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content
55
# Function to tokenize code and split the token ids into fixed-size chunks.
def tokenize_and_chunk_code(code, model_name="gpt2", max_length=512):
    """Tokenize *code* and return a list of token-id chunks.

    Each chunk is a 1-D tensor of at most *max_length* token ids.

    Fix: the original passed ``truncation=True``, which capped the encoding
    at the model's maximum length (1024 for gpt2) *before* chunking — long
    code was silently dropped and the chunking loop never saw more than two
    chunks. Tokenizing without truncation lets every token reach a chunk.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    token_ids = tokenizer(code, truncation=False, return_tensors="pt").input_ids[0]
    return [
        token_ids[i : i + max_length]
        for i in range(0, len(token_ids), max_length)
    ]
65
# Function to store one embedding per chunk in an in-memory FAISS index.
def store_code_in_faiss(chunks):
    """Build a flat L2 FAISS index holding one vector per chunk.

    NOTE(review): the vectors are random placeholders, not real embeddings
    of the chunks — replace with an actual embedding model before using the
    index for retrieval.

    Fix: the original crashed on an empty *chunks* list, because
    ``np.array([])`` has shape (0,) rather than (0, dimension) and FAISS
    rejects it; an empty index is returned instead.
    """
    dimension = 512
    faiss_index = faiss.IndexFlatL2(dimension)
    if not chunks:
        return faiss_index
    # One (num_chunks, dimension) float32 matrix in a single allocation,
    # instead of a Python list of per-chunk arrays.
    embeddings = np.random.rand(len(chunks), dimension).astype(np.float32)
    faiss_index.add(embeddings)
    return faiss_index
73
# Gradio callback: show the device's syntax template and the generated code.
def code_generator(device, prompt):
    """Generate code for *device* from the user's *prompt*.

    Returns a markdown-ish string containing the device's syntax template
    followed by the model-generated code, or an error message when the
    device is not in the supported set. As a side effect the generated code
    is tokenized, chunked, and stored in a FAISS index.
    """
    if device not in devices:
        return f"Error: {device} is not supported."

    syntax_template = devices[device]["template"]
    generated_code = generate_code(f"Write {device} code for: {prompt}")

    # Index the result (placeholder embeddings) for later retrieval.
    store_code_in_faiss(tokenize_and_chunk_code(generated_code))

    return f"### Syntax Template:\n{syntax_template}\n\n### Generated Code:\n{generated_code}"
90
# Gradio UI: device selector + free-text requirements in, generated code out.
interface = gr.Interface(
    fn=code_generator,
    inputs=[
        gr.Dropdown(
            choices=["Arduino", "PIC18", "8085"],
            label="Select Microcontroller/Processor",
        ),
        gr.Textbox(label="Enter Your Code Requirements"),
    ],
    outputs=gr.Textbox(label="Generated Code", lines=20),
    title="Microcontroller Code Generator",
    description="Generate code for Arduino, PIC18, or 8085 based on your requirements.",
)

# Run the Gradio app when executed as a script.
if __name__ == "__main__":
    interface.launch()