sunnynazir committed on
Commit
ac054fb
·
verified ·
1 Parent(s): 4866f9e

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +103 -0
app.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import streamlit as st
3
+ from groq import Groq
4
+ from transformers import AutoTokenizer
5
+ import faiss
6
+ import numpy as np
7
+
8
# Groq API credentials.
# setdefault keeps the original placeholder as a fallback but no longer
# clobbers a real key the user has already exported in the environment
# (the original unconditional assignment overwrote it every run).
# NOTE(review): never commit a real key — supply it via the environment/secrets.
os.environ.setdefault("GROQ_API_KEY", "MY_API_KEY")  # Replace with your Groq API Key
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
11
+
12
# Supported Microcontrollers/Processors.
# Maps a device name (shown in the UI selectbox) to a dict with a single
# "template" key: a boilerplate source skeleton displayed to the user before
# code generation. Keys here must match the options offered in main().
devices = {
    # Arduino (C++-style sketch with setup/loop entry points)
    "Arduino": {
        "template": """
// Arduino Syntax Template
void setup() {
  // Setup code here, to run once:
}

void loop() {
  // Main code here, to run repeatedly:
}
""",
    },
    # Microchip PIC18 (XC8-style C with an infinite main loop)
    "PIC18": {
        "template": """
// PIC18 Syntax Template
#include <xc.h>
void main() {
  // Initialization code
  while(1) {
    // Main program loop
  }
}
""",
    },
    # Intel 8085 (assembly-language skeleton)
    "8085": {
        "template": """
; 8085 Assembly Language Template
START: MVI A, 00H  ; Load Accumulator with 0
OUT PORT1          ; Output data to port
HLT                ; Halt program
""",
    },
}
47
+
48
# Function to Generate Code using Groq API
def generate_code(prompt, model="llama-3.3-70b-versatile"):
    """Send *prompt* as a single user message to the Groq chat-completion
    API and return the assistant's reply text.

    Args:
        prompt: The full instruction to send to the model.
        model: Groq model identifier (defaults to llama-3.3-70b-versatile).

    Returns:
        The content string of the first completion choice.
    """
    request_messages = [{"role": "user", "content": prompt}]
    response = client.chat.completions.create(
        messages=request_messages,
        model=model,
    )
    first_choice = response.choices[0]
    return first_choice.message.content
55
+
56
# Function to Chunk and Tokenize Code
def tokenize_and_chunk_code(code, model_name="gpt2", max_length=512):
    """Tokenize *code* and split the token ids into fixed-size chunks.

    Args:
        code: Source-code string to tokenize.
        model_name: Hugging Face tokenizer to load (default "gpt2").
        max_length: Maximum number of tokens per chunk.

    Returns:
        A list of 1-D token-id tensors, each of length <= max_length;
        empty list for empty input.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # BUGFIX: the original passed truncation=True with no explicit max_length,
    # which silently capped the input at the model's maximum (1024 for gpt2)
    # BEFORE chunking — dropping everything past that point and defeating the
    # purpose of this function. Tokenize without truncation and chunk the
    # full id sequence instead.
    token_ids = tokenizer(code, return_tensors="pt").input_ids[0]
    return [
        token_ids[i : i + max_length]
        for i in range(0, len(token_ids), max_length)
    ]
65
+
66
# Function to Store Code in FAISS
def store_code_in_faiss(chunks, dimension=512):
    """Build a FAISS flat-L2 index with one embedding vector per chunk.

    Args:
        chunks: Sequence of token-id chunks (only its length is used here).
        dimension: Embedding dimensionality (default 512, matching the
            original hard-coded value — now a parameter for reuse).

    Returns:
        A faiss.IndexFlatL2 containing len(chunks) vectors (empty index
        when chunks is empty).
    """
    faiss_index = faiss.IndexFlatL2(dimension)
    # BUGFIX: with no chunks, np.array([]) has shape (0,) — the wrong rank
    # for IndexFlatL2.add(), which raised. Return the empty index instead.
    if not chunks:
        return faiss_index
    # NOTE(review): random vectors are a placeholder — lookups against this
    # index are meaningless until real model embeddings replace them, as the
    # original comment already intends.
    embeddings = [np.random.rand(dimension) for _ in chunks]  # Replace with actual embeddings
    faiss_index.add(np.array(embeddings, dtype=np.float32))
    return faiss_index
73
+
74
# Streamlit App
def main():
    """Render the app: pick a device, show its template, generate code.

    Flow: select device -> display syntax template -> read a free-form
    prompt -> generate code via Groq -> tokenize/index it -> display it.
    """
    st.title("Microcontroller Code Generator")
    st.write("Select the microcontroller and provide code requirements.")

    # Step 1: Ask User for Microcontroller/Processor Selection
    device = st.selectbox("Select a microcontroller/processor", options=["Arduino", "PIC18", "8085"])

    # Step 2: Show Syntax Template
    st.subheader("Coding Syntax Template")
    template = devices[device]["template"]
    st.code(template)

    # Step 3: Get User Prompt and Generate Code
    prompt = st.text_input("Enter your code requirements:")

    # Guard clause: nothing to do until the user has typed a request.
    if not prompt:
        return

    st.write("Generating code...")
    generated = generate_code(f"Write {device} code for: {prompt}")

    # Step 4: Chunk, Tokenize, and Store Code in FAISS
    # (the returned index is currently discarded, as in the original)
    chunks = tokenize_and_chunk_code(generated)
    store_code_in_faiss(chunks)

    # Step 5: Display Generated Code
    st.subheader("Generated Code")
    st.code(generated)


# Run the Streamlit App
if __name__ == "__main__":
    main()