# --- Hugging Face Spaces page metadata (scraped with the source; kept as comments) ---
# Space status: Sleeping
# File size: 3,449 Bytes
# Commits: 75f2ac0 8dfe6c7 75f2ac0 8dfe6c7 75f2ac0
import os
import gradio as gr
from groq import Groq
from transformers import AutoTokenizer
import faiss
import numpy as np
# Authenticate Groq API using the API key stored on Hugging Face.
# SECURITY: never hardcode the key in source (the previous placeholder
# assignment also clobbered any real key configured as a Space secret).
# Set GROQ_API_KEY in the Space's "Repository secrets" instead.
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
# Supported Microcontrollers/Processors.
# Maps a device name (as shown in the UI dropdown) to a dict holding a
# "template" string: a minimal skeleton program shown to the user above
# the generated code. Arduino/PIC18 templates are C-style; 8085 is assembly.
devices = {
"Arduino": {
"template": """
// Arduino Syntax Template
void setup() {
// Setup code here, to run once:
}
void loop() {
// Main code here, to run repeatedly:
}
""",
},
"PIC18": {
"template": """
// PIC18 Syntax Template
#include <xc.h>
void main() {
// Initialization code
while(1) {
// Main program loop
}
}
""",
},
"8085": {
"template": """
; 8085 Assembly Language Template
START: MVI A, 00H ; Load Accumulator with 0
OUT PORT1 ; Output data to port
HLT ; Halt program
""",
},
}
def generate_code(prompt, model="llama-3.3-70b-versatile"):
    """Send *prompt* to the Groq chat completions API and return the reply text.

    On any API failure the function does not raise; it returns a
    human-readable "Error during code generation: ..." string so the
    caller can surface the message directly in the UI.
    """
    messages = [{"role": "user", "content": prompt}]
    try:
        response = client.chat.completions.create(
            model=model,
            messages=messages,
        )
        return response.choices[0].message.content
    except Exception as exc:
        return f"Error during code generation: {str(exc)}"
# Cache of loaded tokenizers, keyed by model name — loading a tokenizer
# from the hub/disk is slow, so do it once per model instead of per call.
_TOKENIZER_CACHE = {}


# Function to Chunk and Tokenize Code
def tokenize_and_chunk_code(code, model_name="gpt2", max_length=512):
    """Tokenize *code* and split the token ids into chunks of at most *max_length*.

    Args:
        code: source code string to tokenize.
        model_name: Hugging Face tokenizer to use (default "gpt2").
        max_length: maximum number of token ids per chunk.

    Returns:
        List of 1-D token-id tensors, each of length <= max_length.
    """
    tokenizer = _TOKENIZER_CACHE.get(model_name)
    if tokenizer is None:
        tokenizer = _TOKENIZER_CACHE[model_name] = AutoTokenizer.from_pretrained(model_name)
    # truncation=False: we need ALL tokens — truncation=True would silently
    # drop everything past the model's max length (1024 for gpt2) before
    # we even get to chunk, defeating the purpose of this function.
    token_ids = tokenizer(code, truncation=False, return_tensors="pt").input_ids[0]
    return [
        token_ids[i : i + max_length]
        for i in range(0, len(token_ids), max_length)
    ]
# Function to Store Code in FAISS
def store_code_in_faiss(chunks, dimension=512):
    """Build a FAISS L2 index holding one embedding per code chunk.

    Args:
        chunks: sequence of token-id chunks (one embedding is stored per chunk).
        dimension: embedding dimensionality of the index (default 512,
            matching the previous hardcoded value).

    Returns:
        A faiss.IndexFlatL2 with len(chunks) vectors (empty index if no chunks).
    """
    faiss_index = faiss.IndexFlatL2(dimension)
    # Guard: np.array([]) has shape (0,), which faiss .add() rejects.
    if not chunks:
        return faiss_index
    # TODO: placeholder random vectors — replace with real embeddings of
    # each chunk before similarity search is meaningful.
    embeddings = np.stack([np.random.rand(dimension) for _ in chunks])
    faiss_index.add(embeddings.astype(np.float32))
    return faiss_index
# Gradio Interface Logic
def code_generator(device, prompt):
    """Generate code for *device* from the user's *prompt*.

    Returns a markdown string containing the device's syntax template plus
    the model-generated code, or an error message string on failure.
    """
    if device not in devices:
        return f"Error: {device} is not supported."
    # Show Syntax Template
    syntax_template = devices[device]["template"]
    # Generate Code
    generated_code = generate_code(f"Write {device} code for: {prompt}")
    # Handle if code generation fails. Match the exact prefix produced by
    # generate_code(): a bare `"Error" in generated_code` substring test
    # would misfire whenever legitimate generated code merely contains the
    # word "Error" (e.g. error-handling code).
    if generated_code.startswith("Error during code generation:"):
        return generated_code  # Return the error message if the generation fails
    # Chunk, Tokenize, and Store Code in FAISS
    chunks = tokenize_and_chunk_code(generated_code)
    store_code_in_faiss(chunks)
    return f"### Syntax Template:\n{syntax_template}\n\n### Generated Code:\n{generated_code}"
# Gradio UI — wires the device dropdown and free-text prompt straight
# into code_generator; the result is shown in a multi-line textbox.
interface = gr.Interface(
    fn=code_generator,
    inputs=[
        gr.Dropdown(choices=["Arduino", "PIC18", "8085"], label="Select Microcontroller/Processor"),
        gr.Textbox(label="Enter Your Code Requirements"),
    ],
    outputs=gr.Textbox(label="Generated Code", lines=20),
    title="Microcontroller Code Generator",
    description="Generate code for Arduino, PIC18, or 8085 based on your requirements.",
)
# Run Gradio App
if __name__ == "__main__":
    # Launch the web UI only when executed as a script (not on import).
    interface.launch()