Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -47,11 +47,14 @@ devices = {
|
|
| 47 |
|
| 48 |
# Function to Generate Code
|
| 49 |
def generate_code(prompt, model="llama-3.3-70b-versatile"):
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
|
|
|
|
|
|
|
|
|
| 55 |
|
| 56 |
# Function to Chunk and Tokenize Code
|
| 57 |
def tokenize_and_chunk_code(code, model_name="gpt2", max_length=512):
|
|
@@ -82,6 +85,10 @@ def code_generator(device, prompt):
|
|
| 82 |
# Generate Code
|
| 83 |
generated_code = generate_code(f"Write {device} code for: {prompt}")
|
| 84 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 85 |
# Chunk, Tokenize, and Store Code in FAISS
|
| 86 |
chunks = tokenize_and_chunk_code(generated_code)
|
| 87 |
store_code_in_faiss(chunks)
|
|
|
|
| 47 |
|
| 48 |
# Function to Generate Code
|
| 49 |
def generate_code(prompt, model="llama-3.3-70b-versatile"):
    """Generate code for *prompt* via the Groq chat-completions API.

    Returns the assistant's reply text on success. On any failure the
    exception is folded into a string beginning with
    "Error during code generation:" — callers detect failure by checking
    for the substring "Error" rather than catching exceptions.
    """
    try:
        # Single-turn request: the whole prompt goes in as one user message.
        completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model=model,
        )
        return completion.choices[0].message.content
    except Exception as exc:
        # Deliberate best-effort contract: surface failures as a string,
        # never raise to the caller.
        return f"Error during code generation: {str(exc)}"
|
| 58 |
|
| 59 |
# Function to Chunk and Tokenize Code
|
| 60 |
def tokenize_and_chunk_code(code, model_name="gpt2", max_length=512):
|
|
|
|
| 85 |
# Generate Code
|
| 86 |
generated_code = generate_code(f"Write {device} code for: {prompt}")
|
| 87 |
|
| 88 |
+
# Handle if code generation fails
|
| 89 |
+
if "Error" in generated_code:
|
| 90 |
+
return generated_code # Return the error message if the generation fails
|
| 91 |
+
|
| 92 |
# Chunk, Tokenize, and Store Code in FAISS
|
| 93 |
chunks = tokenize_and_chunk_code(generated_code)
|
| 94 |
store_code_in_faiss(chunks)
|