Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -5,9 +5,9 @@ import pandas as pd
|
|
| 5 |
import torch
|
| 6 |
|
| 7 |
# ✅ Get Hugging Face token securely from Space Secrets
|
| 8 |
-
HUGGINGFACE_TOKEN = os.getenv("HF_TOKEN") #
|
| 9 |
|
| 10 |
-
# β
Load Llama-2 Model & Tokenizer (
|
| 11 |
model_name = "meta-llama/Llama-2-7b-chat-hf"
|
| 12 |
tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=HUGGINGFACE_TOKEN)
|
| 13 |
model = AutoModelForCausalLM.from_pretrained(
|
|
@@ -15,25 +15,38 @@ model = AutoModelForCausalLM.from_pretrained(
|
|
| 15 |
)
|
| 16 |
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
| 17 |
|
| 18 |
-
# β
Function to analyze CSV data
|
| 19 |
def analyze_csv(file):
|
| 20 |
df = pd.read_csv(file.name) # Read uploaded CSV
|
| 21 |
-
benchmark_text = df.to_string() # Convert DataFrame to text
|
| 22 |
|
| 23 |
-
|
|
|
|
| 24 |
|
| 25 |
-
#
|
| 26 |
-
|
| 27 |
-
|
| 28 |
|
| 29 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
|
| 31 |
# β
Gradio Interface
|
| 32 |
iface = gr.Interface(
|
| 33 |
fn=analyze_csv,
|
| 34 |
inputs=gr.File(label="Upload CSV File"),
|
| 35 |
outputs="text",
|
| 36 |
-
title="Llama-2 CSV
|
| 37 |
)
|
| 38 |
|
| 39 |
iface.launch()
|
|
|
|
| 5 |
import torch
|
| 6 |
|
| 7 |
# ✅ Get Hugging Face token securely from Space Secrets
|
| 8 |
+
HUGGINGFACE_TOKEN = os.getenv("HF_TOKEN") # Ensure this is set in HF Secrets
|
| 9 |
|
| 10 |
+
# ✅ Load Llama-2 Model & Tokenizer (optimized)
|
| 11 |
model_name = "meta-llama/Llama-2-7b-chat-hf"
|
| 12 |
tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=HUGGINGFACE_TOKEN)
|
| 13 |
model = AutoModelForCausalLM.from_pretrained(
|
|
|
|
| 15 |
)
|
| 16 |
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
| 17 |
|
# ✅ Function to analyze CSV data based on accuracy
def analyze_csv(file):
    """Analyze an uploaded benchmark CSV and ask Llama-2 which dataset
    performed best in terms of accuracy.

    Parameters
    ----------
    file :
        Gradio file wrapper; only its ``.name`` attribute (a filesystem
        path to the uploaded CSV) is used.

    Returns
    -------
    str
        The text generated by the Llama-2 pipeline.

    Raises
    ------
    ValueError
        If the CSV is empty or is missing any of the expected benchmark
        columns (clearer than the raw pandas KeyError the caller would
        otherwise see).
    """
    df = pd.read_csv(file.name)  # Read uploaded CSV

    # Columns the prompt relies on; validate up front so a malformed
    # upload produces a readable error instead of a deep pandas KeyError.
    required = [
        "Dataset",
        "Accuracy (%)",
        "Latency (ms)",
        "Throughput (samples/sec)",
        "Memory Usage (GB)",
        "GPU Utilization (%)",
    ]
    missing = [c for c in required if c not in df.columns]
    if df.empty or missing:
        raise ValueError(
            f"CSV must be non-empty and contain the columns {required}; "
            f"missing: {missing}"
        )

    # Identify the dataset with the highest accuracy
    best_model = df.loc[df["Accuracy (%)"].idxmax()]  # Find row with max accuracy

    # Optimized prompt for accuracy analysis.
    # BUGFIX: the original computed `best_model` but never used it; the
    # precomputed best row is now included in the prompt so the LLM can
    # ground its answer in it.
    prompt = f"""
    Given the following test benchmark results, determine which dataset performed best in terms of accuracy.

    Data Summary:
    {df[required].to_string(index=False)}

    Best row by accuracy (precomputed): {best_model.to_dict()}

    - Which test achieved the highest accuracy?
    - What were its corresponding latency, throughput, and resource utilization?
    - Provide a brief recommendation based on the findings.

    Highlight the most accurate test.
    """

    # Generate response using Llama-2 (optimized settings)
    output = pipe(prompt, max_length=300, do_sample=True, temperature=0.5)

    return output[0]['generated_text']  # Return optimized analysis
|
# ✅ Gradio Interface
# Build the file-upload component separately, then wire it into the app.
csv_input = gr.File(label="Upload CSV File")

iface = gr.Interface(
    fn=analyze_csv,
    inputs=csv_input,
    outputs="text",
    title="Llama-2 Accuracy-Based CSV Analyzer",
)

iface.launch()
|