Update app.py
app.py CHANGED

@@ -1,5 +1,5 @@
 ## Deploying on HuggingFace
-
+
 import streamlit as st
 import pandas as pd
 import torch
@@ -41,7 +41,7 @@ def load_model():
 model, tokenizer = load_model()

 # Prediction function
-
+
 device = "cuda" if torch.cuda.is_available() else "cpu"

 def get_prediction(prompt):
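The hunk above calls load_model() and picks a device, but the loader itself sits outside the diff. A minimal sketch of what it might look like, assuming the app loads a causal language model via Hugging Face transformers; the model id and the st.cache_resource caching are assumptions, not shown in the commit:

import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "gpt2"  # placeholder id; the actual model is not shown in the diff

@st.cache_resource  # assumed: cache so Streamlit reruns don't reload the weights
def load_model():
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
    model.to("cuda" if torch.cuda.is_available() else "cpu")
    model.eval()  # inference only
    return model, tokenizer

Caching matters here because Streamlit reruns the whole script on every user interaction; without it the weights would reload each time.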
@@ -67,7 +67,7 @@ def get_prediction(prompt):
     # Generate output using the model with streaming
     output = model.generate(
         inputs["input_ids"],  # Use the tokenized input
-        max_new_tokens=
+        max_new_tokens=250,  # Limit the number of tokens
         temperature=0.7,  # Control randomness of output
         top_p=0.95,  # Sampling parameter
         do_sample=True,  # Ensure sampling for diverse output
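The old line left max_new_tokens without a value, which Python rejects as a syntax error; the commit supplies 250. For context, a hedged sketch of how get_prediction might wrap this generate() call, assuming standard transformers tokenization and decoding (only the generate() arguments are taken from the diff):

def get_prediction(prompt):
    # Tokenize the prompt and move the tensors to the model's device
    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    # Generate output using the model (sampling parameters from the diff)
    output = model.generate(
        inputs["input_ids"],
        max_new_tokens=250,  # limit the number of generated tokens
        temperature=0.7,     # control randomness of output
        top_p=0.95,          # nucleus-sampling cutoff
        do_sample=True,      # sample instead of greedy decoding
    )

    # Decode only the newly generated tokens back to text
    new_tokens = output[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)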
@@ -139,3 +139,4 @@ with tab2:
     csv_output = df.to_csv(index=False).encode("utf-8")
     st.download_button("📤 Download Predictions", data=csv_output, file_name="predictions.csv")

+
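The final hunk only adds a trailing blank line after the CSV export. For reference, a speculative sketch of how that tab might collect predictions into a DataFrame ahead of the two download lines shown in the diff; the uploader widget and the "prompt" column name are assumptions:

with tab2:
    uploaded = st.file_uploader("Upload prompts CSV", type="csv")  # assumed widget
    if uploaded is not None:
        df = pd.read_csv(uploaded)
        # Assumed: run the model over a "prompt" column
        df["prediction"] = df["prompt"].apply(get_prediction)
        st.dataframe(df)

        # These two lines mirror the diff
        csv_output = df.to_csv(index=False).encode("utf-8")
        st.download_button("📤 Download Predictions", data=csv_output, file_name="predictions.csv")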