Update app.py
Browse files
app.py
CHANGED
|
@@ -411,41 +411,72 @@ def style_metric_container(label, value):
|
|
| 411 |
</div>
|
| 412 |
""", unsafe_allow_html=True)
|
| 413 |
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
|
| 417 |
-
|
| 418 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 419 |
|
| 420 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 421 |
system_message = {
|
| 422 |
"role": "system",
|
| 423 |
"content": (
|
| 424 |
-
"You are a helpful car shopping assistant. "
|
| 425 |
-
"
|
| 426 |
-
"Include car makes, models, years, and approximate prices. "
|
| 427 |
-
"Be friendly and informative."
|
| 428 |
)
|
| 429 |
}
|
| 430 |
-
|
| 431 |
messages = [system_message, {"role": "user", "content": prompt}]
|
| 432 |
-
|
| 433 |
-
# Call the OpenAI ChatCompletion API
|
| 434 |
response = openai.ChatCompletion.create(
|
| 435 |
-
model="gpt-3.5-turbo",
|
| 436 |
messages=messages,
|
| 437 |
max_tokens=500,
|
| 438 |
-
n=1,
|
| 439 |
-
stop=None,
|
| 440 |
temperature=0.7,
|
| 441 |
)
|
|
|
|
|
|
|
| 442 |
|
| 443 |
-
|
| 444 |
-
|
| 445 |
-
|
| 446 |
-
return assistant_reply
|
| 447 |
-
|
| 448 |
-
def create_assistant_section():
|
| 449 |
st.markdown("""
|
| 450 |
<div style='background-color: #f8f9fa; padding: 1.5rem; border-radius: 10px; margin-bottom: 1rem;'>
|
| 451 |
<h2 style='color: #1E1E1E; margin-top: 0;'>🤖 Car Shopping Assistant</h2>
|
|
@@ -456,13 +487,12 @@ def create_assistant_section():
|
|
| 456 |
if "assistant_responses" not in st.session_state:
|
| 457 |
st.session_state.assistant_responses = []
|
| 458 |
|
| 459 |
-
prompt = st.text_input("Ask about car recommendations...",
|
| 460 |
placeholder="Type your question here...")
|
| 461 |
|
| 462 |
if prompt:
|
| 463 |
try:
|
| 464 |
-
|
| 465 |
-
response = generate_gpt_response(prompt)
|
| 466 |
st.session_state.assistant_responses.append(response)
|
| 467 |
except Exception as e:
|
| 468 |
response = f"Sorry, I encountered an error: {str(e)}"
|
|
@@ -481,7 +511,6 @@ def create_assistant_section():
|
|
| 481 |
if st.button("Clear Chat"):
|
| 482 |
st.session_state.assistant_responses = []
|
| 483 |
st.experimental_rerun()
|
| 484 |
-
|
| 485 |
# --- Prediction Interface ---
|
| 486 |
def create_prediction_interface():
|
| 487 |
with st.sidebar:
|
|
@@ -633,7 +662,7 @@ def predict_with_ranges(inputs, model, label_encoders):
|
|
| 633 |
'max_price': max_price
|
| 634 |
}
|
| 635 |
# --- Main Application ---
|
| 636 |
-
def main(model, label_encoders):
|
| 637 |
col1, col2 = st.columns([2, 1])
|
| 638 |
|
| 639 |
with col1:
|
|
@@ -646,46 +675,25 @@ def main(model, label_encoders):
|
|
| 646 |
|
| 647 |
inputs, predict_button = create_prediction_interface()
|
| 648 |
|
| 649 |
-
# Prepare base inputs
|
| 650 |
-
base_inputs = {
|
| 651 |
-
"year": inputs.get("year", 2022),
|
| 652 |
-
"make": inputs.get("make", "toyota").lower(),
|
| 653 |
-
"model": inputs.get("model", "camry"),
|
| 654 |
-
"odometer": inputs.get("odometer", 20000),
|
| 655 |
-
"condition": inputs.get("condition", "good"),
|
| 656 |
-
"fuel": inputs.get("fuel", "gas"),
|
| 657 |
-
"title_status": inputs.get("title_status", "clean"),
|
| 658 |
-
"transmission": inputs.get("transmission", "automatic"),
|
| 659 |
-
"drive": inputs.get("drive", "fwd"),
|
| 660 |
-
"size": inputs.get("size", "mid-size"),
|
| 661 |
-
"paint_color": inputs.get("paint_color", "black"),
|
| 662 |
-
"type": inputs.get("type", "sedan")
|
| 663 |
-
}
|
| 664 |
-
|
| 665 |
-
if base_inputs["condition"] == "new":
|
| 666 |
-
base_inputs["odometer"] = 0
|
| 667 |
-
|
| 668 |
if predict_button:
|
| 669 |
-
st.write(f"Analyzing {
|
| 670 |
-
prediction_results = predict_with_ranges(
|
| 671 |
|
| 672 |
st.markdown(f"""
|
| 673 |
### Price Analysis
|
| 674 |
- **Estimated Range**: ${prediction_results['min_price']:,.2f} - ${prediction_results['max_price']:,.2f}
|
| 675 |
- **Model Prediction**: ${prediction_results['predicted_price']:,.2f}
|
| 676 |
-
|
| 677 |
-
*Note: Range based on market data, condition, and mileage*
|
| 678 |
""")
|
| 679 |
|
| 680 |
# Generate and display the graph
|
| 681 |
-
fig = create_market_trends_plot_with_model(model,
|
| 682 |
if fig:
|
| 683 |
st.pyplot(fig)
|
| 684 |
else:
|
| 685 |
st.warning("No graph generated. Please check your data or selection.")
|
| 686 |
|
| 687 |
with col2:
|
| 688 |
-
create_assistant_section()
|
| 689 |
|
| 690 |
if __name__ == "__main__":
|
| 691 |
try:
|
|
@@ -693,11 +701,8 @@ if __name__ == "__main__":
|
|
| 693 |
original_data = load_datasets()
|
| 694 |
model, label_encoders = load_model_and_encodings()
|
| 695 |
|
| 696 |
-
# Inspect model features
|
| 697 |
-
inspect_model_features(model)
|
| 698 |
-
|
| 699 |
# Call the main function
|
| 700 |
-
main(model, label_encoders)
|
| 701 |
except Exception as e:
|
| 702 |
st.error(f"Error loading data or models: {str(e)}")
|
| 703 |
-
st.stop()
|
|
|
|
| 411 |
</div>
|
| 412 |
""", unsafe_allow_html=True)
|
| 413 |
|
| 414 |
+
def search_dataset(dataset, make, model=None):
    """
    Look up rows of *dataset* for a given make (and optionally model).

    Matching is case-insensitive on the 'Make' and 'Model' columns. When at
    least one row matches, up to five rows are returned as a DataFrame with
    the Year/Make/Model/Price columns; otherwise None is returned.
    """
    # Case-insensitive filter on make first, then narrow by model if given.
    make_mask = dataset['Make'].str.lower() == make.lower()
    query = dataset[make_mask]
    if model:
        model_mask = query['Model'].str.lower() == model.lower()
        query = query[model_mask]

    # Guard clause: no relevant data found in the dataset.
    if query.empty:
        return None

    # Return a small, display-friendly slice of the matches.
    return query[['Year', 'Make', 'Model', 'Price']].head(5)
|
| 431 |
|
| 432 |
+
# --- Updated GPT Functionality ---
def generate_gpt_response(prompt, dataset):
    """
    Answer a car-shopping question, preferring dataset lookups over GPT.

    First scans *prompt* for a known make (and optionally model) and, when a
    dataset match exists, renders the matching rows via Streamlit and returns
    a short pointer message. Only when no dataset match is found does it fall
    back to the OpenAI chat API.

    Args:
        prompt: The user's free-text question.
        dataset: DataFrame with 'Make' and 'Model' columns (search_dataset
            additionally reads 'Year' and 'Price').

    Returns:
        A string reply for the user.
    """
    import os

    # Hoist vocabulary lookups out of the word loop: computing
    # .str.lower().unique() per token would rescan the whole column once
    # for every word in the prompt. Sets also make membership tests O(1).
    known_makes = set(dataset['Make'].str.lower().unique())
    known_models = set(dataset['Model'].str.lower().unique())

    # Simplified keyword spotting (no real NLP): the last token matching a
    # known make/model wins; multi-word names will not be recognized.
    make = None
    model = None
    for word in prompt.lower().split():
        if word in known_makes:
            make = word
        elif word in known_models:
            model = word

    # If we find relevant data, surface it directly instead of calling GPT.
    if make:
        dataset_response = search_dataset(dataset, make, model)
        if dataset_response is not None:
            st.write("### Dataset Match Found")
            st.dataframe(dataset_response)  # Show results to the user
            return f"I found some information in our dataset about {make.title()} {model.title() if model else ''}. Please see the details above."

    # No match found: fall back to a GPT response.
    # SECURITY: never hard-code an API key in source — read it from the
    # environment instead (set OPENAI_API_KEY before running the app).
    openai.api_key = os.environ.get("OPENAI_API_KEY", openai.api_key)
    system_message = {
        "role": "system",
        "content": (
            "You are a helpful car shopping assistant. Provide car recommendations or pricing estimates. "
            "If the dataset lacks information, generate an appropriate response."
        )
    }
    messages = [system_message, {"role": "user", "content": prompt}]

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        max_tokens=500,
        temperature=0.7,
    )

    return response['choices'][0]['message']['content']
|
| 477 |
|
| 478 |
+
# --- Assistant Section ---
|
| 479 |
+
def create_assistant_section(dataset):
|
|
|
|
|
|
|
|
|
|
|
|
|
| 480 |
st.markdown("""
|
| 481 |
<div style='background-color: #f8f9fa; padding: 1.5rem; border-radius: 10px; margin-bottom: 1rem;'>
|
| 482 |
<h2 style='color: #1E1E1E; margin-top: 0;'>🤖 Car Shopping Assistant</h2>
|
|
|
|
| 487 |
if "assistant_responses" not in st.session_state:
|
| 488 |
st.session_state.assistant_responses = []
|
| 489 |
|
| 490 |
+
prompt = st.text_input("Ask about car recommendations or pricing...",
|
| 491 |
placeholder="Type your question here...")
|
| 492 |
|
| 493 |
if prompt:
|
| 494 |
try:
|
| 495 |
+
response = generate_gpt_response(prompt, dataset)
|
|
|
|
| 496 |
st.session_state.assistant_responses.append(response)
|
| 497 |
except Exception as e:
|
| 498 |
response = f"Sorry, I encountered an error: {str(e)}"
|
|
|
|
| 511 |
if st.button("Clear Chat"):
|
| 512 |
st.session_state.assistant_responses = []
|
| 513 |
st.experimental_rerun()
|
|
|
|
| 514 |
# --- Prediction Interface ---
|
| 515 |
def create_prediction_interface():
|
| 516 |
with st.sidebar:
|
|
|
|
| 662 |
'max_price': max_price
|
| 663 |
}
|
| 664 |
# --- Main Application ---
|
| 665 |
+
def main(model, label_encoders, dataset):
|
| 666 |
col1, col2 = st.columns([2, 1])
|
| 667 |
|
| 668 |
with col1:
|
|
|
|
| 675 |
|
| 676 |
inputs, predict_button = create_prediction_interface()
|
| 677 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 678 |
if predict_button:
|
| 679 |
+
st.write(f"Analyzing {inputs['year']} {inputs['make'].title()} {inputs['model'].title()}...")
|
| 680 |
+
prediction_results = predict_with_ranges(inputs, model, label_encoders)
|
| 681 |
|
| 682 |
st.markdown(f"""
|
| 683 |
### Price Analysis
|
| 684 |
- **Estimated Range**: ${prediction_results['min_price']:,.2f} - ${prediction_results['max_price']:,.2f}
|
| 685 |
- **Model Prediction**: ${prediction_results['predicted_price']:,.2f}
|
|
|
|
|
|
|
| 686 |
""")
|
| 687 |
|
| 688 |
# Generate and display the graph
|
| 689 |
+
fig = create_market_trends_plot_with_model(model, inputs["make"], inputs, label_encoders)
|
| 690 |
if fig:
|
| 691 |
st.pyplot(fig)
|
| 692 |
else:
|
| 693 |
st.warning("No graph generated. Please check your data or selection.")
|
| 694 |
|
| 695 |
with col2:
|
| 696 |
+
create_assistant_section(dataset)
|
| 697 |
|
| 698 |
if __name__ == "__main__":
|
| 699 |
try:
|
|
|
|
| 701 |
original_data = load_datasets()
|
| 702 |
model, label_encoders = load_model_and_encodings()
|
| 703 |
|
|
|
|
|
|
|
|
|
|
| 704 |
# Call the main function
|
| 705 |
+
main(model, label_encoders, original_data)
|
| 706 |
except Exception as e:
|
| 707 |
st.error(f"Error loading data or models: {str(e)}")
|
| 708 |
+
st.stop()
|