Faham committed
Commit · d919881
Parent(s): 4b35e49

UPDATE: loading the model from google drive
Browse files:
- .gitignore +58 -7
- app.py +144 -154
- debug_drive.py +185 -0
- {models → notebooks}/audio_sentiment_analysis.ipynb +0 -0
- {models → notebooks}/vision_sentiment_analysis.ipynb +0 -0
- pyproject.toml +4 -1
- requirements.txt +5 -2
- simple_model_manager.py +410 -0
- test_download.py +49 -0
- test_drive_links.py +96 -0
- uv.lock +226 -0
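The core change in this commit is that model weights are no longer stored in the repository; they are fetched at runtime from Google Drive via gdown. A minimal sketch of that download step, assuming a hypothetical Drive file ID and output filename (the real IDs come from environment variables, not shown here):

```python
import gdown  # added to the project's dependencies in this commit

# Hypothetical file ID; the actual IDs are read from a .env file at runtime.
file_id = "1AbCdEfGhIjKlMnOpQrStUvWxYz012345"
gdown.download(id=file_id, output="models/resnet50_model.pth", quiet=False, fuzzy=True)
```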
.gitignore
CHANGED
@@ -1,14 +1,65 @@
+# Model files
+*.pth
+models/*.pth
+*.pt
+*.pkl
+*.h5
+*.onnx
+*.pb
+
+# Environment variables
+.env
+.env.local
+.env.*.local
+
+# Python
 __pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
 build/
+develop-eggs/
 dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
 wheels/
-*.egg-info
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
 
 # Virtual environments
+venv/
+env/
+ENV/
+.venv/
+.env/
 
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+Thumbs.db
+desktop.ini
+
+# Logs
+*.log
+logs/
+
+# Cache
+.cache/
+.pytest_cache/
+
+models/
app.py
CHANGED
@@ -10,6 +10,9 @@ import torch.nn as nn
 from torchvision import transforms, models
 import torch.nn.functional as F
 
+# Import the Google Drive model manager
+from simple_model_manager import SimpleModelManager
+
 # Page configuration
 st.set_page_config(
     page_title="Sentiment Analysis Testing Ground",

@@ -57,48 +60,39 @@ st.markdown(
 )
 
 
+# Initialize the Google Drive model manager
+@st.cache_resource
+def get_model_manager():
+    """Get the Google Drive model manager instance"""
+    try:
+        manager = SimpleModelManager()
+        return manager
+    except Exception as e:
+        st.error(f"Failed to initialize model manager: {e}")
+        return None
+
+
 # Global variables for models
 @st.cache_resource
 def load_vision_model():
-    """Load the pre-trained ResNet-50 vision sentiment model"""
+    """Load the pre-trained ResNet-50 vision sentiment model from Google Drive"""
     try:
-            return None
-
-        # Load the model weights first to check the architecture
-        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        checkpoint = torch.load(model_path, map_location=device)
-
-        # Check the number of classes from the checkpoint
-        if "fc.weight" in checkpoint:
-            num_classes = checkpoint["fc.weight"].shape[0]
-            st.info(f"📊 Model checkpoint has {num_classes} output classes")
-        else:
-            # Fallback: try to infer from the last layer
-            num_classes = 3  # Default assumption
-            st.warning(
-                "⚠️ Could not determine number of classes from checkpoint, assuming 3"
-            )
-
-        # Initialize ResNet-50 model with the correct number of classes
-        # Note: Your model was trained with RGB images, so we keep 3 channels
-        model = models.resnet50(weights=None)  # Don't load ImageNet weights
-        model.eval()
+        manager = get_model_manager()
+        if manager is None:
+            st.error("Model manager not available")
+            return None, None, None
+
+        # Load the model using the Google Drive manager
+        model, device, num_classes = manager.load_vision_model()
+
+        if model is None:
+            st.error("Failed to load vision model from Google Drive")
+            return None, None, None
+
+        st.success(f"Vision model loaded successfully with {num_classes} classes!")
         return model, device, num_classes
     except Exception as e:
+        st.error(f"Error loading vision model: {str(e)}")
         return None, None, None

@@ -154,7 +148,7 @@ def detect_and_preprocess_face(image, crop_tightness=0.05):
         )
 
         if len(faces) == 0:
+            st.warning("No face detected in the image. Using center crop instead.")
             # Fallback: center crop and resize
             if isinstance(image, Image.Image):
                 # Convert to RGB first

@@ -213,7 +207,7 @@ def detect_and_preprocess_face(image, crop_tightness=0.05):
 
    except ImportError:
        st.error(
+            "OpenCV not installed. Please install it with: pip install opencv-python"
        )
        st.info("Falling back to basic preprocessing...")
        # Fallback: basic grayscale conversion and resize

@@ -226,7 +220,7 @@ def detect_and_preprocess_face(image, crop_tightness=0.05):
            return gray_rgb_pil
        return None
    except Exception as e:
+        st.error(f"Error in face detection: {str(e)}")
        st.info("Falling back to basic preprocessing...")
        # Fallback: basic grayscale conversion and resize
        if isinstance(image, Image.Image):

@@ -299,50 +293,39 @@ def predict_text_sentiment(text):
         return sentiment, confidence
 
     except ImportError:
-        st.error(
-            "❌ TextBlob not installed. Please install it with: pip install textblob"
-        )
+        st.error("TextBlob not installed. Please install it with: pip install textblob")
         return "TextBlob not available", 0.0
     except Exception as e:
+        st.error(f"Error in text sentiment analysis: {str(e)}")
         return "Error occurred", 0.0
 
 
 @st.cache_resource
 def load_audio_model():
-    """Load the pre-trained Wav2Vec2 audio sentiment model"""
+    """Load the pre-trained Wav2Vec2 audio sentiment model from Google Drive"""
     try:
-            st.error(f"❌ Audio model file not found at: {model_path}")
+        manager = get_model_manager()
+        if manager is None:
+            st.error("Model manager not available")
             return None, None, None, None
 
-        # Load the model
-        checkpoint = torch.load(model_path, map_location=device)
-            st.info(f"📊 Audio model checkpoint has {num_classes} output classes")
-        else:
-            num_classes = 3  # Default assumption
-            st.warning(
-                "⚠️ Could not determine number of classes from checkpoint, assuming 3"
-            )
-
-        # Initialize Wav2Vec2 model with the correct number of classes
-        from transformers import AutoModelForAudioClassification
-
-        model = AutoModelForAudioClassification.from_pretrained(
-            "facebook/wav2vec2-base", num_labels=num_classes
-        )
+        # Load the model using the Google Drive manager
+        model, device = manager.load_audio_model()
+
+        if model is None:
+            st.error("Failed to load audio model from Google Drive")
+            return None, None, None, None
+
+        # For Wav2Vec2 models, we need to determine the number of classes
+        # This is typically available in the model configuration
+        try:
+            num_classes = model.config.num_labels
+        except:
+            # Fallback: try to infer from the model
+            try:
+                num_classes = model.classifier.out_features
+            except:
+                num_classes = 3  # Default assumption
 
         # Load feature extractor
         from transformers import AutoFeatureExtractor

@@ -351,10 +334,10 @@ def load_audio_model():
             "facebook/wav2vec2-base"
         )
 
+        st.success(f"Audio model loaded successfully with {num_classes} classes!")
         return model, device, num_classes, feature_extractor
     except Exception as e:
+        st.error(f"Error loading audio model: {str(e)}")
         return None, None, None, None

@@ -430,11 +413,11 @@ def predict_audio_sentiment(audio_bytes):
             os.unlink(tmp_file_path)
 
     except ImportError as e:
+        st.error(f"Required library not installed: {str(e)}")
         st.info("Please install: pip install librosa transformers")
         return "Library not available", 0.0
     except Exception as e:
+        st.error(f"Error in audio sentiment prediction: {str(e)}")
         return "Error occurred", 0.0

@@ -457,7 +440,7 @@ def predict_vision_sentiment(image, crop_tightness=0.05):
 
     # Preprocess image to match FER2013 format
     st.info(
+        "Detecting face and preprocessing image to match training data format..."
     )
     preprocessed_image = detect_and_preprocess_face(image, crop_tightness=0.0)
 

@@ -482,7 +465,7 @@ def predict_vision_sentiment(image, crop_tightness=0.05):
         outputs = model(image_tensor)
 
         # Debug: print output shape
+        st.info(f"Model output shape: {outputs.shape}")
 
         probabilities = F.softmax(outputs, dim=1)
         confidence, predicted = torch.max(probabilities, 1)

@@ -541,23 +524,23 @@ def predict_fused_sentiment(text=None, audio_bytes=None, image=None):
 
 
 # Sidebar navigation
+st.sidebar.title("Sentiment Analysis")
 st.sidebar.markdown("---")
 
 # Navigation
 page = st.sidebar.selectbox(
     "Choose a page:",
     [
+        "Home",
+        "Text Sentiment",
+        "Audio Sentiment",
+        "Vision Sentiment",
+        "Fused Model",
     ],
 )
 
 # Home Page
+if page == "Home":
     st.markdown(
         '<h1 class="main-header">Sentiment Analysis Testing Ground</h1>',
         unsafe_allow_html=True,

@@ -579,8 +562,8 @@ if page == "🏠 Home":
     st.markdown(
         """
     <div class="model-card">
+        <h3>Text Sentiment Model</h3>
+        <p>READY TO USE - Analyze sentiment from text input using TextBlob</p>
         <ul>
             <li>Process any text input</li>
             <li>Get sentiment classification (Positive/Negative/Neutral)</li>

@@ -596,12 +579,12 @@ if page == "🏠 Home":
     st.markdown(
         """
     <div class="model-card">
+        <h3>Audio Sentiment Model</h3>
+        <p>READY TO USE - Analyze sentiment from audio files using fine-tuned Wav2Vec2</p>
         <ul>
             <li>Upload audio files (.wav, .mp3, .m4a, .flac)</li>
+            <li>Record audio directly with microphone (max 5s)</li>
+            <li>Automatic preprocessing: 16kHz sampling, 5s max duration (CREMA-D + RAVDESS format)</li>
             <li>Listen to uploaded/recorded audio</li>
             <li>Get sentiment predictions</li>
             <li>Real-time audio analysis</li>

@@ -615,14 +598,14 @@ if page == "🏠 Home":
     st.markdown(
         """
     <div class="model-card">
+        <h3>Vision Sentiment Model</h3>
         <p>Analyze sentiment from images using fine-tuned ResNet-50</p>
         <ul>
             <li>Upload image files (.png, .jpg, .jpeg, .bmp, .tiff)</li>
+            <li>Automatic face detection & preprocessing</li>
+            <li>Fixed 0% padding for tightest face crop</li>
+            <li>Convert to 224x224 grayscale → 3-channel RGB (FER2013 format)</li>
+            <li>Transforms: Resize(224) → CenterCrop(224) → ImageNet Normalization</li>
             <li>Preview original & preprocessed images</li>
             <li>Get sentiment predictions</li>
         </ul>

@@ -634,7 +617,7 @@ if page == "🏠 Home":
     st.markdown(
         """
     <div class="model-card">
+        <h3>Fused Model</h3>
         <p>Combine predictions from all three models for enhanced accuracy</p>
         <ul>
             <li>Multi-modal input processing</li>

@@ -650,16 +633,17 @@ if page == "🏠 Home":
     st.markdown(
         """
     <div style="text-align: center; color: #666;">
+        <p><strong>Note:</strong> This application now has <strong>ALL THREE MODELS</strong> fully integrated and ready to use!</p>
         <p><strong>TextBlob</strong> (Text) + <strong>Wav2Vec2</strong> (Audio) + <strong>ResNet-50</strong> (Vision)</p>
+        <p><strong>Models are now loaded from Google Drive automatically!</strong></p>
     </div>
     """,
         unsafe_allow_html=True,
     )
 
 # Text Sentiment Page
+elif page == "Text Sentiment":
+    st.title("Text Sentiment Analysis")
     st.markdown("Analyze the sentiment of your text using our TextBlob-based model.")
 
     # Text input

@@ -670,7 +654,7 @@ elif page == "📝 Text Sentiment":
     )
 
     # Analyze button
+    if st.button("Analyze Sentiment", type="primary", use_container_width=True):
         if text_input and text_input.strip():
             with st.spinner("Analyzing text sentiment..."):
                 sentiment, confidence = predict_text_sentiment(text_input)

@@ -707,37 +691,41 @@ elif page == "📝 Text Sentiment":
         st.error("Please enter some text to analyze.")
 
 # Audio Sentiment Page
+elif page == "Audio Sentiment":
+    st.title("Audio Sentiment Analysis")
     st.markdown(
         "Analyze the sentiment of your audio files using our fine-tuned Wav2Vec2 model."
     )
 
     # Preprocessing information
     st.info(
+        "**Audio Preprocessing**: Audio will be automatically processed to match CREMA-D + RAVDESS training format: "
         "16kHz sampling rate, max 5 seconds, with automatic resampling and feature extraction."
     )
 
     # Model status
     model, device, num_classes, feature_extractor = load_audio_model()
     if model is None:
+        st.error(
+            "Audio model could not be loaded. Please check the Google Drive setup."
+        )
+        st.info(
+            "Expected: Models should be configured in Google Drive and accessible via the model manager."
+        )
     else:
         st.success(
+            f"Audio model loaded successfully on {device} with {num_classes} classes!"
         )
 
     # Input method selection
+    st.subheader("Choose Input Method")
     input_method = st.radio(
         "Select how you want to provide audio:",
+        ["Upload Audio File", "Record Audio"],
         horizontal=True,
     )
 
+    if input_method == "Upload Audio File":
         # File uploader
         uploaded_audio = st.file_uploader(
             "Choose an audio file",

@@ -752,7 +740,7 @@ elif page == "🎵 Audio Sentiment":
         st.markdown(
             """
         <div class="model-card">
+            <h3>Audio Recording</h3>
             <p>Record audio directly with your microphone (max 5 seconds).</p>
             <p><strong>Note:</strong> Make sure your microphone is accessible and you have permission to use it.</p>
         </div>

@@ -769,7 +757,7 @@ elif page == "🎵 Audio Sentiment":
         if recorded_audio is not None:
             # Display recorded audio
             st.audio(recorded_audio, format="audio/wav")
+            st.success("Audio recorded successfully!")
 
             # Convert recorded audio to bytes for processing
             uploaded_audio = recorded_audio

@@ -784,21 +772,21 @@ elif page == "🎵 Audio Sentiment":
         # Display audio player
         if audio_source == "recorded":
             st.audio(uploaded_audio, format="audio/wav")
+            st.info(f"{audio_name} | Source: Microphone Recording")
         else:
             st.audio(
                 uploaded_audio, format=f'audio/{uploaded_audio.name.split(".")[-1]}'
             )
             # File info for uploaded files
             file_size = len(uploaded_audio.getvalue()) / 1024  # KB
+            st.info(f"File: {uploaded_audio.name} | Size: {file_size:.1f} KB")
 
         # Analyze button
         if st.button(
+            "Analyze Audio Sentiment", type="primary", use_container_width=True
         ):
             if model is None:
+                st.error("Model not loaded. Cannot analyze audio.")
             else:
                 with st.spinner("Analyzing audio sentiment..."):
                     audio_bytes = uploaded_audio.getvalue()

@@ -828,46 +816,48 @@ elif page == "🎵 Audio Sentiment":
                 unsafe_allow_html=True,
             )
     else:
+        if input_method == "Upload Audio File":
+            st.info("Please upload an audio file to begin analysis.")
         else:
+            st.info("Click the microphone button above to record audio for analysis.")
 
 # Vision Sentiment Page
+elif page == "Vision Sentiment":
+    st.title("Vision Sentiment Analysis")
     st.markdown(
         "Analyze the sentiment of your images using our fine-tuned ResNet-50 model."
     )
 
     st.info(
+        "**Note**: Images will be automatically preprocessed to match FER2013 format: face detection, grayscale conversion, and 224x224 resize (converted to 3-channel RGB)."
     )
 
     # Face cropping is set to 0% (no padding) for tightest crop
+    st.info("**Face Cropping**: Set to 0% padding for tightest crop on facial features")
 
     # Model status
     model, device, num_classes = load_vision_model()
     if model is None:
+        st.error(
+            "Vision model could not be loaded. Please check the Google Drive setup."
+        )
+        st.info(
+            "Expected: Models should be configured in Google Drive and accessible via the model manager."
+        )
     else:
         st.success(
+            f"Vision model loaded successfully on {device} with {num_classes} classes!"
         )
 
     # Input method selection
+    st.subheader("Choose Input Method")
     input_method = st.radio(
         "Select how you want to provide an image:",
+        ["Upload Image File", "Take Photo with Camera"],
         horizontal=True,
     )
 
+    if input_method == "Upload Image File":
         # File uploader
         uploaded_image = st.file_uploader(
             "Choose an image file",

@@ -887,15 +877,15 @@ elif page == "🖼️ Vision Sentiment":
             # File info
             file_size = len(uploaded_image.getvalue()) / 1024  # KB
             st.info(
+                f"File: {uploaded_image.name} | Size: {file_size:.1f} KB | Dimensions: {image.size[0]}x{image.size[1]}"
             )
 
             # Analyze button
             if st.button(
+                "Analyze Image Sentiment", type="primary", use_container_width=True
             ):
                 if model is None:
+                    st.error("Model not loaded. Cannot analyze image.")
                 else:
                     with st.spinner("Analyzing image sentiment..."):
                         sentiment, confidence = predict_vision_sentiment(image)

@@ -932,7 +922,7 @@ elif page == "🖼️ Vision Sentiment":
         st.markdown(
             """
         <div class="model-card">
+            <h3>Camera Capture</h3>
             <p>Take a photo directly with your camera to analyze its sentiment.</p>
             <p><strong>Note:</strong> Make sure your camera is accessible and you have permission to use it.</p>
         </div>

@@ -957,15 +947,15 @@ elif page == "🖼️ Vision Sentiment":
 
             # Image info
             st.info(
+                f"Captured Photo | Dimensions: {image.size[0]}x{image.size[1]} | Format: {image.format}"
             )
 
             # Analyze button
             if st.button(
+                "Analyze Photo Sentiment", type="primary", use_container_width=True
             ):
                 if model is None:
+                    st.error("Model not loaded. Cannot analyze image.")
                 else:
                     with st.spinner("Analyzing photo sentiment..."):
                         sentiment, confidence = predict_vision_sentiment(image)

@@ -999,14 +989,14 @@ elif page == "🖼️ Vision Sentiment":
     )
 
     # Show info if no image is provided
+    if input_method == "Upload Image File" and "uploaded_image" not in locals():
+        st.info("Please upload an image file to begin analysis.")
+    elif input_method == "Take Photo with Camera" and "camera_photo" not in locals():
+        st.info("Click the camera button above to take a photo for analysis.")
 
 # Fused Model Page
+elif page == "Fused Model":
+    st.title("Fused Model Analysis")
     st.markdown(
         "Combine predictions from all three models for enhanced sentiment analysis."
     )

@@ -1026,30 +1016,30 @@ elif page == "🔗 Fused Model":
     col1, col2 = st.columns(2)
 
     with col1:
+        st.subheader("Text Input")
         text_input = st.text_area(
             "Enter text (optional):",
             height=100,
             placeholder="Type or paste your text here...",
         )
 
+        st.subheader("Audio Input")
 
         # Audio preprocessing information for fused model
         st.info(
+            "**Audio Preprocessing**: Audio will be automatically processed to match CREMA-D + RAVDESS training format: "
             "16kHz sampling rate, max 5 seconds, with automatic resampling and feature extraction."
         )
 
         # Audio input method for fused model
         audio_input_method = st.radio(
             "Audio input method:",
+            ["Upload File", "Record Audio"],
             key="fused_audio_method",
             horizontal=True,
         )
 
+        if audio_input_method == "Upload File":
             uploaded_audio = st.file_uploader(
                 "Upload audio file (optional):",
                 type=["wav", "mp3", "m4a", "flac"],

@@ -1067,7 +1057,7 @@ elif page == "🔗 Fused Model":
 
             if recorded_audio is not None:
                 st.audio(recorded_audio, format="audio/wav")
+                st.success("Audio recorded successfully!")
                 uploaded_audio = recorded_audio
                 audio_source = "recorded"
                 audio_name = "Recorded Audio"

@@ -1077,22 +1067,22 @@ elif page == "🔗 Fused Model":
             audio_name = None
 
     with col2:
+        st.subheader("Image Input")
 
         # Face cropping is set to 0% (no padding) for tightest crop
         st.info(
+            "**Face Cropping**: Set to 0% padding for tightest crop on facial features"
         )
 
         # Image input method for fused model
         image_input_method = st.radio(
             "Image input method:",
+            ["Upload File", "Take Photo"],
             key="fused_image_method",
             horizontal=True,
         )
 
+        if image_input_method == "Upload File":
             uploaded_image = st.file_uploader(
                 "Upload image file (optional):",
                 type=["png", "jpg", "jpeg", "bmp", "tiff"],

@@ -1122,7 +1112,7 @@ elif page == "🔗 Fused Model":
     )
 
     # Analyze button
+    if st.button("Run Fused Analysis", type="primary", use_container_width=True):
         if text_input or uploaded_audio or uploaded_image:
             with st.spinner("Running fused sentiment analysis..."):
                 # Prepare inputs

@@ -1154,7 +1144,7 @@ elif page == "🔗 Fused Model":
                 text_sentiment, text_conf = predict_text_sentiment(text_input)
                 results_data.append(
                     {
+                        "Model": "Text (TextBlob)",
                         "Input": f"Text: {text_input[:50]}...",
                         "Sentiment": text_sentiment,
                         "Confidence": f"{text_conf:.2f}",

@@ -1165,7 +1155,7 @@ elif page == "🔗 Fused Model":
                 audio_sentiment, audio_conf = predict_audio_sentiment(audio_bytes)
                 results_data.append(
                     {
+                        "Model": "Audio (Wav2Vec2)",
                         "Input": f"Audio: {audio_name}",
                         "Sentiment": audio_sentiment,
                         "Confidence": f"{audio_conf:.2f}",

@@ -1205,7 +1195,7 @@ elif page == "🔗 Fused Model":
             )
         else:
             st.warning(
+                "Please provide at least one input (text, audio, or image) for fused analysis."
             )
 
 # Footer
debug_drive.py
ADDED
@@ -0,0 +1,185 @@
#!/usr/bin/env python3
"""
Debug Google Drive download issues
"""

import os
import requests
import re
from pathlib import Path


# Load .env file manually
def load_env():
    env_file = Path(".env")
    if env_file.exists():
        with open(env_file, "r") as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith("#") and "=" in line:
                    key, value = line.split("=", 1)
                    os.environ[key.strip()] = value.strip().strip('"')


def test_drive_bypass(file_id):
    """Test different bypass methods"""
    print(f"Testing file ID: {file_id}")
    print("=" * 50)

    # Method 1: Direct bypass
    print("\n1. Testing direct bypass...")
    try:
        url = f"https://drive.usercontent.google.com/download?id={file_id}&export=download&confirm=t"
        response = requests.get(url, stream=True)
        print(f"Status: {response.status_code}")
        print(f"Content-Type: {response.headers.get('content-type', 'Unknown')}")

        first_chunk = next(response.iter_content(chunk_size=1024), b"")
        if first_chunk.startswith(b"<!DOCTYPE") or first_chunk.startswith(b"<html"):
            print("❌ Still getting HTML")
            html_content = first_chunk.decode("utf-8", errors="ignore")
            print(f"HTML preview: {html_content[:200]}...")
        else:
            print("✅ Got file content!")
            print(f"First bytes: {first_chunk[:50]}")
            return True
    except Exception as e:
        print(f"❌ Error: {e}")

    # Method 2: Session-based approach
    print("\n2. Testing session-based approach...")
    try:
        session = requests.Session()
        session.headers.update(
            {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
                "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
                "Accept-Language": "en-US,en;q=0.5",
            }
        )

        # First get the virus scan page
        virus_url = f"https://drive.google.com/uc?export=download&id={file_id}"
        response = session.get(virus_url)
        print(f"Virus page status: {response.status_code}")

        # Extract confirm and UUID
        html_content = response.text
        confirm_match = re.search(r'name="confirm" value="([^"]+)"', html_content)
        uuid_match = re.search(r'name="uuid" value="([^"]+)"', html_content)

        if confirm_match and uuid_match:
            confirm_value = confirm_match.group(1)
            uuid_value = uuid_match.group(1)
            print(f"Found confirm: {confirm_value}")
            print(f"Found UUID: {uuid_value}")

            # Submit form
            form_data = {
                "id": file_id,
                "export": "download",
                "confirm": confirm_value,
                "uuid": uuid_value,
            }
            form_url = "https://drive.usercontent.google.com/download"
            response = session.post(form_url, data=form_data, stream=True)

            print(f"Form submission status: {response.status_code}")
            first_chunk = next(response.iter_content(chunk_size=1024), b"")

            if first_chunk.startswith(b"<!DOCTYPE") or first_chunk.startswith(b"<html"):
                print("❌ Form submission still returned HTML")
            else:
                print("✅ Form submission successful!")
                return True
        else:
            print("❌ Could not extract confirm/UUID")
            print(f"HTML preview: {html_content[:300]}...")

    except Exception as e:
        print(f"❌ Error: {e}")

    # Method 3: Extract download URL from file page
    print("\n3. Testing file page extraction...")
    try:
        session = requests.Session()
        session.headers.update(
            {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
                "Referer": "https://drive.google.com/",
            }
        )

        file_url = f"https://drive.google.com/file/d/{file_id}/view"
        response = session.get(file_url)
        print(f"File page status: {response.status_code}")

        if response.status_code == 200:
            # Look for download URL in the page
            download_match = re.search(r'"downloadUrl":"([^"]+)"', response.text)
            if download_match:
                download_url = (
                    download_match.group(1)
                    .replace("\\u003d", "=")
                    .replace("\\u0026", "&")
                )
                print(f"Found download URL: {download_url}")

                # Try downloading from this URL
                response = session.get(download_url, stream=True)
                first_chunk = next(response.iter_content(chunk_size=1024), b"")

                if first_chunk.startswith(b"<!DOCTYPE") or first_chunk.startswith(
                    b"<html"
                ):
                    print("❌ Download URL still returned HTML")
                else:
                    print("✅ Download URL successful!")
                    return True
            else:
                print("❌ Could not find download URL in page")
        else:
            print(f"❌ Could not access file page")

    except Exception as e:
        print(f"❌ Error: {e}")

    print("\n❌ All methods failed")
    return False


def main():
    print("Google Drive Bypass Debug Tool")
    print("=" * 50)

    # Load environment variables
    load_env()

    # Get file ID from environment or user input
    vision_url = os.getenv("VISION_MODEL_DRIVE_LINK", "")
    audio_url = os.getenv("AUDIO_MODEL_DRIVE_LINK", "")

    if not vision_url and not audio_url:
        print("❌ No environment variables found!")
        print("Please set VISION_MODEL_DRIVE_LINK or AUDIO_MODEL_DRIVE_LINK")
        return

    if vision_url:
        print(f"\nTesting Vision Model URL: {vision_url}")
        if "/file/d/" in vision_url:
            file_id = vision_url.split("/file/d/")[1].split("/")[0]
            test_drive_bypass(file_id)
        else:
            print("❌ Invalid vision model URL format")

    if audio_url:
        print(f"\nTesting Audio Model URL: {audio_url}")
        if "/file/d/" in audio_url:
            file_id = audio_url.split("/file/d/")[1].split("/")[0]
            test_drive_bypass(file_id)
        else:
            print("❌ Invalid audio model URL format")


if __name__ == "__main__":
    main()
{models → notebooks}/audio_sentiment_analysis.ipynb
RENAMED
File without changes
{models → notebooks}/vision_sentiment_analysis.ipynb
RENAMED
File without changes
pyproject.toml
CHANGED
@@ -4,4 +4,7 @@ version = "0.1.0"
 description = "Add your description here"
 readme = "README.md"
 requires-python = ">=3.9"
-dependencies = []
+dependencies = [
+    "gdown>=5.2.0",
+    "python-dotenv>=1.1.1",
+]
requirements.txt
CHANGED
@@ -3,10 +3,13 @@ pandas>=1.5.0
 Pillow>=9.0.0
 numpy>=1.21.0
 textblob>=0.17.0
-torch>=
-torchvision>=0.
+torch>=2.0.0
+torchvision>=0.15.0
 transformers>=4.21.0
 librosa>=0.9.0
 soundfile>=0.12.0
 opencv-python>=4.5.0
 accelerate>=0.20.0
+requests>=2.25.0
+python-dotenv>=0.19.0
+gdown>=4.7.0
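The new runtime dependencies (gdown, python-dotenv, requests) support the Drive-based loading. The manager added below reads its configuration from environment variables; a minimal sketch of that configuration, expressed in Python for illustration (variable names are taken from the getenv calls in simple_model_manager.py, values are placeholders only):

```python
import os

# Placeholders; real values are the Google Drive file IDs of the checkpoints.
os.environ["VISION_MODEL_DRIVE_ID"] = "<drive-file-id-for-resnet50_model.pth>"
os.environ["VISION_MODEL_FILENAME"] = "resnet50_model.pth"
os.environ["AUDIO_MODEL_DRIVE_ID"] = "<drive-file-id-for-wav2vec2_model.pth>"
os.environ["AUDIO_MODEL_FILENAME"] = "wav2vec2_model.pth"

from simple_model_manager import SimpleModelManager  # added in this commit

manager = SimpleModelManager()  # downloads into models/ on first use, then caches
```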
simple_model_manager.py
ADDED
@@ -0,0 +1,410 @@
#!/usr/bin/env python3
"""
Simple Model Manager - Downloads models from Google Drive using gdown
"""

import os
import gdown
from pathlib import Path
import logging
from typing import Tuple, Any
import torch
import torch.nn as nn
from torchvision import models

# Try to load .env file if it exists
try:
    from dotenv import load_dotenv

    load_dotenv()
except ImportError:
    # If python-dotenv is not installed, try to load .env manually
    env_file = Path(".env")
    if env_file.exists():
        with open(env_file, "r") as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith("#") and "=" in line:
                    key, value = line.split("=", 1)
                    os.environ[key.strip()] = value.strip()

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class SimpleModelManager:
    """Simple model manager that downloads models from Google Drive using gdown"""

    def __init__(self, model_dir: str = "models", cache_models: bool = True):
        """
        Initialize simple model manager

        Args:
            model_dir: Local directory to store models
            cache_models: Whether to cache models locally
        """
        self.model_dir = Path(model_dir)
        self.model_dir.mkdir(exist_ok=True)
        self.cache_models = cache_models

        # Load model links from environment variables
        self.model_links = {
            "vision": {
                "url": os.getenv("VISION_MODEL_DRIVE_ID", ""),
                "filename": os.getenv("VISION_MODEL_FILENAME", "resnet50_model.pth"),
                "description": "Vision sentiment analysis model",
            },
            "audio": {
                "url": os.getenv("AUDIO_MODEL_DRIVE_ID", ""),
                "filename": os.getenv("AUDIO_MODEL_FILENAME", "wav2vec2_model.pth"),
                "description": "Audio sentiment analysis model",
            },
        }

        # Validate that environment variables are set
        self._validate_environment()

    def _validate_environment(self):
        """Validate that required environment variables are set"""
        missing_vars = []

        if not self.model_links["vision"]["url"]:
            missing_vars.append("VISION_MODEL_DRIVE_LINK")

        if not self.model_links["audio"]["url"]:
            missing_vars.append("AUDIO_MODEL_DRIVE_LINK")

        if missing_vars:
            logger.warning(f"Missing environment variables: {', '.join(missing_vars)}")
            logger.warning("Please set these in your .env file or environment")
            logger.warning("Models will not be available until these are configured")

    def download_from_google_drive(self, share_url: str, filename: str) -> str:
        """
        Download file from Google Drive share link using gdown

        Args:
            share_url: Google Drive share link
            filename: Name to save the file as

        Returns:
            Path to downloaded file
        """
        try:
            local_path = self.model_dir / filename

            if local_path.exists() and self.cache_models:
                logger.info(f"Model already cached: {local_path}")
                return str(local_path)

            logger.info(f"Downloading {filename} from Google Drive using gdown...")

            # Use gdown to download the file
            # gdown automatically handles virus scan warnings and other Google Drive issues
            output_path = str(local_path)

            # Download with progress bar
            gdown.download(
                id=share_url,
                output=output_path,
                quiet=False,  # Show progress bar
                fuzzy=True,  # Handle various Google Drive URL formats
            )

            # Verify the file was downloaded
            if not Path(output_path).exists():
                raise FileNotFoundError(f"Download failed: {output_path} not found")

            file_size = Path(output_path).stat().st_size
            if file_size == 0:
                raise ValueError(f"Downloaded file is empty: {output_path}")

            logger.info(f"Successfully downloaded {filename} ({file_size} bytes)")
            return output_path

        except Exception as e:
            logger.error(f"Failed to download {filename}: {e}")
            raise

    def load_vision_model(self) -> Tuple[Any, torch.device, int]:
        """Load vision sentiment model"""
        try:
            model_info = self.model_links["vision"]

            # Check if URL is configured
            if not model_info["url"]:
                raise ValueError("VISION_MODEL_DRIVE_LINK environment variable not set")

            model_path = self.download_from_google_drive(
                model_info["url"], model_info["filename"]
            )

            # Validate the downloaded file
            if not Path(model_path).exists():
                raise FileNotFoundError(f"Model file not found at {model_path}")

            file_size = Path(model_path).stat().st_size
            if file_size == 0:
                raise ValueError(f"Model file is empty: {model_path}")

            # Check file header to see what type of file it is
            with open(model_path, "rb") as f:
                header = f.read(100)  # Read first 100 bytes

            logger.info(f"File size: {file_size} bytes")
            logger.info(f"File header (first 100 bytes): {header[:50]}...")

            # Try to detect file type
            if header.startswith(b"<"):
                raise ValueError(
                    f"File appears to be HTML/XML, not a PyTorch model: {model_path}"
                )
            elif header.startswith(b"\x89PNG"):
                raise ValueError(f"File appears to be a PNG image: {model_path}")
            elif header.startswith(b"\xff\xd8\xff"):
                raise ValueError(f"File appears to be a JPEG image: {model_path}")

            # For any other file type (including ZIP), try to load it directly as a PyTorch model
            logger.info(
                f"File appears to be a PyTorch model file, attempting to load directly..."
            )

            # Load the model
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            try:
                # Try loading the file directly as a PyTorch model
                checkpoint = torch.load(
                    model_path, map_location=device, weights_only=False
                )
                logger.info("Successfully loaded model file directly")
            except Exception as load_error:
                logger.error(f"Failed to load model directly: {load_error}")
                try:
                    # Try with weights only as fallback
                    checkpoint = torch.load(
                        model_path, map_location=device, weights_only=True
                    )
                    logger.info("Loaded with weights_only=True (weights only)")
                except Exception as fallback_error:
                    logger.error(
                        f"Failed to load with weights_only=True: {fallback_error}"
                    )
                    raise ValueError(
                        f"Cannot load model file {model_path}. File may be corrupted or in wrong format."
                    )

            # Initialize ResNet-50 model
            model = models.resnet50(weights=None)
            num_ftrs = model.fc.in_features

            # Determine number of classes from checkpoint
            if "fc.weight" in checkpoint:
                num_classes = checkpoint["fc.weight"].shape[0]
            else:
                num_classes = 3  # Default fallback

            model.fc = nn.Linear(num_ftrs, num_classes)
            model.load_state_dict(checkpoint)
            model.to(device)
            model.eval()

            logger.info(f"Vision model loaded successfully with {num_classes} classes!")
            return model, device, num_classes

        except Exception as e:
            logger.error(f"Failed to load vision model: {e}")
logger.error(f"Failed to load vision model: {e}")
|
| 217 |
+
raise
|
| 218 |
+
|
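Once loaded, the model is a plain torch.nn.Module; a minimal inference sketch, assuming standard 224x224 ImageNet preprocessing (the actual transform lives in the training notebook, so treat these values and the image path as assumptions):

    import torch
    import torch.nn.functional as F
    from PIL import Image
    from torchvision import transforms

    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    model, device, num_classes = SimpleModelManager().load_vision_model()
    batch = preprocess(Image.open("example.jpg").convert("RGB")).unsqueeze(0).to(device)

    with torch.no_grad():
        probs = F.softmax(model(batch), dim=1).squeeze(0)
    print(probs.tolist())  # one probability per sentiment class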
+    def load_audio_model(self) -> Tuple[Any, torch.device]:
+        """Load the audio sentiment model"""
+        try:
+            model_info = self.model_links["audio"]
+
+            # Check that the file ID is configured
+            if not model_info["url"]:
+                raise ValueError("AUDIO_MODEL_DRIVE_ID environment variable not set")
+
+            model_path = self.download_from_google_drive(
+                model_info["url"], model_info["filename"]
+            )
+
+            # Validate the downloaded file
+            if not Path(model_path).exists():
+                raise FileNotFoundError(f"Model file not found at {model_path}")
+
+            file_size = Path(model_path).stat().st_size
+            if file_size == 0:
+                raise ValueError(f"Model file is empty: {model_path}")
+
+            # Inspect the file header to see what was actually downloaded
+            with open(model_path, "rb") as f:
+                header = f.read(100)  # Read the first 100 bytes
+
+            logger.info(f"File size: {file_size} bytes")
+            logger.info(f"File header (first 50 bytes): {header[:50]}...")
+
+            # Detect common wrong downloads (error pages, images)
+            if header.startswith(b"<"):
+                raise ValueError(
+                    f"File appears to be HTML/XML, not a PyTorch model: {model_path}"
+                )
+            elif header.startswith(b"\x89PNG"):
+                raise ValueError(f"File appears to be a PNG image: {model_path}")
+            elif header.startswith(b"\xff\xd8\xff"):
+                raise ValueError(f"File appears to be a JPEG image: {model_path}")
+
+            # For any other file type (including the ZIP container that
+            # torch.save uses), try to load it directly as a PyTorch model
+            logger.info("File appears to be a PyTorch model file, attempting to load directly...")
+
+            # Load the checkpoint
+            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+            try:
+                # Try loading the file directly as a PyTorch model
+                checkpoint = torch.load(
+                    model_path, map_location=device, weights_only=False
+                )
+                logger.info("Successfully loaded model file directly")
+            except Exception as load_error:
+                logger.error(f"Failed to load model directly: {load_error}")
+                try:
+                    # Fall back to loading weights only
+                    checkpoint = torch.load(
+                        model_path, map_location=device, weights_only=True
+                    )
+                    logger.info("Loaded with weights_only=True")
+                except Exception as fallback_error:
+                    logger.error(
+                        f"Failed to load with weights_only=True: {fallback_error}"
+                    )
+                    raise ValueError(
+                        f"Cannot load model file {model_path}. File may be corrupted or in the wrong format."
+                    )
+
+            # Check whether we have a state dict or a full model object
+            if isinstance(checkpoint, dict) and "classifier.weight" in checkpoint:
+                # This is a state dictionary - initialize the model first
+                from transformers import AutoModelForAudioClassification
+
+                # Determine the number of classes from the checkpoint
+                num_classes = checkpoint["classifier.weight"].shape[0]
+
+                # Initialize a Wav2Vec2 model with the correct number of classes
+                model = AutoModelForAudioClassification.from_pretrained(
+                    "facebook/wav2vec2-base", num_labels=num_classes
+                )
+
+                # Load the state dictionary
+                model.load_state_dict(checkpoint)
+                model.to(device)
+                model.eval()
+
+                logger.info(
+                    f"Audio model loaded successfully with {num_classes} classes!"
+                )
+                return model, device
+            else:
+                # This is a full model object
+                model = checkpoint
+                model.to(device)
+                model.eval()
+
+                logger.info("Audio model loaded successfully!")
+                return model, device
+
+        except Exception as e:
+            logger.error(f"Failed to load audio model: {e}")
+            raise
+
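A matching inference sketch for the audio side, assuming the checkpoint expects 16 kHz input prepared with the stock Wav2Vec2 feature extractor (an assumption; the training notebook defines the real preprocessing):

    import torch
    from transformers import AutoFeatureExtractor

    model, device = SimpleModelManager().load_audio_model()
    extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")

    waveform = torch.zeros(16000)  # one second of silence as a stand-in signal
    inputs = extractor(waveform.numpy(), sampling_rate=16000, return_tensors="pt").to(device)

    with torch.no_grad():
        logits = model(**inputs).logits
    print(logits.softmax(dim=-1))  # one probability per sentiment class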
+    def update_model_links(self, vision_url: str = None, audio_url: str = None):
+        """Override the Google Drive file IDs for the models (optional)"""
+        if vision_url:
+            self.model_links["vision"]["url"] = vision_url
+            os.environ["VISION_MODEL_DRIVE_ID"] = vision_url
+        if audio_url:
+            self.model_links["audio"]["url"] = audio_url
+            os.environ["AUDIO_MODEL_DRIVE_ID"] = audio_url
+
+        logger.info("Model links updated!")
+
+    def list_cached_models(self) -> list:
+        """List all cached models"""
+        cached_models = []
+        for file_path in self.model_dir.glob("*.pth"):
+            cached_models.append(file_path.name)
+        return cached_models
+
+    def clear_cache(self):
+        """Delete all cached models"""
+        for file_path in self.model_dir.glob("*.pth"):
+            file_path.unlink()
+        logger.info("Cache cleared!")
+
+    def get_model_status(self) -> dict:
+        """Get the status of all models"""
+        status = {}
+        for model_type, info in self.model_links.items():
+            status[model_type] = {
+                "configured": bool(info["url"]),
+                "filename": info["filename"],
+                "cached": (self.model_dir / info["filename"]).exists(),
+                "url": info["url"] if info["url"] else "Not configured",
+            }
+        return status
+
+
+# Example usage
+if __name__ == "__main__":
+    # Initialize the manager
+    manager = SimpleModelManager()
+
+    # Check model status
+    status = manager.get_model_status()
+    print("Model Status:")
+    for model_type, info in status.items():
+        print(f"  {model_type}: {'✅' if info['configured'] else '❌'} {info['url']}")
+        if info["cached"]:
+            print(f"    📁 Cached: {info['filename']}")
+
+    # Load the models if they are configured
+    try:
+        if status["vision"]["configured"]:
+            vision_model, device, num_classes = manager.load_vision_model()
+            print(f"✅ Vision model loaded: {num_classes} classes")
+        else:
+            print("❌ Vision model not configured")
+
+        if status["audio"]["configured"]:
+            audio_model, device = manager.load_audio_model()
+            print("✅ Audio model loaded")
+        else:
+            print("❌ Audio model not configured")
+
+        if status["vision"]["configured"] and status["audio"]["configured"]:
+            print("\n🎉 All models loaded successfully!")
+        else:
+            print("\n⚠️ Some models are not configured")
+            print("Please set the following environment variables:")
+            print("  VISION_MODEL_DRIVE_ID")
+            print("  AUDIO_MODEL_DRIVE_ID")
+
+    except Exception as e:
+        print(f"Error loading models: {e}")
+        print("\nFor folder structures:")
+        print("  1. Navigate to each subfolder (Audio/Vision)")
+        print("  2. Right-click on each .pth file")
+        print("  3. Share -> Copy link")
+        print("  4. Use those direct file links instead of folder links")
+        print("\nNote: Downloaded files are used directly as PyTorch models.")
+        print("\nOr set environment variables in your .env file:")
+        print("  VISION_MODEL_DRIVE_ID=your_vision_model_file_id")
+        print("  AUDIO_MODEL_DRIVE_ID=your_audio_model_file_id")
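On the Streamlit side, a loader like this is typically wrapped in a cached resource so the Drive download happens once per process. A hedged sketch of the glue (hypothetical function name; the real wiring lives in the app.py changes in this commit):

    import streamlit as st
    from simple_model_manager import SimpleModelManager

    @st.cache_resource
    def get_vision_model():
        # Downloads from Google Drive on first call, then serves from cache
        return SimpleModelManager().load_vision_model()  # (model, device, num_classes)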
test_download.py
ADDED
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+"""
+Test the updated Google Drive download function
+"""
+
+from simple_model_manager import SimpleModelManager
+
+
+def test_download():
+    """Test the download function"""
+    print("Testing Google Drive Download Function")
+    print("=" * 50)
+
+    # Initialize the manager
+    manager = SimpleModelManager()
+
+    # Check model status
+    status = manager.get_model_status()
+    print("Model Status:")
+    for model_type, info in status.items():
+        print(f"  {model_type}: {'✅' if info['configured'] else '❌'} {info['url']}")
+        if info["cached"]:
+            print(f"    📁 Cached: {info['filename']}")
+
+    # Test the vision model download
+    if status["vision"]["configured"]:
+        print("\nTesting vision model download...")
+        try:
+            vision_model, device, num_classes = manager.load_vision_model()
+            print(f"✅ Vision model loaded: {num_classes} classes")
+        except Exception as e:
+            print(f"❌ Vision model failed: {e}")
+    else:
+        print("❌ Vision model not configured")
+
+    # Test the audio model download
+    if status["audio"]["configured"]:
+        print("\nTesting audio model download...")
+        try:
+            audio_model, device = manager.load_audio_model()
+            print("✅ Audio model loaded")
+        except Exception as e:
+            print(f"❌ Audio model failed: {e}")
+    else:
+        print("❌ Audio model not configured")
+
+
+if __name__ == "__main__":
+    test_download()
test_drive_links.py
ADDED
@@ -0,0 +1,96 @@
+#!/usr/bin/env python3
+"""
+Test Google Drive links directly to see what's being returned
+"""
+
+import requests
+import os
+from pathlib import Path
+
+
+def test_drive_link(url, filename):
+    """Test a Google Drive link directly"""
+    print(f"\nTesting: {filename}")
+    print(f"URL: {url}")
+
+    try:
+        # Convert a Google Drive share link to a direct download link
+        if "drive.google.com" in url:
+            if "/file/d/" in url:
+                file_id = url.split("/file/d/")[1].split("/")[0]
+            elif "id=" in url:
+                file_id = url.split("id=")[1].split("&")[0]
+            else:
+                print("❌ Could not extract file ID")
+                return
+
+            direct_url = f"https://drive.google.com/uc?export=download&id={file_id}"
+            print(f"Direct URL: {direct_url}")
+        else:
+            direct_url = url
+
+        # Test the download
+        print("Downloading...")
+        response = requests.get(direct_url, stream=True)
+
+        print(f"Status Code: {response.status_code}")
+        print(f"Content-Type: {response.headers.get('content-type', 'Unknown')}")
+        print(f"Content-Length: {response.headers.get('content-length', 'Unknown')}")
+
+        if response.status_code == 200:
+            # Read the first 200 bytes to check the content
+            content = response.raw.read(200)
+            print(f"First 200 bytes: {content[:100]}...")
+
+            # Check whether it is HTML
+            if content.startswith(b"<!DOCTYPE") or content.startswith(b"<html"):
+                print("❌ ERROR: This is an HTML page, not a model file!")
+                print("   Your Google Drive link is not working properly")
+                print("   Check file permissions and sharing settings")
+            else:
+                print("✅ Looks like a valid file (not HTML)")
+
+                # Save a small sample for inspection
+                sample_path = f"sample_{filename}"
+                with open(sample_path, "wb") as f:
+                    f.write(content)
+                print(f"Saved sample to: {sample_path}")
+
+        else:
+            print(f"❌ Download failed with status: {response.status_code}")
+
+    except Exception as e:
+        print(f"❌ Error: {e}")
+
+
+def main():
+    print("Google Drive Link Tester")
+    print("=" * 50)
+
+    # Check environment variables
+    vision_url = os.getenv("VISION_MODEL_DRIVE_LINK")
+    audio_url = os.getenv("AUDIO_MODEL_DRIVE_LINK")
+
+    if not vision_url and not audio_url:
+        print("❌ No environment variables found!")
+        print("Please run setup_env.py first or set:")
+        print("  VISION_MODEL_DRIVE_LINK")
+        print("  AUDIO_MODEL_DRIVE_LINK")
+        return
+
+    if vision_url:
+        test_drive_link(vision_url, "resnet50_model.pth")
+
+    if audio_url:
+        test_drive_link(audio_url, "wav2vec2_model.pth")
+
+    print("\n" + "=" * 50)
+    print("If you see HTML content, your Google Drive links need fixing!")
+    print("Make sure:")
+    print("  1. Files are set to 'Anyone with the link can view'")
+    print("  2. You're using direct file links, not folder links")
+    print("  3. Files are not too large for direct download")
+
+
+if __name__ == "__main__":
+    main()
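The HTML page this script flags usually comes from Google Drive's virus-scan interstitial on large files. gdown handles the confirmation automatically; for context, a rough requests-only sketch of the classic confirm-token flow (the exact cookie dance has changed over time, so treat this as an approximation, not a guaranteed recipe):

    import requests

    def fetch_with_confirm(file_id: str) -> bytes:
        """Best-effort follow of Drive's 'can't scan for viruses' confirmation."""
        session = requests.Session()
        url = "https://drive.google.com/uc?export=download"
        response = session.get(url, params={"id": file_id}, stream=True)
        # Large files return an HTML warning page plus a download_warning cookie
        token = next(
            (v for k, v in response.cookies.items() if k.startswith("download_warning")),
            None,
        )
        if token:
            response = session.get(
                url, params={"id": file_id, "confirm": token}, stream=True
            )
        return response.content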
uv.lock
CHANGED
@@ -2,7 +2,233 @@ version = 1
 revision = 2
 requires-python = ">=3.9"

+[[package]]
+name = "beautifulsoup4"
+version = "4.13.5"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "soupsieve" },
+    { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/85/2e/3e5079847e653b1f6dc647aa24549d68c6addb4c595cc0d902d1b19308ad/beautifulsoup4-4.13.5.tar.gz", hash = "sha256:5e70131382930e7c3de33450a2f54a63d5e4b19386eab43a5b34d594268f3695", size = 622954, upload_time = "2025-08-24T14:06:13.168Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/04/eb/f4151e0c7377a6e08a38108609ba5cede57986802757848688aeedd1b9e8/beautifulsoup4-4.13.5-py3-none-any.whl", hash = "sha256:642085eaa22233aceadff9c69651bc51e8bf3f874fb6d7104ece2beb24b47c4a", size = 105113, upload_time = "2025-08-24T14:06:14.884Z" },
+]
+
+[[package]]
+name = "certifi"
+version = "2025.8.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/dc/67/960ebe6bf230a96cda2e0abcf73af550ec4f090005363542f0765df162e0/certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407", size = 162386, upload_time = "2025-08-03T03:07:47.08Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/e5/48/1549795ba7742c948d2ad169c1c8cdbae65bc450d6cd753d124b17c8cd32/certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5", size = 161216, upload_time = "2025-08-03T03:07:45.777Z" },
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.4.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/83/2d/5fd176ceb9b2fc619e63405525573493ca23441330fcdaee6bef9460e924/charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14", size = 122371, upload_time = "2025-08-09T07:57:28.46Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/d6/98/f3b8013223728a99b908c9344da3aa04ee6e3fa235f19409033eda92fb78/charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72", size = 207695, upload_time = "2025-08-09T07:55:36.452Z" },
+    { url = "https://files.pythonhosted.org/packages/21/40/5188be1e3118c82dcb7c2a5ba101b783822cfb413a0268ed3be0468532de/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe", size = 147153, upload_time = "2025-08-09T07:55:38.467Z" },
+    { url = "https://files.pythonhosted.org/packages/37/60/5d0d74bc1e1380f0b72c327948d9c2aca14b46a9efd87604e724260f384c/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601", size = 160428, upload_time = "2025-08-09T07:55:40.072Z" },
+    { url = "https://files.pythonhosted.org/packages/85/9a/d891f63722d9158688de58d050c59dc3da560ea7f04f4c53e769de5140f5/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c", size = 157627, upload_time = "2025-08-09T07:55:41.706Z" },
+    { url = "https://files.pythonhosted.org/packages/65/1a/7425c952944a6521a9cfa7e675343f83fd82085b8af2b1373a2409c683dc/charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2", size = 152388, upload_time = "2025-08-09T07:55:43.262Z" },
+    { url = "https://files.pythonhosted.org/packages/f0/c9/a2c9c2a355a8594ce2446085e2ec97fd44d323c684ff32042e2a6b718e1d/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0", size = 150077, upload_time = "2025-08-09T07:55:44.903Z" },
+    { url = "https://files.pythonhosted.org/packages/3b/38/20a1f44e4851aa1c9105d6e7110c9d020e093dfa5836d712a5f074a12bf7/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0", size = 161631, upload_time = "2025-08-09T07:55:46.346Z" },
+    { url = "https://files.pythonhosted.org/packages/a4/fa/384d2c0f57edad03d7bec3ebefb462090d8905b4ff5a2d2525f3bb711fac/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0", size = 159210, upload_time = "2025-08-09T07:55:47.539Z" },
+    { url = "https://files.pythonhosted.org/packages/33/9e/eca49d35867ca2db336b6ca27617deed4653b97ebf45dfc21311ce473c37/charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a", size = 153739, upload_time = "2025-08-09T07:55:48.744Z" },
+    { url = "https://files.pythonhosted.org/packages/2a/91/26c3036e62dfe8de8061182d33be5025e2424002125c9500faff74a6735e/charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = "sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f", size = 99825, upload_time = "2025-08-09T07:55:50.305Z" },
+    { url = "https://files.pythonhosted.org/packages/e2/c6/f05db471f81af1fa01839d44ae2a8bfeec8d2a8b4590f16c4e7393afd323/charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669", size = 107452, upload_time = "2025-08-09T07:55:51.461Z" },
+    { url = "https://files.pythonhosted.org/packages/7f/b5/991245018615474a60965a7c9cd2b4efbaabd16d582a5547c47ee1c7730b/charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b", size = 204483, upload_time = "2025-08-09T07:55:53.12Z" },
+    { url = "https://files.pythonhosted.org/packages/c7/2a/ae245c41c06299ec18262825c1569c5d3298fc920e4ddf56ab011b417efd/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64", size = 145520, upload_time = "2025-08-09T07:55:54.712Z" },
+    { url = "https://files.pythonhosted.org/packages/3a/a4/b3b6c76e7a635748c4421d2b92c7b8f90a432f98bda5082049af37ffc8e3/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91", size = 158876, upload_time = "2025-08-09T07:55:56.024Z" },
+    { url = "https://files.pythonhosted.org/packages/e2/e6/63bb0e10f90a8243c5def74b5b105b3bbbfb3e7bb753915fe333fb0c11ea/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f", size = 156083, upload_time = "2025-08-09T07:55:57.582Z" },
+    { url = "https://files.pythonhosted.org/packages/87/df/b7737ff046c974b183ea9aa111b74185ac8c3a326c6262d413bd5a1b8c69/charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07", size = 150295, upload_time = "2025-08-09T07:55:59.147Z" },
+    { url = "https://files.pythonhosted.org/packages/61/f1/190d9977e0084d3f1dc169acd060d479bbbc71b90bf3e7bf7b9927dec3eb/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30", size = 148379, upload_time = "2025-08-09T07:56:00.364Z" },
+    { url = "https://files.pythonhosted.org/packages/4c/92/27dbe365d34c68cfe0ca76f1edd70e8705d82b378cb54ebbaeabc2e3029d/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14", size = 160018, upload_time = "2025-08-09T07:56:01.678Z" },
+    { url = "https://files.pythonhosted.org/packages/99/04/baae2a1ea1893a01635d475b9261c889a18fd48393634b6270827869fa34/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c", size = 157430, upload_time = "2025-08-09T07:56:02.87Z" },
+    { url = "https://files.pythonhosted.org/packages/2f/36/77da9c6a328c54d17b960c89eccacfab8271fdaaa228305330915b88afa9/charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae", size = 151600, upload_time = "2025-08-09T07:56:04.089Z" },
+    { url = "https://files.pythonhosted.org/packages/64/d4/9eb4ff2c167edbbf08cdd28e19078bf195762e9bd63371689cab5ecd3d0d/charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849", size = 99616, upload_time = "2025-08-09T07:56:05.658Z" },
+    { url = "https://files.pythonhosted.org/packages/f4/9c/996a4a028222e7761a96634d1820de8a744ff4327a00ada9c8942033089b/charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c", size = 107108, upload_time = "2025-08-09T07:56:07.176Z" },
+    { url = "https://files.pythonhosted.org/packages/e9/5e/14c94999e418d9b87682734589404a25854d5f5d0408df68bc15b6ff54bb/charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1", size = 205655, upload_time = "2025-08-09T07:56:08.475Z" },
+    { url = "https://files.pythonhosted.org/packages/7d/a8/c6ec5d389672521f644505a257f50544c074cf5fc292d5390331cd6fc9c3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884", size = 146223, upload_time = "2025-08-09T07:56:09.708Z" },
+    { url = "https://files.pythonhosted.org/packages/fc/eb/a2ffb08547f4e1e5415fb69eb7db25932c52a52bed371429648db4d84fb1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018", size = 159366, upload_time = "2025-08-09T07:56:11.326Z" },
+    { url = "https://files.pythonhosted.org/packages/82/10/0fd19f20c624b278dddaf83b8464dcddc2456cb4b02bb902a6da126b87a1/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392", size = 157104, upload_time = "2025-08-09T07:56:13.014Z" },
+    { url = "https://files.pythonhosted.org/packages/16/ab/0233c3231af734f5dfcf0844aa9582d5a1466c985bbed6cedab85af9bfe3/charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f", size = 151830, upload_time = "2025-08-09T07:56:14.428Z" },
+    { url = "https://files.pythonhosted.org/packages/ae/02/e29e22b4e02839a0e4a06557b1999d0a47db3567e82989b5bb21f3fbbd9f/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154", size = 148854, upload_time = "2025-08-09T07:56:16.051Z" },
+    { url = "https://files.pythonhosted.org/packages/05/6b/e2539a0a4be302b481e8cafb5af8792da8093b486885a1ae4d15d452bcec/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491", size = 160670, upload_time = "2025-08-09T07:56:17.314Z" },
+    { url = "https://files.pythonhosted.org/packages/31/e7/883ee5676a2ef217a40ce0bffcc3d0dfbf9e64cbcfbdf822c52981c3304b/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93", size = 158501, upload_time = "2025-08-09T07:56:18.641Z" },
+    { url = "https://files.pythonhosted.org/packages/c1/35/6525b21aa0db614cf8b5792d232021dca3df7f90a1944db934efa5d20bb1/charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f", size = 153173, upload_time = "2025-08-09T07:56:20.289Z" },
+    { url = "https://files.pythonhosted.org/packages/50/ee/f4704bad8201de513fdc8aac1cabc87e38c5818c93857140e06e772b5892/charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37", size = 99822, upload_time = "2025-08-09T07:56:21.551Z" },
+    { url = "https://files.pythonhosted.org/packages/39/f5/3b3836ca6064d0992c58c7561c6b6eee1b3892e9665d650c803bd5614522/charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc", size = 107543, upload_time = "2025-08-09T07:56:23.115Z" },
+    { url = "https://files.pythonhosted.org/packages/65/ca/2135ac97709b400c7654b4b764daf5c5567c2da45a30cdd20f9eefe2d658/charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe", size = 205326, upload_time = "2025-08-09T07:56:24.721Z" },
+    { url = "https://files.pythonhosted.org/packages/71/11/98a04c3c97dd34e49c7d247083af03645ca3730809a5509443f3c37f7c99/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8", size = 146008, upload_time = "2025-08-09T07:56:26.004Z" },
+    { url = "https://files.pythonhosted.org/packages/60/f5/4659a4cb3c4ec146bec80c32d8bb16033752574c20b1252ee842a95d1a1e/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9", size = 159196, upload_time = "2025-08-09T07:56:27.25Z" },
+    { url = "https://files.pythonhosted.org/packages/86/9e/f552f7a00611f168b9a5865a1414179b2c6de8235a4fa40189f6f79a1753/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31", size = 156819, upload_time = "2025-08-09T07:56:28.515Z" },
+    { url = "https://files.pythonhosted.org/packages/7e/95/42aa2156235cbc8fa61208aded06ef46111c4d3f0de233107b3f38631803/charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f", size = 151350, upload_time = "2025-08-09T07:56:29.716Z" },
+    { url = "https://files.pythonhosted.org/packages/c2/a9/3865b02c56f300a6f94fc631ef54f0a8a29da74fb45a773dfd3dcd380af7/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927", size = 148644, upload_time = "2025-08-09T07:56:30.984Z" },
+    { url = "https://files.pythonhosted.org/packages/77/d9/cbcf1a2a5c7d7856f11e7ac2d782aec12bdfea60d104e60e0aa1c97849dc/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9", size = 160468, upload_time = "2025-08-09T07:56:32.252Z" },
+    { url = "https://files.pythonhosted.org/packages/f6/42/6f45efee8697b89fda4d50580f292b8f7f9306cb2971d4b53f8914e4d890/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5", size = 158187, upload_time = "2025-08-09T07:56:33.481Z" },
+    { url = "https://files.pythonhosted.org/packages/70/99/f1c3bdcfaa9c45b3ce96f70b14f070411366fa19549c1d4832c935d8e2c3/charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc", size = 152699, upload_time = "2025-08-09T07:56:34.739Z" },
+    { url = "https://files.pythonhosted.org/packages/a3/ad/b0081f2f99a4b194bcbb1934ef3b12aa4d9702ced80a37026b7607c72e58/charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce", size = 99580, upload_time = "2025-08-09T07:56:35.981Z" },
+    { url = "https://files.pythonhosted.org/packages/9a/8f/ae790790c7b64f925e5c953b924aaa42a243fb778fed9e41f147b2a5715a/charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef", size = 107366, upload_time = "2025-08-09T07:56:37.339Z" },
+    { url = "https://files.pythonhosted.org/packages/8e/91/b5a06ad970ddc7a0e513112d40113e834638f4ca1120eb727a249fb2715e/charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15", size = 204342, upload_time = "2025-08-09T07:56:38.687Z" },
+    { url = "https://files.pythonhosted.org/packages/ce/ec/1edc30a377f0a02689342f214455c3f6c2fbedd896a1d2f856c002fc3062/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db", size = 145995, upload_time = "2025-08-09T07:56:40.048Z" },
+    { url = "https://files.pythonhosted.org/packages/17/e5/5e67ab85e6d22b04641acb5399c8684f4d37caf7558a53859f0283a650e9/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d", size = 158640, upload_time = "2025-08-09T07:56:41.311Z" },
+    { url = "https://files.pythonhosted.org/packages/f1/e5/38421987f6c697ee3722981289d554957c4be652f963d71c5e46a262e135/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096", size = 156636, upload_time = "2025-08-09T07:56:43.195Z" },
+    { url = "https://files.pythonhosted.org/packages/a0/e4/5a075de8daa3ec0745a9a3b54467e0c2967daaaf2cec04c845f73493e9a1/charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa", size = 150939, upload_time = "2025-08-09T07:56:44.819Z" },
+    { url = "https://files.pythonhosted.org/packages/02/f7/3611b32318b30974131db62b4043f335861d4d9b49adc6d57c1149cc49d4/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049", size = 148580, upload_time = "2025-08-09T07:56:46.684Z" },
+    { url = "https://files.pythonhosted.org/packages/7e/61/19b36f4bd67f2793ab6a99b979b4e4f3d8fc754cbdffb805335df4337126/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0", size = 159870, upload_time = "2025-08-09T07:56:47.941Z" },
+    { url = "https://files.pythonhosted.org/packages/06/57/84722eefdd338c04cf3030ada66889298eaedf3e7a30a624201e0cbe424a/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92", size = 157797, upload_time = "2025-08-09T07:56:49.756Z" },
+    { url = "https://files.pythonhosted.org/packages/72/2a/aff5dd112b2f14bcc3462c312dce5445806bfc8ab3a7328555da95330e4b/charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16", size = 152224, upload_time = "2025-08-09T07:56:51.369Z" },
+    { url = "https://files.pythonhosted.org/packages/b7/8c/9839225320046ed279c6e839d51f028342eb77c91c89b8ef2549f951f3ec/charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce", size = 100086, upload_time = "2025-08-09T07:56:52.722Z" },
+    { url = "https://files.pythonhosted.org/packages/ee/7a/36fbcf646e41f710ce0a563c1c9a343c6edf9be80786edeb15b6f62e17db/charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c", size = 107400, upload_time = "2025-08-09T07:56:55.172Z" },
+    { url = "https://files.pythonhosted.org/packages/c2/ca/9a0983dd5c8e9733565cf3db4df2b0a2e9a82659fd8aa2a868ac6e4a991f/charset_normalizer-3.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:70bfc5f2c318afece2f5838ea5e4c3febada0be750fcf4775641052bbba14d05", size = 207520, upload_time = "2025-08-09T07:57:11.026Z" },
+    { url = "https://files.pythonhosted.org/packages/39/c6/99271dc37243a4f925b09090493fb96c9333d7992c6187f5cfe5312008d2/charset_normalizer-3.4.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23b6b24d74478dc833444cbd927c338349d6ae852ba53a0d02a2de1fce45b96e", size = 147307, upload_time = "2025-08-09T07:57:12.4Z" },
+    { url = "https://files.pythonhosted.org/packages/e4/69/132eab043356bba06eb333cc2cc60c6340857d0a2e4ca6dc2b51312886b3/charset_normalizer-3.4.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:34a7f768e3f985abdb42841e20e17b330ad3aaf4bb7e7aeeb73db2e70f077b99", size = 160448, upload_time = "2025-08-09T07:57:13.712Z" },
+    { url = "https://files.pythonhosted.org/packages/04/9a/914d294daa4809c57667b77470533e65def9c0be1ef8b4c1183a99170e9d/charset_normalizer-3.4.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fb731e5deb0c7ef82d698b0f4c5bb724633ee2a489401594c5c88b02e6cb15f7", size = 157758, upload_time = "2025-08-09T07:57:14.979Z" },
+    { url = "https://files.pythonhosted.org/packages/b0/a8/6f5bcf1bcf63cb45625f7c5cadca026121ff8a6c8a3256d8d8cd59302663/charset_normalizer-3.4.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:257f26fed7d7ff59921b78244f3cd93ed2af1800ff048c33f624c87475819dd7", size = 152487, upload_time = "2025-08-09T07:57:16.332Z" },
+    { url = "https://files.pythonhosted.org/packages/c4/72/d3d0e9592f4e504f9dea08b8db270821c909558c353dc3b457ed2509f2fb/charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1ef99f0456d3d46a50945c98de1774da86f8e992ab5c77865ea8b8195341fc19", size = 150054, upload_time = "2025-08-09T07:57:17.576Z" },
+    { url = "https://files.pythonhosted.org/packages/20/30/5f64fe3981677fe63fa987b80e6c01042eb5ff653ff7cec1b7bd9268e54e/charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2c322db9c8c89009a990ef07c3bcc9f011a3269bc06782f916cd3d9eed7c9312", size = 161703, upload_time = "2025-08-09T07:57:20.012Z" },
+    { url = "https://files.pythonhosted.org/packages/e1/ef/dd08b2cac9284fd59e70f7d97382c33a3d0a926e45b15fc21b3308324ffd/charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:511729f456829ef86ac41ca78c63a5cb55240ed23b4b737faca0eb1abb1c41bc", size = 159096, upload_time = "2025-08-09T07:57:21.329Z" },
+    { url = "https://files.pythonhosted.org/packages/45/8c/dcef87cfc2b3f002a6478f38906f9040302c68aebe21468090e39cde1445/charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88ab34806dea0671532d3f82d82b85e8fc23d7b2dd12fa837978dad9bb392a34", size = 153852, upload_time = "2025-08-09T07:57:22.608Z" },
+    { url = "https://files.pythonhosted.org/packages/63/86/9cbd533bd37883d467fcd1bd491b3547a3532d0fbb46de2b99feeebf185e/charset_normalizer-3.4.3-cp39-cp39-win32.whl", hash = "sha256:16a8770207946ac75703458e2c743631c79c59c5890c80011d536248f8eaa432", size = 99840, upload_time = "2025-08-09T07:57:23.883Z" },
+    { url = "https://files.pythonhosted.org/packages/ce/d6/7e805c8e5c46ff9729c49950acc4ee0aeb55efb8b3a56687658ad10c3216/charset_normalizer-3.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:d22dbedd33326a4a5190dd4fe9e9e693ef12160c77382d9e87919bce54f3d4ca", size = 107438, upload_time = "2025-08-09T07:57:25.287Z" },
+    { url = "https://files.pythonhosted.org/packages/8a/1f/f041989e93b001bc4e44bb1669ccdcf54d3f00e628229a85b08d330615c5/charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a", size = 53175, upload_time = "2025-08-09T07:57:26.864Z" },
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload_time = "2022-10-25T02:36:22.414Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload_time = "2022-10-25T02:36:20.889Z" },
+]
+
+[[package]]
+name = "filelock"
+version = "3.19.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/40/bb/0ab3e58d22305b6f5440629d20683af28959bf793d98d11950e305c1c326/filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58", size = 17687, upload_time = "2025-08-14T16:56:03.016Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/42/14/42b2651a2f46b022ccd948bca9f2d5af0fd8929c4eec235b8d6d844fbe67/filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d", size = 15988, upload_time = "2025-08-14T16:56:01.633Z" },
+]
+
+[[package]]
+name = "gdown"
+version = "5.2.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "beautifulsoup4" },
+    { name = "filelock" },
+    { name = "requests", extra = ["socks"] },
+    { name = "tqdm" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/09/6a/37e6b70c5bda3161e40265861e63b64a86bfc6ca6a8f1c35328a675c84fd/gdown-5.2.0.tar.gz", hash = "sha256:2145165062d85520a3cd98b356c9ed522c5e7984d408535409fd46f94defc787", size = 284647, upload_time = "2024-05-12T06:45:12.725Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/54/70/e07c381e6488a77094f04c85c9caf1c8008cdc30778f7019bc52e5285ef0/gdown-5.2.0-py3-none-any.whl", hash = "sha256:33083832d82b1101bdd0e9df3edd0fbc0e1c5f14c9d8c38d2a35bf1683b526d6", size = 18235, upload_time = "2024-05-12T06:45:10.017Z" },
+]
+
+[[package]]
+name = "idna"
+version = "3.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload_time = "2024-09-15T18:07:39.745Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload_time = "2024-09-15T18:07:37.964Z" },
+]
+
+[[package]]
+name = "pysocks"
+version = "1.7.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/bd/11/293dd436aea955d45fc4e8a35b6ae7270f5b8e00b53cf6c024c83b657a11/PySocks-1.7.1.tar.gz", hash = "sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0", size = 284429, upload_time = "2019-09-20T02:07:35.714Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/8d/59/b4572118e098ac8e46e399a1dd0f2d85403ce8bbaad9ec79373ed6badaf9/PySocks-1.7.1-py3-none-any.whl", hash = "sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5", size = 16725, upload_time = "2019-09-20T02:06:22.938Z" },
+]
+
+[[package]]
+name = "python-dotenv"
+version = "1.1.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload_time = "2025-06-24T04:21:07.341Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload_time = "2025-06-24T04:21:06.073Z" },
+]
+
+[[package]]
+name = "requests"
+version = "2.32.5"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "certifi" },
+    { name = "charset-normalizer" },
+    { name = "idna" },
+    { name = "urllib3" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload_time = "2025-08-18T20:46:02.573Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload_time = "2025-08-18T20:46:00.542Z" },
+]
+
+[package.optional-dependencies]
+socks = [
+    { name = "pysocks" },
+]
+
 [[package]]
 name = "sentiment-fused"
 version = "0.1.0"
 source = { virtual = "." }
+dependencies = [
+    { name = "gdown" },
+    { name = "python-dotenv" },
+]
+
+[package.metadata]
+requires-dist = [
+    { name = "gdown", specifier = ">=5.2.0" },
+    { name = "python-dotenv", specifier = ">=1.1.1" },
+]
+
+[[package]]
+name = "soupsieve"
+version = "2.7"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/3f/f4/4a80cd6ef364b2e8b65b15816a843c0980f7a5a2b4dc701fc574952aa19f/soupsieve-2.7.tar.gz", hash = "sha256:ad282f9b6926286d2ead4750552c8a6142bc4c783fd66b0293547c8fe6ae126a", size = 103418, upload_time = "2025-04-20T18:50:08.518Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/e7/9c/0e6afc12c269578be5c0c1c9f4b49a8d32770a080260c333ac04cc1c832d/soupsieve-2.7-py3-none-any.whl", hash = "sha256:6e60cc5c1ffaf1cebcc12e8188320b72071e922c2e897f737cadce79ad5d30c4", size = 36677, upload_time = "2025-04-20T18:50:07.196Z" },
+]
+
+[[package]]
+name = "tqdm"
+version = "4.67.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload_time = "2024-11-24T20:12:22.481Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload_time = "2024-11-24T20:12:19.698Z" },
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.14.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/98/5a/da40306b885cc8c09109dc2e1abd358d5684b1425678151cdaed4731c822/typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36", size = 107673, upload_time = "2025-07-04T13:28:34.16Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/b5/00/d631e67a838026495268c2f6884f3711a15a9a2a96cd244fdaea53b823fb/typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76", size = 43906, upload_time = "2025-07-04T13:28:32.743Z" },
+]
+
+[[package]]
+name = "urllib3"
+version = "2.5.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload_time = "2025-06-18T14:07:41.644Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload_time = "2025-06-18T14:07:40.39Z" },
+]