Upload folder using huggingface_hub
- README.md +40 -0
- concept_10_medical_imaging.py +32 -0
- concept_1_multimodal.py +33 -0
- concept_2_music_generation.py +19 -0
- concept_3_sentiment_analysis.py +15 -0
- concept_4_chatbot.py +26 -0
- concept_5_reasoning.py +16 -0
- concept_6_interpretability.py +31 -0
- concept_7_emotional_tts.py +36 -0
- concept_8_adaptive_soundtrack.py +35 -0
- concept_9_agentic_ai.py +39 -0
- main.py +30 -0
- requirements.txt +13 -0
README.md
ADDED
@@ -0,0 +1,40 @@
+---
+license: mit
+---
+
+# Innovative AI Model: 10 Concepts
+
+This repository combines 10 different AI concepts to showcase the power and versatility of modern AI. Each concept is implemented as a separate Python script, and a unified interface (`main.py`) is provided to run them.
+
+## Concepts
+
+1. **Multimodal Classification:** Classifies an image against a set of candidate text labels (zero-shot, using CLIP).
+2. **Music Generation:** Generates a simple melody from a sequence of note frequencies.
+3. **Sentiment Analysis:** Analyzes the sentiment of a given text.
+4. **Chatbot:** A simple conversational chatbot.
+5. **Reasoning:** Answers a question based on a given context (extractive question answering).
+6. **Model Interpretability:** Explains the prediction of a text classification model using LIME.
+7. **Emotional Text-to-Speech:** Generates speech from text with a simulated emotion.
+8. **Adaptive Soundtrack:** Generates a melody whose mood follows the sentiment of the input text.
+9. **Agentic AI:** An agent that analyzes the sentiment of a text and reads it aloud with the corresponding emotion.
+10. **Medical Imaging Analysis:** Classifies a medical image (the demo uses a general-purpose ViT as a stand-in).
+
+## How to Use
+
+First, install the required dependencies:
+
+```bash
+pip install -r requirements.txt
+```
+
+Then run the `main.py` script with the number of the concept you want to try:
+
+```bash
+python main.py <concept_number>
+```
+
+For example, to run the sentiment analysis concept:
+
+```bash
+python main.py 3
+```
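Running concept 3, for example, should print something like the following (the score is illustrative; the default sentiment model returns a label and a confidence):

```
Sentiment analysis result: [{'label': 'POSITIVE', 'score': 0.9998}]
```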
concept_10_medical_imaging.py
ADDED
@@ -0,0 +1,32 @@
+
+from transformers import ViTImageProcessor, ViTForImageClassification
+from PIL import Image
+from datasets import load_dataset
+
+def classify_medical_image(image_path):
+    """
+    Classifies a medical image using a pretrained model.
+    """
+    # Load a pretrained model. Note: this checkpoint is trained on ImageNet,
+    # not medical data, so it serves only as a stand-in classifier here.
+    processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')
+    model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224')
+
+    # Load the image
+    image = Image.open(image_path).convert("RGB")
+
+    # Preprocess the image and predict the class
+    inputs = processor(images=image, return_tensors="pt")
+    outputs = model(**inputs)
+    logits = outputs.logits
+    predicted_class_idx = logits.argmax(-1).item()
+    return model.config.id2label[predicted_class_idx]
+
+if __name__ == '__main__':
+    # Load a sample image from the chest x-ray dataset
+    dataset = load_dataset("keremam/chest-xray-classification-augmented", split="train")
+    image_to_classify = dataset[0]["image"]
+    image_to_classify.save("medical_image.png")
+
+    predicted_class = classify_medical_image("medical_image.png")
+    print("Predicted class:", predicted_class)
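Because `google/vit-base-patch16-224` is an ImageNet classifier, it will map a chest X-ray to some unrelated ImageNet label. A minimal sketch of how a ViT could instead be initialized for a real two-class X-ray task (the label set is an assumption, and the new head is untrained until fine-tuned):

```python
from transformers import ViTForImageClassification

# Hypothetical NORMAL/PNEUMONIA setup; the replacement head starts out with
# random weights and must be fine-tuned on labeled X-rays before use.
labels = ["NORMAL", "PNEUMONIA"]
model = ViTForImageClassification.from_pretrained(
    "google/vit-base-patch16-224",
    num_labels=len(labels),
    id2label=dict(enumerate(labels)),
    label2id={l: i for i, l in enumerate(labels)},
    ignore_mismatched_sizes=True,  # discard the 1000-class ImageNet head
)
```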
concept_1_multimodal.py
ADDED
@@ -0,0 +1,33 @@
+
+import torch
+from PIL import Image
+from transformers import CLIPProcessor, CLIPModel
+
+def classify_image(image_path, text_labels):
+    """
+    Classifies an image based on a list of text labels using a CLIP model.
+    """
+    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
+
+    image = Image.open(image_path)
+    inputs = processor(text=text_labels, images=image, return_tensors="pt", padding=True)
+
+    with torch.no_grad():
+        outputs = model(**inputs)
+
+    logits_per_image = outputs.logits_per_image
+    probs = logits_per_image.softmax(dim=1)
+
+    return dict(zip(text_labels, probs.tolist()[0]))
+
+if __name__ == '__main__':
+    # Create a dummy image for testing
+    dummy_image = Image.new('RGB', (100, 100), color='red')
+    dummy_image.save("dummy_image.png")
+
+    labels = ["a red square", "a blue circle", "a green triangle"]
+    probabilities = classify_image("dummy_image.png", labels)
+    print("Probabilities:", probabilities)
+    print("Predicted label:", max(probabilities, key=probabilities.get))
+
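The same zero-shot classification is also available through the pipeline API; a shorter equivalent sketch using the same checkpoint:

```python
from transformers import pipeline

clip = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
results = clip("dummy_image.png",
               candidate_labels=["a red square", "a blue circle", "a green triangle"])
print(results)  # list of {'score': ..., 'label': ...}, sorted by score
```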
concept_2_music_generation.py
ADDED
@@ -0,0 +1,19 @@
+
+from pydub import AudioSegment
+from pydub.generators import Sine
+
+def generate_music_sequence(note_sequence, output_path="music_sequence.wav"):
+    """
+    Generates a simple music sequence from a list of notes and saves it as a WAV file.
+    """
+    duration = 500  # milliseconds for each note
+    song = AudioSegment.empty()
+    for note in note_sequence:
+        song += Sine(note).to_audio_segment(duration=duration)
+    song.export(output_path, format="wav")
+
+if __name__ == '__main__':
+    # A simple melody: the C major scale (frequencies in Hz)
+    melody = [261.63, 293.66, 329.63, 349.23, 392.00, 440.00, 493.88, 523.25]
+    generate_music_sequence(melody)
+    print("Music sequence generated and saved to music_sequence.wav")
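The hard-coded frequencies are equal-temperament pitches. If it helps to write melodies by MIDI note number instead, the standard conversion is f = 440 · 2^((n−69)/12); a small helper (an addition, not part of the script above):

```python
def midi_to_hz(n: int) -> float:
    # A4 (MIDI note 69) is 440 Hz; each semitone multiplies frequency by 2**(1/12)
    return 440.0 * 2 ** ((n - 69) / 12)

# The same C major scale as above, written as MIDI notes 60-72
melody = [midi_to_hz(n) for n in (60, 62, 64, 65, 67, 69, 71, 72)]
```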
concept_3_sentiment_analysis.py
ADDED
@@ -0,0 +1,15 @@
+
+from transformers import pipeline
+
+def analyze_sentiment(text):
+    """
+    Analyzes the sentiment of a given text using a pretrained model.
+    """
+    sentiment_pipeline = pipeline("sentiment-analysis")
+    result = sentiment_pipeline(text)
+    return result
+
+if __name__ == '__main__':
+    text_to_analyze = "I love the new AI model! It's so innovative and powerful."
+    sentiment = analyze_sentiment(text_to_analyze)
+    print("Sentiment analysis result:", sentiment)
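Calling `pipeline("sentiment-analysis")` with no model argument emits a warning and downloads a default checkpoint. Pinning the model (below is the default at the time of writing) keeps results reproducible:

```python
from transformers import pipeline

sentiment_pipeline = pipeline(
    "sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)
print(sentiment_pipeline("I love the new AI model!"))
# e.g. [{'label': 'POSITIVE', 'score': 0.99...}]
```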
concept_4_chatbot.py
ADDED
@@ -0,0 +1,26 @@
+
+from transformers import pipeline, Conversation
+
+# Note: the "conversational" pipeline and the Conversation class were removed in
+# recent transformers releases (v4.42+), so this script needs an older version.
+def chat_with_bot(message, conversation_history=None):
+    """
+    Has a conversation with a chatbot using a pretrained model.
+    """
+    chatbot_pipeline = pipeline("conversational")
+    if conversation_history:
+        conversation = Conversation(text=message, past_user_inputs=conversation_history['past_user_inputs'], generated_responses=conversation_history['generated_responses'])
+    else:
+        conversation = Conversation(text=message)
+
+    result = chatbot_pipeline(conversation)
+    return result.generated_responses[-1], {'past_user_inputs': result.past_user_inputs, 'generated_responses': result.generated_responses}
+
+if __name__ == '__main__':
+    user_message = "Hello, how are you?"
+    response, history = chat_with_bot(user_message)
+    print("Bot:", response)
+
+    user_message = "What is the weather like today?"
+    response, history = chat_with_bot(user_message, conversation_history=history)
+    print("Bot:", response)
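On transformers v4.42+ the script above no longer runs. A sketch of the replacement pattern: the text-generation pipeline accepts chat messages directly (the checkpoint name is an assumption; any chat-tuned model works):

```python
from transformers import pipeline

chatbot = pipeline("text-generation", model="Qwen/Qwen2.5-0.5B-Instruct")  # assumed checkpoint
messages = [{"role": "user", "content": "Hello, how are you?"}]
result = chatbot(messages, max_new_tokens=64)
reply = result[0]["generated_text"][-1]["content"]  # last message is the bot's turn
print("Bot:", reply)
# To continue the conversation, append the reply and the next user turn to `messages`.
```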
concept_5_reasoning.py
ADDED
@@ -0,0 +1,16 @@
+
+from transformers import pipeline
+
+def perform_reasoning(context, question):
+    """
+    Performs a simple reasoning task by answering a question based on a given context.
+    """
+    qa_pipeline = pipeline("question-answering")
+    result = qa_pipeline(question=question, context=context)
+    return result
+
+if __name__ == '__main__':
+    context = "The new AI model is capable of performing a wide range of tasks, including multimodal classification, music generation, and sentiment analysis. It is designed to be a versatile and powerful tool for developers."
+    question = "What tasks can the new AI model perform?"
+    answer = perform_reasoning(context, question)
+    print("Reasoning task result:", answer)
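The question-answering pipeline is extractive: the answer is a span copied out of the context, returned together with a confidence score and character offsets. A usage fragment (values illustrative):

```python
result = perform_reasoning(
    context="Paris is the capital of France.",
    question="What is the capital of France?",
)
print(result["answer"], result["score"], result["start"], result["end"])
# e.g. Paris 0.99 0 5
```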
concept_6_interpretability.py
ADDED
@@ -0,0 +1,31 @@
+
+import lime.lime_text
+from sklearn.pipeline import make_pipeline
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.linear_model import LogisticRegression
+
+def explain_prediction(text):
+    """
+    Explains the prediction of a text classification model using LIME.
+    """
+    # Create a dummy dataset
+    data = ["I love this product", "This is a terrible movie", "The book was okay", "I hate this"]
+    labels = [1, 0, 0, 0]
+
+    # Create a simple text classification pipeline
+    vectorizer = TfidfVectorizer()
+    classifier = LogisticRegression()
+    pipeline = make_pipeline(vectorizer, classifier)
+    pipeline.fit(data, labels)
+
+    # Create a LIME explainer
+    explainer = lime.lime_text.LimeTextExplainer(class_names=['negative', 'positive'])
+
+    # Explain the prediction for the given text
+    explanation = explainer.explain_instance(text, pipeline.predict_proba, num_features=6)
+    return explanation.as_list()
+
+if __name__ == '__main__':
+    text_to_explain = "This is a fantastic and innovative AI model."
+    explanation = explain_prediction(text_to_explain)
+    print("LIME explanation:", explanation)
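Beyond `.as_list()`, the LIME explanation object can render an interactive HTML view. A fragment assuming the `pipeline` and `text` variables from the script above:

```python
import lime.lime_text

explainer = lime.lime_text.LimeTextExplainer(class_names=['negative', 'positive'])
explanation = explainer.explain_instance(text, pipeline.predict_proba, num_features=6)
explanation.save_to_file("lime_explanation.html")  # interactive HTML visualization
print(explanation.as_list())  # [(token, weight), ...]; positive weights push toward 'positive'
```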
concept_7_emotional_tts.py
ADDED
@@ -0,0 +1,36 @@
+
+from gtts import gTTS
+from pydub import AudioSegment
+import os
+
+def emotional_tts(text, emotion="neutral", output_path="emotional_speech.mp3"):
+    """
+    Generates speech from text with a simulated emotion.
+    """
+    tts = gTTS(text, lang='en', slow=False)
+    tts.save("temp.mp3")
+
+    # Load the audio and manipulate it to simulate emotion
+    audio = AudioSegment.from_mp3("temp.mp3")
+    if emotion == "happy":
+        # Faster speed, higher pitch
+        audio = audio.speedup(playback_speed=1.2)
+        audio = audio._spawn(audio.raw_data, overrides={
+            "frame_rate": int(audio.frame_rate * 1.1)
+        }).set_frame_rate(audio.frame_rate)
+    elif emotion == "sad":
+        # Slower speed, lower pitch. pydub's speedup() only supports factors > 1,
+        # so rely on the frame-rate resample, which both slows and deepens the audio.
+        audio = audio._spawn(audio.raw_data, overrides={
+            "frame_rate": int(audio.frame_rate * 0.9)
+        }).set_frame_rate(audio.frame_rate)
+
+    audio.export(output_path, format="mp3")
+    os.remove("temp.mp3")
+
+if __name__ == '__main__':
+    text = "This is a demonstration of emotional text-to-speech."
+    emotional_tts(text, emotion="happy")
+    print("Happy speech saved to emotional_speech.mp3")
+    emotional_tts(text, emotion="sad", output_path="sad_speech.mp3")
+    print("Sad speech saved to sad_speech.mp3")
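The 1.1 and 0.9 frame-rate factors correspond to pitch shifts of roughly ±1.7 semitones. A small helper (an addition, not part of the script) that expresses the same resampling trick in semitones:

```python
from pydub import AudioSegment

def pitch_shift(audio: AudioSegment, semitones: float) -> AudioSegment:
    # Scaling the frame rate by 2**(semitones/12) shifts the pitch by that many
    # semitones; duration changes by the same factor (pitch and speed are coupled).
    new_rate = int(audio.frame_rate * 2 ** (semitones / 12))
    shifted = audio._spawn(audio.raw_data, overrides={"frame_rate": new_rate})
    return shifted.set_frame_rate(audio.frame_rate)
```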
concept_8_adaptive_soundtrack.py
ADDED
@@ -0,0 +1,35 @@
+
+from transformers import pipeline
+from pydub import AudioSegment
+from pydub.generators import Sine
+
+def generate_adaptive_soundtrack(text, output_path="adaptive_soundtrack.wav"):
+    """
+    Generates a music sequence with a mood based on the sentiment of the input text.
+    """
+    # Analyze the sentiment of the text
+    sentiment_pipeline = pipeline("sentiment-analysis")
+    sentiment = sentiment_pipeline(text)[0]['label']
+
+    # Generate a music sequence based on the sentiment
+    duration = 500  # milliseconds for each note
+    song = AudioSegment.empty()
+    if sentiment == "POSITIVE":
+        # Happy melody (C major scale)
+        melody = [261.63, 293.66, 329.63, 349.23, 392.00, 440.00, 493.88, 523.25]
+    else:
+        # Sad melody (C natural minor scale)
+        melody = [261.63, 293.66, 311.13, 349.23, 392.00, 415.30, 466.16, 523.25]
+
+    for note in melody:
+        song += Sine(note).to_audio_segment(duration=duration)
+    song.export(output_path, format="wav")
+
+if __name__ == '__main__':
+    positive_text = "This is a wonderful day!"
+    generate_adaptive_soundtrack(positive_text, output_path="happy_soundtrack.wav")
+    print("Happy soundtrack generated and saved to happy_soundtrack.wav")
+
+    negative_text = "I'm feeling down today."
+    generate_adaptive_soundtrack(negative_text, output_path="sad_soundtrack.wav")
+    print("Sad soundtrack generated and saved to sad_soundtrack.wav")
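The classifier also returns a confidence score, which a possible extension (a sketch, not in the script above) could use to modulate tempo so that stronger sentiment plays faster:

```python
result = sentiment_pipeline(text)[0]         # {'label': 'POSITIVE'/'NEGATIVE', 'score': 0-1}
duration = int(600 - 200 * result["score"])  # 400-600 ms per note
```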
concept_9_agentic_ai.py
ADDED
@@ -0,0 +1,39 @@
+
+from transformers import pipeline
+from gtts import gTTS
+from pydub import AudioSegment
+import os
+
+def emotional_tts(text, emotion="neutral", output_path="emotional_speech.mp3"):
+    """
+    Generates speech from text with a simulated emotion.
+    """
+    tts = gTTS(text, lang='en', slow=False)
+    tts.save("temp.mp3")
+
+    audio = AudioSegment.from_mp3("temp.mp3")
+    if emotion == "POSITIVE":
+        audio = audio.speedup(playback_speed=1.2)
+        audio = audio._spawn(audio.raw_data, overrides={"frame_rate": int(audio.frame_rate * 1.1)}).set_frame_rate(audio.frame_rate)
+    elif emotion == "NEGATIVE":
+        # pydub's speedup() only supports factors > 1; the frame-rate resample below both slows and deepens the audio.
+        audio = audio._spawn(audio.raw_data, overrides={"frame_rate": int(audio.frame_rate * 0.9)}).set_frame_rate(audio.frame_rate)
+
+    audio.export(output_path, format="mp3")
+    os.remove("temp.mp3")
+
+def agentic_ai(text):
+    """
+    An agent that analyzes the sentiment of a text and reads it aloud with the corresponding emotion.
+    """
+    # Analyze the sentiment of the text
+    sentiment_pipeline = pipeline("sentiment-analysis")
+    sentiment = sentiment_pipeline(text)[0]['label']
+
+    # Generate emotional speech
+    emotional_tts(text, emotion=sentiment, output_path="agent_speech.mp3")
+    print(f"Agent read the text with a {sentiment} emotion and saved it to agent_speech.mp3")
+
+if __name__ == '__main__':
+    text_to_process = "I am so excited about the future of AI!"
+    agentic_ai(text_to_process)
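`emotional_tts` here duplicates concept 7 with different emotion names. A possible refactor (a sketch) inside `agentic_ai` would reuse that module and map the classifier's labels onto its emotion names:

```python
from concept_7_emotional_tts import emotional_tts as tts

label_to_emotion = {"POSITIVE": "happy", "NEGATIVE": "sad"}
tts(text, emotion=label_to_emotion.get(sentiment, "neutral"),
    output_path="agent_speech.mp3")
```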
main.py
ADDED
@@ -0,0 +1,30 @@
+
+import argparse
+import os
+
+def main():
+    parser = argparse.ArgumentParser(description="A unified interface for the innovative AI model.")
+    parser.add_argument("concept", type=int, help="The concept number to run (1-10).")
+    args = parser.parse_args()
+
+    concept_scripts = {
+        1: "concept_1_multimodal.py",
+        2: "concept_2_music_generation.py",
+        3: "concept_3_sentiment_analysis.py",
+        4: "concept_4_chatbot.py",
+        5: "concept_5_reasoning.py",
+        6: "concept_6_interpretability.py",
+        7: "concept_7_emotional_tts.py",
+        8: "concept_8_adaptive_soundtrack.py",
+        9: "concept_9_agentic_ai.py",
+        10: "concept_10_medical_imaging.py",
+    }
+
+    if args.concept in concept_scripts:
+        script_to_run = os.path.join(os.path.dirname(__file__), concept_scripts[args.concept])
+        os.system(f"python {script_to_run}")
+    else:
+        print("Invalid concept number. Please choose a number between 1 and 10.")
+
+if __name__ == "__main__":
+    main()
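`os.system(f"python {script_to_run}")` depends on a `python` executable being on PATH and interpolates the path into a shell string. A more robust drop-in for that line (a sketch) runs the script with the current interpreter:

```python
import subprocess
import sys

# Launch the concept script with the same interpreter that is running main.py.
subprocess.run([sys.executable, script_to_run], check=True)
```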
requirements.txt
ADDED
@@ -0,0 +1,13 @@
+transformers
+torch
+Pillow
+lime
+shap
+pydub
+gtts
+librosa
+soundfile
+numpy
+scikit-learn
+datasets
+huggingface_hub