HenriqueBraz committed on
Commit 713e5ac · verified · 1 Parent(s): 08bf505

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +157 -38
src/streamlit_app.py CHANGED
@@ -1,40 +1,159 @@
-import altair as alt
-import numpy as np
-import pandas as pd
 import streamlit as st
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+from diffusers import StableDiffusionPipeline
+import torch
+from PIL import Image
+import librosa
+import tempfile
+import os
 
-"""
-# Welcome to Streamlit!
-
-Edit `/streamlit_app.py` to customize this app to your heart's desire :heart:.
-If you have any questions, checkout our [documentation](https://docs.streamlit.io) and [community
-forums](https://discuss.streamlit.io).
-
-In the meantime, below is an example of what you can do with just a few lines of code:
-"""
-
-num_points = st.slider("Number of points in spiral", 1, 10000, 1100)
-num_turns = st.slider("Number of turns in spiral", 1, 300, 31)
-
-indices = np.linspace(0, 1, num_points)
-theta = 2 * np.pi * num_turns * indices
-radius = indices
-
-x = radius * np.cos(theta)
-y = radius * np.sin(theta)
-
-df = pd.DataFrame({
-    "x": x,
-    "y": y,
-    "idx": indices,
-    "rand": np.random.randn(num_points),
-})
-
-st.altair_chart(alt.Chart(df, height=700, width=700)
-    .mark_point(filled=True)
-    .encode(
-        x=alt.X("x", axis=None),
-        y=alt.Y("y", axis=None),
-        color=alt.Color("idx", legend=None, scale=alt.Scale()),
-        size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-    ))
+# Page configuration
+st.set_page_config(page_title="Multi-Modal AI Demo", page_icon="🤖", layout="wide")
+
+# -------- Model cache --------
+@st.cache_resource(show_spinner=False)
+def load_model(model_key):
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    cache_dir = "model_cache"
+    os.makedirs(cache_dir, exist_ok=True)
+
+    # With pipeline(), cache_dir must reach from_pretrained via model_kwargs
+    if model_key == 'sentiment_analysis':
+        return pipeline("sentiment-analysis", model="cardiffnlp/twitter-roberta-base-sentiment-latest", device=device, model_kwargs={"cache_dir": cache_dir})
+    elif model_key == 'text_classification':
+        return pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english", device=device, model_kwargs={"cache_dir": cache_dir})
+    elif model_key == 'summarization':
+        return pipeline("summarization", model="facebook/bart-large-cnn", device=device, max_length=150, min_length=30, model_kwargs={"cache_dir": cache_dir})
+    elif model_key == 'question_answering':
+        return pipeline("question-answering", model="deepset/roberta-base-squad2", device=device, model_kwargs={"cache_dir": cache_dir})
+    elif model_key == 'translation':
+        return pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-en-pt", device=device, model_kwargs={"cache_dir": cache_dir})
+    elif model_key == 'text_generation':
+        tokenizer = AutoTokenizer.from_pretrained("gpt2", cache_dir=cache_dir)
+        model = AutoModelForCausalLM.from_pretrained("gpt2", cache_dir=cache_dir)
+        model.config.pad_token_id = model.config.eos_token_id
+        return pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
+    elif model_key == 'ner':
+        return pipeline("ner", model="dbmdz/bert-large-cased-finetuned-conll03-english", device=device, aggregation_strategy="simple", model_kwargs={"cache_dir": cache_dir})
+    elif model_key == 'image_classification':
+        return pipeline("image-classification", model="google/vit-base-patch16-224", device=device, model_kwargs={"cache_dir": cache_dir})
+    elif model_key == 'object_detection':
+        return pipeline("object-detection", model="facebook/detr-resnet-50", device=device, model_kwargs={"cache_dir": cache_dir})
+    elif model_key == 'speech_to_text':
+        return pipeline("automatic-speech-recognition", model="openai/whisper-base", device=device, model_kwargs={"cache_dir": cache_dir})
+    elif model_key == 'audio_classification':
+        return pipeline("audio-classification", model="superb/hubert-base-superb-er", device=device, model_kwargs={"cache_dir": cache_dir})
+    elif model_key == 'text_to_image':
+        # Stable Diffusion comes from diffusers, not transformers; move it to the target device
+        return StableDiffusionPipeline.from_pretrained(
+            "runwayml/stable-diffusion-v1-5",
+            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+            use_safetensors=True, safety_checker=None, cache_dir=cache_dir,
+        ).to(device)
+
+# -------- Helper functions --------
+def process_audio_file(audio_file):
+    # Persist the upload to a temp file so librosa can decode it, resampled to the 16 kHz the audio models expect
+    with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(audio_file.name)[1]) as tmp_file:
+        tmp_file.write(audio_file.read())
+        tmp_file_path = tmp_file.name
+    audio_array, sr = librosa.load(tmp_file_path, sr=16000)
+    os.unlink(tmp_file_path)
+    return audio_array
+
+def process_image_file(image_file):
+    image = Image.open(image_file)
+    if image.mode != 'RGB':
+        image = image.convert('RGB')
+    return image
+
+def display_results(result, model_key, input_text=None):
+    if model_key == 'summarization':
+        st.subheader("📝 Summary")
+        if input_text:
+            st.markdown("**Original Text:**")
+            st.write(input_text)
+        st.info(result[0]['summary_text'])
+    elif model_key == 'translation':
+        st.subheader("🌍 Translation")
+        st.success(result[0]['translation_text'])
+    elif model_key == 'question_answering':
+        # The QA pipeline returns a single dict, not a list
+        st.subheader("❓ Answer")
+        st.success(f"{result['answer']} ({result['score']:.2%})")
+    elif model_key in ['sentiment_analysis', 'text_classification']:
+        st.subheader("📊 Results")
+        for res in result:
+            st.write(f"- **{res['label']}**: {res['score']:.2%}")
+    elif model_key == 'ner':
+        st.subheader("🔍 Recognized Entities")
+        for entity in result:
+            st.write(f"- **{entity['word']}**: {entity['entity_group']} ({entity['score']:.2%})")
+    elif model_key == 'text_generation':
+        st.subheader("🧠 Generated Text")
+        st.write(result[0]['generated_text'])
+    elif model_key == 'image_classification':
+        st.subheader("🏷️ Image Classification")
+        for res in result[:5]:
+            st.write(f"- **{res['label']}**: {res['score']:.2%}")
+    elif model_key == 'object_detection':
+        st.subheader("📦 Detected Objects")
+        for obj in result:
+            st.write(f"- {obj['label']} ({obj['score']:.2%})")
+    elif model_key == 'speech_to_text':
+        st.subheader("🔈 Audio Transcription")
+        st.success(result['text'])
+    elif model_key == 'audio_classification':
+        st.subheader("🎧 Audio Classification")
+        top_emotion = result[0]
+        st.write(f"**Detected emotion**: {top_emotion['label']} ({top_emotion['score']:.2%})")
+    elif model_key == 'text_to_image':
+        st.subheader("🎨 Generated Image")
+        st.image(result[0], caption="Image generated from the prompt")
+
+# -------- Sample use cases (text samples kept in Portuguese, as committed) --------
+use_cases = {
+    'sentiment_analysis': "A entrega foi super rápida, adorei!",
+    'text_classification': "Estou insatisfeito com o produto",
+    'summarization': "A empresa XYZ reportou um crescimento de 15% no último trimestre...",
+    'question_answering': {
+        'context': "O produto X tem garantia de 2 anos e pode ser configurado via app em 5 minutos.",
+        'question': "Qual é o tempo de garantia do produto X?"
+    },
+    'translation': "Our product ensures high performance",
+    'ner': "Microsoft assinou um contrato com a empresa XYZ em Nova York.",
+    'text_generation': "Era uma vez um robô que",
+    'speech_to_text': None,
+    'audio_classification': None,
+    'image_classification': None,
+    'object_detection': None,
+    'text_to_image': "Um carro futurista voando sobre Lisboa"
+}
+
+# -------- Interface --------
+st.title("🤖 Multi-Modal AI Demo")
+model_key = st.selectbox("Choose the model to test:", list(use_cases.keys()))
+model = load_model(model_key)
+
+if model_key in ['sentiment_analysis', 'text_classification', 'summarization', 'translation', 'text_generation', 'ner', 'question_answering']:
+    input_text = st.text_area("Enter text:", value=use_cases[model_key] if isinstance(use_cases[model_key], str) else "")
+    if st.button("Run"):
+        if model_key == 'question_answering':
+            # QA uses the fixed question/context pair from use_cases
+            result = model(question=use_cases['question_answering']['question'], context=use_cases['question_answering']['context'])
+        else:
+            result = model(input_text)
+        display_results(result, model_key, input_text=input_text)
+
+elif model_key in ['speech_to_text', 'audio_classification']:
+    audio_file = st.file_uploader("Upload an audio file", type=['wav','mp3','flac','m4a'])
+    if audio_file and st.button("Run"):
+        audio_data = process_audio_file(audio_file)
+        result = model(audio_data)
+        display_results(result, model_key)
+
+elif model_key in ['image_classification', 'object_detection', 'text_to_image']:
+    uploaded_file = st.file_uploader("Upload an image (or leave it empty to generate one)", type=['jpg','jpeg','png'])
+    prompt = st.text_input("Prompt for image generation (text_to_image only):", value=use_cases['text_to_image'] if model_key=='text_to_image' else "")
+    if st.button("Run"):
+        if model_key == 'text_to_image':
+            result = [model(prompt).images[0]]
+        elif uploaded_file:
+            image = process_image_file(uploaded_file)
+            result = model(image)
+        else:
+            st.warning("Upload an image or enter a prompt to generate one.")
+            result = None
+        if result:
+            display_results(result, model_key)
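
A quick way to sanity-check two of the pipelines outside Streamlit (a minimal sketch, not part of the commit; it assumes the same checkpoints download cleanly, and the sample strings are taken from use_cases above):

# Standalone smoke test for the sentiment and QA pipelines used in load_model().
# Exact scores will vary by transformers version.
from transformers import pipeline

sentiment = pipeline("sentiment-analysis",
                     model="cardiffnlp/twitter-roberta-base-sentiment-latest")
print(sentiment("A entrega foi super rápida, adorei!"))

qa = pipeline("question-answering", model="deepset/roberta-base-squad2")
print(qa(question="Qual é o tempo de garantia do produto X?",
         context="O produto X tem garantia de 2 anos e pode ser configurado via app em 5 minutos."))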