phi4
- main.py +66 -72
- prompts.yml +3 -0
- requirements.txt +4 -10
main.py
CHANGED
@@ -1,77 +1,71 @@
-import requests
+import streamlit as st
+import yaml
+from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
-import os
-import io
-from PIL import Image
-import soundfile as sf
-from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
-from urllib.request import urlopen
-
-# Define model path
-model_path = "microsoft/Phi-4-multimodal-instruct"
-
-# Load model and processor
-processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained(
-    model_path,
-    device_map="cuda",
-    torch_dtype="auto",
-    trust_remote_code=True,
-    _attn_implementation='flash_attention_2',
-).cuda()
-
-# Load generation config
-generation_config = GenerationConfig.from_pretrained(model_path)
-
-# Define prompt structure
-user_prompt = '<|user|>'
-assistant_prompt = '<|assistant|>'
-prompt_suffix = '<|end|>'
-
-# Part 1: Image Processing
-print("\n--- IMAGE PROCESSING ---")
-image_url = 'https://www.ilankelman.org/stopsigns/australia.jpg'
-prompt = f'{user_prompt}<|image_1|>What is shown in this image?{prompt_suffix}{assistant_prompt}'
-print(f'>>> Prompt\n{prompt}')
-
-# Download and open image
-image = Image.open(requests.get(image_url, stream=True).raw)
-inputs = processor(text=prompt, images=image, return_tensors='pt').to('cuda:0')
-
-# Generate response
-generate_ids = model.generate(
-    **inputs,
-    max_new_tokens=1000,
-    generation_config=generation_config,
-)
-generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
-response = processor.batch_decode(
-    generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
-)[0]
-print(f'>>> Response\n{response}')
-
-# Part 2: Audio Processing
-print("\n--- AUDIO PROCESSING ---")
-audio_url = "https://upload.wikimedia.org/wikipedia/commons/b/b0/Barbara_Sahakian_BBC_Radio4_The_Life_Scientific_29_May_2012_b01j5j24.flac"
-speech_prompt = "Transcribe the audio to text, and then translate the audio to French. Use <sep> as a separator between the original transcript and the translation."
-prompt = f'{user_prompt}<|audio_1|>{speech_prompt}{prompt_suffix}{assistant_prompt}'
-print(f'>>> Prompt\n{prompt}')
-
-# Download and open audio file
-audio, samplerate = sf.read(io.BytesIO(urlopen(audio_url).read()))
-
-# Process with the model
-inputs = processor(text=prompt, audios=[(audio, samplerate)], return_tensors='pt').to('cuda:0')
-
-generate_ids = model.generate(
-    **inputs,
-    max_new_tokens=1000,
-    generation_config=generation_config,
-)
-generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
-response = processor.batch_decode(
-    generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
-)[0]
-print(f'>>> Response\n{response}')
+
+class ModelManager:
+    def __init__(self, model_name="phi4-mini"):
+        # Model registry: prompt keys mapped to Hugging Face identifiers
+        self.models = {
+            "phi4-mini": "microsoft/Phi-4-mini-instruct",
+            "llama": "meta-llama/Llama-3.3-70B-Instruct"
+        }
+        self.current_model_name = model_name
+        self.tokenizer = None
+        self.model = None
+        self.load_model(model_name)
+
+    def load_model(self, model_name):
+        self.current_model_name = model_name
+        model_path = self.models[model_name]
+        st.info(f"Loading model: {model_name} ...")
+        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
+        self.model = AutoModelForCausalLM.from_pretrained(model_path)
+
+    def generate(self, prompt, max_length=50, temperature=0.7):
+        inputs = self.tokenizer(prompt, return_tensors="pt")
+        # do_sample=True is required for temperature to take effect;
+        # max_new_tokens counts generated tokens only, not the prompt
+        outputs = self.model.generate(
+            **inputs,
+            max_new_tokens=max_length,
+            do_sample=True,
+            temperature=temperature,
+        )
+        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    def switch_model(self, model_name):
+        if model_name in self.models:
+            self.load_model(model_name)
+        else:
+            raise ValueError(f"Model {model_name} is not available.")
+
+@st.cache_data
+def load_prompts():
+    with open("prompts.yml", "r", encoding="utf-8") as f:
+        prompts = yaml.safe_load(f)
+    return prompts
+
+def main():
+    st.title("Transformers Model Switcher")
+
+    # Load the prompt configuration
+    prompts_config = load_prompts()
+
+    # Model selection from the sidebar; "default_prompt" is a fallback
+    # style, not a model, so it is excluded from the choices
+    st.sidebar.title("Model Selection")
+    model_choice = st.sidebar.selectbox(
+        "Select a model", [k for k in prompts_config if k != "default_prompt"]
+    )
+
+    # Instantiate the model manager
+    model_manager = ModelManager(model_name=model_choice)
+
+    # Get the style prompt for the selected model
+    style_prompt = prompts_config.get(model_choice, prompts_config.get("default_prompt", ""))
+
+    st.write(f"**Model in use:** {model_choice}")
+
+    # Text area for the prompt, pre-filled with the predefined style
+    user_prompt = st.text_area("Enter your prompt:", value=style_prompt)
+
+    max_length = st.slider("Maximum length", min_value=10, max_value=200, value=50)
+    temperature = st.slider("Temperature", min_value=0.1, max_value=1.0, value=0.7)
+
+    if st.button("Generate response"):
+        result = model_manager.generate(user_prompt, max_length=max_length, temperature=temperature)
+        st.text_area("Output", value=result, height=200)
+
+if __name__ == "__main__":
+    main()
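One caveat on the new app: Streamlit re-executes main() from top to bottom on every widget interaction, so ModelManager(model_name=model_choice) reloads the tokenizer and weights on each rerun, which is prohibitively slow for meta-llama/Llama-3.3-70B-Instruct. A minimal sketch of one way around this, assuming the ModelManager class from the diff above; get_model_manager is a hypothetical helper, and st.cache_resource is Streamlit's cache for unserializable objects such as models:

    import streamlit as st

    @st.cache_resource
    def get_model_manager(model_name: str):
        # Hypothetical helper: cached per model_name, so the first call
        # loads the weights and later reruns with the same selection
        # reuse the same instance instead of reloading.
        return ModelManager(model_name=model_name)

    # In main(), replace the direct construction with:
    # model_manager = get_model_manager(model_choice)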
prompts.yml
ADDED
@@ -0,0 +1,3 @@
+default_prompt: "You are an artificial intelligence assistant. Respond clearly and concisely."
+phi4-mini: "You are using the phi4-mini model. Provide technical, summarized answers."
+llama: "You are using the LLaMA model. Offer detailed, in-depth explanations."
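For reference, a small sketch of how these entries are consumed, assuming the load_prompts() helper in main.py: the YAML parses to a flat dict, and every key except default_prompt doubles as a model choice in the sidebar.

    import yaml

    with open("prompts.yml", "r", encoding="utf-8") as f:
        prompts = yaml.safe_load(f)

    print(sorted(prompts))       # ['default_prompt', 'llama', 'phi4-mini']
    print(prompts["phi4-mini"])  # style prompt pre-filled into the text area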
requirements.txt
CHANGED
@@ -1,10 +1,4 @@
-
-
-transformers
-
-soundfile==0.13.1
-pillow==11.1.0
-scipy==1.15.2
-torchvision==0.21.0
-backoff==2.2.1
-peft==0.13.2
+streamlit
+PyYAML
+transformers
+torch
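The trimmed list drops the multimodal extras (soundfile, pillow, scipy, torchvision, backoff, peft) and keeps only what the Streamlit app imports; note that all four remaining packages are left unpinned. Assuming a standard local checkout, running pip install -r requirements.txt followed by streamlit run main.py should start the app.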