# Pigeon Harmony - Gemma 2B Version
# Google's Gemma model - Fast and reliable!
# Copy ALL of this into a NEW Colab notebook
# Step 1: Install libraries
!pip install transformers torch accelerate -q
# Step 2: Import
# Hugging Face loaders for the tokenizer/model, plus PyTorch for dtype/device.
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

print("🐦 Chargement de Pigeon Harmony avec Gemma...")
# Step 3: Load Gemma 2B (Google's model!)
# 'it' = instruction-tuned variant, fine-tuned for chat.
model_name = "google/gemma-2b-it"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # half precision so the 2B model fits in Colab GPU memory
    device_map="auto",          # let accelerate place the layers on the available device
)

print("✅ Pigeon Harmony avec Gemma est prêt!")
# Step 4: Chat function with Gemma's format
def chat(message):
    """Generate one Pigeon Harmony reply for a single user message.

    Args:
        message: The user's message (plain text).

    Returns:
        The model's reply as a string, with the chat-template scaffolding
        and the pre-seeded persona sentence stripped off.
    """
    # Gemma's instruction-tuned chat template wraps each turn in
    # <start_of_turn>role ... <end_of_turn>. The angle-bracket tokens were
    # stripped from the original source; they are required here because the
    # extraction code below splits on "<start_of_turn>model". We pre-seed the
    # model turn with the persona sentence so replies stay in character.
    prompt = (
        "<start_of_turn>user\n"
        f"{message}<end_of_turn>\n"
        "<start_of_turn>model\n"
        "Je suis Pigeon Harmony, un assistant IA qui parle français québécois. "
    )

    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        temperature=0.8,
        top_p=0.9,
        top_k=50,
        do_sample=True,
        repetition_penalty=1.2,    # discourages token-level loops
        no_repeat_ngram_size=3,    # forbids repeating any 3-gram
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Keep only the model's turn of the decoded text, if the turn marker
    # survived decoding (skip_special_tokens may remove it — kept as a
    # best-effort fallback, matching the original logic).
    if "<start_of_turn>model" in response:
        response = response.split("<start_of_turn>model")[-1].strip()

    # Strip the pre-seeded persona sentence if the model echoed it back.
    persona = "Je suis Pigeon Harmony, un assistant IA qui parle français québécois."
    if persona in response:
        parts = response.split(persona)
        if len(parts) > 1:
            response = parts[1].strip()

    return response
# Step 5: Test it!
print("\n🐦 Testing Pigeon Harmony avec Gemma:\n")

# A few smoke-test prompts to verify the model answers in character.
print("Toi: Salut! Comment ça va?")
response = chat("Salut! Comment ça va?")
print(f"Pigeon: {response}\n")

print("Toi: C'est quoi ton nom?")
response = chat("C'est quoi ton nom?")
print(f"Pigeon: {response}\n")

print("Toi: Parle-moi de la poutine")
response = chat("Parle-moi de la poutine")
print(f"Pigeon: {response}\n")
# Step 6: Interactive chat!
print("\n💬 Mode interactif (tape 'bye' pour quitter):\n")

# Each entry is {"user": <input>, "pigeon": <reply>}.
conversation_history = []

while True:
    user_input = input("\nToi: ")
    # NOTE(review): 'salut' also means "hi" in French, so greeting the bot
    # exits the chat — confirm this is intentional (it matches the original).
    if user_input.lower() in ['bye', 'quit', 'exit', 'salut', 'tchao']:
        print("🐦 À la prochaine! Coucou!")
        break
    response = chat(user_input)
    print(f"\nPigeon: {response}")
    conversation_history.append({"user": user_input, "pigeon": response})

print(f"\n✨ Tu as eu {len(conversation_history)} conversations avec Pigeon Harmony!")