# smurfs/src/ai_behavior.py
# Author: BolyosCsaba — initial commit (82fa936)
"""
Mock AI Behavior System
Simulates LLM decision-making locally
"""
import random
from dataclasses import dataclass
from enum import Enum
from typing import Any, List, Optional

from entities.enums import ActionType, NeedType, ResourceType
class Personality(Enum):
    """Personalities emerge from stats and actions, not predefined.

    Assigned by ``MockAI.determine_personality_from_stats`` based on a
    smurf's stats and action history; babies and children always remain
    ``NONE``. The string values are used as dict keys / display labels.
    """
    NONE = "none" # No personality yet (babies/children)
    WORKER = "worker" # High strength/stamina, gathers a lot
    LAZY = "lazy" # Low activity, high rest ratio
    EXPLORER = "explorer" # High speed, explores a lot
    SOCIAL = "social" # High social stat, many interactions
    BUILDER = "builder" # Works with buildings, construction
    GUARDIAN = "guardian" # Protects village, watches perimeter
    GATHERER = "gatherer" # Specializes in food gathering
    CRAFTER = "crafter" # Works in workshops, processes resources
    LEADER = "leader" # High intelligence/social, organizes others
    SCHOLAR = "scholar" # High intelligence, studies/researches
class SocialActivity(Enum):
    """Kinds of social interaction between smurfs.

    Values deliberately mirror the corresponding ``ActionType`` members so
    the two enums can be compared/exchanged by value.
    """
    PLAYING = "play" # Changed to match ActionType.PLAY
    CHATTING = "chat" # Changed to match ActionType.CHAT
@dataclass
class AIDecision:
    """A single action choice produced by the AI for one smurf.

    Attributes:
        action: The ActionType the smurf should perform.
        target: Optional object the action applies to (resource, smurf,
            building, ...) — intentionally untyped beyond Any.
        confidence: How certain the AI is about this choice (0.0-1.0).
        reasoning: Human-readable explanation of the decision (mock LLM output).
    """
    action: ActionType
    # Fixed: was `Optional[any]` — `any` is the builtin function, not a type;
    # the intended annotation is typing.Any.
    target: Optional[Any] = None
    confidence: float = 1.0
    reasoning: str = ""
class MockAI:
    """
    Mock AI that simulates LLM behavior.
    Later: Replace with real HuggingFace API calls.

    Decision philosophy (see decide_action): needs dictate actions;
    personality only nudges preferences once needs are satisfied.
    """

    @staticmethod
    def calculate_need_urgency(needs) -> dict:
        """Calculate urgency of each need - needs dictate actions, not personality.

        Args:
            needs: SmurfNeeds-like object exposing hunger/energy/hygiene/
                health/happiness attributes (assumed 0-100 scale — the
                urgency formula is anchored on 100).

        Returns:
            dict mapping NeedType -> urgency score (higher = more urgent).
        """
        # Imported here to avoid a circular import at module load time
        from config import NEED_CRITICAL_THRESHOLD_BASE, NEED_LOW_THRESHOLD_BASE
        urgencies = {}
        need_types = [NeedType.HUNGER, NeedType.ENERGY, NeedType.HYGIENE, NeedType.HEALTH, NeedType.HAPPINESS]
        for need_type in need_types:
            need_value = getattr(needs, need_type.value)
            # Calculate urgency: lower need value = higher urgency.
            # All needs are equally important for survival.
            # Using BASE thresholds (individual variations are small, so this
            # is fine for AI decisions).
            if need_value < NEED_CRITICAL_THRESHOLD_BASE:
                urgency = (100 - need_value) * 3.0  # Critical multiplier
            elif need_value < NEED_LOW_THRESHOLD_BASE:
                urgency = (100 - need_value) * 2.0  # Low multiplier
            else:
                urgency = (100 - need_value) * 1.0
            urgencies[need_type] = urgency
        return urgencies

    @staticmethod
    def determine_personality_from_stats(stats, action_history: dict, life_stage) -> Personality:
        """
        Determine personality based on stats and action history.
        Only adults/elders have personalities.

        Args:
            stats: SmurfStats-like object (strength/stamina/speed/social/
                intelligence, assumed 0-100).
            action_history: dict mapping ActionType value -> times performed.
            life_stage: LifeStage enum or its string value.

        Returns:
            The highest-scoring Personality, or Personality.NONE if the smurf
            is a baby/child or no score exceeds the 0.3 significance cutoff.
        """
        # Import here to avoid circular imports
        from entities.smurf import LifeStage
        # Babies and children have no personality
        if hasattr(life_stage, 'value'):
            # It's an enum
            if life_stage in [LifeStage.BABY, LifeStage.CHILD]:
                return Personality.NONE
        elif isinstance(life_stage, str):
            # It's a string
            if life_stage in ["baby", "child"]:
                return Personality.NONE
        # Hoisted: the total action count is reused by every ratio below
        # (previously recomputed inline for the WORKER score).
        total_actions = sum(action_history.values())
        # Calculate personality scores
        scores = {}
        # WORKER: High strength + stamina, lots of gathering
        scores[Personality.WORKER] = (
            (stats.strength / 100.0) * 0.4 +
            (stats.stamina / 100.0) * 0.4 +
            (action_history.get(ActionType.GATHER.value, 0) / max(total_actions, 1)) * 0.2
        )
        # LAZY: Low activity, high rest ratio
        rest_ratio = action_history.get(ActionType.REST.value, 0) / max(total_actions, 1)
        scores[Personality.LAZY] = rest_ratio * 0.8 + (1.0 - (stats.strength / 100.0)) * 0.2
        # EXPLORER: High speed, lots of exploring
        scores[Personality.EXPLORER] = (
            (stats.speed / 100.0) * 0.5 +
            (action_history.get(ActionType.EXPLORE.value, 0) / max(total_actions, 1)) * 0.5
        )
        # SOCIAL: High social stat, lots of social actions
        social_actions = action_history.get(ActionType.CHAT.value, 0) + action_history.get(ActionType.PLAY.value, 0) + action_history.get(ActionType.FOLLOW.value, 0)
        scores[Personality.SOCIAL] = (
            (stats.social / 100.0) * 0.5 +
            (social_actions / max(total_actions, 1)) * 0.5
        )
        # GATHERER: Specializes in food gathering (would need to track food vs wood)
        # For now, use high gathering ratio
        scores[Personality.GATHERER] = (
            (action_history.get(ActionType.GATHER.value, 0) / max(total_actions, 1)) * 0.7 +
            (stats.strength / 100.0) * 0.3
        )
        # BUILDER: Works with buildings (would need building interaction tracking)
        # For now, use high intelligence + gathering (construction materials)
        scores[Personality.BUILDER] = (
            (stats.intelligence / 100.0) * 0.5 +
            (action_history.get(ActionType.GATHER.value, 0) / max(total_actions, 1)) * 0.3 +
            (stats.strength / 100.0) * 0.2
        )
        # CRAFTER: High intelligence, workshop work (would need workshop tracking)
        scores[Personality.CRAFTER] = (
            (stats.intelligence / 100.0) * 0.7 +
            (stats.strength / 100.0) * 0.3
        )
        # LEADER: High intelligence + social
        scores[Personality.LEADER] = (
            (stats.intelligence / 100.0) * 0.5 +
            (stats.social / 100.0) * 0.5
        )
        # SCHOLAR: Very high intelligence
        scores[Personality.SCHOLAR] = (stats.intelligence / 100.0) * 0.9
        # GUARDIAN: High speed + strength, patrols (would need patrol tracking)
        scores[Personality.GUARDIAN] = (
            (stats.speed / 100.0) * 0.4 +
            (stats.strength / 100.0) * 0.4 +
            (action_history.get(ActionType.EXPLORE.value, 0) / max(total_actions, 1)) * 0.2
        )
        # Find highest score
        if not scores:
            return Personality.NONE
        max_personality = max(scores.items(), key=lambda x: x[1])
        # Only assign personality if score is significant (above 0.3)
        if max_personality[1] > 0.3:
            return max_personality[0]
        return Personality.NONE

    def decide_action(self, smurf_state: dict, world_state: dict) -> AIDecision:
        """
        Make a decision for a smurf.
        Needs dictate actions, personality only provides slight preferences.

        Args:
            smurf_state: {
                "energy": float,
                "hunger": float,
                "hygiene": float,
                "health": float,
                "happiness": float,
                "needs": SmurfNeeds,
                "stats": SmurfStats,
                "care_mistakes": int,
                "inventory": dict,  # Inventory dict with item counts
                "inventory_weight": int,  # Current total weight
                "max_inventory_weight": int,  # Max weight capacity
                "is_inventory_full": bool,  # Whether inventory is at capacity
                "personality": Personality,
                "position": (x, y),
                "life_stage": LifeStage
            }
            world_state: {
                "nearby_resources": List[Resource],
                "nearby_smurfs": List[Smurf],
                "nearby_buildings": List[Building],
                "time_of_day": str,
                "village_storage": dict,  # {"food": int, "wood": int}
                "total_smurfs": int,  # Total number of alive smurfs
                "building_goals": dict,  # {"needs_wood_for_building": bool, "needs_wood_for_upgrade": bool, "wood_needed": int}
                "village_goal": Optional[str]  # Children's voted goal: "have_fun" or "work" or None
            }

        Returns:
            AIDecision with action and reasoning.
        """
        personality = smurf_state["personality"]
        needs = smurf_state.get("needs")
        inventory = smurf_state["inventory"]  # Dict with item counts
        stats = smurf_state.get("stats")
        # Check if smurf has food available
        from entities.enums import ItemType, ResourceType
        has_food = inventory.get(ItemType.FOOD.value, 0) > 0
        # Check for buildings with food and food resources
        nearby_buildings = world_state.get("nearby_buildings", [])
        buildings_with_food = [b for b in nearby_buildings
                               if b.storage.get(ItemType.FOOD.value, 0) > 0]
        nearby_resources = world_state.get("nearby_resources", [])
        food_resources = [r for r in nearby_resources
                          if r.resource_type in ResourceType.get_food_types()]
        # NEEDS-BASED DECISION MAKING (primary)
        if needs:
            # Calculate need urgencies (all needs equally important)
            urgencies = self.calculate_need_urgency(needs)
            # Find most urgent need
            most_urgent = max(urgencies.items(), key=lambda x: x[1])
            need_type, urgency_value = most_urgent
            # Special handling for hunger: prioritize eating
            if need_type == NeedType.HUNGER and urgency_value > 50:
                # Priority 1: If smurf has food in inventory, EAT
                if has_food:
                    return AIDecision(
                        action=ActionType.EAT,
                        confidence=1.0,
                        reasoning=f"Urgent need: hunger is critical ({urgency_value:.1f} urgency), eating from inventory"
                    )
                # Priority 2: If there's a building with food nearby, go there to EAT
                elif buildings_with_food:
                    return AIDecision(
                        action=ActionType.EAT,
                        confidence=1.0,
                        reasoning=f"Urgent need: hunger is critical ({urgency_value:.1f} urgency), heading to building with food"
                    )
                # Priority 3: If there's a food resource nearby, GATHER and EAT
                elif food_resources:
                    return AIDecision(
                        action=ActionType.GATHER,
                        confidence=1.0,
                        reasoning=f"Urgent need: hunger is critical ({urgency_value:.1f} urgency), gathering food to eat"
                    )
                # Fallback: explore to find food
                else:
                    return AIDecision(
                        action=ActionType.EXPLORE,
                        confidence=0.9,
                        reasoning=f"Urgent need: hunger is critical ({urgency_value:.1f} urgency), exploring to find food"
                    )
            # Map needs to actions (for non-hunger needs)
            need_to_action = {
                NeedType.ENERGY: ActionType.REST,
                NeedType.HYGIENE: ActionType.REST,  # Go to building for bathroom
                NeedType.HEALTH: ActionType.REST,  # Rest to recover
                NeedType.HAPPINESS: ActionType.FOLLOW  # Social interaction
            }
            # If urgency is high, prioritize that need (survival first!)
            if urgency_value > 50:
                action = need_to_action.get(need_type, ActionType.REST)
                return AIDecision(
                    action=action,
                    confidence=1.0,
                    reasoning=f"Urgent need: {need_type.value} is critical ({urgency_value:.1f} urgency)"
                )
        # Check if inventory is full - prioritize offloading
        is_inventory_full = smurf_state.get("is_inventory_full", False)
        if is_inventory_full:
            # If inventory is full, the only action is to offload.
            # Check if there are nearby buildings to offload to.
            nearby_buildings = world_state.get("nearby_buildings", [])
            if nearby_buildings:
                # Will move to building to offload (handled in _execute_decision)
                return AIDecision(
                    action=ActionType.REST,  # This will trigger movement to building in _execute_decision
                    confidence=1.0,
                    reasoning="Inventory full, must offload to building"
                )
            else:
                # No buildings nearby, explore to find one
                return AIDecision(
                    action=ActionType.EXPLORE,
                    confidence=1.0,
                    reasoning="Inventory full, exploring to find building to offload"
                )
        # Fallback energy check if needs system not available
        energy = smurf_state.get("energy", needs.energy if needs else 50)
        if energy < 30:
            return AIDecision(
                action=ActionType.REST,
                confidence=1.0,
                reasoning="Energy too low, must rest"
            )
        # CHILD-SPECIFIC GOAL-BASED BEHAVIOR
        # Children should prioritize village goals over random actions
        life_stage = smurf_state.get("life_stage")
        from entities.smurf import LifeStage
        if life_stage == LifeStage.CHILD:
            village_storage = world_state.get("village_storage", {})
            total_smurfs = world_state.get("total_smurfs", 1)
            building_goals = world_state.get("building_goals", {})
            # Calculate food per smurf
            total_food = village_storage.get(ItemType.FOOD.value, 0)
            food_per_smurf = total_food / max(total_smurfs, 1)
            # Priority 1: If food per smurf < 2, gather food like crazy
            if food_per_smurf < 2.0:
                # Check if there are food resources nearby
                nearby_resources = world_state.get("nearby_resources", [])
                food_resources = [r for r in nearby_resources
                                  if r.resource_type in ResourceType.get_food_types()]
                if food_resources:
                    return AIDecision(
                        action=ActionType.GATHER,
                        confidence=1.0,
                        reasoning=f"Village needs food! Only {food_per_smurf:.1f} food per smurf, gathering food"
                    )
                else:
                    # No food resources nearby, explore to find some
                    return AIDecision(
                        action=ActionType.EXPLORE,
                        confidence=0.9,
                        reasoning=f"Village needs food! Exploring to find food resources"
                    )
            # Priority 2: If there are building goals (need wood), gather wood
            if building_goals.get("needs_wood_for_building", False) or building_goals.get("needs_wood_for_upgrade", False):
                wood_needed = building_goals.get("wood_needed", 0)
                # Check if there are wood resources nearby
                nearby_resources = world_state.get("nearby_resources", [])
                wood_resources = [r for r in nearby_resources
                                  if r.resource_type == ResourceType.WOOD]
                if wood_resources:
                    return AIDecision(
                        action=ActionType.GATHER,
                        confidence=1.0,
                        reasoning=f"Village needs {wood_needed} wood for building/upgrade! Gathering wood"
                    )
                else:
                    # No wood resources nearby, explore to find some
                    return AIDecision(
                        action=ActionType.EXPLORE,
                        confidence=0.9,
                        reasoning=f"Village needs wood! Exploring to find wood resources"
                    )
            # Priority 3: Check children's voted goal (democracy!)
            village_goal = world_state.get("village_goal")
            # If children voted for "have_fun", prioritize social activities
            if village_goal == "have_fun":
                nearby_smurfs = world_state.get("nearby_smurfs", [])
                # If there are nearby smurfs, prefer social interactions
                if nearby_smurfs:
                    # 80% chance for social interaction when goal is "have_fun"
                    if random.random() < 0.8:
                        # Choose between play and chat
                        social_action = random.choice([ActionType.PLAY, ActionType.CHAT])
                        return AIDecision(
                            action=ActionType.FOLLOW,  # "follow" triggers social interaction in _execute_decision
                            confidence=0.9,
                            reasoning=f"Children voted to have fun! Time to {social_action.value} with friends"
                        )
                    else:
                        return AIDecision(
                            action=ActionType.EXPLORE,
                            confidence=0.8,
                            reasoning="Children voted to have fun! Time to explore and play"
                        )
                else:
                    # No nearby smurfs, just explore
                    return AIDecision(
                        action=ActionType.EXPLORE,
                        confidence=0.8,
                        reasoning="Children voted to have fun! Time to explore and play"
                    )
            # If children voted for "work" or no goal set, prefer gathering/working
            elif village_goal == "work":
                # Still check for resources to gather
                nearby_resources = world_state.get("nearby_resources", [])
                if nearby_resources:
                    return AIDecision(
                        action=ActionType.GATHER,
                        confidence=0.9,
                        reasoning="Children voted to work hard! Gathering resources for the village"
                    )
                else:
                    return AIDecision(
                        action=ActionType.EXPLORE,
                        confidence=0.8,
                        reasoning="Children voted to work hard! Exploring to find resources"
                    )
            # No goal set or default behavior: If enough food and no building goals, prefer social actions.
            # Children should play, chat, and explore to be happy.
            nearby_smurfs = world_state.get("nearby_smurfs", [])
            # If there are nearby smurfs, prefer social interactions
            if nearby_smurfs:
                # 70% chance for social interaction, 30% for explore
                if random.random() < 0.7:
                    # Choose between play and chat
                    social_action = random.choice([ActionType.PLAY, ActionType.CHAT])
                    return AIDecision(
                        action=ActionType.FOLLOW,  # "follow" triggers social interaction in _execute_decision
                        confidence=0.9,
                        reasoning=f"Village is doing well! Time to {social_action.value} with friends"
                    )
                else:
                    return AIDecision(
                        action=ActionType.EXPLORE,
                        confidence=0.8,
                        reasoning="Village is doing well! Time to explore and have fun"
                    )
            else:
                # No nearby smurfs, just explore
                return AIDecision(
                    action=ActionType.EXPLORE,
                    confidence=0.8,
                    reasoning="Village is doing well! Time to explore and have fun"
                )
        # PERSONALITY-BASED PREFERENCES (only when needs are met)
        # Personality provides slight preference adjustments, not overriding needs
        action_weights = self._get_personality_preferences(personality, stats, world_state)
        # Choose action based on personality preferences
        if action_weights:
            actions = list(action_weights.keys())
            weights = list(action_weights.values())
            action = random.choices(actions, weights=weights)[0]
        else:
            # Default: balanced action selection
            action = random.choice([ActionType.GATHER, ActionType.EXPLORE, ActionType.REST, ActionType.FOLLOW])
        # Generate reasoning
        reasoning = self._generate_reasoning(action, personality, smurf_state, world_state)
        return AIDecision(
            action=action,
            confidence=random.uniform(0.7, 1.0),
            reasoning=reasoning
        )

    def _get_personality_preferences(self, personality: Personality, stats, world_state: dict) -> dict:
        """Get action preferences based on personality (slight adjustments only).

        Returns a dict mapping ActionType -> relative weight for
        random.choices; balanced weights when the personality is NONE
        or unknown.
        """
        # Base weights (balanced)
        base_weights = {
            ActionType.GATHER: 0.25,
            ActionType.REST: 0.25,
            ActionType.EXPLORE: 0.25,
            ActionType.FOLLOW: 0.25
        }
        # Personality adjustments (small, not overriding)
        adjustments = {
            Personality.WORKER: {ActionType.GATHER: 0.4, ActionType.REST: 0.2, ActionType.EXPLORE: 0.2, ActionType.FOLLOW: 0.2},
            Personality.LAZY: {ActionType.GATHER: 0.15, ActionType.REST: 0.5, ActionType.EXPLORE: 0.15, ActionType.FOLLOW: 0.2},
            Personality.EXPLORER: {ActionType.GATHER: 0.2, ActionType.REST: 0.2, ActionType.EXPLORE: 0.5, ActionType.FOLLOW: 0.1},
            Personality.SOCIAL: {ActionType.GATHER: 0.1, ActionType.REST: 0.2, ActionType.EXPLORE: 0.2, ActionType.FOLLOW: 0.5},
            Personality.GATHERER: {ActionType.GATHER: 0.6, ActionType.REST: 0.15, ActionType.EXPLORE: 0.15, ActionType.FOLLOW: 0.1},
            Personality.BUILDER: {ActionType.GATHER: 0.4, ActionType.REST: 0.3, ActionType.EXPLORE: 0.15, ActionType.FOLLOW: 0.15},
            Personality.CRAFTER: {ActionType.GATHER: 0.3, ActionType.REST: 0.3, ActionType.EXPLORE: 0.2, ActionType.FOLLOW: 0.2},
            Personality.LEADER: {ActionType.GATHER: 0.2, ActionType.REST: 0.2, ActionType.EXPLORE: 0.2, ActionType.FOLLOW: 0.4},
            Personality.SCHOLAR: {ActionType.GATHER: 0.2, ActionType.REST: 0.4, ActionType.EXPLORE: 0.2, ActionType.FOLLOW: 0.2},
            Personality.GUARDIAN: {ActionType.GATHER: 0.2, ActionType.REST: 0.2, ActionType.EXPLORE: 0.4, ActionType.FOLLOW: 0.2},
        }
        if personality in adjustments:
            return adjustments[personality]
        # No personality or NONE - use balanced weights
        return base_weights

    def _generate_reasoning(self, action: ActionType, personality: Personality,
                            smurf_state: dict, world_state: dict) -> str:
        """Generate human-like reasoning (mock LLM output)."""
        # Fixed: use .get(..., []) for nearby_resources/nearby_smurfs — every
        # other consumer of world_state treats these keys as optional, and
        # direct indexing raised KeyError when they were absent.
        nearby_resources = world_state.get("nearby_resources", [])
        nearby_smurfs = world_state.get("nearby_smurfs", [])
        reasonings = {
            ActionType.GATHER: [
                f"As a {personality.value}, I should gather resources for the village",
                f"I see {len(nearby_resources)} resources nearby, time to work!",
                "The village needs food, I'll help gather"
            ],
            ActionType.REST: [
                f"I'm feeling tired, a {personality.value} needs rest too",
                "Time for a break, I've been working hard",
                "Let me relax for a moment"
            ],
            ActionType.EXPLORE: [
                f"As an {personality.value}, I love discovering new areas!",
                "I wonder what's beyond that hill?",
                "Time to explore the village surroundings"
            ],
            ActionType.FOLLOW: [
                f"I see {len(nearby_smurfs)} smurfs nearby, let's work together!",
                "Smurfs are stronger together, I'll follow my friends",
                "Where are the others going? I'll join them"
            ]
        }
        return random.choice(reasonings.get(action, ["Deciding what to do..."]))

    def get_personality_description(self, personality: Personality) -> str:
        """Get description of personality (mock LLM prompt)."""
        descriptions = {
            Personality.NONE: "Still developing, no clear personality yet",
            Personality.WORKER: "Hardworking and dedicated, gathers resources efficiently",
            Personality.LAZY: "Prefers relaxation, works only when necessary",
            Personality.EXPLORER: "Curious and adventurous, loves discovering new places",
            Personality.SOCIAL: "Enjoys company, seeks social interactions",
            Personality.BUILDER: "Focuses on construction and building improvements",
            Personality.GUARDIAN: "Protects the village, watches for threats",
            Personality.GATHERER: "Specializes in gathering food resources",
            Personality.CRAFTER: "Works in workshops, processes resources",
            Personality.LEADER: "Organizes others, boosts nearby smurfs",
            Personality.SCHOLAR: "Studies and researches, unlocks improvements"
        }
        return descriptions.get(personality, "Unknown personality")
# ============================================================================
# MOCK LLM INTEGRATION (for future)
# ============================================================================
class MockLLMAPI:
    """
    Stand-in for a real LLM API (to be swapped for HuggingFace later).
    Mirrors the async interface the game will eventually use for real calls.
    """

    def __init__(self, model_name: str = "mock-model"):
        # Which model this client pretends to be, and a usage counter
        self.model_name = model_name
        self.api_calls = 0

    async def generate(self, prompt: str, max_tokens: int = 100) -> str:
        """
        Mock LLM generation
        Later replace with:
        ```python
        from huggingface_hub import InferenceClient
        client = InferenceClient(api_key="your_key")
        response = await client.text_generation(prompt, max_new_tokens=max_tokens)
        return response
        ```
        """
        self.api_calls += 1
        # Simple keyword-based mock responses
        lowered = prompt.lower()
        if "personality" in lowered:
            candidates = [
                "This smurf is a hardworking type who loves gathering",
                "A lazy but lovable smurf who prefers rest",
                "An adventurous explorer always seeking new places",
            ]
        elif "decide" in lowered:
            candidates = [
                "gather resources from nearby mushrooms",
                "take a short rest to restore energy",
                "explore the forest area to the north",
            ]
        else:
            # No recognized keyword: fixed fallback reply
            return "I'm thinking about what to do next..."
        return random.choice(candidates)

    def get_stats(self) -> dict:
        """Get API usage stats."""
        status = "mock" if self.model_name == "mock-model" else "connected"
        return {
            "model": self.model_name,
            "api_calls": self.api_calls,
            "status": status,
        }