File size: 7,924 Bytes
a32dc8b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
// llmHandler.js - Module for handling LLM interactions
class LLMHandler {
    /**
     * @param {string|null} [apiToken=null] - Hugging Face API token. When omitted,
     *     the handler starts in simulated-response mode.
     */
    constructor(apiToken = null) {
        // List of free Hugging Face models
        this.freeLLMs = [
            "meta-llama/Llama-3.1-8B-Instruct",
            "google/gemma-3-270m-it",
            "google/gemma-3-4b-it",
            "google/gemma-3-27b-it",
            "Qwen/Qwen3-4B-Instruct-2507",
            "Qwen/Qwen3-8B",
            "mistralai/Mistral-7B-Instruct-v0.3",
            "HuggingFaceH4/zephyr-7b-beta",
            "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
            "microsoft/Phi-3-mini-4k-instruct",
            "stabilityai/stablelm-2-1_6b",
            "NousResearch/Hermes-2-Pro-Llama-3-8B",
            "CohereForAI/c4ai-command-r-v01",
            "nvidia/Nemotron-Research-Reasoning-Qwen-1.5B",
            "inclusionAI/AReaL-boba-2-8B"
        ];

        // Default selected model
        this.selectedModel = this.freeLLMs[0];

        // Hugging Face Inference API endpoint
        this.apiEndpoint = "https://api-inference.huggingface.co/models/";

        // API token (can be set in constructor or externally)
        this.apiToken = apiToken;

        // Flag to indicate if we should use simulated responses.
        // If we have a token, we'll use real responses by default.
        this.useSimulatedResponses = !apiToken;

        // Artificial latency (ms) applied to simulated responses so they feel
        // like real network calls. Exposed as a property so callers/tests can
        // shorten it; defaults to the original hard-coded 1000 ms.
        this.simulatedDelayMs = 1000;
    }

    /**
     * Set the API token for Hugging Face Inference API
     * @param {string} token - The API token
     */
    setApiToken(token) {
        this.apiToken = token;
        // When a real API token is set, disable simulated responses
        this.useSimulatedResponses = !token;
    }

    /**
     * Set whether to use simulated responses
     * @param {boolean} useSimulated - Whether to use simulated responses
     */
    setUseSimulatedResponses(useSimulated) {
        this.useSimulatedResponses = useSimulated;
    }

    /**
     * Set the selected model. Unknown models are rejected with a console
     * warning and the current selection is kept.
     * @param {string} model - The model identifier
     */
    setSelectedModel(model) {
        if (this.freeLLMs.includes(model)) {
            this.selectedModel = model;
        } else {
            console.warn(`Model ${model} is not in the list of free LLMs`);
        }
    }

    /**
     * Send a query to the selected LLM.
     *
     * In simulated mode this resolves locally (no token required); otherwise
     * it calls the Hugging Face Inference API with the selected model.
     *
     * @param {string} query - The query to send to the LLM
     * @returns {Promise<string>} - The response from the LLM
     * @throws {Error} If no API token is set in non-simulated mode, or the
     *     API call fails / returns an unexpected payload.
     */
    async sendQuery(query) {
        // Check the simulated flag FIRST: the constructor enables simulation
        // precisely when no token is present, so requiring a token before this
        // check would make the simulated path unreachable.
        if (this.useSimulatedResponses) {
            console.log("Using simulated response for query:", query);
            // Simulate API delay
            await new Promise(resolve => setTimeout(resolve, this.simulatedDelayMs));
            return this.simulateLLMResponse(query);
        }

        // Real API mode requires a token
        if (!this.apiToken) {
            throw new Error("API token is not set. Please set your Hugging Face API token to use the LLM functionality.");
        }

        // Prepare the API request
        const url = this.apiEndpoint + this.selectedModel;
        const payload = {
            inputs: query,
            parameters: {
                max_new_tokens: 200,
                temperature: 0.7,
                top_p: 0.9,
                do_sample: true
            }
        };

        // Make the API request
        try {
            const response = await fetch(url, {
                method: 'POST',
                headers: {
                    'Authorization': `Bearer ${this.apiToken}`,
                    'Content-Type': 'application/json'
                },
                body: JSON.stringify(payload)
            });

            // Check if the request was successful
            if (!response.ok) {
                // The error body is not guaranteed to be JSON (gateways may
                // return HTML/plain text), so parse defensively.
                let detail = 'Unknown error';
                try {
                    const errorData = await response.json();
                    detail = errorData.error || detail;
                } catch {
                    // Non-JSON error body; keep the generic detail.
                }
                throw new Error(`API request failed with status ${response.status}: ${detail}`);
            }

            // Parse the response
            const data = await response.json();

            // Extract the generated text; the API returns either an array of
            // generations or a single object depending on the model/pipeline.
            if (Array.isArray(data) && data.length > 0 && data[0].generated_text) {
                return data[0].generated_text;
            } else if (data.generated_text) {
                return data.generated_text;
            } else {
                throw new Error("Unexpected response format from the API");
            }
        } catch (error) {
            console.error("Error sending query to LLM:", error);
            throw error;
        }
    }

    /**
     * Simulate an LLM response (for testing purposes). Keyword-matches the
     * query against a few canned topics and falls back to a generic reply.
     * @param {string} query - The query to simulate a response for
     * @returns {string} - A simulated response
     */
    simulateLLMResponse(query) {
        const lowerQuery = query.toLowerCase();

        if (lowerQuery.includes("villager") && lowerQuery.includes("behavior")) {
            return "Villagers in the medieval village simulation exhibit complex behaviors based on their needs and the time of day. They cycle through states like sleeping, working, eating, and socializing. Their decisions are influenced by factors such as energy levels, hunger, and social needs.";
        } else if (lowerQuery.includes("resource") && lowerQuery.includes("management")) {
            return "Resource management in the village is critical for sustainability. Villagers collect resources like wood and stone from designated areas. Proper allocation of resources to buildings and villagers ensures the village's growth and resilience against disasters.";
        } else if (lowerQuery.includes("disaster") || lowerQuery.includes("emergency")) {
            return "The village simulation includes various disasters like fires, floods, and plagues. These events test the village's resilience and require strategic planning to mitigate their effects. Warriors can be dispatched to help protect the village from certain threats.";
        } else if (lowerQuery.includes("ai") || lowerQuery.includes("artificial intelligence")) {
            return "This simulation uses several AI techniques including finite state machines for villager behavior, pathfinding algorithms for navigation, and rule-based systems for decision making. The emergent behaviors arise from the interaction of these systems.";
        } else if (lowerQuery.includes("building") || lowerQuery.includes("structure")) {
            return "The village features various building types, each with unique functions: houses for living, workshops for crafting, markets for trading, and specialized buildings like universities and hospitals. Buildings are placed strategically to optimize villager workflows.";
        } else {
            return `I've received your query about "${query}". In a full implementation with API access, I would provide a detailed response based on the selected LLM model. For now, try asking about villagers, resources, disasters, AI systems, or buildings in the village.`;
        }
    }

    /**
     * Get the list of free LLMs
     * @returns {string[]} - Array of free LLM model identifiers
     */
    getFreeLLMs() {
        return this.freeLLMs;
    }

    /**
     * Get the currently selected model
     * @returns {string} - The currently selected model identifier
     */
    getSelectedModel() {
        return this.selectedModel;
    }

    /**
     * Check if the API token is set
     * @returns {boolean} - Whether the API token is set
     */
    isApiTokenSet() {
        return !!this.apiToken;
    }

    /**
     * Get the API token (for debugging purposes).
     * NOTE(review): exposes the raw secret — avoid logging its return value.
     * @returns {string|null} - The API token or null if not set
     */
    getApiToken() {
        return this.apiToken;
    }
}

// Export the LLMHandler class
export default LLMHandler;