l0n3r committed on
Commit
b04738d
·
verified ·
1 Parent(s): fc8e3f3

you said you could add open source models to projects and host it for free on awsome servers, wtf is this simulated shit?

Browse files
Files changed (3) hide show
  1. index.html +53 -13
  2. script.js +72 -46
  3. style.css +5 -0
index.html CHANGED
@@ -79,17 +79,36 @@
79
  handleFileUpload(file);
80
  }
81
  });
82
-
83
- function sendMessage() {
84
  const message = userInput.value.trim();
85
  if (message) {
86
  addMessage('user', message);
87
  userInput.value = '';
88
 
89
- // Simulate bot response (in a real app, this would call an API)
90
- setTimeout(() => {
91
- addMessage('bot', "I'm a simulated response. In a real implementation, this would call an API like GPT-3.5 Turbo or similar.");
92
- }, 1000);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
93
  }
94
  }
95
 
@@ -108,23 +127,44 @@
108
  chatContainer.scrollTop = chatContainer.scrollHeight;
109
  }
110
 
111
- function handleFileUpload(file) {
112
  if (file.type.match('image.*')) {
113
  const reader = new FileReader();
114
- reader.onload = function(e) {
115
  addMessage('user', `<img src="${e.target.result}" class="max-w-full h-auto rounded-lg" alt="Uploaded image">`);
116
 
117
- // Simulate image understanding response
118
- setTimeout(() => {
119
- addMessage('bot', "I see you've uploaded an image! In a real implementation, this would use a multimodal model to analyze the image.");
120
- }, 1500);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  };
122
  reader.readAsDataURL(file);
123
  } else {
124
  alert('Please upload an image file.');
125
  }
126
  }
127
- });
128
  </script>
129
  <script src="https://huggingface.co/deepsite/deepsite-badge.js"></script>
130
  </body>
 
79
  handleFileUpload(file);
80
  }
81
  });
82
// Send the current contents of the input box as a chat turn.
// Shows a bouncing-dots typing indicator while the model call is in
// flight, then renders the reply (or a generic error message).
async function sendMessage() {
    const message = userInput.value.trim();
    if (!message) {
        return;
    }

    addMessage('user', message);
    userInput.value = '';

    // Typing indicator: three staggered bouncing dots in a bot-style bubble.
    const typingIndicator = document.createElement('div');
    typingIndicator.className = 'flex justify-start mb-4';
    typingIndicator.innerHTML = `
        <div class="bg-gray-200 dark:bg-gray-700 text-gray-800 dark:text-white rounded-lg rounded-bl-none p-3 max-w-xs">
            <div class="flex space-x-2">
                <div class="w-2 h-2 rounded-full bg-gray-400 animate-bounce"></div>
                <div class="w-2 h-2 rounded-full bg-gray-400 animate-bounce" style="animation-delay: 0.2s"></div>
                <div class="w-2 h-2 rounded-full bg-gray-400 animate-bounce" style="animation-delay: 0.4s"></div>
            </div>
        </div>
    `;
    chatContainer.appendChild(typingIndicator);
    chatContainer.scrollTop = chatContainer.scrollHeight;

    try {
        const response = await callChatAPI(message);
        typingIndicator.remove();
        addMessage('bot', response);
    } catch (error) {
        typingIndicator.remove();
        addMessage('bot', "Sorry, I encountered an error processing your request. Please try again.");
        console.error(error);
    }
}
114
 
 
127
  chatContainer.scrollTop = chatContainer.scrollHeight;
128
  }
129
 
130
// Handle a file chosen by the user: image uploads are echoed into the
// chat, sent to the captioning model, and the model's description is
// rendered as the bot reply. Non-image files are rejected with an alert.
//
// Fixes vs. previous version:
//  - strict `startsWith('image/')` MIME check instead of the substring
//    regex `file.type.match('image.*')`, which also matched types merely
//    containing "image";
//  - the base64 payload is sliced out of the data URL we already read,
//    instead of re-reading the same file through a second FileReader
//    (the old `await fileToBase64(file)` call).
async function handleFileUpload(file) {
    if (!file.type.startsWith('image/')) {
        alert('Please upload an image file.');
        return;
    }

    const reader = new FileReader();
    reader.onload = async function(e) {
        // e.target.result is a data URL: "data:<mime>;base64,<payload>".
        addMessage('user', `<img src="${e.target.result}" class="max-w-full h-auto rounded-lg" alt="Uploaded image">`);

        // Show loading state
        const loadingDiv = document.createElement('div');
        loadingDiv.className = 'flex justify-start mb-4';
        loadingDiv.innerHTML = `
            <div class="bg-gray-200 dark:bg-gray-700 text-gray-800 dark:text-white rounded-lg rounded-bl-none p-3 max-w-xs">
                <div class="flex space-x-2">
                    <div class="w-2 h-2 rounded-full bg-gray-400 animate-bounce"></div>
                    <div class="w-2 h-2 rounded-full bg-gray-400 animate-bounce" style="animation-delay: 0.2s"></div>
                    <div class="w-2 h-2 rounded-full bg-gray-400 animate-bounce" style="animation-delay: 0.4s"></div>
                </div>
            </div>
        `;
        chatContainer.appendChild(loadingDiv);
        chatContainer.scrollTop = chatContainer.scrollHeight;

        try {
            // Reuse the already-loaded data URL; no second read of `file`.
            const base64Image = e.target.result.split(',')[1];
            const response = await callChatAPI("", base64Image);
            loadingDiv.remove();
            addMessage('bot', response);
        } catch (error) {
            loadingDiv.remove();
            addMessage('bot', "Sorry, I couldn't process that image. Please try another one.");
            console.error(error);
        }
    };
    reader.readAsDataURL(file);
}
167
+ });
168
  </script>
169
  <script src="https://huggingface.co/deepsite/deepsite-badge.js"></script>
170
  </body>
script.js CHANGED
@@ -1,56 +1,82 @@
1
- // This would contain the actual API integration in a real implementation
2
- // For now, it's just a placeholder with the chat functionality from index.html
3
-
4
- // In a real implementation, you would:
5
- // 1. Choose an open-source model that fits free hosting constraints (like GPT-3.5 Turbo API)
6
- // 2. Implement API calls to the model
7
- // 3. Add error handling
8
- // 4. Implement proper rate limiting
9
- // 5. Add loading states
10
- // 6. Implement proper session management
11
-
12
- // Example API integration would look something like:
13
- /*
14
  async function callChatAPI(message, imageData = null) {
15
- const apiKey = 'your_api_key'; // In real app, this would be secured
16
- const endpoint = 'https://api.openai.com/v1/chat/completions';
17
-
18
- const messages = [
19
- { role: 'system', content: 'You are a helpful assistant.' },
20
- { role: 'user', content: message }
21
- ];
22
-
23
- if (imageData) {
24
- // For multimodal models, you would include the image data
25
- messages[1].content = [
26
- { type: 'text', text: message },
27
- { type: 'image_url', image_url: imageData }
28
- ];
29
- }
30
-
31
  try {
32
- const response = await fetch(endpoint, {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  method: 'POST',
34
  headers: {
35
- 'Content-Type': 'application/json',
36
- 'Authorization': `Bearer ${apiKey}`
37
  },
38
  body: JSON.stringify({
39
- model: 'gpt-4-vision-preview', // Or another model
40
- messages: messages,
41
- max_tokens: 300
 
 
42
  })
43
- });
44
-
45
- if (!response.ok) {
46
- throw new Error(`API request failed with status ${response.status}`);
47
  }
48
-
49
- const data = await response.json();
50
- return data.choices[0].message.content;
51
- } catch (error) {
52
- console.error('Error calling API:', error);
53
- return "Sorry, I encountered an error processing your request.";
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
54
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
55
  }
56
- */
 
1
+
2
+ // Configuration for Hugging Face Inference API
3
+ const HF_API_KEY = 'hf_YOUR_API_KEY'; // Replace with your actual API key
4
+ const TEXT_MODEL = 'mistralai/Mistral-7B-Instruct-v0.1'; // Free open-source text model
5
+ const IMAGE_MODEL = 'nlpconnect/vit-gpt2-image-captioning'; // Free image captioning model
6
+
7
+ // Main chat function
 
 
 
 
 
 
8
// Route one chat turn to the appropriate model: image data goes through
// the captioning model, plain text goes to the instruct model.
// Returns the bot-facing reply string; any API failure is logged and
// reported back as a friendly error message rather than thrown.
async function callChatAPI(message, imageData = null) {
    try {
        if (!imageData) {
            // Text-only turn.
            return await generateTextResponse(message);
        }
        // Image turn: describe the picture, then echo any accompanying text.
        const caption = await generateImageCaption(imageData);
        const suffix = message ? 'You asked: ' + message : '';
        return `I see an image that appears to be: ${caption}. ${suffix}`;
    } catch (error) {
        console.error('API Error:', error);
        return "Sorry, I'm having trouble processing your request right now.";
    }
}
23
+
24
+ // Generate text response using Mistral-7B
25
// Generate a text reply from the instruct model via the Hugging Face
// Inference API.
//
// @param {string} prompt - The user's message.
// @returns {Promise<string>} The model's reply with the echoed prompt stripped.
// @throws {Error} When the HTTP request fails or the payload shape is
//   not the expected `[{ generated_text: string }]` array.
//
// Fix vs. previous version: `result[0].generated_text` was dereferenced
// unguarded, so an unexpected payload (e.g. an `{ "error": ... }` object)
// surfaced as an opaque TypeError instead of a clear Error.
async function generateTextResponse(prompt) {
    const response = await fetch(
        `https://api-inference.huggingface.co/models/${TEXT_MODEL}`,
        {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${HF_API_KEY}`,
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({
                // Mistral instruct prompt format: user text wrapped in [INST] tags.
                inputs: `<s>[INST] ${prompt} [/INST]`,
                parameters: {
                    max_new_tokens: 200,
                    temperature: 0.7
                }
            })
        }
    );

    if (!response.ok) {
        throw new Error(`API request failed with status ${response.status}`);
    }

    const result = await response.json();
    // The API echoes the prompt inside generated_text; keep only the part
    // after the final [/INST] marker. Guard the payload shape first.
    const generated = result?.[0]?.generated_text;
    if (typeof generated !== 'string') {
        throw new Error('Unexpected API response shape');
    }
    return generated.split('[/INST]').pop().trim();
}
51
+
52
+ // Generate image caption using ViT-GPT2
53
// Generate a caption for an image via the Hugging Face Inference API.
//
// @param {string} imageData - Base64-encoded image bytes (no data-URL prefix).
// @returns {Promise<string>} The model's caption text.
// @throws {Error} When the HTTP request fails or the payload shape is
//   not the expected `[{ generated_text: string }]` array.
//
// NOTE(review): this sends the base64 string as JSON `{ inputs: ... }`;
// image-to-text endpoints are commonly fed raw binary bodies instead —
// confirm this payload format against the Inference API docs.
//
// Fix vs. previous version: `result[0].generated_text` was dereferenced
// unguarded, so an error payload surfaced as an opaque TypeError.
async function generateImageCaption(imageData) {
    const response = await fetch(
        `https://api-inference.huggingface.co/models/${IMAGE_MODEL}`,
        {
            method: 'POST',
            headers: {
                'Authorization': `Bearer ${HF_API_KEY}`,
                'Content-Type': 'application/json'
            },
            body: JSON.stringify({ inputs: imageData })
        }
    );

    if (!response.ok) {
        throw new Error(`API request failed with status ${response.status}`);
    }

    const result = await response.json();
    const caption = result?.[0]?.generated_text;
    if (typeof caption !== 'string') {
        throw new Error('Unexpected API response shape');
    }
    return caption;
}
73
+
74
+ // Utility function to convert file to base64
75
// Read a File/Blob and resolve with its base64 payload — i.e. the data
// URL contents after the "data:<mime>;base64," prefix. Rejects on any
// FileReader error.
function fileToBase64(file) {
    return new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onload = () => {
            const [, payload] = reader.result.split(',');
            resolve(payload);
        };
        reader.onerror = (error) => reject(error);
        reader.readAsDataURL(file);
    });
}
 
style.css CHANGED
@@ -33,6 +33,11 @@ body {
33
  .dark ::-webkit-scrollbar-thumb:hover {
34
  background: #718096;
35
  }
 
 
 
 
 
36
 
37
  /* Markdown styling */
38
  .markdown p {
 
33
  .dark ::-webkit-scrollbar-thumb:hover {
34
  background: #718096;
35
  }
36
/* Loading animation for the chat typing-indicator dots. */
/* NOTE(review): the dots use Tailwind's `animate-bounce` utility, and
   Tailwind ships its own `bounce` keyframes; declaring `@keyframes bounce`
   here overrides that definition with a subtler 5px hop — confirm the
   override (and its effect on any other `animate-bounce` usage) is
   intentional. */
@keyframes bounce {
    0%, 100% { transform: translateY(0); }
    50% { transform: translateY(-5px); }
}
41
 
42
  /* Markdown styling */
43
  .markdown p {