mmdhx committed on
Commit
28ecdaa
·
verified ·
1 Parent(s): 0053fdd

Update egg-api.js

Browse files
Files changed (1) hide show
  1. egg-api.js +259 -129
egg-api.js CHANGED
@@ -1,16 +1,17 @@
1
  const express = require('express');
2
- const fs = require('fs');
3
- const path = require('path');
4
  const cv = require('opencv4nodejs');
5
  const Jimp = require('jimp');
 
6
  const axios = require('axios');
 
 
 
 
7
  const sharp = require('sharp');
8
  const serveStatic = require('serve-static');
9
- const ffmpeg = require('fluent-ffmpeg');
10
 
11
  const app = express();
12
- const port = 7860;
13
-
14
  // 从文件夹加载所有分类器
15
  const loadClassifiersFromFolder = async (folderPath) => {
16
  const classifiers = [];
@@ -26,148 +27,110 @@ const loadClassifiersFromFolder = async (folderPath) => {
26
  // 下载并转换图像到内存中
27
  const downloadAndConvertImage = async (url) => {
28
  try {
29
- const response = await axios.get(url, { responseType: 'arraybuffer' });
30
- return await sharp(response.data).toFormat('jpeg').toBuffer();
 
 
 
 
 
 
 
 
 
 
31
  } catch (error) {
32
- throw new Error('Error downloading or converting image: ' + error.message);
 
33
  }
34
  };
35
 
36
  // 图像处理函数
37
  const processImage = async (imageBuffer, classifiersFolder, replacementImageBuffer) => {
38
- const classifiers = await loadClassifiersFromFolder(classifiersFolder);
39
- const img = cv.imdecode(imageBuffer);
40
- const grayImg = img.bgrToGray();
41
- const allFaces = classifiers.flatMap(classifier => classifier.detectMultiScale(grayImg).objects);
42
-
43
- if (allFaces.length === 0) throw new Error('No faces detected in the image.');
44
-
45
- const replacementFace = await Jimp.read(replacementImageBuffer);
46
- allFaces.forEach((faceRect) => {
47
- const resizedReplacementFace = replacementFace.resize(faceRect.width, faceRect.height);
48
- resizedReplacementFace.scan(0, 0, resizedReplacementFace.bitmap.width, resizedReplacementFace.bitmap.height, (x, y, idx) => {
49
- if (resizedReplacementFace.bitmap.data[idx + 3] > 0) {
50
- img.set(faceRect.y + y, faceRect.x + x, new cv.Vec3(
51
- resizedReplacementFace.bitmap.data[idx + 2],
52
- resizedReplacementFace.bitmap.data[idx + 1],
53
- resizedReplacementFace.bitmap.data[idx]
54
- ));
55
- }
56
- });
57
- });
58
-
59
- return cv.imencode('.jpg', img);
60
- };
61
-
62
- // GIF转换为视频
63
- const gifToVideo = async (inputGifPath, outputVideoPath) => {
64
- return new Promise((resolve, reject) => {
65
- ffmpeg(inputGifPath)
66
- .inputFormat('gif')
67
- .output(outputVideoPath)
68
- .on('end', resolve)
69
- .on('error', reject)
70
- .run();
71
- });
72
- };
73
-
74
- // 视频转换为GIF
75
- const videoToGif = async (inputVideoPath, outputGifPath) => {
76
- return new Promise((resolve, reject) => {
77
- ffmpeg(inputVideoPath)
78
- .output(outputGifPath)
79
- .on('end', resolve)
80
- .on('error', reject)
81
- .run();
82
- });
83
- };
84
-
85
- // 处理 GIF 的接口
86
- app.get('/processGif', async (req, res) => {
87
- const { gifUrl } = req.query;
88
-
89
- if (!gifUrl) {
90
- return res.status(400).send('GIF URL is required');
91
- }
92
-
93
- const tempDir = path.join(__dirname, 'temp');
94
- const tempGifPath = path.join(tempDir, 'temp.gif');
95
- const tempVideoPath = path.join(tempDir, 'temp_video.mp4');
96
- const outputGifPath = path.join(tempDir, 'output.gif');
97
- const classifiersFolder = path.join(__dirname, 'classifiers');
98
- const replacementImagePath = path.join(__dirname, 'replacement_face.png');
99
-
100
  try {
101
- // 下载 GIF 到本地
102
- const response = await axios.get(gifUrl, { responseType: 'stream' });
103
- const writer = fs.createWriteStream(tempGifPath);
104
- response.data.pipe(writer);
105
- await new Promise((resolve, reject) => {
106
- writer.on('finish', resolve);
107
- writer.on('error', reject);
108
- });
109
-
110
- // 从文件夹动态加载分类器
111
  const classifiers = await loadClassifiersFromFolder(classifiersFolder);
112
- await gifToVideo(tempGifPath, tempVideoPath);
113
-
114
- const videoCapture = new cv.VideoCapture(tempVideoPath);
115
- const frameWidth = videoCapture.get(cv.CAP_PROP_FRAME_WIDTH);
116
- const frameHeight = videoCapture.get(cv.CAP_PROP_FRAME_HEIGHT);
117
- const originalFps = videoCapture.get(cv.CAP_PROP_FPS);
118
- const videoWriter = new cv.VideoWriter(path.join(tempDir, 'out.mp4'), cv.VideoWriter.fourcc('avc1'), originalFps, new cv.Size(frameWidth, frameHeight));
119
-
120
- while (true) {
121
- const frame = videoCapture.read();
122
- if (frame.empty) break;
123
-
124
- const grayFrame = frame.bgrToGray();
125
- const faces = classifiers.flatMap(classifier => classifier.detectMultiScale(grayFrame).objects);
126
-
127
- const replacementFace = await Jimp.read(replacementImagePath);
128
- if (faces.length > 0) {
129
- faces.forEach(rect => {
130
- const resizedSubstituteImage = replacementFace.resize(rect.width, rect.height);
131
- resizedSubstituteImage.scan(0, 0, resizedSubstituteImage.bitmap.width, resizedSubstituteImage.bitmap.height, (x, y, idx) => {
132
- if (resizedSubstituteImage.bitmap.data[idx + 3] > 0) {
133
- frame.set(rect.y + y, rect.x + x, new cv.Vec3(
134
- resizedSubstituteImage.bitmap.data[idx + 2],
135
- resizedSubstituteImage.bitmap.data[idx + 1],
136
- resizedSubstituteImage.bitmap.data[idx]
137
- ));
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
138
  }
139
- });
140
- });
141
  }
142
-
143
- videoWriter.write(frame);
144
- }
145
-
146
- videoCapture.release();
147
- videoWriter.release();
148
- await videoToGif(path.join(tempDir, 'out.mp4'), outputGifPath);
149
-
150
- res.set('Content-Type', 'image/gif').sendFile(outputGifPath);
151
  } catch (error) {
152
- console.error('Error processing GIF:', error);
153
- res.status(500).send('Error processing GIF');
154
  }
155
- });
156
 
157
- // 图像处理接口
158
  app.get('/process-image', async (req, res) => {
159
  const { imageUrl, replacementImageUrl } = req.query;
160
-
161
  if (!imageUrl || !replacementImageUrl) {
162
  return res.status(400).send('Both image URL and replacement image URL are required');
163
  }
164
-
165
  const classifiersFolder = './classifiers';
 
166
  try {
 
167
  const imageBuffer = await downloadAndConvertImage(imageUrl);
168
- const replacementImageBuffer = await downloadAndConvertImage(replacementImageUrl);
 
 
 
 
 
 
 
 
 
169
  const outputBuffer = await processImage(imageBuffer, classifiersFolder, replacementImageBuffer);
170
- res.set('Content-Type', 'image/jpeg').send(outputBuffer);
 
 
 
171
  } catch (error) {
172
  if (error.message === 'No faces detected in the image.') {
173
  res.status(404).send('No faces detected in the image.');
@@ -178,12 +141,179 @@ app.get('/process-image', async (req, res) => {
178
  }
179
  });
180
 
181
- // 静态文件服务
182
- app.use(serveStatic(__dirname));
 
 
 
183
  app.get('/', (req, res) => {
184
- res.sendFile(path.join(__dirname, 'index.html'));
185
  });
186
 
187
  app.listen(port, () => {
188
- console.log(`Server is running on http://localhost:${port}`);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
189
  });
 
1
// Dependencies.
// FIX: the original declared `const Jimp = require('jimp')` twice, which is
// a SyntaxError ("Identifier 'Jimp' has already been declared") and stopped
// the whole file from loading. The duplicate declaration is removed.
const express = require('express');
const cv = require('opencv4nodejs');
const Jimp = require('jimp');
const ffmpeg = require('fluent-ffmpeg');
const axios = require('axios');
const fs = require('fs');
const os = require('os');
const path = require('path');
const sharp = require('sharp');
const serveStatic = require('serve-static');

const app = express();
// Port the HTTP server listens on.
const PORT = 7860;
15
  // 从文件夹加载所有分类器
16
  const loadClassifiersFromFolder = async (folderPath) => {
17
  const classifiers = [];
 
27
// 下载并转换图像到内存中
// Fetch an image over HTTP and normalise it to JPEG entirely in memory.
// Resolves with a Buffer of JPEG bytes; logs and rethrows on any failure.
const downloadAndConvertImage = async (url) => {
  try {
    const response = await axios.get(url, { responseType: 'arraybuffer' });
    const converter = sharp(response.data).toFormat('jpeg');
    const imageBuffer = await converter.toBuffer();
    console.log('Image downloaded and converted successfully.');
    return imageBuffer;
  } catch (error) {
    console.error('Error downloading or converting image:', error);
    throw error;
  }
};
47
 
48
// 图像处理函数 — detect faces in an image and paint a circular patch of the
// replacement image over each detected face.
//
// Params:
//   imageBuffer            - Buffer with the source image bytes.
//   classifiersFolder      - folder containing Haar cascade XML files.
//   replacementImageBuffer - Buffer (or anything Jimp.read accepts) with the
//                            replacement face image.
// Returns: Buffer with the JPEG-encoded result.
// Throws: 'No faces detected in the image.' when no cascade matches anything,
//         or a decode error when the input image is unreadable.
const processImage = async (imageBuffer, classifiersFolder, replacementImageBuffer) => {
  try {
    const classifiers = await loadClassifiersFromFolder(classifiersFolder);

    // Early validation: make sure both Jimp and OpenCV can decode the input.
    // (Jimp.read normally throws on bad input; the null-check is belt and
    // braces kept from the original.)
    const jimpImage = await Jimp.read(imageBuffer);
    if (!jimpImage) {
      throw new Error('Failed to read the image with Jimp. The image file might be corrupted or not supported.');
    }

    const img = cv.imdecode(imageBuffer);
    if (img.empty) {
      throw new Error('Failed to read the image with OpenCV. The image file might be corrupted or not supported.');
    }

    // Run every loaded cascade over the grayscale image and pool all hits.
    const grayImg = img.bgrToGray();
    const allFaces = [];
    classifiers.forEach(classifier => {
      const faces = classifier.detectMultiScale(grayImg).objects;
      allFaces.push(...faces);
    });

    // 如果没有检测到人脸,抛出错误
    if (allFaces.length === 0) {
      throw new Error('No faces detected in the image.');
    }

    const replacementFace = await Jimp.read(replacementImageBuffer);
    allFaces.forEach((faceRect) => {
      // FIX: Jimp's resize() mutates the image in place, so resizing the
      // shared replacementFace for every face progressively degraded it and
      // used the previous face's dimensions as the base. Resize a clone per
      // face instead (this is what the GIF route already does).
      const resizedReplacementFace = replacementFace.clone().resize(faceRect.width, faceRect.height);
      // getRegion returns a view into img, so the writes below land in img.
      const faceRegion = img.getRegion(faceRect);

      const replacementBuffer = resizedReplacementFace.bitmap.data;
      // Paint only pixels inside the inscribed circle, and only where the
      // replacement pixel is not fully transparent.
      const centerX = faceRect.width / 2;
      const centerY = faceRect.height / 2;
      const maxRadius = Math.min(centerX, centerY);
      for (let y = 0; y < faceRect.height; y++) {
        for (let x = 0; x < faceRect.width; x++) {
          const distance = Math.sqrt((x - centerX) ** 2 + (y - centerY) ** 2);
          if (distance <= maxRadius) {
            const idx = (y * faceRect.width + x) << 2; // RGBA, 4 bytes/pixel
            const [r, g, b, a] = replacementBuffer.slice(idx, idx + 4);
            if (a > 0) {
              // OpenCV stores pixels as BGR, hence the channel swap.
              faceRegion.set(y, x, new cv.Vec3(b, g, r));
            }
          }
        }
      }
    });
    const outputBuffer = cv.imencode('.jpg', img);
    return outputBuffer;
  } catch (error) {
    console.error('Error during image processing:', error);
    throw error;
  }
};
105
 
106
+ // Express接口
107
  app.get('/process-image', async (req, res) => {
108
  const { imageUrl, replacementImageUrl } = req.query;
109
+
110
  if (!imageUrl || !replacementImageUrl) {
111
  return res.status(400).send('Both image URL and replacement image URL are required');
112
  }
113
+
114
  const classifiersFolder = './classifiers';
115
+ let replacementImageBuffer;
116
  try {
117
+ // 下载并转换输入图片和替换图片到内存中
118
  const imageBuffer = await downloadAndConvertImage(imageUrl);
119
+ if (replacementImageUrl === 'replace') {
120
+ // 使用默认的本地替换图像路径
121
+ const defaultReplacementImagePath = './replacement_face.png';
122
+ replacementImageBuffer = await Jimp.read(defaultReplacementImagePath);
123
+ } else {
124
+ // 下载并转换替换图片到内存中
125
+ replacementImageBuffer = await downloadAndConvertImage(replacementImageUrl);
126
+ }
127
+
128
+ // 处理图片
129
  const outputBuffer = await processImage(imageBuffer, classifiersFolder, replacementImageBuffer);
130
+
131
+ // 返回处理后的图片
132
+ res.set('Content-Type', 'image/jpeg');
133
+ res.send(outputBuffer);
134
  } catch (error) {
135
  if (error.message === 'No faces detected in the image.') {
136
  res.status(404).send('No faces detected in the image.');
 
141
  }
142
  });
143
 
144
// 记录使用次数和内存占用的全局变量
// (currently never read or written anywhere visible — kept for future metrics)
let usageCount = 0;
let memoryUsage = 0;

// Serve the project directory as static files (index.html, assets, …).
app.use(serveStatic(__dirname));
app.get('/', (req, res) => {
  res.sendFile(path.join(__dirname, 'index.html')); // landing page
});

// FIX: the original called `app.listen(port, …)` here, but this file only
// defines PORT (upper case), so startup crashed with a ReferenceError — and a
// second `app.listen(PORT, …)` already exists at the bottom of the file,
// which would then fail with EADDRINUSE. The broken duplicate listen is
// removed; the single listen at the end of the file starts the server.

// Parse JSON request bodies for subsequently handled requests.
app.use(express.json());
157
+
158
// GIF转换为视频
// Convert a GIF file on disk to a video file using ffmpeg.
// Resolves when the conversion finishes; rejects with the ffmpeg error.
async function gifToVideo(inputGifPath, outputVideoPath) {
  console.log('开始将GIF转换为视频...');
  return new Promise((resolve, reject) => {
    const job = ffmpeg(inputGifPath).inputFormat('gif').output(outputVideoPath);
    job.on('start', (commandLine) => {
      console.log('FFmpeg 命令:', commandLine);
    });
    job.on('end', () => {
      console.log('GIF成功转换为视频:', outputVideoPath);
      resolve();
    });
    job.on('error', (err) => {
      console.error('GIF转换为视频时出错:', err);
      reject(err);
    });
    job.run();
  });
}
179
+
180
// 添加将输出视频转换为 GIF 的函数
// Convert a video file on disk back into a GIF using ffmpeg.
// Resolves when the conversion finishes; rejects with the ffmpeg error.
async function videoToGif(inputVideoPath, outputGifPath) {
  console.log('开始将视频转换为GIF...');
  return new Promise((resolve, reject) => {
    const job = ffmpeg(inputVideoPath).output(outputGifPath);
    job.on('start', (commandLine) => {
      console.log('FFmpeg 命令:', commandLine);
    });
    job.on('end', () => {
      console.log('视频成功转换为GIF:', outputGifPath);
      resolve();
    });
    job.on('error', (err) => {
      console.error('视频转换为GIF时出错:', err);
      reject(err);
    });
    job.run();
  });
}
200
+
201
// 从文件夹加载所有分类器
// Load every Haar cascade classifier found in folderPath.
// FIX: only .xml files are handed to cv.CascadeClassifier now; the original
// passed every directory entry (e.g. .DS_Store or subdirectories), which
// makes cascade loading fail on stray files.
// NOTE(review): this function declaration shadows/conflicts with the earlier
// `const loadClassifiersFromFolder` arrow near the top of the file — the two
// declarations cannot coexist; confirm which one should remain.
async function loadClassifiersFromFolder(folderPath) {
  console.log(`开始从文件夹 ${folderPath} 加载分类器...`);
  const classifiers = [];

  const files = fs.readdirSync(folderPath);
  for (const file of files) {
    // Skip anything that is not a cascade definition file.
    if (path.extname(file).toLowerCase() !== '.xml') continue;
    const filePath = path.join(folderPath, file);
    const classifier = new cv.CascadeClassifier(filePath);
    classifiers.push(classifier);
    console.log(`加载分类器: ${file}`);
  }

  console.log('分类器加载完成');
  return classifiers;
}
217
+
218
// 处理 GIF 的 Express 接口
// Download a GIF, run face replacement on every frame, and return the
// processed GIF. All intermediate files live in the OS temp directory.
app.get('/processGif', async (req, res) => {
  try {
    const { gifUrl } = req.query;
    // FIX: the previous version of this route validated gifUrl and returned
    // 400; that check was lost in the rewrite, turning a missing parameter
    // into a 500 from axios. Restored here.
    if (!gifUrl) {
      return res.status(400).send('GIF URL is required');
    }

    const tempDir = os.tmpdir();
    const tempGifPath = path.join(tempDir, 'temp.gif');
    const tempVideoPath = path.join(tempDir, 'temp_video.mp4');
    const outputGifPath = path.join(tempDir, 'output.gif');
    const replacementImagePath = path.join(__dirname, 'replacement_face.png');
    const classifiersFolder = path.join(__dirname, 'classifiers');

    console.log('接收到GIF处理请求:', gifUrl);

    // 下载 GIF 到本地
    console.log('开始下载GIF...');
    const response = await axios.get(gifUrl, { responseType: 'stream' });
    const writer = fs.createWriteStream(tempGifPath);
    response.data.pipe(writer);

    // 等待文件写入完成
    await new Promise((resolve, reject) => {
      writer.on('finish', resolve);
      writer.on('error', reject);
    });

    console.log('GIF下载完成:', tempGifPath);

    // 从文件夹动态加载分类器
    const classifiers = await loadClassifiersFromFolder(classifiersFolder);

    // 将GIF转换为视频
    await gifToVideo(tempGifPath, tempVideoPath);

    // 读取视频文件
    console.log('开始读取视频文件...');
    const videoCapture = new cv.VideoCapture(tempVideoPath);
    const frameWidth = videoCapture.get(cv.CAP_PROP_FRAME_WIDTH);
    const frameHeight = videoCapture.get(cv.CAP_PROP_FRAME_HEIGHT);
    const originalFps = videoCapture.get(cv.CAP_PROP_FPS);
    const videoWriter = new cv.VideoWriter(path.join(tempDir, 'out.mp4'), cv.VideoWriter.fourcc('avc1'), originalFps, new cv.Size(frameWidth, frameHeight));
    console.log('视频文件读取完成');

    // 处理每一帧
    console.log('开始处理每一帧...');
    while (true) {
      const frame = videoCapture.read();
      if (frame.empty) break;

      // Pool detections from every cascade over the grayscale frame.
      const grayFrame = frame.bgrToGray();
      const faces = [];
      classifiers.forEach(classifier => {
        const allfaces = classifier.detectMultiScale(grayFrame).objects;
        faces.push(...allfaces);
      });

      const replacementFace = await Jimp.read(replacementImagePath);

      if (Array.isArray(faces) && faces.length > 0) {
        console.log(`检测到 ${faces.length} 张脸`);
        for (const rect of faces) {
          const resizedSubstituteImage = await replacementFace.clone().resize(rect.width, rect.height);
          // getRegion returns a view into frame, so pixel writes land in frame.
          const substituteRegion = frame.getRegion(rect);
          // Alpha-aware per-pixel paste (skip fully transparent pixels).
          for (let y = 0; y < rect.height; y++) {
            for (let x = 0; x < rect.width; x++) {
              const { r, g, b, a } = Jimp.intToRGBA(resizedSubstituteImage.getPixelColor(x, y));
              if (a > 0) {
                const color = new cv.Vec3(b, g, r); // OpenCV is BGR-ordered
                substituteRegion.set(y, x, color);
              }
            }
          }
          // NOTE(review): this copyTo pastes the raw decode of the replacement
          // (IMREAD_UNCHANGED may yield 4-channel BGRA) over the same region,
          // overwriting the alpha-aware loop above; confirm it is intentional
          // and that the channel count matches the 3-channel frame.
          const buffer = await resizedSubstituteImage.getBufferAsync(Jimp.MIME_PNG);
          const substituteMat = cv.imdecode(Buffer.from(buffer), cv.IMREAD_UNCHANGED);
          substituteMat.copyTo(frame.getRegion(rect));
        }
      } else {
        console.error('未检测到人脸或faces数组为空');
      }

      videoWriter.write(frame);
    }

    videoCapture.release();
    videoWriter.release();
    cv.destroyAllWindows(); // harmless in a headless server, kept from original
    console.log('帧处理完成');

    await videoToGif(path.join(tempDir, 'out.mp4'), outputGifPath);

    res.set('Content-Type', 'image/gif');
    res.sendFile(outputGifPath);
  } catch (error) {
    console.error('处理GIF时出错:', error);
    res.status(500).send('内部服务器错误');
  }
});
315
+
316
+
317
// Start the HTTP server on the configured port.
const onServerReady = () => {
  console.log(`服务器运行在端口 ${PORT}`);
};
app.listen(PORT, onServerReady);