// champ-chatbot / tests/stress_tests/chat_session_with_file.js
// (last change: "many fixes", commit 720b664)
// This file aims to test a scenario where 80 users send 3 messages to ONE model (pessimistic).
import http from 'k6/http';
import { sleep, check } from 'k6';
import { SharedArray } from 'k6/data';
// Name of the file each VU will upload, supplied via `-e FILE=<name>`.
const file_name = __ENV.FILE;

// Read-only pool of chat messages, loaded once in the init context and
// shared across all VUs via SharedArray (avoids 80 copies in memory).
const message_examples = new SharedArray('chat messages', () => {
  const raw = open('./message_examples.txt');
  // One message per line: trim surrounding whitespace and drop blank lines
  // (e.g. a trailing newline at the end of the file).
  return raw
    .split('\n')
    .map((line) => line.trim())
    .filter((line) => line.length > 0);
});

// Binary contents of the upload file; k6 requires open() in the init context.
const testFile = open(`./${file_name}`, 'b');
// k6 test configuration: a single spike scenario in which all 80 virtual
// users each execute the default function exactly once (1 upload + 3 chats).
export const options = {
  scenarios: {
    my_spike_test: {
      executor: 'per-vu-iterations', // fixed iteration count per VU, not duration-based
      vus: 80, // 80 total users
      iterations: 1, // Each user runs the function exactly once.
    },
  },
};
/**
 * Scenario body executed once per VU: upload one file to establish a
 * session, then send 3 chat messages with human-like pacing.
 *
 * Environment variables read at runtime:
 *   URL        - base URL of the service under test
 *   MODEL_TYPE - model identifier forwarded in each chat payload
 * (FILE is consumed in the init context above.)
 */
export default function () {
  // Stagger VU start-up by 0-10 s so all 80 users do not fire their
  // first request at the exact same time.
  sleep(Math.random() * 10);

  const url = __ENV.URL;
  // One shared identifier per VU, reused for session/user/conversation.
  const sessionId = `VU${__VU}`;

  // Step 1: each VU uploads one file.
  const uploadPayload = {
    file: http.file(testFile, file_name, 'application/pdf'), // The file
    session_id: sessionId,
  };
  const uploadRes = http.put(`${url}/file`, uploadPayload);
  check(uploadRes, { 'status is 200': (r) => r.status === 200 });

  // Step 2: each VU sends 3 messages.
  for (let i = 0; i < 3; i++) {
    // Give each VU its own contiguous slice of the message pool, wrapping
    // around when there are fewer examples than 80 VUs * 3 messages.
    const messageIndex = ((__VU - 1) * 3 + i) % message_examples.length;
    const payload = {
      user_id: sessionId,
      session_id: sessionId,
      conversation_id: sessionId,
      human_message: message_examples[messageIndex],
      model_type: __ENV.MODEL_TYPE,
      consent: true,
      age_group: "0-18",
      gender: "M",
      roles: ["other"],
      participant_id: sessionId,
      lang: "en",
    };
    const params = { headers: { 'Content-Type': 'application/json' } };
    const res = http.post(`${url}/chat`, JSON.stringify(payload), params);
    check(res, { 'status is 200': (r) => r.status === 200 });

    let reply = '';
    if (res.status === 200) {
      // k6 does not support streaming response bodies: it buffers the
      // entire response before returning, so no chunk-by-chunk reading
      // is needed here.
      try {
        reply = res.json().reply || 'no_reply';
      } catch (error) {
        // A streamed (non-JSON) body cannot be parsed for a `reply`
        // field, so fall back to the placeholder.
        reply = 'no_reply';
      }
    } else {
      console.error(res.status);
      console.error(res.body);
    }

    // Step 3: simulate reading, thinking and typing time.
    // Reading speed: ~200 ms per word of the reply + 2 s of thinking time.
    const readingTime = reply.split(' ').length * 0.2 + 2;
    // Cap it so a long reply doesn't stall the VU, plus 0-3 s of jitter.
    const finalSleep = Math.min(readingTime, 15) + Math.random() * 3;
    sleep(finalSleep);
  }
}
// TEST RESULT ANALYSIS
// CHAMP (GROK) - T4 small - 80VUs - with a small file (15 kB)
// █ TOTAL RESULTS
// checks_total.......: 320 3.761939/s
// checks_succeeded...: 100.00% 320 out of 320
// checks_failed......: 0.00% 0 out of 320
// ✓ status is 200
// HTTP
// http_req_duration..............: avg=5.45s min=77.65ms med=5.58s max=13.39s p(90)=10.38s p(95)=11.18s
// { expected_response:true }...: avg=5.45s min=77.65ms med=5.58s max=13.39s p(90)=10.38s p(95)=11.18s
// http_req_failed................: 0.00% 0 out of 320
// http_reqs......................: 320 3.761939/s
// EXECUTION
// iteration_duration.............: avg=1m11s min=42.01s med=1m11s max=1m25s p(90)=1m21s p(95)=1m23s
// iterations.....................: 80 0.940485/s
// vus............................: 1 min=1 max=80
// vus_max........................: 80 min=80 max=80
// NETWORK
// data_received..................: 571 kB 6.7 kB/s
// data_sent......................: 1.4 MB 17 kB/s
// running (01m25.1s), 00/80 VUs, 80 complete and 0 interrupted iterations
// my_spike_test ✓ [======================================] 80 VUs 01m25.1s/10m0s 80/80 iters, 1 per VU