| | |
| | |
| |
|
| | const { AutoTokenizer } = require('@xenova/transformers'); |
| | const ort = require('onnxruntime-node'); |
| |
|
// Lazily-created, shared tokenizer and ONNX session. Loading the model and
// tokenizer is by far the most expensive step, so do it once per process
// instead of once per call. Stored as promises so concurrent first calls
// share the same in-flight load.
let tokenizerPromise = null;
let sessionPromise = null;

/**
 * Classify a piece of text as human- or AI-written using the
 * `darwinkernelpanic/ai-detector-pgx` tokenizer and a local ONNX model.
 *
 * @param {string} text - Input text to classify (truncated to 512 tokens).
 * @param {string} [modelPath='./model.onnx'] - Path to the ONNX model file.
 * @returns {Promise<{ai_probability: number, is_ai: boolean, confidence: number}>}
 *   `ai_probability` is the softmax probability of the "AI" class (logit index 1),
 *   `is_ai` is true when that probability exceeds 0.5, and `confidence`
 *   rescales distance from 0.5 into [0, 1].
 * @throws Propagates tokenizer/model loading and inference errors.
 */
async function detectAI(text, modelPath = './model.onnx') {
  // Load tokenizer and session in parallel, caching the promises.
  tokenizerPromise ??= AutoTokenizer.from_pretrained('darwinkernelpanic/ai-detector-pgx');
  sessionPromise ??= ort.InferenceSession.create(modelPath);
  const [tokenizer, session] = await Promise.all([tokenizerPromise, sessionPromise]);

  // NOTE: transformers.js has no `return_tensors: 'pt'` option (that is the
  // Python API); it always returns Tensor objects, so the option was dropped.
  const encoded = await tokenizer(text, {
    padding: true,
    truncation: true,
    max_length: 512,
  });

  // Re-wrap the tokenizer output as onnxruntime-node tensors (int64).
  const inputIds = new ort.Tensor('int64', encoded.input_ids.data, encoded.input_ids.dims);
  const attentionMask = new ort.Tensor('int64', encoded.attention_mask.data, encoded.attention_mask.dims);

  const results = await session.run({
    input_ids: inputIds,
    attention_mask: attentionMask,
  });

  // Numerically stable two-class softmax: subtract the max logit before
  // exponentiating so large logits cannot overflow to Infinity (NaN result).
  const logits = results.logits.data;
  const maxLogit = Math.max(logits[0], logits[1]);
  const exp0 = Math.exp(logits[0] - maxLogit);
  const exp1 = Math.exp(logits[1] - maxLogit);
  const aiProb = exp1 / (exp0 + exp1); // index 1 is assumed to be the "AI" class — TODO confirm against model card

  return {
    ai_probability: aiProb,
    is_ai: aiProb > 0.5,
    confidence: Math.abs(aiProb - 0.5) * 2,
  };
}
| |
|
| | |
// Demo entry point: only run when executed directly (`node thisfile.js`),
// not when the module is required as a library.
if (require.main === module) {
  detectAI('The mitochondria is the powerhouse of the cell...')
    .then((r) => console.log('AI Probability:', (r.ai_probability * 100).toFixed(1) + '%'))
    .catch((err) => {
      // Without this handler a model-load or inference failure becomes an
      // unhandled promise rejection (process crash on modern Node).
      console.error('Detection failed:', err);
      process.exitCode = 1;
    });
}

module.exports = { detectAI };
| |
|