// AI Detector Example - JavaScript/Node.js
// Install: npm install @xenova/transformers onnxruntime-node

const { AutoTokenizer } = require('@xenova/transformers');
const ort = require('onnxruntime-node');

// Module-level caches: downloading/loading the tokenizer and creating the
// ONNX session are expensive, so each is started once and reused by every
// subsequent call (caching the promise also de-duplicates concurrent calls).
let tokenizerPromise = null;
let sessionPromise = null;

/**
 * Classify `text` as AI- or human-written using a local ONNX model.
 *
 * @param {string} text - Input text to classify (truncated to 512 tokens).
 * @returns {Promise<{ai_probability: number, is_ai: boolean, confidence: number}>}
 *   `ai_probability` in [0, 1]; `is_ai` is true when the probability exceeds
 *   0.5; `confidence` rescales the distance from 0.5 into [0, 1].
 */
async function detectAI(text) {
    // Kick off both loads in parallel on first use; cached afterwards.
    tokenizerPromise ??= AutoTokenizer.from_pretrained('darwinkernelpanic/ai-detector-pgx');
    sessionPromise ??= ort.InferenceSession.create('./model.onnx');
    const [tokenizer, session] = await Promise.all([tokenizerPromise, sessionPromise]);

    // Tokenize. NOTE: `return_tensors: 'pt'` (from the original) is a Python
    // transformers option; transformers.js already returns Tensor objects, so
    // the option was dead and has been dropped.
    const encoded = await tokenizer(text, {
        padding: true,
        truncation: true,
        max_length: 512,
    });

    // Wrap the tokenizer output as int64 ONNX Runtime tensors.
    const inputIds = new ort.Tensor('int64', encoded.input_ids.data, encoded.input_ids.dims);
    const attentionMask = new ort.Tensor('int64', encoded.attention_mask.data, encoded.attention_mask.dims);

    // Run inference. Assumes the model exposes a 2-class `logits` output
    // (index 0 = human, index 1 = AI) — TODO confirm against the model card.
    const results = await session.run({
        input_ids: inputIds,
        attention_mask: attentionMask,
    });

    // Numerically stable 2-way softmax: subtracting the max logit prevents
    // Math.exp overflow (Infinity/Infinity -> NaN) for large logit values.
    const logits = results.logits.data;
    const l0 = Number(logits[0]);
    const l1 = Number(logits[1]);
    const m = Math.max(l0, l1);
    const exp0 = Math.exp(l0 - m);
    const exp1 = Math.exp(l1 - m);
    const aiProb = exp1 / (exp0 + exp1);

    return {
        ai_probability: aiProb,
        is_ai: aiProb > 0.5,
        confidence: Math.abs(aiProb - 0.5) * 2,
    };
}

// Run example. The promise is terminated with a .catch so a failure to load
// the tokenizer/model does not surface as an unhandled promise rejection.
detectAI("The mitochondria is the powerhouse of the cell...")
    .then((r) => console.log('AI Probability:', (r.ai_probability * 100).toFixed(1) + '%'))
    .catch((err) => {
        console.error('Detection failed:', err);
        process.exitCode = 1; // signal failure without clobbering in-flight work
    });

module.exports = { detectAI };