{
  "basic_usage": {
    "description": "Basic suicide risk detection",
    "code": "\nfrom transformers import BertTokenizer, BertForSequenceClassification\nimport torch\n\n# Load model\nmodel_name = \"Akashpaul123/bert-suicide-detection\"\ntokenizer = BertTokenizer.from_pretrained(model_name)\nmodel = BertForSequenceClassification.from_pretrained(model_name)\n\n# Predict\ntext = \"I feel hopeless and want to end it all\"\ninputs = tokenizer(text, return_tensors=\"pt\", max_length=512, truncation=True, padding=True)\n\nwith torch.no_grad():\n    outputs = model(**inputs)\n    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)\n\nsuicide_prob = probs[0][1].item()\nprediction = \"suicide\" if suicide_prob > 0.5 else \"non-suicide\"\n\nprint(f\"Prediction: {prediction}\")\nprint(f\"Confidence: {max(probs[0]).item():.4f}\")\n"
  },
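  "pipeline_inference": {
    "description": "Alternative: one-call inference with the transformers pipeline API (a sketch; the label strings in the output come from the model's id2label config, which may be the defaults \"LABEL_0\"/\"LABEL_1\")",
    "code": "\nfrom transformers import pipeline\n\n# Sketch: pipeline(\"text-classification\", ...) is standard transformers usage.\n# The label names returned depend on this model's id2label config; if no custom\n# names were set they will be the defaults \"LABEL_0\" / \"LABEL_1\".\nclassifier = pipeline(\"text-classification\", model=\"Akashpaul123/bert-suicide-detection\")\n\nresult = classifier(\"I feel hopeless and want to end it all\")[0]\nprint(f\"Label: {result['label']}, Score: {result['score']:.4f}\")\n"
  },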
  "batch_processing": {
    "description": "Process multiple texts at once",
    "code": "\nimport torch\nfrom transformers import BertTokenizer, BertForSequenceClassification\n\nmodel_name = \"Akashpaul123/bert-suicide-detection\"\ntokenizer = BertTokenizer.from_pretrained(model_name)\nmodel = BertForSequenceClassification.from_pretrained(model_name)\n\ntexts = [\n    \"Having a great day!\",\n    \"I want to disappear forever\",\n    \"Looking forward to tomorrow\"\n]\n\n# Tokenize all texts\ninputs = tokenizer(texts, return_tensors=\"pt\", max_length=512, truncation=True, padding=True)\n\n# Predict\nwith torch.no_grad():\n    outputs = model(**inputs)\n    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)\n\n# Process results\nfor i, text in enumerate(texts):\n    suicide_prob = probs[i][1].item()\n    prediction = \"suicide\" if suicide_prob > 0.5 else \"non-suicide\"\n    print(f\"Text: {text}\")\n    print(f\"Prediction: {prediction} ({suicide_prob:.4f})\")\n    print()\n"
  },
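  "gpu_inference": {
    "description": "Batch inference on GPU when available (a sketch of standard torch/transformers device handling, not a recipe documented for this model)",
    "code": "\nimport torch\nfrom transformers import BertTokenizer, BertForSequenceClassification\n\n# Sketch: run the same batch workflow on a GPU if one is available.\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel_name = \"Akashpaul123/bert-suicide-detection\"\ntokenizer = BertTokenizer.from_pretrained(model_name)\nmodel = BertForSequenceClassification.from_pretrained(model_name).to(device)\nmodel.eval()\n\ntexts = [\"Having a great day!\", \"I want to disappear forever\"]\n\n# BatchEncoding.to(device) moves every input tensor in one call\ninputs = tokenizer(texts, return_tensors=\"pt\", max_length=512, truncation=True, padding=True).to(device)\n\nwith torch.no_grad():\n    probs = torch.nn.functional.softmax(model(**inputs).logits, dim=-1)\n\n# Index 1 is the \"suicide\" class\nfor text, prob in zip(texts, probs):\n    print(f\"{text}: suicide probability {prob[1].item():.4f}\")\n"
  },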
  "web_api": {
    "description": "Web API integration with Flask",
    "code": "\nfrom flask import Flask, request, jsonify\nfrom transformers import BertTokenizer, BertForSequenceClassification\nimport torch\n\napp = Flask(__name__)\n\n# Load model once at startup\nmodel_name = \"Akashpaul123/bert-suicide-detection\"\ntokenizer = BertTokenizer.from_pretrained(model_name)\nmodel = BertForSequenceClassification.from_pretrained(model_name)\n\n@app.route('/predict', methods=['POST'])\ndef predict_suicide_risk():\n    data = request.get_json()\n    text = data.get('text', '')\n    \n    if not text:\n        return jsonify({\"error\": \"No text provided\"}), 400\n    \n    # Predict\n    inputs = tokenizer(text, return_tensors=\"pt\", max_length=512, truncation=True, padding=True)\n    \n    with torch.no_grad():\n        outputs = model(**inputs)\n        probs = torch.nn.functional.softmax(outputs.logits, dim=-1)\n    \n    suicide_prob = probs[0][1].item()\n    prediction = \"suicide\" if suicide_prob > 0.5 else \"non-suicide\"\n    \n    return jsonify({\n        \"text\": text,\n        \"prediction\": prediction,\n        \"suicide_probability\": suicide_prob,\n        \"confidence\": max(probs[0]).item()\n    })\n\nif __name__ == '__main__':\n    app.run(debug=True)\n"
  },
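  "api_client": {
    "description": "Calling the Flask endpoint above with the requests library (a sketch; assumes the server from web_api is running locally on Flask's default port 5000)",
    "code": "\nimport requests\n\n# Sketch: assumes the Flask app from the web_api example is running at\n# http://127.0.0.1:5000 (Flask's default host/port in debug mode).\nresp = requests.post(\n    \"http://127.0.0.1:5000/predict\",\n    json={\"text\": \"I want to disappear forever\"}\n)\nresp.raise_for_status()\nprint(resp.json())\n"
  }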
}