Spaces:
Running
Running
File size: 6,424 Bytes
cc65f2f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 |
// Face descriptors collected from the first and second uploaded image,
// consumed by verificar() for the similarity comparison.
// BUG FIX: `var` at script top level created implicit window globals and
// allowed accidental reassignment; `const` keeps the bindings fixed while
// the arrays themselves stay mutable for push().
// NOTE(review): if another script reads `window.foto1`/`window.foto2`,
// restore `var` — confirm before shipping.
const foto1 = [];
const foto2 = [];
/**
 * Handles the first file input: previews the chosen image, detects a single
 * face, shows age/gender/expression stats, and stores the face descriptor
 * in `foto1` for the later comparison.
 * On any detection failure it alerts and leaves the page state unchanged.
 */
async function imagem1() {
    const verificado = document.getElementById("verificado");
    const img1 = document.getElementById('img1');
    verificado.innerText = "espere...";
    const preview = document.getElementById('imagem1');
    const inputs = document.querySelectorAll('input[type=file]');
    const file = inputs[0].files[0];
    const reader = new FileReader();
    if (file) reader.readAsDataURL(file);
    else preview.src = '';
    reader.onloadend = function () {
        preview.src = reader.result;
    };
    try {
        const img = await faceapi.bufferToImage(file);
        // BUG FIX: `result` was assigned without declaration (implicit global).
        const result = await faceapi.detectSingleFace(img)
            .withFaceLandmarks()
            .withFaceDescriptor()
            .withFaceExpressions()
            .withAgeAndGender();
        // Use the public `box` accessor rather than the private `_box` fields.
        const box = result.detection.box;
        const regionsToExtract = [
            new faceapi.Rect(box.x, box.y, box.width, box.height)
        ];
        const canvases = await faceapi.extractFaces(img, regionsToExtract);
        displayExtractedFaces(canvases);
        verificado.innerText = "Escolha a 2ª ";
        // Render a probability (0..1) as a percentage, rounding LAST so no
        // float artifacts (e.g. 85.00000000000001) reach the UI.
        const pct = (p) => (p * 100).toFixed(2) + '%';
        // BUG FIX: face-api.js exposes English expression keys (neutral,
        // happy, sad, fearful, disgusted, surprised, angry). The Portuguese
        // keys used before were always undefined, so `.toFixed()` threw and
        // every upload fell into the catch/alert. Labels stay in Portuguese.
        img1.innerText =
            'Idade ' + result.age.toFixed(2) + '\n' +
            ' Genero ' + result.gender + ' - ' + pct(result.genderProbability) + '\n' +
            'Neutro: ' + pct(result.expressions.neutral) + '\n' +
            'Feliz: ' + pct(result.expressions.happy) + '\n' +
            'Triste: ' + pct(result.expressions.sad) + '\n' +
            'Medo: ' + pct(result.expressions.fearful) + '\n' +
            'Nojo: ' + pct(result.expressions.disgusted) + '\n' +
            'Surpreso: ' + pct(result.expressions.surprised) + '\n' +
            'Raiva: ' + pct(result.expressions.angry);
        foto1.push(result.descriptor);
    } catch (err) {
        alert("Não foi possivel detectar o rosto");
        return;
    }
}
/**
 * Appends the extracted face crop canvases to #facesContainer and swaps the
 * first preview image (#imagem1) for the crop's data URL, hiding the canvas.
 * @param {HTMLCanvasElement[]} faceImages - canvases from faceapi.extractFaces
 */
function displayExtractedFaces(faceImages) {
    // BUG FIX: the old code allocated a canvas with createElement and then
    // immediately overwrote the variable with the #overlay element — the dead
    // allocation is removed. The forEach parameter also shadowed `canvas`.
    const overlay = $('#overlay').get(0);
    faceapi.matchDimensions(overlay, $('#imagem1').get(0));
    $('#facesContainer').empty();
    faceImages.forEach((faceCanvas) => $('#facesContainer').append(faceCanvas));
    // NOTE(review): assumes the page holds exactly enough <canvas> elements
    // that index 2 is the freshly appended crop — fragile; confirm layout.
    document.getElementsByTagName("canvas")[2].setAttribute("id", 'cv3');
    const c3 = document.getElementById("cv3");
    const data3 = c3.toDataURL();
    const preview = document.getElementById('imagem1');
    preview.src = data3;
    c3.setAttribute("hidden", "hidden");
}
/**
 * Same as displayExtractedFaces but targets the second preview (#imagem2).
 * @param {HTMLCanvasElement[]} faceImages - canvases from faceapi.extractFaces
 */
function displayExtractedFaces2(faceImages) {
    // BUG FIX: removed the dead createElement("canvas") whose result was
    // immediately overwritten, and un-shadowed the forEach parameter.
    const overlay = $('#overlay').get(0);
    faceapi.matchDimensions(overlay, $('#imagem2').get(0));
    $('#facesContainer').empty();
    faceImages.forEach((faceCanvas) => $('#facesContainer').append(faceCanvas));
    // NOTE(review): index 2 must be the freshly appended crop — fragile;
    // confirm against the page's actual <canvas> count.
    document.getElementsByTagName("canvas")[2].setAttribute("id", 'cv3');
    const c3 = document.getElementById("cv3");
    const data3 = c3.toDataURL();
    const preview = document.getElementById('imagem2');
    preview.src = data3;
    c3.setAttribute("hidden", "hidden");
}
/**
 * Handles the second file input: previews the image, detects a single face,
 * shows its stats, stores the descriptor in `foto2`, then kicks off the
 * comparison against the first image via verificar().
 */
async function imagem2() {
    const verificado = document.getElementById("verificado");
    const img2 = document.getElementById('img22');
    verificado.innerText = "espere...";
    const preview = document.getElementById('imagem2');
    const inputs = document.querySelectorAll('input[type=file]');
    const file = inputs[1].files[0];
    const reader = new FileReader();
    if (file) reader.readAsDataURL(file);
    else preview.src = '';
    reader.onloadend = function () {
        preview.src = reader.result;
    };
    try {
        const img = await faceapi.bufferToImage(file);
        // BUG FIX: `result` was assigned without declaration (implicit global).
        const result = await faceapi.detectSingleFace(img)
            .withFaceLandmarks()
            .withFaceDescriptor()
            .withFaceExpressions()
            .withAgeAndGender();
        // Use the public `box` accessor rather than the private `_box` fields.
        const box = result.detection.box;
        const regionsToExtract = [
            new faceapi.Rect(box.x, box.y, box.width, box.height)
        ];
        const canvases = await faceapi.extractFaces(img, regionsToExtract);
        displayExtractedFaces2(canvases);
        // Round LAST so no float artifacts reach the UI.
        const pct = (p) => (p * 100).toFixed(2) + '%';
        // BUG FIX: face-api.js expression keys are English (neutral, happy,
        // ...); the Portuguese keys used before were undefined and threw.
        img2.innerText =
            'Idade ' + result.age.toFixed(2) + '\n' +
            ' Genero ' + result.gender + ' - ' + pct(result.genderProbability) + '\n' +
            'Neutro: ' + pct(result.expressions.neutral) + '\n' +
            'Feliz: ' + pct(result.expressions.happy) + '\n' +
            'Triste: ' + pct(result.expressions.sad) + '\n' +
            'Medo: ' + pct(result.expressions.fearful) + '\n' +
            'Nojo: ' + pct(result.expressions.disgusted) + '\n' +
            'Surpreso: ' + pct(result.expressions.surprised) + '\n' +
            'Raiva: ' + pct(result.expressions.angry);
        foto2.push(result.descriptor);
        verificado.innerText = "verificando...";
        // BUG FIX: verificar() returned a floating promise; await it so any
        // rejection surfaces inside this try/catch.
        await verificar();
    } catch (err) {
        alert("Não foi possivel detectar o rosto");
        return;
    }
}
/**
 * Matches the face in the second uploaded image against the descriptor(s)
 * stored in `foto1` and reports whether both images show the same person,
 * along with a similarity percentage, in the #verificado element.
 */
async function verificar() {
    const verificado = document.getElementById("verificado");
    verificado.setAttribute("disabled", "true");
    verificado.innerText = "verificando...";
    const labeledFaceDescriptors = [
        new faceapi.LabeledFaceDescriptors('person 1', foto1)
    ];
    try {
        const inputs = document.querySelectorAll('input[type=file]');
        const file = inputs[1].files[0];
        const img = await faceapi.bufferToImage(file);
        const singleResult = await faceapi.detectSingleFace(img)
            .withFaceLandmarks()
            .withFaceDescriptor();
        const faceMatcher = new faceapi.FaceMatcher(labeledFaceDescriptors);
        const bestMatch = faceMatcher.findBestMatch(singleResult.descriptor);
        // BUG FIX: the old code computed (1 - distance.toFixed(2)) * 100,
        // rounding BEFORE the arithmetic, which could display artifacts such
        // as 55.00000000000001%. Compute first, round last. Also uses the
        // public `distance` accessor instead of the private `_distance`.
        const similarity = ((1 - bestMatch.distance) * 100).toFixed(2);
        const samePerson = bestMatch.distance <= 0.50;
        // The two branches differed only in class name and verdict text.
        verificado.setAttribute("class", samePerson ? "greenclass" : "democlass");
        verificado.innerText =
            (samePerson ? "A mesma pessoa" : "Não é a mesma pessoa") +
            " \n distância euclidiana entre os pontos: \n" + similarity + '%';
        verificado.removeAttribute("disabled");
    } catch (err) {
        verificado.innerText = "Erro ao tentar detectar";
        verificado.removeAttribute("disabled");
        return;
    }
}