| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| |
|
| | |
| |
|
| | |
| | #include "LandmarkCoreIncludes.h" |
| |
|
| | #include <Face_utils.h> |
| | #include <FaceAnalyser.h> |
| | #include <GazeEstimation.h> |
| | #include <RecorderOpenFace.h> |
| | #include <RecorderOpenFaceParameters.h> |
| | #include <SequenceCapture.h> |
| | #include <Visualizer.h> |
| | #include <VisualizationUtils.h> |
| |
|
| | #ifndef CONFIG_DIR |
| | #define CONFIG_DIR "~" |
| | #endif |
| |
|
| | #define INFO_STREAM( stream ) \ |
| | std::cout << stream << std::endl |
| |
|
| | #define WARN_STREAM( stream ) \ |
| | std::cout << "Warning: " << stream << std::endl |
| |
|
| | #define ERROR_STREAM( stream ) \ |
| | std::cout << "Error: " << stream << std::endl |
| |
|
// Print a fatal error message and terminate the process.
// The function is used via the FATAL_STREAM macro; its name (and the
// "Fatal error" prefix the macro adds) promise termination, so we must
// actually abort after printing — the original silently returned.
static void printErrorAndAbort(const std::string & error)
{
	std::cout << error << std::endl;
	std::abort();
}
| |
|
| | #define FATAL_STREAM( stream ) \ |
| | printErrorAndAbort( std::string( "Fatal error: " ) + stream ) |
| |
|
// Convert the raw C-style argv array (including argv[0], the program name)
// into a vector of std::string for convenient downstream parsing.
std::vector<std::string> get_arguments(int argc, char **argv)
{
	// Iterator-range construction copies each argv entry into a std::string.
	return std::vector<std::string>(argv, argv + argc);
}
| |
|
| | int main(int argc, char **argv) |
| | { |
| |
|
| | std::vector<std::string> arguments = get_arguments(argc, argv); |
| |
|
| | |
| | if (arguments.size() == 1) |
| | { |
| | std::cout << "For command line arguments see:" << std::endl; |
| | std::cout << " https://github.com/TadasBaltrusaitis/OpenFace/wiki/Command-line-arguments"; |
| | return 0; |
| | } |
| |
|
| | |
| | |
| | LandmarkDetector::FaceModelParameters det_parameters(arguments); |
| | |
| | LandmarkDetector::CLNF face_model(det_parameters.model_location); |
| |
|
| | if (!face_model.loaded_successfully) |
| | { |
| | std::cout << "ERROR: Could not load the landmark detector" << std::endl; |
| | return 1; |
| | } |
| |
|
| | |
| | FaceAnalysis::FaceAnalyserParameters face_analysis_params(arguments); |
| | FaceAnalysis::FaceAnalyser face_analyser(face_analysis_params); |
| |
|
| | if (!face_model.eye_model) |
| | { |
| | std::cout << "WARNING: no eye model found" << std::endl; |
| | } |
| |
|
| | if (face_analyser.GetAUClassNames().size() == 0 && face_analyser.GetAUClassNames().size() == 0) |
| | { |
| | std::cout << "WARNING: no Action Unit models found" << std::endl; |
| | } |
| |
|
| | Utilities::SequenceCapture sequence_reader; |
| |
|
| | |
| | Utilities::Visualizer visualizer(arguments); |
| |
|
| | |
| | Utilities::FpsTracker fps_tracker; |
| | fps_tracker.AddFrame(); |
| |
|
| | while (true) |
| | { |
| |
|
| | |
| | if (!sequence_reader.Open(arguments)) |
| | break; |
| |
|
| | INFO_STREAM("Device or file opened"); |
| |
|
| | if (sequence_reader.IsWebcam()) |
| | { |
| | INFO_STREAM("WARNING: using a webcam in feature extraction, Action Unit predictions will not be as accurate in real-time webcam mode"); |
| | INFO_STREAM("WARNING: using a webcam in feature extraction, forcing visualization of tracking to allow quitting the application (press q)"); |
| | visualizer.vis_track = true; |
| | } |
| |
|
| | cv::Mat captured_image; |
| |
|
| | Utilities::RecorderOpenFaceParameters recording_params(arguments, true, sequence_reader.IsWebcam(), |
| | sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, sequence_reader.fps); |
| | if (!face_model.eye_model) |
| | { |
| | recording_params.setOutputGaze(false); |
| | } |
| | Utilities::RecorderOpenFace open_face_rec(sequence_reader.name, recording_params, arguments); |
| |
|
| | if (recording_params.outputGaze() && !face_model.eye_model) |
| | std::cout << "WARNING: no eye model defined, but outputting gaze" << std::endl; |
| |
|
| | captured_image = sequence_reader.GetNextFrame(); |
| |
|
| | |
| | double reported_completion = 0; |
| |
|
| | INFO_STREAM("Starting tracking"); |
| | while (!captured_image.empty()) |
| | { |
| | |
| | cv::Mat_<uchar> grayscale_image = sequence_reader.GetGrayFrame(); |
| |
|
| |
|
| | |
| | bool detection_success = LandmarkDetector::DetectLandmarksInVideo(captured_image, face_model, det_parameters, grayscale_image); |
| | |
| | |
| | cv::Point3f gazeDirection0(0, 0, 0); cv::Point3f gazeDirection1(0, 0, 0); cv::Vec2d gazeAngle(0, 0); |
| |
|
| | if (detection_success && face_model.eye_model) |
| | { |
| | GazeAnalysis::EstimateGaze(face_model, gazeDirection0, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, true); |
| | GazeAnalysis::EstimateGaze(face_model, gazeDirection1, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy, false); |
| | gazeAngle = GazeAnalysis::GetGazeAngle(gazeDirection0, gazeDirection1); |
| | } |
| | |
| | |
| | cv::Mat sim_warped_img; |
| | cv::Mat_<double> hog_descriptor; int num_hog_rows = 0, num_hog_cols = 0; |
| |
|
| | |
| | if (recording_params.outputAlignedFaces() || recording_params.outputHOG() || recording_params.outputAUs() || visualizer.vis_align || visualizer.vis_hog || visualizer.vis_aus) |
| | { |
| | face_analyser.AddNextFrame(captured_image, face_model.detected_landmarks, face_model.detection_success, sequence_reader.time_stamp, sequence_reader.IsWebcam()); |
| | face_analyser.GetLatestAlignedFace(sim_warped_img); |
| | face_analyser.GetLatestHOG(hog_descriptor, num_hog_rows, num_hog_cols); |
| | } |
| | |
| | |
| | cv::Vec6d pose_estimate = LandmarkDetector::GetPose(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy); |
| |
|
| | |
| | fps_tracker.AddFrame(); |
| |
|
| | |
| | visualizer.SetImage(captured_image, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy); |
| | visualizer.SetObservationFaceAlign(sim_warped_img); |
| | visualizer.SetObservationHOG(hog_descriptor, num_hog_rows, num_hog_cols); |
| | visualizer.SetObservationLandmarks(face_model.detected_landmarks, face_model.detection_certainty, face_model.GetVisibilities()); |
| | visualizer.SetObservationPose(pose_estimate, face_model.detection_certainty); |
| | visualizer.SetObservationGaze(gazeDirection0, gazeDirection1, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), face_model.detection_certainty); |
| | visualizer.SetObservationActionUnits(face_analyser.GetCurrentAUsReg(), face_analyser.GetCurrentAUsClass()); |
| | visualizer.SetFps(fps_tracker.GetFPS()); |
| |
|
| | |
| | char character_press = visualizer.ShowObservation(); |
| | |
| | |
| | if (character_press == 'q') |
| | { |
| | break; |
| | } |
| |
|
| | |
| | open_face_rec.SetObservationHOG(detection_success, hog_descriptor, num_hog_rows, num_hog_cols, 31); |
| | open_face_rec.SetObservationVisualization(visualizer.GetVisImage()); |
| | open_face_rec.SetObservationActionUnits(face_analyser.GetCurrentAUsReg(), face_analyser.GetCurrentAUsClass()); |
| | open_face_rec.SetObservationLandmarks(face_model.detected_landmarks, face_model.GetShape(sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy), |
| | face_model.params_global, face_model.params_local, face_model.detection_certainty, detection_success); |
| | open_face_rec.SetObservationPose(pose_estimate); |
| | open_face_rec.SetObservationGaze(gazeDirection0, gazeDirection1, gazeAngle, LandmarkDetector::CalculateAllEyeLandmarks(face_model), LandmarkDetector::Calculate3DEyeLandmarks(face_model, sequence_reader.fx, sequence_reader.fy, sequence_reader.cx, sequence_reader.cy)); |
| | open_face_rec.SetObservationTimestamp(sequence_reader.time_stamp); |
| | open_face_rec.SetObservationFaceID(0); |
| | open_face_rec.SetObservationFrameNumber(sequence_reader.GetFrameNumber()); |
| | open_face_rec.SetObservationFaceAlign(sim_warped_img); |
| | open_face_rec.WriteObservation(); |
| | open_face_rec.WriteObservationTracked(); |
| | |
| | |
| | if (sequence_reader.GetProgress() >= reported_completion / 10.0) |
| | { |
| | std::cout << reported_completion * 10 << "% "; |
| | if (reported_completion == 10) |
| | { |
| | std::cout << std::endl; |
| | } |
| | reported_completion = reported_completion + 1; |
| | } |
| |
|
| | |
| | captured_image = sequence_reader.GetNextFrame(); |
| |
|
| | } |
| |
|
| | INFO_STREAM("Closing output recorder"); |
| | open_face_rec.Close(); |
| | INFO_STREAM("Closing input reader"); |
| | sequence_reader.Close(); |
| | INFO_STREAM("Closed successfully"); |
| |
|
| | if (recording_params.outputAUs()) |
| | { |
| | INFO_STREAM("Postprocessing the Action Unit predictions"); |
| | face_analyser.PostprocessOutputFile(open_face_rec.GetCSVFile()); |
| | } |
| |
|
| | |
| | face_analyser.Reset(); |
| | face_model.Reset(); |
| |
|
| | } |
| |
|
| | return 0; |
| | } |
| |
|