Skip to content

Commit c8fa9f3

Browse files
committed
Use the Android detection result to speed up processing.
To close #42
1 parent ed1349f commit c8fa9f3

1 file changed

Lines changed: 171 additions & 1 deletion

File tree

src/detector/index.js

Lines changed: 171 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -557,6 +557,169 @@ var onframe = function(cameraId, motion_detected, file_path, person_count, start
557557
stop_current_tracking(cameraId)
558558
}
559559
}
560+
561+
// Consume a detection result produced on the Android device (faces were already
// detected and cropped on-device), update per-camera tracking state, and kick
// off embedding/clustering for any faces that still need recognition.
//
// cameraId           - camera/device identifier
// whole_file         - path of the full frame image on disk
// person_count       - number of persons reported by the Android side
// start_ts           - Date when processing of this frame started (cost logging)
// tracking_info      - tracking info loaded from the timeline store (may be null)
// current_tracker_id - active tracker id for this camera
// face_detected      - number of faces detected in this frame
// cropped_num        - number of cropped face images (kept for interface parity)
// cropped_images     - array of cropped face descriptors (see getCroppedFaces)
function handle_android_detection_result(cameraId, whole_file, person_count, start_ts,
    tracking_info, current_tracker_id, face_detected, cropped_num, cropped_images) {
  const ts = new Date().getTime()

  // Enqueue the whole frame for GIF assembly. The same payload was previously
  // duplicated at three call sites; centralized here.
  function enqueue_gif() {
    gifQueue.add({
      person_count: person_count,
      cameraId: cameraId,
      current_tracker_id: current_tracker_id,
      whole_file: whole_file,
      name_sorting: false});
  }

  tracking_info && console.log(tracking_info);
  ON_DEBUG && console.log('detect callback')
  const current_person_count = getCurrentPersonCount(cameraId)
  console.log('['+cameraId+'] tid: '+current_tracker_id+' person num: '+person_count+' face num: '+face_detected+' cost: '+(new Date() - start_ts));
  setCurrentPersonCount(cameraId, person_count)
  setCurrentFaceCount(cameraId, face_detected)
  if (person_count >= 1) {
    extend_tracker_id_life_time(cameraId)
  } else if (face_detected === 0) {
    stop_current_tracking(cameraId)
    timeline.update(current_tracker_id, 'track_stopped', 0, null)
  }
  // TODO: multiple-person handling (keep the tracker id stable when several
  // persons are detected) — previously sketched here as commented-out code,
  // along with a face-count-change check that restarted the tracker.

  const faces_to_be_recognited = getFaceRecognitionTaskList(cameraId,
      cropped_images, tracking_info, current_tracker_id)
  if (faces_to_be_recognited.length > 0) {
    // Sampling principle: while one frame's embeddings are still being
    // computed, skip new frames entirely instead of queueing them up.
    if (SAMPLING_TO_SAVE_ENERGY_MODE) {
      if (getEmbeddingInProcessingStatus(cameraId)) {
        console.log('Sampling mode, skip this frame since previous calcuation is in progress, need delete images of faces')
        // NOTE(review): the cropped face images of this skipped frame are not
        // deleted here — confirm whether they leak on disk.
        enqueue_gif();
        return
      }
    }

    // Safety net: if embedding_clustering never calls back, clear the
    // in-processing flag so this camera does not get stuck skipping frames.
    const embedding_timeout = setTimeout(function () {
      console.log('timeout of tack embedding_clustering, manually recover it')
      setEmbeddingInProcessingStatus(cameraId, false)
    }, TASK_IN_DETECTOR_EXPIRE_IN_SECONDS * 1000)

    setEmbeddingInProcessingStatus(cameraId, true)
    deepeye.embedding_clustering(faces_to_be_recognited, current_tracker_id, function (err, results) {
      setEmbeddingInProcessingStatus(cameraId, false)
      clearTimeout(embedding_timeout)
      timeline.update(current_tracker_id, 'in_tracking', person_count, results)

      if (GIF_UPLOADING) {
        // Capture the motion image path BEFORE enqueueing: the gif queue
        // deletes whole_file, so save_face_motion_image_path must run first.
        const jpg_motion_path = face_motions.save_face_motion_image_path(current_tracker_id, whole_file);
        timeline.push_gif_info(current_tracker_id, jpg_motion_path, results, ts, function (err) {
          if (err) {
            console.log(err)
          }
        })
        enqueue_gif();
      } else {
        deepeye.delete_image(whole_file)
      }
    })
  } else {
    // No faces need recognition on this frame.
    if (GIF_UPLOADING) {
      enqueue_gif();
    } else {
      deepeye.delete_image(whole_file)
    }
  }
}
656+
657+
// Entry point for frames posted by the Android client. "Motion detected" means
// motion occurred within the defined duration; the duration can be configured
// on the web GUI.
//
// Expected json shape (posted to /post2): { deviceName, motion, msg: [...] }
// where each msg item describes one detection (see getCroppedFaces).
function onframe_for_android(json) {
  const cameraId = json.deviceName;
  const motion_detected = json.motion;
  const start_ts = new Date();
  // Guard against a missing msg array so a malformed post cannot throw here.
  const msg = json.msg || [];
  const person_count = msg.length || 0;
  // NOTE(review): msg is an array, so msg.wholeImagePath is normally
  // undefined — confirm where the full-frame path really lives in the payload.
  const file_path = msg.wholeImagePath

  console.log('onframe for android '+ cameraId +' motion detected frame has motion: '+ motion_detected)
  const previous_diff = new Date().getTime() - getOldTimeStamp(cameraId)
  console.log('previous_diff', previous_diff)
  setOldTimeStamp(cameraId, new Date().getTime())

  let current_tracker_id = false;

  if (motion_detected === true) {
    if (is_in_tracking(cameraId)) {
      current_tracker_id = getCurrentTrackerId(cameraId)
      // Deliberately NOT extending the tracker lifetime here: better to
      // extend only when person_count > 0 (done in the result handler).
    } else {
      start_new_tracker_id(cameraId)
    }

    current_tracker_id = getCurrentTrackerId(cameraId)
    const croppedFaces = getCroppedFaces(msg, current_tracker_id, person_count, cameraId)

    console.log('current tracker id', current_tracker_id, croppedFaces);
    timeline.get_tracking_info(current_tracker_id, function (error, tracking_info) {
      // Face detection already ran on the device, so server-side detection is
      // skipped and the result is handed straight to the shared handler.
      // face_detected and cropped_num are both the number of cropped faces.
      return handle_android_detection_result(cameraId, file_path, person_count, start_ts,
          tracking_info, current_tracker_id, croppedFaces.length, croppedFaces.length, croppedFaces)
    })
  } else if (is_in_tracking(cameraId)) {
    // Currently unreachable: motion detection is not in use, so motion is
    // always reported. Kept for when motion detection is re-enabled.
    current_tracker_id = getCurrentTrackerId(cameraId)
    face_motions.clean_up_face_motion_folder(cameraId, current_tracker_id)
    stop_current_tracking(cameraId)
  }
}
700+
701+
// Build the list of cropped-face descriptors from the Android detection array.
//
// detectedArray - detection items reported by the device; each may carry
//                 faceNum, faceStyle, faceBlurry, faceWidth, faceHeight,
//                 faceImagePath
// trackerid     - tracker id stamped onto every face descriptor
// totalPeople   - total person count for this frame
// deviceId      - camera/device identifier
// Returns one descriptor per item whose faceNum is non-zero.
function getCroppedFaces(detectedArray, trackerid, totalPeople, deviceId) {
  const result = [];
  detectedArray.forEach((item) => {
    console.log(item);
    // Strict comparison: faceNum is numeric in the device payload.
    if (item.faceNum !== 0) {
      result.push({
        trackerid: trackerid,
        style: item.faceStyle,
        blury: item.faceBlurry,
        width: item.faceWidth,
        totalPeople: totalPeople,
        path: item.faceImagePath,
        ts: new Date().valueOf(),
        cameraId: deviceId,
        height: item.faceHeight
      });
    }
  })
  return result;
}
722+
560723
motion.init(onframe)
561724
if(UPLOAD_IMAGE_SERVICE_ENABLED){
562725
upload_listener.init(onframe)
@@ -586,7 +749,14 @@ router.get('/post', (request, response) => {
586749
});
587750

588751
// POST /post2 — receives a detection result pushed from the Android client.
// The response is sent immediately and the payload is processed on the next
// tick, so the device is never blocked waiting on server-side processing.
app.post('/post2', (request, response) => {
  const payload = request.body;
  setTimeout(function () {
    try {
      onframe_for_android(payload);
    } catch (err) {
      // A bad payload must not crash the server; log it and move on.
      console.log(err);
    }
  }, 0)
  response.json({message: 'OK'});
})
592762
app.listen(port,'0.0.0.0' ,() => console.log('Listening on port ',port));

0 commit comments

Comments
 (0)