by Yu, Xueming, Wang, Shanhe, Busch, Jay, Phan, Thai, McSheery, Tracy, Bolas, Mark and Debevec, Paul
Abstract:
High-end facial performance capture solutions typically use head-mounted camera systems which provide one or more close-up video streams of each actor's performance. These provide clear views of each actor's performance, but can be bulky, uncomfortable, get in the way of sight lines, and prevent actors from getting close to each other. To address this, we propose a virtual head-mounted camera system: an array of cameras placed around the performance capture volume which automatically track zoomed-in, sharply focussed, high-resolution views of each actor's face from a multitude of directions. The resulting imagery can be used in conjunction with body motion capture data to derive nuanced facial performances without head-mounted cameras.
Reference:
Virtual Headcam: Pan/tilt Mirror-based Facial Performance Tracking (Yu, Xueming, Wang, Shanhe, Busch, Jay, Phan, Thai, McSheery, Tracy, Bolas, Mark and Debevec, Paul), In Proceedings of ACM SIGGRAPH 2015 Posters, ACM, 2015.
Bibtex Entry:
@inproceedings{yu_virtual_2015,
  title     = {Virtual {Headcam}: {Pan/Tilt} Mirror-Based Facial Performance Tracking},
  shorttitle = {Virtual headcam},
  url       = {http://ict.usc.edu/pubs/Virtual%20Headcam%20-%20Pantilt%20Mirror-based%20Facial%20Performance%20Tracking.pdf},
  abstract  = {High-end facial performance capture solutions typically use head-mounted camera systems which provide one or more close-up video streams of each actor's performance. These provide clear views of each actor's performance, but can be bulky, uncomfortable, get in the way of sight lines, and prevent actors from getting close to each other. To address this, we propose a virtual head-mounted camera system: an array of cameras placed around the performance capture volume which automatically track zoomed-in, sharply focussed, high-resolution views of each actor's face from a multitude of directions. The resulting imagery can be used in conjunction with body motion capture data to derive nuanced facial performances without head-mounted cameras.},
  booktitle = {Proceedings of {ACM} {SIGGRAPH} 2015 Posters},
  publisher = {ACM},
  address   = {New York, NY, USA},
  internal-note = {Conference held in Los Angeles, CA; address is the publisher's city per BibTeX convention.},
  author    = {Yu, Xueming and Wang, Shanhe and Busch, Jay and Phan, Thai and McSheery, Tracy and Bolas, Mark and Debevec, Paul},
  month     = aug,
  year      = {2015},
  keywords  = {Graphics, MxR, UARC},
  pages     = {94},
}