@article{10.3844/ajassp.2017.886.898,
  article_type = {journal},
  title = {Visual Tracking using Invariant Feature Descriptor},
  author = {Ong, Lee-Yeng and Lau, Siong-Hoe and Koo, Voon-Chet},
  journal = {American Journal of Applied Sciences},
  publisher = {Science Publications},
  volume = {14},
  year = {2017},
  month = {Sep},
  pages = {886-898},
  doi = {10.3844/ajassp.2017.886.898},
  url = {https://thescipub.com/abstract/ajassp.2017.886.898},
  abstract = {The process of identifying the state of an object in a video sequence is referred to as visual tracking. It is mainly achieved by using the appearance information from a reference image to recognize similar characteristics in the other images. Since a digital image is built up from rows and columns of pixels represented by a finite set of digital values, the appearance information is measured through a mathematical formulation known as image intensity. Distinguishing the intensity of the object of interest from other objects and the surrounding background is the main challenge in visual tracking. In this study, a novel invariant feature descriptor model is introduced to address this problem. The proposed framework is inspired by the theoretical model of local features that has been widely used for image recognition. Across a large number of diversified scenarios in surveillance applications, the performance of the proposed model is demonstrated on a benchmark dataset for single-target tracking. The experimental results show the advantage of the proposed model for tracking non-rigid objects against a changing background compared to other state-of-the-art visual trackers. In addition, the important aspects of the proposed model are analyzed and highlighted in the experimental discussion.}
}