@comment{BibTeX bibliography: Kinect / ROS / SLAM references}
@article{performance,
  language  = {English},
  copyright = {Copyright 2015, The Institution of Engineering and Technology},
  title     = {Performance Analysis of the {Microsoft} {Kinect} Sensor for {2D} Simultaneous Localization and Mapping ({SLAM}) Techniques},
  journal   = {Sensors},
  author    = {Kamarudin, K. and Mamduh, S. M. and Shakaff, A. Y. M. and Zakaria, A.},
  volume    = {14},
  number    = {12},
  year      = {2014},
  pages     = {23365--23387},
  issn      = {1424-8220},
  address   = {Switzerland},
  abstract  = {This paper presents a performance analysis of two open-source, laser scanner-based Simultaneous Localization and Mapping (SLAM) techniques (i.e., Gmapping and Hector SLAM) using a Microsoft Kinect to replace the laser sensor. Furthermore, the paper proposes a new system integration approach whereby a Linux virtual machine is used to run the open source SLAM algorithms. The experiments were conducted in two different environments; a small room with no features and a typical office corridor with desks and chairs. Using the data logged from real-time experiments, each SLAM technique was simulated and tested with different parameter settings. The results show that the system is able to achieve real time SLAM operation. The system implementation offers a simple and reliable way to compare the performance of Windows-based SLAM algorithm with the algorithms typically implemented in a Robot Operating System (ROS). The results also indicate that certain modifications to the default laser scanner-based parameters are able to improve the map accuracy. However, the limited field of view and range of Kinect's depth sensor often causes the map to be inaccurate, especially in featureless areas, therefore the Kinect sensor is not a direct replacement for a laser scanner, but rather offers a feasible alternative for 2D SLAM tasks.},
  keywords  = {control engineering computing;Linux;mobile robots;optical scanners;real-time systems;SLAM (robots);virtual machines;},
  doi       = {10.3390/s141223365},
}
|
|
|
|
@misc{robotas,
  language     = {English},
  title        = {{ROS} and {Kinect}- {Ubuntu} Installation},
  author       = {Prodromou, Dimitrios},
  abstract     = {Une pr{\'e}sentation succinte de l'installation de ROS, d'Eclipse et des drivers de Kinect sur Ubuntu 10.10 Maverick. L'article propose aussi un test de l'installation, puis la mani{\`e}re d'installer PCL},
  howpublished = {\url{http://robotas.at/ros-and-kinect-ubuntu-installation/}},
}
|
|
|
|
@inproceedings{Kinect-robotic,
  language  = {English},
  copyright = {Copyright 2012, The Institution of Engineering and Technology},
  title     = {Study on the Use of {Microsoft} {Kinect} for Robotics Applications},
  booktitle = {2012 IEEE/ION Position, Location and Navigation Symposium - PLANS 2012},
  author    = {El-laithy, R. A. and Huang, Jidong and Yeh, M.},
  year      = {2012},
  pages     = {1280--1288},
  address   = {Piscataway, NJ, USA},
  abstract  = {The Microsoft X-Box Kinect Sensor is a revolutionary new depth camera that is used in the gaming industry to capture motions of people and players efficiently using the technology of an RGB camera and infrared camera to differentiate depth. In the Microsoft X-Box, Kinect was used to sense 3D perception of human's motions. It can also be used for robotic applications, precisely for indoor navigation through the process of reverse engineering. Certain software packages were made available and are open source from ``LibFreenect'' for Linux machines, Microsoft's Kinect SDK using the Kinect namespace on Visual Studio 2010 Express (C++ or Visual Basic), and Google's released ``Robotic Operating System (ROS)''. In order to claim that this sensor is capable of taking on such a task, we must be able to investigate thoroughly all factors that contribute to this and at the same time we must be able to understand its limitations to be applied and integrated properly with certain types of robots for accomplishing our purpose of achieving successful indoor navigation using proper algorithms. In this paper, the results from testing the Kinect sensor on an autonomous ground vehicle was given.},
  keywords  = {C++ language;cameras;control engineering computing;infrared imaging;Linux;mobile robots;operating systems (computers);reverse engineering;robot vision;software packages;Visual BASIC;},
  doi       = {10.1109/PLANS.2012.6236985},
}
|
|
|
|
@inproceedings{Kinect-3D,
  language  = {English},
  copyright = {Copyright 2012, The Institution of Engineering and Technology},
  title     = {{3D} with {Kinect}},
  booktitle = {2011 IEEE International Conference on Computer Vision Workshops (ICCV Workshops)},
  author    = {Smisek, J. and Jancosek, M. and Pajdla, T.},
  year      = {2011},
  pages     = {1154--1160},
  address   = {Piscataway, NJ, USA},
  abstract  = {We analyze Kinect as a 3D measuring device, experimentally investigate depth measurement resolution and error properties and make a quantitative comparison of Kinect accuracy with stereo reconstruction from SLR cameras and a 3D-TOF camera. We propose Kinect geometrical model and its calibration procedure providing an accurate calibration of Kinect 3D measurement and Kinect cameras. We demonstrate the functionality of Kinect calibration by integrating it into an SfM pipeline where 3D measurements from a moving Kinect are transformed into a common coordinate system by computing relative poses from matches in color camera.},
  keywords  = {calibration;cameras;image motion analysis;image reconstruction;image sensors;solid modelling;stereo image processing;},
  doi       = {10.1109/ICCVW.2011.6130380},
}
|
|
|
|
@article{Alisher20151475,
  title    = {Control of the Mobile Robots with {ROS} in Robotics Courses},
  journal  = {Procedia Engineering},
  volume   = {100},
  number   = {0},
  pages    = {1475--1484},
  year     = {2015},
  issn     = {1877-7058},
  doi      = {10.1016/j.proeng.2015.01.519},
  url      = {http://www.sciencedirect.com/science/article/pii/S1877705815005469},
  author   = {Alisher, Khassanov and Alexander, Krupenkin and Alexandr, Borgul},
  keywords = {ROS, robotics, education, multiagent system, remote control},
  abstract = {The paper describes implementation of mobile robots programming process with Robot Operating System (ROS) in student robotics courses. ROS provides different tools for data analysis, facilities of multiple robots and their sensors, teleoperation devices interaction thereby targeting engineering education. An example with the multiagent interaction between agent-evader and agent-pursuer were taken as the basic navigational task. The computed behavior of the virtual agents were successfully transferred to the quadcopters, Lego Mindstorms NXT based and Robotino robots. Diverse experimental tests were conducted using the algorithms on virtual agents and robotic platforms.},
}
|
|
|
|
@article{Alisher,
  internal-note = {Duplicate of entry Alisher20151475; key retained so existing citations still resolve. Consider consolidating.},
  title    = {Control of the Mobile Robots with {ROS} in Robotics Courses},
  journal  = {Procedia Engineering},
  volume   = {100},
  number   = {0},
  pages    = {1475--1484},
  year     = {2015},
  issn     = {1877-7058},
  doi      = {10.1016/j.proeng.2015.01.519},
  url      = {http://www.sciencedirect.com/science/article/pii/S1877705815005469},
  author   = {Alisher, Khassanov and Alexander, Krupenkin and Alexandr, Borgul},
  keywords = {ROS, robotics, education, multiagent system, remote control},
  abstract = {The paper describes implementation of mobile robots programming process with Robot Operating System (ROS) in student robotics courses. ROS provides different tools for data analysis, facilities of multiple robots and their sensors, teleoperation devices interaction thereby targeting engineering education. An example with the multiagent interaction between agent-evader and agent-pursuer were taken as the basic navigational task. The computed behavior of the virtual agents were successfully transferred to the quadcopters, Lego Mindstorms NXT based and Robotino robots. Diverse experimental tests were conducted using the algorithms on virtual agents and robotic platforms.},
}
|
|
|
|
@comment{ROS references}
|
|
|
|
@manual{Tutoriels,
  title        = {{ROS} Tutorials},
  organization = {ROS},
  howpublished = {\url{http://wiki.ros.org/ROS/Tutorials}},
  month        = nov,
  year         = {2014},
}
|
|
|
|
@comment{IEEE Xplore via Google Scholar}
|
|
|
|
@inproceedings{Postures,
  author    = {Zheng, Xiao and Fu, Mengyin and Yang, Yi and Lv, Ningyi},
  booktitle = {Intelligent Human-Machine Systems and Cybernetics (IHMSC), 2012 4th International Conference on},
  title     = {{3D} Human Postures Recognition Using {Kinect}},
  year      = {2012},
  month     = aug,
  volume    = {1},
  pages     = {344--347},
  abstract  = {In many application cases, 2D human postures display haven't been able to meet people's requirements which is failure to show human motions comprehensive, image and vivid. However, 3D human Postures display could restore and show human motions well, which is convenient for people to observe and learn human motions. This paper presents a method to recognize 3D human postures by using Microsoft Kinect sensor. Kinect is used as a capturing device. Capturing 3D human features mainly uses depth images obtained from Kinect sensor. Each pixel of depth images contains three-dimensional coordinate information of camera's scenes. Finally, the captured 3D human postures can be displayed by employing a human skeletal joints model and using a LED cube.},
  keywords  = {cameras;gait analysis;image motion analysis;image recognition;interactive systems;light emitting diodes;2D human posture display;3D human features;3D human posture display;3D human posture recognition;LED cube;Microsoft Kinect sensor;camera scenes;depth image pixels;human motions;human skeletal joint model;three-dimensional coordinate information;Cameras;Humans;Joints;Light emitting diodes;Robot sensing systems;Solid modeling;Vegetation;3D human postures;Kinect;LED cube;depth images;human motions;human skeletal joints model},
  doi       = {10.1109/IHMSC.2012.92},
}
|
|
|
|
@misc{ROS,
  title        = {Listes des biblioth{\`e}ques {ROS} li{\'e}es {\`a} {Kinect}},
  organization = {ROS},
  howpublished = {\url{http://www.ros.org/browse/search.php?distro=indigo&q=kinect}},
}
|
|
|
|
@misc{OpenKinect,
  title        = {{OpenKinect} Wiki},
  organization = {OpenKinect},
  howpublished = {\url{http://openkinect.org/wiki/Main_Page}},
}
|
|
|
|
@misc{Kinectwindows,
  title        = {{Kinect} for {Windows}},
  organization = {Microsoft},
  howpublished = {\url{http://www.microsoft.com/en-us/kinectforwindows/}},
}
|