diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index b5a7cc1..0000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,24 +0,0 @@ -image: osrf/ros:galactic-desktop - -variables: - GIT_CLONE_PATH: $CI_BUILDS_DIR/src/ros2_v4l2_camera - -build: - stage: build - script: - - apt update - - cd ${CI_BUILDS_DIR} - - rosdep update && rosdep install --from-paths src -r -y - # Build - - colcon build --cmake-args -DCMAKE_BUILD_TYPE=Debug - # Test - - colcon test --event-handlers console_cohesion+ --packages-select v4l2_camera - # Move artifact directories so they can be uploaded - - cp -r build src/ros2_v4l2_camera/build - - cp -r install src/ros2_v4l2_camera/install - # Summarize tests and fail on test failures - - colcon test-result - artifacts: - reports: - junit: build/v4l2_camera/test_results/v4l2_camera/*.xunit.xml - diff --git a/gpu_imgproc/CMakeLists.txt b/gpu_imgproc/CMakeLists.txt new file mode 100644 index 0000000..54237f2 --- /dev/null +++ b/gpu_imgproc/CMakeLists.txt @@ -0,0 +1,292 @@ +cmake_minimum_required(VERSION 3.5) +project(gpu_imgproc LANGUAGES CXX) + +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/cmake/") + +# Default to C++14 +if(NOT CMAKE_CXX_STANDARD) + set(CMAKE_CXX_STANDARD 14) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Wall -Wextra -Wpedantic -Wunused-function) +endif() + +find_package(catkin REQUIRED COMPONENTS + roscpp + sensor_msgs + cv_bridge +) + +catkin_package( + INCLUDE_DIRS include + LIBRARIES gpu_imgproc sensor_msgs + CATKIN_DEPENDS roscpp sensor_msgs cv_bridge image_geometry +) + +include_directories( + include + ${catkin_INCLUDE_DIRS} +) + +find_package (LibJpegTurbo) +if (${LibJpegTurbo_FOUND}) + message("LibJpegTurbo found") + add_definitions(-DTURBOJPEG_AVAILABLE) +endif () + +find_package(CUDA) +if (CUDA_FOUND) + enable_language(CUDA) +endif() +find_package(NVJPEG) +find_library(CUDART_LIBRARY cudart 
${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES}) +find_library(CULIBOS culibos ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES}) +if (${NVJPEG_FOUND} AND CUDART_LIBRARY AND CULIBOS) + add_definitions(-DNVJPEG_AVAILABLE) +else() + message("NVJPEG or its dependencies not found") +endif() + +find_library(CUDA_nppicc_LIBRARY nppicc ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES}) +find_library(CUDA_nppidei_LIBRARY nppidei ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES}) +find_library(CUDA_nppig_LIBRARY nppig ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES}) +find_library(CUDA_nppisu_LIBRARY nppisu ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES}) + +if (CUDA_nppicc_LIBRARY AND CUDA_nppidei_LIBRARY AND CUDA_nppig_LIBRARY AND CUDA_nppisu_LIBRARY) + message("NPP found") + add_definitions(-DNPP_AVAILABLE) + set(NPP_FOUND TRUE) +endif() + +find_package(OpenCV REQUIRED PATHS "/usr/local/") # what to do about this one? +# If OpenCV is found +if (${OpenCV_FOUND}) + message("OpenCV version: ${OpenCV_VERSION}") + find_package(cv_bridge REQUIRED) + add_definitions(-DOPENCV_AVAILABLE) +endif() + +if (OpenCV_CUDA_VERSION) + # Found OpenCV with CUDA support + message("OpenCV CUDA version: ${OpenCV_CUDA_VERSION}") + add_definitions(-DOPENCV_CUDA_AVAILABLE) +else() + message("OpenCV CUDA not found") +endif() + +if (EXISTS "/etc/nv_tegra_release") + set(JETSON TRUE) + message(STATUS "Jetson platform detected") + add_definitions(-DJETSON_AVAILABLE) +else () + set(JETSON FALSE) + message(STATUS "Non-Jetson platform detected") +endif() + +if (NOT JETSON AND NOT NVJPEG_FOUND AND NOT LibJpegTurbo_FOUND) + message(FATAL_ERROR "No JPEG encoder found") +endif() + +find_package(sensor_msgs REQUIRED) + +# Jetson +if (JETSON) + include(FetchContent) + FetchContent_Declare(cuda-api-wrappers + GIT_REPOSITORY https://github.com/eyalroz/cuda-api-wrappers.git + GIT_TAG 831666a0bfd1af0f44f4fa234ee2d983d347fcaa # v0.6.1-rc1 + ) + FetchContent_MakeAvailable(cuda-api-wrappers) + + cuda_add_library(color_space SHARED + src/accelerator/color_space.cu + ) + 
set_target_properties(color_space PROPERTIES CUDA_ARCHITECTURES 72) + + add_library(jpeg_compressor SHARED + src/accelerator/jpeg_compressor.cpp + /usr/src/jetson_multimedia_api/samples/common/classes/NvBuffer.cpp + /usr/src/jetson_multimedia_api/samples/common/classes/NvElement.cpp + /usr/src/jetson_multimedia_api/samples/common/classes/NvElementProfiler.cpp + /usr/src/jetson_multimedia_api/samples/common/classes/NvJpegEncoder.cpp + /usr/src/jetson_multimedia_api/samples/common/classes/NvLogging.cpp + ) + target_include_directories(jpeg_compressor PUBLIC + ${CUDA_INCLUDE_DIRS} + ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES} + ${SYS_ROOT}/usr/src/jetson_multimedia_api/include + ${SYS_ROOT}/usr/src/jetson_multimedia_api/include/libjpeg-8b + ) + target_link_directories(jpeg_compressor PUBLIC + ${SYS_ROOT}/lib/aarch64-linux-gnu + ${SYS_ROOT}/usr/lib/aarch64-linux-gnu + ${SYS_ROOT}/usr/lib/aarch64-linux-gnu/tegra) + target_link_libraries(jpeg_compressor + ${CUDA_nppicc_LIBRARY} + ${LIBJPEGTURBO_LIBRARIES} + nvjpeg + cuda-api-wrappers::runtime-and-driver + color_space + ) + + message(STATUS "Using Jetson Multimedia API") + + # include_directories(/usr/local/include/opencv4) + add_library(rectifier SHARED + src/accelerator/rectifier.cpp) + target_include_directories(rectifier PUBLIC + ${CUDA_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ) + target_link_directories(rectifier PUBLIC + ${SYS_ROOT}/lib/aarch64-linux-gnu + ${SYS_ROOT}/usr/lib/aarch64-linux-gnu + ${SYS_ROOT}/usr/lib/aarch64-linux-gnu/tegra) + target_link_libraries(rectifier + ${CUDA_nppidei_LIBRARY} + ${CUDA_nppig_LIBRARY} + ${CUDA_nppicc_LIBRARY} + ${CUDA_nppisu_LIBRARY} + ${OpenCV_LIBRARIES} + ${catkin_LIBRARIES} + ) + + add_library(gpu_imgproc SHARED + src/gpu_imgproc.cpp + ) + + target_link_libraries(gpu_imgproc + ${CUDA_LIBRARIES} + ${OpenCV_LIBRARIES} + rectifier + jpeg_compressor + ${catkin_LIBRARIES} + ) + + add_executable(gpu_imgproc_node + src/gpu_imgproc_node.cpp + ) + 
target_include_directories(gpu_imgproc_node PUBLIC + $ + $ + ) + target_link_libraries(gpu_imgproc_node + ${CUDA_LIBRARIES} + ${OpenCV_LIBRARIES} + ${catkin_LIBRARIES} + gpu_imgproc + ) + + install( + TARGETS gpu_imgproc_node gpu_imgproc color_space + ARCHIVE DESTINATION lib + LIBRARY DESTINATION lib + RUNTIME DESTINATION bin) +else() + add_library(jpeg_compressor SHARED + src/accelerator/jpeg_compressor.cpp + ) + + if (NVJPEG_FOUND AND LibJpegTurbo_FOUND) + target_include_directories(jpeg_compressor PUBLIC + ${CUDA_INCLUDE_DIRS} + ${LibJpegTurbo_INCLUDE_DIRS} + ${NVJPEG_INCLUDE_DIRS} + ) + target_link_libraries(jpeg_compressor + ${CUDA_nppicc_LIBRARY} + ${LibJpegTurbo_LIBRARY} + ${NVJPEG_LIBRARY} + ${CUDART_LIBRARY} + ${CULIBOS} + ${catkin_LIBRARIES} + ) + elseif (NVJPEG_FOUND) + target_include_directories(jpeg_compressor PUBLIC + ${CUDA_INCLUDE_DIRS} + ${NVJPEG_INCLUDE_DIRS} + ) + target_link_libraries(jpeg_compressor + ${CUDA_nppicc_LIBRARY} + ${NVJPEG_LIBRARY} + ${CUDART_LIBRARY} + ${CULIBOS} + ${catkin_LIBRARIES} + ) + elseif (LibJpegTurbo_FOUND) + target_include_directories(jpeg_compressor PUBLIC + ${CUDA_INCLUDE_DIRS} + ${LibJpegTurbo_INCLUDE_DIRS} + ) + target_link_libraries(jpeg_compressor + ${LibJpegTurbo_LIBRARY} + ${catkin_LIBRARIES} + ) + else() + message(FATAL_ERROR "No JPEG encoder found") + endif() + + add_library(rectifier SHARED + src/accelerator/rectifier.cpp) + + if (NPP_FOUND AND OpenCV_FOUND) + message("Using NPP") + target_include_directories(rectifier PUBLIC + ${CUDA_INCLUDE_DIRS} + ${OpenCV_INCLUDE_DIRS} + ) + target_link_libraries(rectifier + ${CUDA_nppidei_LIBRARY} + ${CUDA_nppig_LIBRARY} + ${CUDA_nppicc_LIBRARY} + ${CUDA_nppisu_LIBRARY} + ${OpenCV_LIBRARIES} + ${catkin_LIBRARIES} + ) + elseif(OpenCV_FOUND) + message("Using OpenCV") + target_include_directories(rectifier PUBLIC + ${OpenCV_INCLUDE_DIRS} + ) + target_link_libraries(rectifier + ${OpenCV_LIBRARIES} + ${catkin_LIBRARIES} + ) + else() + message(FATAL_ERROR "No rectifier found") + 
endif() + + add_library(gpu_imgproc SHARED + src/gpu_imgproc.cpp + ) + + target_link_libraries(gpu_imgproc + rectifier + jpeg_compressor + ${catkin_LIBRARIES} + ) + target_include_directories(gpu_imgproc PUBLIC + $ + $ + ) + + add_executable(gpu_imgproc_node + src/gpu_imgproc_node.cpp + ) + target_include_directories(gpu_imgproc_node PUBLIC + $ + $ + ) + target_link_libraries(gpu_imgproc_node + ${catkin_LIBRARIES} + gpu_imgproc + ) + # target_compile_options(gpu_imgproc_node PRIVATE -Wall -Wextra -Wpedantic -Wunused-function) + + install( + TARGETS gpu_imgproc_node gpu_imgproc + ARCHIVE DESTINATION lib + LIBRARY DESTINATION lib + RUNTIME DESTINATION bin) +endif() \ No newline at end of file diff --git a/gpu_imgproc/LICENSE b/gpu_imgproc/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/gpu_imgproc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/gpu_imgproc/README.md b/gpu_imgproc/README.md new file mode 100644 index 0000000..9b914d0 --- /dev/null +++ b/gpu_imgproc/README.md @@ -0,0 +1 @@ +# gpu_imgproc \ No newline at end of file diff --git a/gpu_imgproc/cmake/FindLibJpegTurbo.cmake b/gpu_imgproc/cmake/FindLibJpegTurbo.cmake new file mode 100644 index 0000000..c00bcea --- /dev/null +++ b/gpu_imgproc/cmake/FindLibJpegTurbo.cmake @@ -0,0 +1,31 @@ +# - Try to find libjpeg-turbo +# Once done, this will define +# +# LibJpegTurbo_FOUND - system has libjpeg-turbo +# LibJpegTurbo_INCLUDE_DIRS - the libjpeg-turbo include directories +# LibJpegTurbo_LIBRARY - link these to use libjpeg-turbo +# +# this file is modeled after http://www.cmake.org/Wiki/CMake:How_To_Find_Libraries + +include(LibFindMacros) + +# Use pkg-config to get hints about paths +libfind_pkg_check_modules(LibJpegTurbo_PKGCONF LibJpegTurbo) + +# Include dir +find_path(LibJpegTurbo_INCLUDE_DIRS + NAMES turbojpeg.h + HINTS ${LibJpegTurbo_PKGCONF_INCLUDE_DIRS} /opt/libjpeg-turbo/include /usr/include/ + ) + +# Finally the library itself +find_library(LibJpegTurbo_LIBRARY + NAMES turbojpeg + HINTS ${LibJpegTurbo_PKGCONF_LIBRARY_DIRS} /opt/libjpeg-turbo/lib /usr/lib/ + ) + +# Set the include dir variables and the libraries and let libfind_process do the rest. +# NOTE: Singular variables for this library, plural for libraries this this lib depends on. 
+set(LibJpegTurbo_PROCESS_INCLUDES LibJpegTurbo_INCLUDE_DIRS) +set(LibJpegTurbo_PROCESS_LIBS LibJpegTurbo_LIBRARY) +libfind_process(LibJpegTurbo) diff --git a/gpu_imgproc/cmake/FindNVJPEG.cmake b/gpu_imgproc/cmake/FindNVJPEG.cmake new file mode 100644 index 0000000..7f91517 --- /dev/null +++ b/gpu_imgproc/cmake/FindNVJPEG.cmake @@ -0,0 +1,78 @@ +# Derived from: caffe2/cmake/Modules/FindJpegTurbo.cmake +# +# - Try to find nvjpeg +# +# The following variables are optionally searched for defaults +# NVJPEG_ROOT_DIR: Base directory where all NVJPEG components are found +# +# The following are set after configuration is done: +# NVJPEG_FOUND +# NVJPEG_INCLUDE_DIR +# NVJPEG_LIBRARY + +set(NVJPEG_ROOT_DIR "" CACHE PATH "Folder contains NVJPEG") + +find_path(NVJPEG_INCLUDE_DIR nvjpeg.h + PATHS ${NVJPEG_ROOT_DIR} ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES} + PATH_SUFFIXES include) + +find_library(NVJPEG_LIBRARY libnvjpeg_static.a nvjpeg + PATHS ${NVJPEG_ROOT_DIR} ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES} + PATH_SUFFIXES lib lib64) + +execute_process(COMMAND grep NVJPEG_VER_MAJOR ${NVJPEG_INCLUDE_DIR}/nvjpeg.h + COMMAND sed "s;.*NVJPEG_VER_MAJOR \\(.*\\);\\1;" + OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE NVJPEG_VERSION) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(NVJPEG + REQUIRED_VARS NVJPEG_INCLUDE_DIR NVJPEG_LIBRARY + VERSION_VAR NVJPEG_VERSION) + +if(NVJPEG_FOUND) + # set includes and link libs for nvJpeg + + if (POLICY CMP0075) + cmake_policy(SET CMP0075 NEW) + endif() + + set(CMAKE_REQUIRED_INCLUDES_OLD ${CMAKE_REQUIRED_INCLUDES}) + list(APPEND CMAKE_REQUIRED_INCLUDES "${NVJPEG_INCLUDE_DIR}") + list(APPEND CMAKE_REQUIRED_INCLUDES "${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}") + set(CMAKE_REQUIRED_LIBRARIES_OLD ${CMAKE_REQUIRED_LIBRARIES}) + foreach(DIR ${CMAKE_CUDA_IMPLICIT_LINK_DIRECTORIES}) + list(APPEND CMAKE_REQUIRED_LIBRARIES "-L${DIR}") + endforeach(DIR) + + list(APPEND CMAKE_REQUIRED_LIBRARIES "${NVJPEG_LIBRARY}" 
cudart_static culibos dl m pthread rt) + include(CheckCXXSymbolExists) + check_cxx_symbol_exists("nvjpegCreateEx" "nvjpeg.h" NVJPEG_LIBRARY_0_2_0) + check_cxx_symbol_exists("nvjpegBufferPinnedCreate" "nvjpeg.h" NVJPEG_DECOUPLED_API) + check_cxx_symbol_exists("nvjpegDecodeBatchedPreAllocate" "nvjpeg.h" NVJPEG_PREALLOCATE_API) + + include(CheckCXXSourceCompiles) + check_cxx_source_compiles( + "#include <nvjpeg.h> + int main(){ + return NVJPEG_BACKEND_LOSSLESS_JPEG != 6; + }" + NVJPEG_LOSSLESS_SUPPORTED) + + set(CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES_OLD}) + set(CMAKE_REQUIRED_INCLUDES ${CMAKE_REQUIRED_INCLUDES_OLD}) + + mark_as_advanced(NVJPEG_ROOT_DIR NVJPEG_LIBRARY_RELEASE NVJPEG_LIBRARY_DEBUG) + message("nvJPEG found in ${NVJPEG_INCLUDE_DIR}") + if (NVJPEG_DECOUPLED_API) + message("nvJPEG is using new API") + else() + message(FATAL_ERROR "nvjpegBufferPinnedCreate is required but not present in the available version of nvJPEG") + endif() + if (NVJPEG_LOSSLESS_SUPPORTED) + message("nvJPEG lossless supported") + else() + message("nvJPEG lossless NOT supported") + endif() +else() + message("nvJPEG NOT found") +endif() \ No newline at end of file diff --git a/gpu_imgproc/cmake/LibFindMacros.cmake b/gpu_imgproc/cmake/LibFindMacros.cmake new file mode 100644 index 0000000..d1703f1 --- /dev/null +++ b/gpu_imgproc/cmake/LibFindMacros.cmake @@ -0,0 +1,98 @@ +# Works the same as find_package, but forwards the "REQUIRED" and "QUIET" arguments +# used for the current package. For this to work, the first parameter must be the +# prefix of the current package, then the prefix of the new package etc, which are +# passed to find_package. 
+macro (libfind_package PREFIX) + set (LIBFIND_PACKAGE_ARGS ${ARGN}) + if (${PREFIX}_FIND_QUIETLY) + set (LIBFIND_PACKAGE_ARGS ${LIBFIND_PACKAGE_ARGS} QUIET) + endif (${PREFIX}_FIND_QUIETLY) + if (${PREFIX}_FIND_REQUIRED) + set (LIBFIND_PACKAGE_ARGS ${LIBFIND_PACKAGE_ARGS} REQUIRED) + endif (${PREFIX}_FIND_REQUIRED) + find_package(${LIBFIND_PACKAGE_ARGS}) +endmacro (libfind_package) + +# CMake developers made the UsePkgConfig system deprecated in the same release (2.6) +# where they added pkg_check_modules. Consequently I need to support both in my scripts +# to avoid those deprecated warnings. Here's a helper that does just that. +# Works identically to pkg_check_modules, except that no checks are needed prior to use. +macro (libfind_pkg_check_modules PREFIX PKGNAME) + if (${CMAKE_MAJOR_VERSION} EQUAL 2 AND ${CMAKE_MINOR_VERSION} EQUAL 4) + include(UsePkgConfig) + pkgconfig(${PKGNAME} ${PREFIX}_INCLUDE_DIRS ${PREFIX}_LIBRARY_DIRS ${PREFIX}_LDFLAGS ${PREFIX}_CFLAGS) + else (${CMAKE_MAJOR_VERSION} EQUAL 2 AND ${CMAKE_MINOR_VERSION} EQUAL 4) + find_package(PkgConfig) + if (PKG_CONFIG_FOUND) + pkg_check_modules(${PREFIX} ${PKGNAME}) + endif (PKG_CONFIG_FOUND) + endif (${CMAKE_MAJOR_VERSION} EQUAL 2 AND ${CMAKE_MINOR_VERSION} EQUAL 4) +endmacro (libfind_pkg_check_modules) + +# Do the final processing once the paths have been detected. +# If include dirs are needed, ${PREFIX}_PROCESS_INCLUDES should be set to contain +# all the variables, each of which contain one include directory. +# Ditto for ${PREFIX}_PROCESS_LIBS and library files. +# Will set ${PREFIX}_FOUND, ${PREFIX}_INCLUDE_DIRS and ${PREFIX}_LIBRARIES. +# Also handles errors in case library detection was required, etc. 
+macro (libfind_process PREFIX) + # Skip processing if already processed during this run + if (NOT ${PREFIX}_FOUND) + # Start with the assumption that the library was found + set (${PREFIX}_FOUND TRUE) + + # Process all includes and set _FOUND to false if any are missing + foreach (i ${${PREFIX}_PROCESS_INCLUDES}) + if (${i}) + set (${PREFIX}_INCLUDE_DIRS ${${PREFIX}_INCLUDE_DIRS} ${${i}}) + mark_as_advanced(${i}) + else (${i}) + set (${PREFIX}_FOUND FALSE) + endif (${i}) + endforeach (i) + + # Process all libraries and set _FOUND to false if any are missing + foreach (i ${${PREFIX}_PROCESS_LIBS}) + if (${i}) + set (${PREFIX}_LIBRARIES ${${PREFIX}_LIBRARIES} ${${i}}) + mark_as_advanced(${i}) + else (${i}) + set (${PREFIX}_FOUND FALSE) + endif (${i}) + endforeach (i) + + # Print message and/or exit on fatal error + if (${PREFIX}_FOUND) + if (NOT ${PREFIX}_FIND_QUIETLY) + message (STATUS "Found ${PREFIX} ${${PREFIX}_VERSION}") + endif (NOT ${PREFIX}_FIND_QUIETLY) + else (${PREFIX}_FOUND) + if (${PREFIX}_FIND_REQUIRED) + foreach (i ${${PREFIX}_PROCESS_INCLUDES} ${${PREFIX}_PROCESS_LIBS}) + message("${i}=${${i}}") + endforeach (i) + message (FATAL_ERROR "Required library ${PREFIX} NOT FOUND.\nInstall the library (dev version) and try again. If the library is already installed, use ccmake to set the missing variables manually.") + endif (${PREFIX}_FIND_REQUIRED) + endif (${PREFIX}_FOUND) + endif (NOT ${PREFIX}_FOUND) +endmacro (libfind_process) + +macro(libfind_library PREFIX basename) + set(TMP "") + if(MSVC80) + set(TMP -vc80) + endif(MSVC80) + if(MSVC90) + set(TMP -vc90) + endif(MSVC90) + set(${PREFIX}_LIBNAMES ${basename}${TMP}) + if(${ARGC} GREATER 2) + set(${PREFIX}_LIBNAMES ${basename}${TMP}-${ARGV2}) + string(REGEX REPLACE "\\." 
"_" TMP ${${PREFIX}_LIBNAMES}) + set(${PREFIX}_LIBNAMES ${${PREFIX}_LIBNAMES} ${TMP}) + endif(${ARGC} GREATER 2) + find_library(${PREFIX}_LIBRARY + NAMES ${${PREFIX}_LIBNAMES} + PATHS ${${PREFIX}_PKGCONF_LIBRARY_DIRS} + ) +endmacro(libfind_library) diff --git a/gpu_imgproc/include/accelerator/color_space.hpp b/gpu_imgproc/include/accelerator/color_space.hpp new file mode 100644 index 0000000..b1ba84a --- /dev/null +++ b/gpu_imgproc/include/accelerator/color_space.hpp @@ -0,0 +1,29 @@ +#ifndef JETSON_ENCODER_COMPRESSED_IMAGE_TRANSPORT_RGB8_TO_BGR8 +#define JETSON_ENCODER_COMPRESSED_IMAGE_TRANSPORT_RGB8_TO_BGR8 + +#include + +#include + +// namespace jetson_encoder_compressed_image_transport +// { +typedef enum ColorSpaceStandard { + ColorSpaceStandard_BT709 = 1, + ColorSpaceStandard_Unspecified = 2, + ColorSpaceStandard_Reserved = 3, + ColorSpaceStandard_FCC = 4, + ColorSpaceStandard_BT470 = 5, + ColorSpaceStandard_BT601 = 6, + ColorSpaceStandard_SMPTE240M = 7, + ColorSpaceStandard_YCgCo = 8, + ColorSpaceStandard_BT2020 = 9, + ColorSpaceStandard_BT2020C = 10 +} ColorSpaceStandard; + +cudaError_t cudaRGB8ToBGR8(uint8_t * input, int width, int height, int step); + +cudaError_t cudaBGRToYUV420(uint8_t * input, uint8_t * output, int width, int height, int matrix = 0); +cudaError_t cudaRGBToYUV420(uint8_t * input, uint8_t * output, int width, int height, int matrix = 0); +// } // namespace jetson_encoder_compressed_image_transport + +#endif // JETSON_ENCODER_COMPRESSED_IMAGE_TRANSPORT_RGB8_TO_BGR8 \ No newline at end of file diff --git a/gpu_imgproc/include/accelerator/jpeg_compressor.hpp b/gpu_imgproc/include/accelerator/jpeg_compressor.hpp new file mode 100644 index 0000000..fe64d05 --- /dev/null +++ b/gpu_imgproc/include/accelerator/jpeg_compressor.hpp @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include + +#ifdef TURBOJPEG_AVAILABLE +#include +#endif + +#ifdef JETSON_AVAILABLE +#include +#include +#endif + +#ifdef NVJPEG_AVAILABLE +#include +#endif + +class 
NvJPEGEncoder; + +namespace JpegCompressor { +using Image = sensor_msgs::Image; +using CompressedImage = sensor_msgs::CompressedImage; +typedef std::shared_ptr CompressedImagePtr; + +enum class ImageFormat { + RGB, + BGR +}; + +#ifdef TURBOJPEG_AVAILABLE +class CPUCompressor { +public: + CPUCompressor(); + ~CPUCompressor(); + + CompressedImagePtr compress(const Image &msg, int quality = 90, int format = TJPF_RGB, int sampling = TJ_420); +private: + tjhandle handle_; + unsigned char *jpegBuf_; + unsigned long size_; +}; +#endif + +#ifdef JETSON_AVAILABLE +class JetsonCompressor { +public: + JetsonCompressor(std::string name); + ~JetsonCompressor(); + + CompressedImagePtr compress(const Image &msg, int quality = 90, ImageFormat format = ImageFormat::RGB); +private: + NvJPEGEncoder *encoder_; + size_t image_size{}; + size_t yuv_size{}; + cuda::memory::device::unique_ptr dev_image; + cuda::memory::host::unique_ptr host_yuv; + cuda::memory::device::unique_ptr dev_yuv; +}; +#endif + +#ifdef NVJPEG_AVAILABLE +class NVJPEGCompressor { +public: + NVJPEGCompressor(); + ~NVJPEGCompressor(); + + CompressedImagePtr compress(const Image &msg, int quality = 90, ImageFormat format = ImageFormat::RGB); +private: + // void setNVJPEGParams(int quality, ImageFormat format); + void setNVImage(const Image &msg); + + cudaStream_t stream_; + nvjpegHandle_t handle_; + nvjpegEncoderState_t state_; + nvjpegEncoderParams_t params_; + nvjpegInputFormat_t input_format_; + nvjpegChromaSubsampling_t subsampling_; + nvjpegImage_t nv_image_; +}; +#endif + +} // namespace JpegCompressor \ No newline at end of file diff --git a/gpu_imgproc/include/accelerator/rectifier.hpp b/gpu_imgproc/include/accelerator/rectifier.hpp new file mode 100644 index 0000000..4ff626f --- /dev/null +++ b/gpu_imgproc/include/accelerator/rectifier.hpp @@ -0,0 +1,88 @@ +#pragma once + +#include +#include +// #include + +#ifdef OPENCV_AVAILABLE +#include +#endif +#ifdef OPENCV_CUDA_AVAILABLE +#include +#endif + +#if 
NPP_AVAILABLE +#include +#endif + +using CameraInfo = sensor_msgs::CameraInfo; +using Image = sensor_msgs::Image; +typedef std::shared_ptr ImagePtr; + +namespace Rectifier { + +enum class Implementation { + NPP, + OpenCV_CPU, + OpenCV_GPU +}; + +enum class MappingImpl { + NPP, + OpenCV +}; + +#if NPP_AVAILABLE +class NPPRectifier { +public: + NPPRectifier(int width, int height, + const Npp32f *map_x, const Npp32f *map_y); + NPPRectifier(const CameraInfo &info, + MappingImpl impl = MappingImpl::NPP, + double alpha = 0.0); + ~NPPRectifier(); + + ImagePtr rectify(const Image &msg); +private: + Npp32f *pxl_map_x_; + Npp32f *pxl_map_y_; + int pxl_map_x_step_; + int pxl_map_y_step_; + int interpolation_; + cudaStream_t stream_; +}; +#endif + +#ifdef OPENCV_AVAILABLE +class OpenCVRectifierCPU { +public: + OpenCVRectifierCPU(const CameraInfo &info, + MappingImpl impl = MappingImpl::OpenCV, + double alpha = 0.0); + ~OpenCVRectifierCPU(); + + ImagePtr rectify(const Image &msg); +private: + cv::Mat map_x_; + cv::Mat map_y_; + cv::Mat camera_intrinsics_; + cv::Mat distortion_coeffs_; +}; +#endif + +#ifdef OPENCV_CUDA_AVAILABLE +class OpenCVRectifierGPU { +public: + OpenCVRectifierGPU(const CameraInfo &info, + MappingImpl impl = MappingImpl::OpenCV, + double alpha = 0.0); + ~OpenCVRectifierGPU(); + + ImagePtr rectify(const Image &msg); +private: + cv::cuda::GpuMat map_x_; + cv::cuda::GpuMat map_y_; +}; +#endif + +} // namespace Rectifier \ No newline at end of file diff --git a/gpu_imgproc/include/gpu_imgproc/gpu_imgproc.hpp b/gpu_imgproc/include/gpu_imgproc/gpu_imgproc.hpp new file mode 100644 index 0000000..9c6a4bf --- /dev/null +++ b/gpu_imgproc/include/gpu_imgproc/gpu_imgproc.hpp @@ -0,0 +1,63 @@ +#pragma once + +#include +// #include + +#include "accelerator/rectifier.hpp" +#include "accelerator/jpeg_compressor.hpp" +// #include + +namespace gpu_imgproc { + +using Image = sensor_msgs::Image; +using CompressedImage = sensor_msgs::CompressedImage; +using CompressedImagePtr = 
std::shared_ptr; +using CameraInfo = sensor_msgs::CameraInfo; + +class GpuImgProc { +public: + explicit GpuImgProc(ros::NodeHandle &nh, ros::NodeHandle &pnh); + virtual ~GpuImgProc(); + +private: + void imageCallback(const Image::ConstPtr &msg); + void cameraInfoCallback(const CameraInfo::ConstPtr &msg); + + ros::NodeHandle node_; + ros::NodeHandle private_node_; + +#if NPP_AVAILABLE + std::shared_ptr npp_rectifier_; +#endif +#ifdef OPENCV_AVAILABLE + std::shared_ptr cv_cpu_rectifier_; +#endif +#ifdef OPENCV_CUDA_AVAILABLE + std::shared_ptr cv_gpu_rectifier_; +#endif +#ifdef JETSON_AVAILABLE + std::shared_ptr raw_compressor_; + std::shared_ptr rect_compressor_; +#elif NVJPEG_AVAILABLE + std::shared_ptr raw_compressor_; + std::shared_ptr rect_compressor_; +#elif TURBOJPEG_AVAILABLE + std::shared_ptr raw_compressor_; + std::shared_ptr rect_compressor_; +#endif + + ros::Subscriber img_sub_; + ros::Subscriber info_sub_; + + ros::Publisher rectified_pub_; + ros::Publisher compressed_pub_; + ros::Publisher rect_compressed_pub_; + + Rectifier::Implementation rectifier_impl_; + Rectifier::MappingImpl mapping_impl_; + bool rectifier_active_; + double alpha_; +}; + + +} // namespace gpu_imgproc \ No newline at end of file diff --git a/gpu_imgproc/launch/mul_v4l2.launch.xml b/gpu_imgproc/launch/mul_v4l2.launch.xml new file mode 100644 index 0000000..010a6f6 --- /dev/null +++ b/gpu_imgproc/launch/mul_v4l2.launch.xml @@ -0,0 +1,41 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/gpu_imgproc/launch/v4l2_camera.launch.xml b/gpu_imgproc/launch/v4l2_camera.launch.xml new file mode 100644 index 0000000..4859989 --- /dev/null +++ b/gpu_imgproc/launch/v4l2_camera.launch.xml @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/gpu_imgproc/package.xml b/gpu_imgproc/package.xml new 
file mode 100644 index 0000000..deae3f1 --- /dev/null +++ b/gpu_imgproc/package.xml @@ -0,0 +1,28 @@ + + + + gpu_imgproc + 0.1.0 + GPU ROS Topic Image Processing + MAP IV + + Apache 2 + Tier IV + + catkin + + roscpp + + sensor_msgs + libopencv-dev + cv_bridge + image_geometry + libturbojpeg + + ament_cmake_gtest + ament_lint_auto + + + catkin + + \ No newline at end of file diff --git a/gpu_imgproc/src/accelerator/color_space.cu b/gpu_imgproc/src/accelerator/color_space.cu new file mode 100644 index 0000000..821487e --- /dev/null +++ b/gpu_imgproc/src/accelerator/color_space.cu @@ -0,0 +1,222 @@ +// Copyright 2022 Daisuke Nishimatsu +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "accelerator/color_space.hpp" + +// namespace jetson_encoder_compressed_image_transport +// { +inline __device__ __host__ int iDivUp(int a, int b) {return (a % b != 0) ? 
(a / b + 1) : (a / b);} + +__global__ void RGB8ToBGR8(uint8_t * input, int width, int height, int step) +{ + //2D Index of current thread + const int x_index = blockIdx.x * blockDim.x + threadIdx.x; + const int y_index = blockIdx.y * blockDim.y + threadIdx.y; + + //Only valid threads perform memory I/O + if ((x_index < width) && (y_index < height)) { + //Location of colored pixel in input + const int color_tid = y_index * step + (3 * x_index); + const unsigned char t = input[color_tid + 0]; + input[color_tid + 0] = input[color_tid + 2]; + input[color_tid + 2] = t; + } +} + +cudaError_t cudaRGB8ToBGR8(uint8_t * input, int width, int height, int step) +{ + if (!input) { + return cudaErrorInvalidDevicePointer; + } + + const dim3 blockDim(16, 16); + const dim3 gridDim(iDivUp(width, blockDim.x), iDivUp(height, blockDim.y)); + + RGB8ToBGR8 << < gridDim, blockDim >> > (input, width, height, step); + + return cudaGetLastError(); +} + +__constant__ float matRGB2YUV[3][3]; + + +inline void getConstants(int matrix, float & wr, float & wb, int & black, int & white, int & max) +{ + black = 16; + white = 235; + max = 255; + + switch (matrix) { + case ColorSpaceStandard_BT709: + default: + wr = 0.2126f; wb = 0.0722f; + break; + + case ColorSpaceStandard_FCC: + wr = 0.30f; wb = 0.11f; + break; + + case ColorSpaceStandard_BT470: + case ColorSpaceStandard_BT601: + wr = 0.2990f; wb = 0.1140f; + break; + + case ColorSpaceStandard_SMPTE240M: + wr = 0.212f; wb = 0.087f; + break; + + case ColorSpaceStandard_BT2020: + case ColorSpaceStandard_BT2020C: + wr = 0.2627f; wb = 0.0593f; + // 10-bit only + black = 64 << 6; white = 940 << 6; + max = (1 << 16) - 1; + break; + } +} + +void setMatRGB2YUV(int matrix) +{ + float wr, wb; + int black, white, max; + getConstants(matrix, wr, wb, black, white, max); + float mat[3][3] = { + wr, 1.0f - wb - wr, wb, + -0.5f * wr / (1.0f - wb), -0.5f * (1 - wb - wr) / (1.0f - wb), 0.5f, + 0.5f, -0.5f * (1.0f - wb - wr) / (1.0f - wr), -0.5f * wb / (1.0f - wr), + 
}; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + mat[i][j] = (float)(1.0 * (white - black) / max * mat[i][j]); + } + } + cudaMemcpyToSymbol(matRGB2YUV, mat, sizeof(mat)); +} + +template +__device__ inline YUVUnit RGBToY(RGBUnit r, RGBUnit g, RGBUnit b) +{ + const YUVUnit low = 1 << (sizeof(YUVUnit) * 8 - 4); + return matRGB2YUV[0][0] * r + matRGB2YUV[0][1] * g + matRGB2YUV[0][2] * b + low; +} + +template +__device__ inline YUVUnit RGBToU(RGBUnit r, RGBUnit g, RGBUnit b) +{ + const YUVUnit mid = 1 << (sizeof(YUVUnit) * 8 - 1); + return matRGB2YUV[1][0] * r + matRGB2YUV[1][1] * g + matRGB2YUV[1][2] * b + mid; +} + +template +__device__ inline YUVUnit RGBToV(RGBUnit r, RGBUnit g, RGBUnit b) +{ + const YUVUnit mid = 1 << (sizeof(YUVUnit) * 8 - 1); + return matRGB2YUV[2][0] * r + matRGB2YUV[2][1] * g + matRGB2YUV[2][2] * b + mid; +} + +template +__global__ static void BGRToYUVKernel( + uint8_t * input, uint8_t * output, int yuv_pitch, int width, int height) +{ + int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; + int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2; + if (x + 1 >= width || y + 1 >= height) { + return; + } + + uint8_t * src = input + x * 3 + y * width * 3; + + uint8_t * int2a = src; + uint8_t * int2b = src + width * 3; + + uint8_t b = (int2a[0] + int2a[3] + int2b[0] + int2b[3]) / 4, + g = (int2a[1] + int2a[4] + int2b[1] + int2b[4]) / 4, + r = (int2a[2] + int2a[5] + int2b[2] + int2b[5]) / 4; + + uint8_t * dst = output + x + y * width; + + dst[0] = RGBToY(int2a[0 + 2], int2a[0 + 1], int2a[0 + 0]); + dst[1] = RGBToY(int2a[1 * 3 + 2], int2a[1 * 3 + 1], int2a[1 * 3 + 0]); + dst[width] = RGBToY(int2b[0 + 2], int2b[0 + 1], int2b[0 + 0]); + dst[width + 1] = RGBToY(int2b[1 * 3 + 2], int2b[1 * 3 + 1], int2b[1 * 3 + 0]); + *(output + width * height + + static_cast(width / 2) * (static_cast(y / 2)) + x / 2) = + RGBToU(r, g, b); + *(output + width * height + static_cast(width / 2) * static_cast(height / 2) + + static_cast(width / 2) * + 
(static_cast(y / 2)) + x / 2) = RGBToV(r, g, b); +} + +template +__global__ static void RGBToYUVKernel( + uint8_t * input, uint8_t * output, int yuv_pitch, int width, int height) +{ + int x = (threadIdx.x + blockIdx.x * blockDim.x) * 2; + int y = (threadIdx.y + blockIdx.y * blockDim.y) * 2; + if (x + 1 >= width || y + 1 >= height) { + return; + } + + uint8_t * src = input + x * 3 + y * width * 3; + + uint8_t * int2a = src; + uint8_t * int2b = src + width * 3; + + uint8_t r = (int2a[0] + int2a[3] + int2b[0] + int2b[3]) / 4, + g = (int2a[1] + int2a[4] + int2b[1] + int2b[4]) / 4, + b = (int2a[2] + int2a[5] + int2b[2] + int2b[5]) / 4; + + uint8_t * dst = output + x + y * width; + + dst[0] = RGBToY(int2a[0 + 0], int2a[0 + 1], int2a[0 + 2]); + dst[1] = RGBToY(int2a[1 * 3 + 0], int2a[1 * 3 + 1], int2a[1 * 3 + 2]); + dst[width] = RGBToY(int2b[0 + 0], int2b[0 + 1], int2b[0 + 2]); + dst[width + 1] = RGBToY(int2b[1 * 3 + 0], int2b[1 * 3 + 1], int2b[1 * 3 + 2]); + *(output + width * height + + static_cast(width / 2) * (static_cast(y / 2)) + x / 2) = + RGBToU(r, g, b); + *(output + width * height + static_cast(width / 2) * static_cast(height / 2) + + static_cast(width / 2) * + (static_cast(y / 2)) + x / 2) = RGBToV(r, g, b); +} + +cudaError_t cudaBGRToYUV420(uint8_t * input, uint8_t * output, int width, int height, int matrix) +{ + if (!input) { + return cudaErrorInvalidDevicePointer; + } + + setMatRGB2YUV(matrix); + BGRToYUVKernel + << < dim3((width + 63) / 32 / 2, (height + 3) / 2 / 2), dim3(32, 2) >> > + (input, output, 4 * width, width, height); + + return cudaGetLastError(); +} + +cudaError_t cudaRGBToYUV420(uint8_t * input, uint8_t * output, int width, int height, int matrix) +{ + if (!input) { + return cudaErrorInvalidDevicePointer; + } + + setMatRGB2YUV(matrix); + RGBToYUVKernel + << < dim3((width + 63) / 32 / 2, (height + 3) / 2 / 2), dim3(32, 2) >> > + (input, output, 4 * width, width, height); + + return cudaGetLastError(); +} + +// } // namespace 
jetson_encoder_compressed_image_transport \ No newline at end of file diff --git a/gpu_imgproc/src/accelerator/jpeg_compressor.cpp b/gpu_imgproc/src/accelerator/jpeg_compressor.cpp new file mode 100644 index 0000000..714ac8b --- /dev/null +++ b/gpu_imgproc/src/accelerator/jpeg_compressor.cpp @@ -0,0 +1,208 @@ +#include +#include + +#include + +#include "accelerator/jpeg_compressor.hpp" +#include "accelerator/color_space.hpp" + +#if defined(JETSON_AVAILABLE) || defined(NVJPEG_AVAILABLE) +#include +#endif + +#define TEST_ERROR(cond, str) if(cond) { \ + fprintf(stderr, "%s\n", str); } + +#define CHECK_CUDA(status) \ + if (status != cudaSuccess) { \ + ROS_ERROR("CUDA failure: '%s' at %s:%d", cudaGetErrorString(status), __FILE__, __LINE__); \ + } + +#define CHECK_NVJPEG(call) \ + { \ + nvjpegStatus_t _e = (call); \ + if (_e != NVJPEG_STATUS_SUCCESS) { \ + ROS_ERROR("NVJPEG failure: '%d' at %s:%d", _e, __FILE__, __LINE__); \ + exit(1); \ + } \ + } + +namespace JpegCompressor { + +#ifdef TURBOJPEG_AVAILABLE +CPUCompressor::CPUCompressor() + : jpegBuf_(nullptr), size_(0) { + handle_ = tjInitCompress(); +} + +CPUCompressor::~CPUCompressor() { + if (jpegBuf_) + tjFree(jpegBuf_); + tjDestroy(handle_); +} + +CompressedImagePtr CPUCompressor::compress(const Image &msg, int quality, int format, int sampling) { + CompressedImagePtr compressed_msg = std::make_shared(); + compressed_msg->header = msg.header; + compressed_msg->format = "jpeg"; + + if (jpegBuf_) { + tjFree(jpegBuf_); + jpegBuf_ = nullptr; + } + + int tjres = tjCompress2(handle_, + msg.data.data(), + msg.width, + 0, + msg.height, + format, + &jpegBuf_, + &size_, + sampling, + quality, + TJFLAG_FASTDCT); + + TEST_ERROR(tjres != 0, tjGetErrorStr2(handle_)); + + compressed_msg->data.resize(size_); + memcpy(compressed_msg->data.data(), jpegBuf_, size_); + + return compressed_msg; +} +#endif + +#ifdef JETSON_AVAILABLE +JetsonCompressor::JetsonCompressor(std::string name) { + encoder_ = 
NvJPEGEncoder::createJPEGEncoder(name.c_str()); +} + +JetsonCompressor::~JetsonCompressor() { + delete encoder_; +} + +CompressedImagePtr JetsonCompressor::compress(const Image &msg, int quality, ImageFormat format) { + CompressedImagePtr compressed_msg = std::make_shared(); + compressed_msg->header = msg.header; + compressed_msg->format = "jpeg"; + + int width = msg.width; + int height = msg.height; + const auto &img = msg.data; + + if (image_size < img.size()) { + dev_image = cuda::memory::device::make_unique(img.size()); + yuv_size = + width * height + + (static_cast(width / 2) * static_cast(height / 2)) * 2; + host_yuv = cuda::memory::host::make_unique(yuv_size); + dev_yuv = cuda::memory::device::make_unique(yuv_size); + image_size = img.size(); + } + + cuda::memory::copy(dev_image.get(), img.data(), img.size()); + + if (format == ImageFormat::RGB) { + TEST_ERROR(cudaRGBToYUV420(dev_image.get(), dev_yuv.get(), width, height) != + cudaSuccess, "failed to convert rgb8 to yuv420"); + } else { + TEST_ERROR(cudaBGRToYUV420(dev_image.get(), dev_yuv.get(), width, height) != + cudaSuccess, "failed to convert bgr8 to yuv420"); + } + + cuda::memory::copy(host_yuv.get(), dev_yuv.get(), yuv_size); + + NvBuffer buffer(V4L2_PIX_FMT_YUV420M, width, height, 0); + TEST_ERROR(buffer.allocateMemory() != 0, "NvBuffer allocation failed"); + + auto image_data = reinterpret_cast(host_yuv.get()); + + for (uint32_t i = 0; i < buffer.n_planes; ++i) { + NvBuffer::NvBufferPlane &plane = buffer.planes[i]; + plane.bytesused = plane.fmt.stride * plane.fmt.height; + memcpy(plane.data, image_data, plane.bytesused); + image_data += plane.bytesused; + } + + size_t out_buf_size = width * height * 3 / 2; + compressed_msg->data.resize(out_buf_size); + auto out_data = compressed_msg->data.data(); + + TEST_ERROR( + encoder_->encodeFromBuffer(buffer, JCS_YCbCr, &out_data, + out_buf_size, quality), + "NvJpeg Encoder Error"); + + buffer.deallocateMemory(); + + compressed_msg->data.resize(out_buf_size); 
+ + return compressed_msg; +} +#endif + +#ifdef NVJPEG_AVAILABLE +NVJPEGCompressor::NVJPEGCompressor() { + CHECK_CUDA(cudaStreamCreate(&stream_)); + // CHECK_NVJPEG(nvjpegCreateEx(NVJPEG_BACKEND_DEFAULT, NULL, NULL, NVJPEG_FLAGS_DEFAULT, &handle_)) + CHECK_NVJPEG(nvjpegCreateSimple(&handle_)); + CHECK_NVJPEG(nvjpegEncoderStateCreate(handle_, &state_, stream_)); + CHECK_NVJPEG(nvjpegEncoderParamsCreate(handle_, ¶ms_, stream_)); + + nvjpegEncoderParamsSetSamplingFactors(params_, NVJPEG_CSS_420, stream_); + + std::memset(&nv_image_, 0, sizeof(nv_image_)); +} + +NVJPEGCompressor::~NVJPEGCompressor() { + CHECK_NVJPEG(nvjpegEncoderParamsDestroy(params_)); + CHECK_NVJPEG(nvjpegEncoderStateDestroy(state_)); + CHECK_NVJPEG(nvjpegDestroy(handle_)); + CHECK_CUDA(cudaStreamDestroy(stream_)); +} + +CompressedImagePtr NVJPEGCompressor::compress(const Image &msg, int quality, ImageFormat format) { + #warning TODO: implement format conversion or get rid of the parameter + CompressedImagePtr compressed_msg = std::make_shared(); + compressed_msg->header = msg.header; + compressed_msg->format = "jpeg"; + + nvjpegEncoderParamsSetQuality(params_, quality, stream_); + + setNVImage(msg); + CHECK_NVJPEG(nvjpegEncodeImage(handle_, state_, params_, &nv_image_, NVJPEG_INPUT_RGBI, + msg.width, msg.height, stream_)); + + unsigned long out_buf_size = 0; + + CHECK_NVJPEG(nvjpegEncodeRetrieveBitstream(handle_, state_, NULL, &out_buf_size, stream_)); + compressed_msg->data.resize(out_buf_size); + CHECK_NVJPEG(nvjpegEncodeRetrieveBitstream(handle_, state_, compressed_msg->data.data(), + &out_buf_size, stream_)); + + CHECK_CUDA(cudaStreamSynchronize(stream_)); + + return compressed_msg; +} + +void NVJPEGCompressor::setNVImage(const Image &msg) { + unsigned char *p = nullptr; + CHECK_CUDA(cudaMallocAsync((void **)&p, msg.data.size(), stream_)); + if (nv_image_.channel[0] != NULL) { + CHECK_CUDA(cudaFreeAsync(nv_image_.channel[0], stream_)); + } + + CHECK_CUDA(cudaMemcpyAsync(p, msg.data.data(), 
msg.data.size(), cudaMemcpyHostToDevice, stream_)); + + // int channels = image.size() / (image.width * image.height); + int channels = 3; + + std::memset(&nv_image_, 0, sizeof(nv_image_)); + + // Assuming RGBI/BGRI + nv_image_.pitch[0] = msg.width * channels; + nv_image_.channel[0] = p; +} +#endif + +} // namespace JpegCompressor \ No newline at end of file diff --git a/gpu_imgproc/src/accelerator/rectifier.cpp b/gpu_imgproc/src/accelerator/rectifier.cpp new file mode 100644 index 0000000..6dde9a0 --- /dev/null +++ b/gpu_imgproc/src/accelerator/rectifier.cpp @@ -0,0 +1,312 @@ +#include +#include "accelerator/rectifier.hpp" +// #include + +#if NPP_AVAILABLE +#include +#include +#include +#include +#endif + +#ifdef OPENCV_AVAILABLE +#include +#include +#include +#include + +#ifdef OPENCV_CUDA_AVAILABLE +// #include +// #include +#include +#include +#include +#include +#include +#include +#endif +#endif + + +#define CHECK_NPP(status) \ + if (status != NPP_SUCCESS) { \ + ROS_ERROR("NPP failure: '%d' at %s:%d", status, __FILE__, __LINE__); \ + } + +#define CHECK_CUDA(status) \ + if (status != cudaSuccess) { \ + ROS_ERROR("CUDA failure: '%s' at %s:%d", cudaGetErrorName(status), __FILE__, __LINE__); \ + } + +namespace Rectifier { + +static void compute_maps(int width, int height, const double *D, const double *P, + float *map_x, float *map_y) { + ROS_WARN("No support for alpha in non-OpenCV mapping"); + + double fx = P[0]; + double fy = P[5]; + double cx = P[2]; + double cy = P[6]; + + double k1 = D[0]; + double k2 = D[1]; + double p1 = D[2]; + double p2 = D[3]; + double k3 = D[4]; + + for (int v = 0; v < height; v++) { + for (int u = 0; u < width; u++) { + double x = (u - cx) / fx; + double y = (v - cy) / fy; + double r2 = x * x + y * y; + double r4 = r2 * r2; + double r6 = r4 * r2; + double cdist = 1 + k1 * r2 + k2 * r4 + k3 * r6; + double xd = x * cdist; + double yd = y * cdist; + double x2 = xd * xd; + double y2 = yd * yd; + double xy = xd * yd; + double kr = 1 + p1 
* r2 + p2 * r4; + map_x[v * width + u] = (float)(fx * (xd * kr + 2 * p1 * xy + p2 * (r2 + 2 * x2)) + cx); + map_y[v * width + u] = (float)(fy * (yd * kr + p1 * (r2 + 2 * y2) + 2 * p2 * xy) + cy); + } + } +} + +#ifdef OPENCV_AVAILABLE +static void compute_maps_opencv(const CameraInfo &info, float *map_x, float *map_y, double alpha = 0.0) { + cv::Mat camera_intrinsics(3, 3, CV_64F); + cv::Mat distortion_coefficients(1, 5, CV_64F); + + for (int row=0; row<3; row++) { + for (int col=0; col<3; col++) { + camera_intrinsics.at(row, col) = info.K[row * 3 + col]; + } + } + + for (int col=0; col<5; col++) { + distortion_coefficients.at(col) = info.D[col]; + } + + cv::Mat new_intrinsics = cv::getOptimalNewCameraMatrix(camera_intrinsics, + distortion_coefficients, + cv::Size(info.width, info.height), + alpha); + + cv::Mat m1(info.height, info.width, CV_32FC1, map_x); + cv::Mat m2(info.height, info.width, CV_32FC1, map_y); + + cv::initUndistortRectifyMap(camera_intrinsics, + distortion_coefficients, + cv::Mat(), + new_intrinsics, + cv::Size(info.width, info.height), + CV_32FC1, + m1, m2); +} +#endif + +#if NPP_AVAILABLE +NPPRectifier::NPPRectifier(int width, int height, + const Npp32f *map_x, const Npp32f *map_y) + : pxl_map_x_(nullptr), pxl_map_y_(nullptr) { + cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking); + + pxl_map_x_ = nppiMalloc_32f_C1(width, height, &pxl_map_x_step_); + if (pxl_map_x_ == nullptr) { + ROS_ERROR("Failed to allocate GPU memory"); + return; + } + pxl_map_y_ = nppiMalloc_32f_C1(width, height, &pxl_map_y_step_); + if (pxl_map_y_ == nullptr) { + ROS_ERROR("Failed to allocate GPU memory"); + return; + } + CHECK_CUDA(cudaMemcpy2D(pxl_map_x_, pxl_map_x_step_, map_x, width * sizeof(float), width * sizeof(float), height, cudaMemcpyHostToDevice)); + CHECK_CUDA(cudaMemcpy2D(pxl_map_y_, pxl_map_y_step_, map_y, width * sizeof(float), width * sizeof(float), height, cudaMemcpyHostToDevice)); +} + +NPPRectifier::NPPRectifier(const CameraInfo& info, MappingImpl 
impl, double alpha) { + cudaStreamCreateWithFlags(&stream_, cudaStreamNonBlocking); + + nppSetStream(stream_); + + pxl_map_x_ = nppiMalloc_32f_C1(info.width, info.height, &pxl_map_x_step_); + if (pxl_map_x_ == nullptr) { + ROS_ERROR("Failed to allocate GPU memory"); + return; + } + pxl_map_y_ = nppiMalloc_32f_C1(info.width, info.height, &pxl_map_y_step_); + if (pxl_map_y_ == nullptr) { + ROS_ERROR("Failed to allocate GPU memory"); + return; + } + + std::cout << "Rectifying image with " << info.width << "x" << info.height << " pixels" << std::endl; + + // Create rectification map + // TODO: Verify this works + float *map_x = new float[info.width * info.height]; + float *map_y = new float[info.width * info.height]; + +#ifdef OPENCV_AVAILABLE + if (impl == MappingImpl::OpenCV) + compute_maps_opencv(info, map_x, map_y, alpha); + else +#endif + compute_maps(info.width, info.height, + info.D.data(), info.P.data(), + map_x, map_y); + + std::cout << "Copying rectification map to GPU" << std::endl; + + CHECK_CUDA(cudaMemcpy2D(pxl_map_x_, pxl_map_x_step_, map_x, info.width * sizeof(float), info.width * sizeof(float), info.height, cudaMemcpyHostToDevice)); + CHECK_CUDA(cudaMemcpy2D(pxl_map_y_, pxl_map_y_step_, map_y, info.width * sizeof(float), info.width * sizeof(float), info.height, cudaMemcpyHostToDevice)); + + delete[] map_x; + delete[] map_y; +} + +NPPRectifier::~NPPRectifier() { + if (pxl_map_x_ != nullptr) { + nppiFree(pxl_map_x_); + } + + if (pxl_map_y_ != nullptr) { + nppiFree(pxl_map_y_); + } + + cudaStreamDestroy(stream_); +} + +ImagePtr NPPRectifier::rectify(const Image &msg) { + ImagePtr result = std::make_shared(); + result->header = msg.header; + result->height = msg.height; + result->width = msg.width; + result->encoding = msg.encoding; + result->is_bigendian = msg.is_bigendian; + result->step = msg.step; + + result->data.resize(msg.data.size()); + + NppiRect src_roi = {0, 0, (int)msg.width, (int)msg.height}; + NppiSize src_size = {(int)msg.width, 
(int)msg.height}; + NppiSize dst_roi_size = {(int)msg.width, (int)msg.height}; + int src_step; + int dst_step; + + Npp8u *src = nppiMalloc_8u_C3(msg.width, msg.height, &src_step); + Npp8u *dst = nppiMalloc_8u_C3(msg.width, msg.height, &dst_step); + + CHECK_CUDA(cudaMemcpy2D(src, src_step, msg.data.data(), msg.step, msg.width * 3, msg.height, cudaMemcpyHostToDevice)); + + NppiInterpolationMode interpolation = NPPI_INTER_LINEAR; + + CHECK_NPP(nppiRemap_8u_C3R( + src, src_size, src_step, src_roi, + pxl_map_x_, pxl_map_x_step_, pxl_map_y_, pxl_map_y_step_, + dst, dst_step, dst_roi_size, interpolation)); + + CHECK_CUDA(cudaMemcpy2D(result->data.data(), result->step, dst, dst_step, msg.width * 3, msg.height, cudaMemcpyDeviceToHost)); + + // cv::Mat image(msg.height, msg.width, CV_8UC3, result->data.data(), result->step); + // cv::cvtColor(image, image, cv::COLOR_RGB2BGR); + // // save with timestamp + // std::string filename = "rectified_" + std::to_string(msg.header.stamp.sec) + "_" + std::to_string(msg.header.stamp.nanosec) + ".png"; + // imwrite(filename, image); + + nppiFree(src); + nppiFree(dst); + + return result; +} +#endif + +#ifdef OPENCV_AVAILABLE +OpenCVRectifierCPU::OpenCVRectifierCPU(const CameraInfo &info, MappingImpl impl, double alpha) { + map_x_ = cv::Mat(info.height, info.width, CV_32FC1); + map_y_ = cv::Mat(info.height, info.width, CV_32FC1); + + if (impl == MappingImpl::OpenCV) + compute_maps_opencv(info, map_x_.ptr(), map_y_.ptr(), alpha); + else + compute_maps(info.width, info.height, + info.D.data(), info.P.data(), + map_x_.ptr(), map_y_.ptr()); +} + +OpenCVRectifierCPU::~OpenCVRectifierCPU() {} + +ImagePtr OpenCVRectifierCPU::rectify(const Image &msg) { + ImagePtr result = std::make_shared(); + result->header = msg.header; + result->height = msg.height; + result->width = msg.width; + result->encoding = msg.encoding; + result->is_bigendian = msg.is_bigendian; + result->step = msg.step; + + result->data.resize(msg.data.size()); + + cv::Mat 
src(msg.height, msg.width, CV_8UC3, (void *)msg.data.data()); + cv::Mat dst(msg.height, msg.width, CV_8UC3, (void *)result->data.data()); + + cv::remap(src, dst, map_x_, map_y_, cv::INTER_LINEAR); + + return result; +} +#endif + +#ifdef OPENCV_CUDA_AVAILABLE +OpenCVRectifierGPU::OpenCVRectifierGPU(const CameraInfo &info, MappingImpl impl, double alpha) { + cv::Mat map_x(info.height, info.width, CV_32FC1); + cv::Mat map_y(info.height, info.width, CV_32FC1); + + if (impl == MappingImpl::OpenCV) + compute_maps_opencv(info, map_x.ptr(), map_y.ptr(), alpha); + else + compute_maps(info.width, info.height, + info.D.data(), info.P.data(), + map_x.ptr(), map_y.ptr()); + + map_x_ = cv::cuda::GpuMat(map_x); + map_y_ = cv::cuda::GpuMat(map_y); +} + +OpenCVRectifierGPU::~OpenCVRectifierGPU() {} + +ImagePtr OpenCVRectifierGPU::rectify(const Image &msg) { + ImagePtr result = std::make_shared(); + result->header = msg.header; + result->height = msg.height; + result->width = msg.width; + result->encoding = msg.encoding; + result->is_bigendian = msg.is_bigendian; + result->step = msg.step; + + result->data.resize(msg.data.size()); + + cv::Mat src(msg.height, msg.width, CV_8UC3, (void *)msg.data.data()); + cv::cuda::GpuMat d_src = cv::cuda::GpuMat(src); + cv::cuda::GpuMat d_dst = cv::cuda::GpuMat(cv::Size(msg.width, msg.height), src.type()); + + cv::cuda::remap(d_src, d_dst, map_x_, map_y_, cv::INTER_LINEAR, cv::BORDER_CONSTANT); + + // copy back to result + cv::Mat dst(msg.height, msg.width, CV_8UC3, (void *)result->data.data()); + d_dst.download(dst); + + // cv::Mat image(msg.height, msg.width, CV_8UC3, result->data.data(), result->step); + // cv::cvtColor(image, image, cv::COLOR_RGB2BGR); + // // save with timestamp + // std::string filename = "rectified_" + std::to_string(msg.header.stamp.sec) + "_" + std::to_string(msg.header.stamp.nanosec) + ".png"; + // imwrite(filename, image); + + return result; +} +#endif + +} // namespace Rectifier \ No newline at end of file diff --git 
a/gpu_imgproc/src/gpu_imgproc.cpp b/gpu_imgproc/src/gpu_imgproc.cpp new file mode 100644 index 0000000..333bbd7 --- /dev/null +++ b/gpu_imgproc/src/gpu_imgproc.cpp @@ -0,0 +1,228 @@ +#include "gpu_imgproc/gpu_imgproc.hpp" + +#include + +namespace gpu_imgproc { + +GpuImgProc::GpuImgProc(ros::NodeHandle &nh, ros::NodeHandle &pnh) + : node_(nh), private_node_(pnh), rectifier_active_(false) { + ROS_INFO("Initializing node gpu_imgproc"); + + // std::string image_raw_topic = this->declare_parameter("image_raw_topic", "/camera/image_raw"); + // std::string camera_info_topic = this->declare_parameter("camera_info_topic", "/camera/camera_info"); + // std::string image_rect_topic = this->declare_parameter("image_rect_topic", "/camera/image_rect"); + std::string rect_impl; + bool use_opencv_map_init; + + private_node_.getParam("rect_impl", rect_impl); + private_node_.getParam("use_opencv_map_init", use_opencv_map_init); + private_node_.getParam("alpha", alpha_); + + // RCLCPP_INFO(this->get_logger(), "Subscribing to %s", image_raw_topic.c_str()); + // RCLCPP_INFO(this->get_logger(), "Subscribing to %s", camera_info_topic.c_str()); + // RCLCPP_INFO(this->get_logger(), "Publishing to %s", image_rect_topic.c_str()); + + std::string available_impls = ""; +#ifdef NPP_AVAILABLE + available_impls += "npp"; +#endif +#ifdef OPENCV_AVAILABLE + if (available_impls != "") { + available_impls += ", "; + } + available_impls += "opencv_cpu"; +#endif +#ifdef OPENCV_CUDA_AVAILABLE + if (available_impls != "") { + available_impls += ", "; + } + available_impls += "opencv_gpu"; +#endif + + if (available_impls == "") { + ROS_ERROR( + "No rectification implementations available. 
Please make sure that at least one of the following libraries is installed:\n" + "- OpenCV\n" + "- OpenCV CUDA\n" + "- NVIDIA Performance Primitives\n"); + return; + } + + if (0) { +#ifdef NPP_AVAILABLE + } else if (rect_impl == "npp") { + ROS_INFO("Using NPP implementation for rectification"); + rectifier_impl_ = Rectifier::Implementation::NPP; +#endif +#ifdef OPENCV_AVAILABLE + } else if (rect_impl == "opencv_cpu") { + ROS_INFO("Using OpenCV CPU implementation for rectification"); + rectifier_impl_ = Rectifier::Implementation::OpenCV_CPU; +#endif +#ifdef OPENCV_CUDA_AVAILABLE + } else if (rect_impl == "opencv_gpu") { + ROS_INFO("Using GPU OpenCV implementation for rectification"); + rectifier_impl_ = Rectifier::Implementation::OpenCV_GPU; +#endif + } else { + ROS_INFO("Available implementations: %s", available_impls.c_str()); + return; + } + + if (use_opencv_map_init) { + ROS_INFO("Using OpenCV map initialization"); + mapping_impl_ = Rectifier::MappingImpl::OpenCV; + } else { + ROS_INFO("Using Non-OpenCV map initialization"); + mapping_impl_ = Rectifier::MappingImpl::NPP; + } + +#ifdef JETSON_AVAILABLE + raw_compressor_ = std::make_shared("raw_compressor"); + rect_compressor_ = std::make_shared("rect_compressor"); +#elif NVJPEG_AVAILABLE + raw_compressor_ = std::make_shared(); + rect_compressor_ = std::make_shared(); +#elif TURBOJPEG_AVAILABLE + raw_compressor_ = std::make_shared(); + rect_compressor_ = std::make_shared(); +#else + ROS_ERROR("No JPEG compressor available"); + return; +#endif + + rectified_pub_ = node_.advertise("image_rect", 10); + compressed_pub_ = node_.advertise("image_raw/compressed", 10); + rect_compressed_pub_ = node_.advertise("image_rect/compressed", 10); + + img_sub_ = node_.subscribe("image_raw", 10, &GpuImgProc::imageCallback, this); + + info_sub_ = node_.subscribe( + "camera_info", 10, &GpuImgProc::cameraInfoCallback, this + ); +} + +GpuImgProc::~GpuImgProc() { + ROS_INFO("Shutting down node gpu_imgproc"); +} + +void 
GpuImgProc::imageCallback(const Image::ConstPtr &msg) { + // RCLCPP_INFO(this->get_logger(), "Received image"); + + std::future rectified_msg; + if (rectifier_active_) { + // std::cout << "Rectifying image" << std::endl; + rectified_msg = + std::async(std::launch::async, [this, msg]() { + ImagePtr rect_img; + CompressedImagePtr rect_comp_img; + if (false) { +#ifdef NPP_AVAILABLE + } else if (rectifier_impl_ == Rectifier::Implementation::NPP) { + rect_img = npp_rectifier_->rectify(*msg); + rect_comp_img = rect_compressor_->compress(*rect_img, 60); +#endif +#ifdef OPENCV_AVAILABLE + } else if (rectifier_impl_ == Rectifier::Implementation::OpenCV_CPU) { + rect_img = cv_cpu_rectifier_->rectify(*msg); + rect_comp_img = rect_compressor_->compress(*rect_img, 60); +#endif +#ifdef OPENCV_CUDA_AVAILABLE + } else if (rectifier_impl_ == Rectifier::Implementation::OpenCV_GPU) { + rect_img = cv_gpu_rectifier_->rectify(*msg); + rect_comp_img = rect_compressor_->compress(*rect_img, 60); +#endif + } else { + ROS_ERROR("Invalid implementation"); + return; + } + + // TODO: Do not copy on publish + rectified_pub_.publish(*rect_img); + rect_compressed_pub_.publish(*rect_comp_img); + }); + } else { + std::cout << "Not rectifying image" << std::endl; + } + + std::future compressed_msg = + std::async(std::launch::async, [this, msg]() { + compressed_pub_.publish(*raw_compressor_->compress(*msg, 60)); + }); + + if (rectifier_active_) { + rectified_msg.wait(); + } + compressed_msg.wait(); +} + +void GpuImgProc::cameraInfoCallback(const CameraInfo::ConstPtr &msg) { + ROS_INFO("Received camera info"); + + if (msg->D.size() == 0 || msg->P.size() == 0) { + ROS_ERROR("Camera info message does not contain distortion or projection matrix"); + return; + } + + switch(rectifier_impl_) { + case Rectifier::Implementation::NPP: +#if NPP_AVAILABLE + ROS_INFO("Initializing NPP rectifier"); + npp_rectifier_ = std::make_shared(*msg, mapping_impl_, alpha_); + if (npp_rectifier_) { + ROS_INFO("Initialized NPP 
rectifier"); + rectifier_active_ = true; + } else { + ROS_ERROR("Failed to initialize NPP rectifier"); + return; + } + break; +#else + ROS_ERROR("NPP not enabled"); + return; +#endif + case Rectifier::Implementation::OpenCV_CPU: +#ifdef OPENCV_AVAILABLE + ROS_INFO("Initializing OpenCV CPU rectifier"); + cv_cpu_rectifier_ = std::make_shared(*msg, mapping_impl_, alpha_); + if (cv_cpu_rectifier_) { + ROS_INFO("Initialized OpenCV CPU rectifier"); + rectifier_active_ = true; + } else { + ROS_ERROR("Failed to initialize OpenCV rectifier"); + return; + } + break; +#else + ROS_ERROR("OpenCV not enabled"); + return; +#endif + case Rectifier::Implementation::OpenCV_GPU: +#ifdef OPENCV_CUDA_AVAILABLE + ROS_INFO("Initializing OpenCV GPU rectifier"); + cv_gpu_rectifier_ = std::make_shared(*msg, mapping_impl_, alpha_); + if (cv_gpu_rectifier_) { + ROS_INFO("Initialized OpenCV GPU rectifier"); + rectifier_active_ = true; + } else { + ROS_ERROR("Failed to initialize OpenCV rectifier"); + return; + } + break; +#else + ROS_ERROR("OpenCV CUDA not enabled"); + return; +#endif + default: + ROS_ERROR("Invalid rectifier implementation"); + return; + } + + if (rectifier_active_) { + // unsubscribe + // info_sub_.reset(); + // TODO: ROS1 equivalent? 
+ info_sub_.shutdown(); + } +} +} // namespace gpu_imgproc \ No newline at end of file diff --git a/gpu_imgproc/src/gpu_imgproc_node.cpp b/gpu_imgproc/src/gpu_imgproc_node.cpp new file mode 100644 index 0000000..6b90429 --- /dev/null +++ b/gpu_imgproc/src/gpu_imgproc_node.cpp @@ -0,0 +1,17 @@ +#include +#include "gpu_imgproc/gpu_imgproc.hpp" + + +int main(int argc, char ** argv) +{ + ros::init(argc, argv, "gpu_imgproc"); + + ros::NodeHandle nh; + ros::NodeHandle nh_priv("~"); + + gpu_imgproc::GpuImgProc n(nh, nh_priv); + + ros::spin(); + + return 0; +} \ No newline at end of file diff --git a/package.xml b/package.xml deleted file mode 100644 index fee9e38..0000000 --- a/package.xml +++ /dev/null @@ -1,30 +0,0 @@ - - - - v4l2_camera - 0.5.0 - A ROS 2 camera driver using Video4Linux2 - Sander G. van Dijk - Apache License 2.0 - - Sander G. van Dijk - - ament_cmake_ros - - rclcpp - rclcpp_components - - rclcpp - rclcpp_components - - sensor_msgs - image_transport - camera_info_manager - - ament_lint_auto - ament_lint_common - - - ament_cmake - - diff --git a/src/v4l2_camera_compose_test.cpp b/src/v4l2_camera_compose_test.cpp deleted file mode 100644 index 5860096..0000000 --- a/src/v4l2_camera_compose_test.cpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2019 Bold Hearts -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "v4l2_camera/v4l2_camera.hpp" - -#include - -class ComposeTest : public rclcpp::Node -{ -public: - explicit ComposeTest(rclcpp::NodeOptions const & options) - : rclcpp::Node{"compose_test", options} - { - img_sub_ = - create_subscription( - "/image_raw", - 10, - [this](sensor_msgs::msg::Image::UniquePtr img) { - std::stringstream ss; - ss << "Image message address [RECEIVE]:\t" << img.get(); - RCLCPP_DEBUG(get_logger(), "%s", ss.str().c_str()); - }); - } - -private: - rclcpp::Subscription::SharedPtr img_sub_; -}; - -int main(int argc, char ** argv) -{ - rclcpp::init(argc, argv); - - rclcpp::executors::MultiThreadedExecutor exec{}; - - auto intra_comms_options = rclcpp::NodeOptions{}.use_intra_process_comms(true); - auto camera_node = std::make_shared(intra_comms_options); - auto test_node = std::make_shared(intra_comms_options); - - exec.add_node(camera_node); - exec.add_node(test_node); - - exec.spin(); - - rclcpp::shutdown(); - camera_node = nullptr; - test_node = nullptr; - - return 0; -} diff --git a/CHANGELOG.rst b/v4l2_camera/CHANGELOG.rst similarity index 100% rename from CHANGELOG.rst rename to v4l2_camera/CHANGELOG.rst diff --git a/CMakeLists.txt b/v4l2_camera/CMakeLists.txt similarity index 57% rename from CMakeLists.txt rename to v4l2_camera/CMakeLists.txt index bc23bd0..ccf2fbd 100644 --- a/CMakeLists.txt +++ b/v4l2_camera/CMakeLists.txt @@ -11,13 +11,44 @@ if(NOT CMAKE_CXX_STANDARD) set(CMAKE_CXX_STANDARD 14) endif() -if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") - add_compile_options(-Wall -Wextra -Wpedantic) -endif() +# if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") +# add_compile_options(-Wall -Wextra -Wpedantic) +# endif() # find dependencies -find_package(ament_cmake_auto REQUIRED) -ament_auto_find_build_dependencies() +find_package(catkin REQUIRED COMPONENTS + camera_info_manager + image_transport + nodelet + pluginlib + roscpp + sensor_msgs +) + +catkin_package( + INCLUDE_DIRS + include 
+ CATKIN_DEPENDS + camera_info_manager + image_transport + nodelet + pluginlib + roscpp + sensor_msgs + LIBRARIES + camera_info_manager + image_transport + nodelet + pluginlib + roscpp + sensor_msgs +) + +include_directories( + include + ${catkin_INCLUDE_DIRS} +) + find_package(CUDA) if (CUDA_FOUND) @@ -25,6 +56,7 @@ if (CUDA_FOUND) cuda_add_library(v4l2_camera src/v4l2_camera.cpp src/v4l2_camera_device.cpp + src/v4l2_camera_nodelet.cpp ) target_compile_definitions(v4l2_camera PUBLIC ENABLE_CUDA @@ -39,20 +71,15 @@ if (CUDA_FOUND) else() add_library(v4l2_camera src/v4l2_camera.cpp - src/v4l2_camera_device.cpp) + src/v4l2_camera_device.cpp + src/v4l2_camera_nodelet.cpp + ) endif() -rclcpp_components_register_nodes(v4l2_camera "v4l2_camera::V4L2Camera") target_include_directories(v4l2_camera PUBLIC $ $) -ament_target_dependencies(v4l2_camera - "rclcpp" - "rclcpp_components" - "image_transport" - "camera_info_manager") - target_compile_options(v4l2_camera PRIVATE -Werror) # Causes the visibility macros to use dllexport rather than dllimport, @@ -75,40 +102,23 @@ add_executable(v4l2_camera_node src/v4l2_camera_node.cpp) target_include_directories(v4l2_camera_node PUBLIC $ $) -target_link_libraries(v4l2_camera_node v4l2_camera) +target_link_libraries(v4l2_camera_node + v4l2_camera + ${catkin_LIBRARIES} + ) target_compile_options(v4l2_camera_node PRIVATE -Werror) -add_executable(v4l2_camera_compose_test src/v4l2_camera_compose_test.cpp) -target_include_directories(v4l2_camera_compose_test PUBLIC - $ - $) -target_link_libraries(v4l2_camera_compose_test v4l2_camera) -target_compile_options(v4l2_camera_compose_test PRIVATE -Werror) +# add_executable(v4l2_camera_compose_test src/v4l2_camera_compose_test.cpp) +# target_include_directories(v4l2_camera_compose_test PUBLIC +# $ +# $) +# target_link_libraries(v4l2_camera_compose_test v4l2_camera) +# target_compile_options(v4l2_camera_compose_test PRIVATE -Werror) -install(TARGETS v4l2_camera_node v4l2_camera_compose_test +# 
install(TARGETS v4l2_camera_node v4l2_camera_compose_test +# EXPORT export_${PROJECT_NAME} +# DESTINATION lib/${PROJECT_NAME}) + +install(TARGETS v4l2_camera_node EXPORT export_${PROJECT_NAME} DESTINATION lib/${PROJECT_NAME}) - -if(BUILD_TESTING) - find_package(ament_lint_auto REQUIRED) - set(ament_cmake_copyright_FOUND TRUE) - ament_lint_auto_find_test_dependencies() -endif() - -ament_export_include_directories( - include -) -ament_export_targets( - export_${PROJECT_NAME} -) -ament_export_libraries( - v4l2_camera -) - -ament_export_dependencies( - "camera_info_manager" -) - -ament_auto_package(INSTALL_TO_SHARE - launch -) diff --git a/LICENSE b/v4l2_camera/LICENSE similarity index 100% rename from LICENSE rename to v4l2_camera/LICENSE diff --git a/README.md b/v4l2_camera/README.md similarity index 100% rename from README.md rename to v4l2_camera/README.md diff --git a/include/v4l2_camera/control.hpp b/v4l2_camera/include/v4l2_camera/control.hpp similarity index 100% rename from include/v4l2_camera/control.hpp rename to v4l2_camera/include/v4l2_camera/control.hpp diff --git a/include/v4l2_camera/fourcc.hpp b/v4l2_camera/include/v4l2_camera/fourcc.hpp similarity index 100% rename from include/v4l2_camera/fourcc.hpp rename to v4l2_camera/include/v4l2_camera/fourcc.hpp diff --git a/include/v4l2_camera/image_format.hpp b/v4l2_camera/include/v4l2_camera/image_format.hpp similarity index 100% rename from include/v4l2_camera/image_format.hpp rename to v4l2_camera/include/v4l2_camera/image_format.hpp diff --git a/include/v4l2_camera/pixel_format.hpp b/v4l2_camera/include/v4l2_camera/pixel_format.hpp similarity index 100% rename from include/v4l2_camera/pixel_format.hpp rename to v4l2_camera/include/v4l2_camera/pixel_format.hpp diff --git a/include/v4l2_camera/v4l2_camera.hpp b/v4l2_camera/include/v4l2_camera/v4l2_camera.hpp similarity index 67% rename from include/v4l2_camera/v4l2_camera.hpp rename to v4l2_camera/include/v4l2_camera/v4l2_camera.hpp index a1855e7..84fe8a5 
100644 --- a/include/v4l2_camera/v4l2_camera.hpp +++ b/v4l2_camera/include/v4l2_camera/v4l2_camera.hpp @@ -17,18 +17,19 @@ #include "v4l2_camera/v4l2_camera.hpp" #include "v4l2_camera/v4l2_camera_device.hpp" +#include "v4l2_camera/yuv422_yuy2_image_encodings.h" -#include -#include +#include +#include #include -#include -#include +#include #include #include #include #include +#include #include "v4l2_camera/visibility_control.h" @@ -70,36 +71,33 @@ struct GPUMemoryManager } } }; - - -void cudaErrorCheck(const cudaError_t e) -{ - if (e != cudaSuccess) { - std::stringstream ss; - ss << cudaGetErrorName(e) << " : " << cudaGetErrorString(e); - throw std::runtime_error{ss.str()}; - } -} #endif -class V4L2Camera : public rclcpp::Node +class V4L2Camera { public: - explicit V4L2Camera(rclcpp::NodeOptions const & options); - + explicit V4L2Camera(ros::NodeHandle node, ros::NodeHandle private_nh); virtual ~V4L2Camera(); private: + ros::NodeHandle node; + ros::NodeHandle private_nh; + double publish_rate_; + std::string device; + bool use_v4l2_buffer_timestamps; + int timestamp_offset; + using ImageSize = std::vector; using TimePerFrame = std::vector; - + std::shared_ptr camera_; // Publisher used for intra process comm - rclcpp::Publisher::SharedPtr image_pub_; - rclcpp::Publisher::SharedPtr info_pub_; + ros::Publisher image_pub_; + ros::Publisher info_pub_; // Publisher used for inter process comm + image_transport::ImageTransport image_transport_; image_transport::CameraPublisher camera_transport_pub_; std::shared_ptr cinfo_; @@ -107,17 +105,24 @@ class V4L2Camera : public rclcpp::Node std::thread capture_thread_; std::atomic canceled_; - std::string camera_frame_id_; std::string output_encoding_; + std::string camera_info_url_; + std::string camera_frame_id_; + std::string pixel_format_; + + int image_size_width; + int image_size_height; + + int time_per_frame_first; + int time_per_frame_second; std::map control_name_to_id_; - 
rclcpp::node_interfaces::OnSetParametersCallbackHandle::SharedPtr on_set_parameters_callback_; - double publish_rate_; - rclcpp::TimerBase::SharedPtr image_pub_timer_; + ros::Timer image_pub_timer_; bool publish_next_frame_; + bool use_image_transport_; #ifdef ENABLE_CUDA // Memory region to communicate with GPU @@ -127,23 +132,24 @@ class V4L2Camera : public rclcpp::Node #endif void createParameters(); - bool handleParameter(rclcpp::Parameter const & param); - bool requestPixelFormat(std::string const & fourcc); - bool requestImageSize(ImageSize const & size); + bool requestImageSize(std::vector const & size); + bool requestTimePerFrame(TimePerFrame const & tpf); - bool requestTimePerFrame(TimePerFrame const & timePerFrame); - sensor_msgs::msg::Image::UniquePtr convert(sensor_msgs::msg::Image const & img) const; -#ifdef ENABLE_CUDA - sensor_msgs::msg::Image::UniquePtr convertOnGpu(sensor_msgs::msg::Image const & img); -#endif + void publishTimer(); + bool checkCameraInfo( - sensor_msgs::msg::Image const & img, - sensor_msgs::msg::CameraInfo const & ci); + sensor_msgs::Image const & img, + sensor_msgs::CameraInfo const & ci); +sensor_msgs::ImagePtr convert(sensor_msgs::Image& img); +#ifdef ENABLE_CUDA +sensor_msgs::ImagePtr convertOnGpu(sensor_msgs::Image& img); +#endif }; + } // namespace v4l2_camera -#endif // V4L2_CAMERA__V4L2_CAMERA_HPP_ +#endif // V4L2_CAMERA__V4L2_CAMERA_HPP_ \ No newline at end of file diff --git a/include/v4l2_camera/v4l2_camera_device.hpp b/v4l2_camera/include/v4l2_camera/v4l2_camera_device.hpp similarity index 94% rename from include/v4l2_camera/v4l2_camera_device.hpp rename to v4l2_camera/include/v4l2_camera/v4l2_camera_device.hpp index 359e226..626d58a 100644 --- a/include/v4l2_camera/v4l2_camera_device.hpp +++ b/v4l2_camera/include/v4l2_camera/v4l2_camera_device.hpp @@ -15,8 +15,8 @@ #ifndef V4L2_CAMERA__V4L2_CAMERA_DEVICE_HPP_ #define V4L2_CAMERA__V4L2_CAMERA_DEVICE_HPP_ -#include -#include +#include +#include #include #include @@ 
-27,6 +27,8 @@ #include "v4l2_camera/control.hpp" #include "v4l2_camera/image_format.hpp" #include "v4l2_camera/pixel_format.hpp" +#include "v4l2_camera/yuv422_yuy2_image_encodings.h" + namespace v4l2_camera { @@ -36,7 +38,7 @@ namespace v4l2_camera class V4l2CameraDevice { public: - explicit V4l2CameraDevice(std::string device, bool use_v4l2_buffer_timestamps, rclcpp::Duration timestamp_offset_duration); + explicit V4l2CameraDevice(std::string device, bool use_v4l2_buffer_timestamps, ros::Duration timestamp_offset_duration); bool open(); bool start(); @@ -86,7 +88,7 @@ class V4l2CameraDevice void setTSCOffset(); - sensor_msgs::msg::Image::UniquePtr capture(); + sensor_msgs::ImagePtr capture(); private: /// Image buffer @@ -100,7 +102,7 @@ class V4l2CameraDevice std::string device_; int fd_; bool use_v4l2_buffer_timestamps_; - rclcpp::Duration timestamp_offset_; + ros::Duration timestamp_offset_; uint64_t tsc_offset_; v4l2_capability capabilities_; diff --git a/include/v4l2_camera/visibility_control.h b/v4l2_camera/include/v4l2_camera/visibility_control.h similarity index 100% rename from include/v4l2_camera/visibility_control.h rename to v4l2_camera/include/v4l2_camera/visibility_control.h diff --git a/v4l2_camera/include/v4l2_camera/yuv422_yuy2_image_encodings.h b/v4l2_camera/include/v4l2_camera/yuv422_yuy2_image_encodings.h new file mode 100644 index 0000000..53437a9 --- /dev/null +++ b/v4l2_camera/include/v4l2_camera/yuv422_yuy2_image_encodings.h @@ -0,0 +1,30 @@ +// Copyright 2019 Bold Hearts +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef V4L2_CAMERA__YUV422_YUY2_IMAGE_ENCODINGS_HPP_ +#define V4L2_CAMERA__YUV422_YUY2_IMAGE_ENCODINGS_HPP_ + +#include +#include +#include + +namespace sensor_msgs +{ + namespace image_encodings + { + const std::string YUV422_YUY2="yuv422_yuy2"; + } // namespace image_encodings +} // namespace sensor_msgs + +#endif // V4L2_CAMERA__V4L2_CAMERA_HPP_ \ No newline at end of file diff --git a/v4l2_camera/launch/camera_intrinsics.yaml b/v4l2_camera/launch/camera_intrinsics.yaml new file mode 100644 index 0000000..c9a6b5d --- /dev/null +++ b/v4l2_camera/launch/camera_intrinsics.yaml @@ -0,0 +1,20 @@ +image_width: 1920 +image_height: 1280 +camera_name: camera +camera_matrix: + rows: 3 + cols: 3 + data: [297.189841, 0.000000, 363.614683, 0.000000, 309.466647, 268.003944, 0.000000, 0.000000, 1.000000] +distortion_model: plumb_bob +distortion_coefficients: + rows: 1 + cols: 5 + data: [-0.130284, 0.011396, -0.000738, -0.000092, 0.000000] +rectification_matrix: + rows: 3 + cols: 3 + data: [1.000000, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000, 0.000000, 0.000000, 1.000000] +projection_matrix: + rows: 3 + cols: 4 + data: [221.130844, 0.000000, 373.670860, 0.000000, 0.000000, 265.967377, 259.232878, 0.000000, 0.000000, 0.000000, 1.000000, 0.000000] diff --git a/launch/v4l2_camera.launch.py b/v4l2_camera/launch/v4l2_camera.launch.py similarity index 100% rename from launch/v4l2_camera.launch.py rename to v4l2_camera/launch/v4l2_camera.launch.py diff --git a/v4l2_camera/launch/v4l2_camera.launch.xml b/v4l2_camera/launch/v4l2_camera.launch.xml new file mode 100644 index 0000000..8663911 --- /dev/null +++ b/v4l2_camera/launch/v4l2_camera.launch.xml @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/v4l2_camera/package.xml b/v4l2_camera/package.xml new file mode 100644 index 0000000..01dc8fe --- /dev/null +++ 
b/v4l2_camera/package.xml @@ -0,0 +1,37 @@ + + + + v4l2_camera + 0.5.0 + A ROS 1 camera driver using Video4Linux2 + Kosuke TOKUDA + Apache License 2.0 + + Kosuke Tokuda + + catkin + + camera_info_manager + image_transport + nodelet + pluginlib + roscpp + sensor_msgs + + camera_info_manager + image_transport + nodelet + pluginlib + roscpp + sensor_msgs + + camera_info_manager + image_transport + nodelet + pluginlib + roscpp + sensor_msgs + + + + diff --git a/src/v4l2_camera.cpp b/v4l2_camera/src/v4l2_camera.cpp similarity index 59% rename from src/v4l2_camera.cpp rename to v4l2_camera/src/v4l2_camera.cpp index 190c8b9..d8f0798 100644 --- a/src/v4l2_camera.cpp +++ b/v4l2_camera/src/v4l2_camera.cpp @@ -14,8 +14,7 @@ #include "v4l2_camera/v4l2_camera.hpp" -#include -#include +#include #include #include @@ -27,7 +26,6 @@ #include "v4l2_camera/fourcc.hpp" -#include "rclcpp_components/register_node_macro.hpp" #include "v4l2_camera/v4l2_camera_device.hpp" #ifdef ENABLE_CUDA @@ -39,73 +37,66 @@ using namespace std::chrono_literals; namespace v4l2_camera { - -V4L2Camera::V4L2Camera(rclcpp::NodeOptions const & options) -: rclcpp::Node{"v4l2_camera", options}, - canceled_{false} +V4L2Camera::V4L2Camera(ros::NodeHandle node, ros::NodeHandle private_nh) + : image_transport_(private_nh), + node(node), + private_nh(private_nh), + canceled_{false} { - // Prepare publisher - // This should happen before registering on_set_parameters_callback, - // else transport plugins will fail to declare their parameters - bool use_sensor_data_qos = declare_parameter("use_sensor_data_qos", false); - publish_rate_ = declare_parameter("publish_rate", -1.0); + private_nh.getParam("publish_rate", publish_rate_); + private_nh.getParam("video_device", device); + private_nh.getParam("use_v4l2_buffer_timestamps", use_v4l2_buffer_timestamps); + private_nh.getParam("timestamp_offset", timestamp_offset); + private_nh.getParam("use_image_transport", use_image_transport_); + if(std::abs(publish_rate_) < 
std::numeric_limits::epsilon()){ - RCLCPP_WARN(get_logger(), "Invalid publish_rate = 0. Use default value -1 instead"); + ROS_WARN("Invalid publish_rate = 0. Use default value -1 instead"); publish_rate_ = -1.0; } - if(publish_rate_ > 0){ - const auto publish_period = rclcpp::Rate(publish_rate_).period(); - image_pub_timer_ = this->create_wall_timer(publish_period, [this](){this->publish_next_frame_=true;}); - publish_next_frame_ = false; - } - else{ - publish_next_frame_ = true; + // if(publish_rate_ > 0){ + // const auto publish_period = ros::Duration(publish_rate_); + // image_pub_timer_ = node.createTimer(publish_period, &V4L2Camera::publishTimer, this); + // publish_next_frame_ = false; + // } + // else{ + // publish_next_frame_ = true; + // } + if (use_image_transport_) { + camera_transport_pub_ = image_transport_.advertiseCamera("image_raw", 10); + } else { + image_pub_ = node.advertise("image_raw", 10); + info_pub_ = node.advertise("camera_info", 10); } - const auto qos = use_sensor_data_qos ? 
rclcpp::SensorDataQoS() : rclcpp::QoS(10); - camera_transport_pub_ = image_transport::create_camera_publisher(this, "image_raw", - qos.get_rmw_qos_profile()); - // Prepare camera - auto device_descriptor = rcl_interfaces::msg::ParameterDescriptor{}; - device_descriptor.description = "Path to video device"; - device_descriptor.read_only = true; - auto device = declare_parameter("video_device", "/dev/video0", device_descriptor); - - auto use_v4l2_buffer_timestamps_descriptor = rcl_interfaces::msg::ParameterDescriptor{}; - use_v4l2_buffer_timestamps_descriptor.description = "Use v4l2 buffer timestamps"; - auto use_v4l2_buffer_timestamps = declare_parameter("use_v4l2_buffer_timestamps", true, use_v4l2_buffer_timestamps_descriptor); - - auto timestamp_offset_descriptor = rcl_interfaces::msg::ParameterDescriptor{}; - timestamp_offset_descriptor.description = "Timestamp offset in nanoseconds"; - auto timestamp_offset = declare_parameter("timestamp_offset", 0, timestamp_offset_descriptor); - rclcpp::Duration timestamp_offset_duration = rclcpp::Duration::from_nanoseconds(timestamp_offset); + ros::Duration timestamp_offset_duration(0, timestamp_offset); camera_ = std::make_shared(device, use_v4l2_buffer_timestamps, timestamp_offset_duration); - + if (!camera_->open()) { return; } - cinfo_ = std::make_shared(this, camera_->getCameraName()); + auto camera_info_url_ = ""; + cinfo_ = std::make_shared(private_nh, camera_->getCameraName(), camera_info_url_); #ifdef ENABLE_CUDA src_dev_ = std::allocate_shared(allocator_); dst_dev_ = std::allocate_shared(allocator_); #endif - // Read parameters and set up callback createParameters(); // Start the camera if (!camera_->start()) { - return; + exit(1); } // Start capture thread capture_thread_ = std::thread{ [this]() -> void { - while (rclcpp::ok() && !canceled_.load()) { - RCLCPP_DEBUG(get_logger(), "Capture..."); + while (ros::ok() && !canceled_.load()) { + ROS_DEBUG("Capture..."); auto img = camera_->capture(); + if (img == nullptr) 
{ // Failed capturing image, assume it is temporarily and continue a bit later std::this_thread::sleep_for(std::chrono::milliseconds(10)); @@ -126,9 +117,9 @@ V4L2Camera::V4L2Camera(rclcpp::NodeOptions const & options) img->header.stamp = stamp; img->header.frame_id = camera_frame_id_; - auto ci = std::make_unique(cinfo_->getCameraInfo()); + auto ci = std::make_unique(cinfo_->getCameraInfo()); if (!checkCameraInfo(*img, *ci)) { - *ci = sensor_msgs::msg::CameraInfo{}; + *ci = sensor_msgs::CameraInfo{}; ci->height = img->height; ci->width = img->width; } @@ -137,54 +128,45 @@ V4L2Camera::V4L2Camera(rclcpp::NodeOptions const & options) ci->header.frame_id = camera_frame_id_; publish_next_frame_ = publish_rate_ < 0; - camera_transport_pub_.publish(*img, *ci); + if (use_image_transport_) { + camera_transport_pub_.publish(*img, *ci); + } else { + image_pub_.publish(*img); + info_pub_.publish(*ci); + } } } }; } -V4L2Camera::~V4L2Camera() -{ - canceled_.store(true); - if (capture_thread_.joinable()) { - capture_thread_.join(); - } -} - void V4L2Camera::createParameters() { // Node parameters - auto output_encoding_description = rcl_interfaces::msg::ParameterDescriptor{}; - output_encoding_description.description = "ROS image encoding to use for the output image"; - output_encoding_description.additional_constraints = - "Currently supported: 'rgb8', 'yuv422' or 'mono'"; - output_encoding_ = declare_parameter( - "output_encoding", std::string{"rgb8"}, - output_encoding_description); + ROS_INFO("Currently supported: 'rgb8', 'yuv422' or 'mono'"); + private_nh.param("output_encoding", + output_encoding_, + std::string{"rgb8"}); // Camera info parameters - auto camera_info_url = declare_parameter("camera_info_url", ""); - if (get_parameter("camera_info_url", camera_info_url)) { - if (cinfo_->validateURL(camera_info_url)) { - cinfo_->loadCameraInfo(camera_info_url); + private_nh.param("camera_info_url", + camera_info_url_, + ""); + + + if (private_nh.getParam("camera_info_url", 
camera_info_url_)) { + if (cinfo_->validateURL(camera_info_url_)) { + cinfo_->loadCameraInfo(camera_info_url_); } else { - RCLCPP_WARN(get_logger(), "Invalid camera info URL: %s", camera_info_url.c_str()); + ROS_WARN("Invalid camera info URL: %s", camera_info_url_.c_str()); } } - - auto camera_frame_id_description = rcl_interfaces::msg::ParameterDescriptor{}; - camera_frame_id_description.description = "Frame id inserted in published image"; - camera_frame_id_description.read_only = true; - camera_frame_id_ = declare_parameter( - "camera_frame_id", "camera", - camera_frame_id_description); - + ROS_INFO("Frame id inserted in published image"); + private_nh.param("camera_frame_id", + camera_frame_id_, + "camera"); // Format parameters // Pixel format auto const & image_formats = camera_->getImageFormats(); - auto pixel_format_descriptor = rcl_interfaces::msg::ParameterDescriptor{}; - pixel_format_descriptor.name = "pixel_format"; - pixel_format_descriptor.description = "Pixel format (FourCC)"; auto pixel_format_constraints = std::ostringstream{}; for (auto const & format : image_formats) { pixel_format_constraints << @@ -193,16 +175,17 @@ void V4L2Camera::createParameters() } auto str = pixel_format_constraints.str(); str = str.substr(0, str.size() - 2); - pixel_format_descriptor.additional_constraints = str; - auto pixel_format = - declare_parameter("pixel_format", "YUYV", pixel_format_descriptor); - requestPixelFormat(pixel_format); + // pixel_format_descriptor.additional_constraints = str; + + ROS_INFO("Pixel format (FourCC)"); + private_nh.param("pixel_format", + pixel_format_, + "YUYV"); + + requestPixelFormat(pixel_format_); // Image size auto image_size = ImageSize{}; - auto image_size_descriptor = rcl_interfaces::msg::ParameterDescriptor{}; - image_size_descriptor.name = "image_size"; - image_size_descriptor.description = "Image width & height"; // List available image sizes per format auto const & image_sizes = camera_->getImageSizes(); @@ -215,8 +198,7 @@ 
void V4L2Camera::createParameters() auto iter = image_sizes.find(format.pixelFormat); if (iter == image_sizes.end()) { - RCLCPP_ERROR_STREAM( - get_logger(), + ROS_ERROR_STREAM( "No sizes available to create parameter description for format: " << format.description); continue; } @@ -241,21 +223,21 @@ void V4L2Camera::createParameters() } } - image_size_descriptor.additional_constraints = image_sizes_constraints.str(); - image_size = declare_parameter("image_size", {640, 480}, image_size_descriptor); + ROS_INFO("Image width & height"); + private_nh.param("image_size",image_size_width, 640); + private_nh.param("image_size",image_size_height, 480); + image_size = {image_size_width, image_size_height}; requestImageSize(image_size); // Time per frame if (camera_->timePerFrameSupported()) { auto tpf = camera_->getCurrentTimePerFrame(); - auto time_per_frame_descriptor = rcl_interfaces::msg::ParameterDescriptor{}; - time_per_frame_descriptor.name = "time_per_frame"; - time_per_frame_descriptor.description = "Desired period between successive frames in seconds"; - time_per_frame_descriptor.additional_constraints = - "Length 2 array, with numerator and denominator"; - auto time_per_frame = declare_parameter( - "time_per_frame", {tpf.first, tpf.second}, - time_per_frame_descriptor); + + ROS_INFO("Desired period between successive frames in seconds"); + private_nh.param("time_per_frame_first", time_per_frame_first, tpf.first); + private_nh.param("time_per_frame_second", time_per_frame_second, tpf.second); + TimePerFrame time_per_frame = {static_cast(tpf.first), static_cast(tpf.second)}; + requestTimePerFrame(time_per_frame); } @@ -272,28 +254,28 @@ void V4L2Camera::createParameters() for (auto const & c : camera_->getControls()) { auto name = toParamName(c.name); - auto descriptor = rcl_interfaces::msg::ParameterDescriptor{}; - descriptor.name = name; - descriptor.description = c.name; switch (c.type) { case ControlType::INT: { auto current_value = 
camera_->getControlValue(c.id); - auto range = rcl_interfaces::msg::IntegerRange{}; - range.from_value = c.minimum; - range.to_value = c.maximum; - descriptor.integer_range.push_back(range); if (current_value < c.minimum || c.maximum < current_value) { current_value = c.defaultValue; } - auto value = declare_parameter(name, current_value, descriptor); + ROS_INFO_STREAM(c.name); + int32_t value; + private_nh.param(name, + value, + current_value); camera_->setControlValue(c.id, value); break; } case ControlType::BOOL: { - auto value = - declare_parameter(name, camera_->getControlValue(c.id) != 0, descriptor); + ROS_INFO_STREAM(c.name); + bool value; + private_nh.param(name, + value, + camera_->getControlValue(c.id) != 0); camera_->setControlValue(c.id, value); break; } @@ -304,14 +286,16 @@ void V4L2Camera::createParameters() sstr << o.first << " - " << o.second << ", "; } auto str = sstr.str(); - descriptor.additional_constraints = str.substr(0, str.size() - 2); - auto value = declare_parameter(name, camera_->getControlValue(c.id), descriptor); + ROS_INFO_STREAM(c.name); + int32_t value; + private_nh.param(name, + value, + camera_->getControlValue(c.id)); camera_->setControlValue(c.id, value); break; } default: - RCLCPP_WARN( - get_logger(), + ROS_WARN( "Control type not currently supported: %s, for control: %s", std::to_string(unsigned(c.type)).c_str(), c.name.c_str()); @@ -321,67 +305,21 @@ void V4L2Camera::createParameters() } // Register callback for parameter value setting - on_set_parameters_callback_ = add_on_set_parameters_callback( - [this](std::vector parameters) -> rcl_interfaces::msg::SetParametersResult { - auto result = rcl_interfaces::msg::SetParametersResult(); - result.successful = true; - for (auto const & p : parameters) { - result.successful &= handleParameter(p); - } - return result; - }); -} - -bool V4L2Camera::handleParameter(rclcpp::Parameter const & param) -{ - auto name = std::string{param.get_name()}; - if (control_name_to_id_.find(name) 
!= control_name_to_id_.end()) { - switch (param.get_type()) { - case rclcpp::ParameterType::PARAMETER_BOOL: - return camera_->setControlValue(control_name_to_id_[name], param.as_bool()); - case rclcpp::ParameterType::PARAMETER_INTEGER: - return camera_->setControlValue(control_name_to_id_[name], param.as_int()); - default: - RCLCPP_WARN( - get_logger(), - "Control parameter type not currently supported: %s, for parameter: %s", - std::to_string(unsigned(param.get_type())).c_str(), param.get_name().c_str()); - } - } else if (param.get_name() == "output_encoding") { - output_encoding_ = param.as_string(); - return true; - } else if (param.get_name() == "pixel_format") { - camera_->stop(); - auto success = requestPixelFormat(param.as_string()); - camera_->start(); - return success; - } else if (param.get_name() == "image_size") { - camera_->stop(); - auto success = requestImageSize(param.as_integer_array()); - camera_->start(); - return success; - } else if (param.get_name() == "time_per_frame") { - camera_->stop(); - auto success = requestTimePerFrame(param.as_integer_array()); - camera_->start(); - return success; - } else if (param.get_name() == "camera_info_url") { - auto camera_info_url = param.as_string(); - if (cinfo_->validateURL(camera_info_url)) { - return cinfo_->loadCameraInfo(camera_info_url); - } else { - RCLCPP_WARN(get_logger(), "Invalid camera info URL: %s", camera_info_url.c_str()); - return false; - } - } - - return false; + // on_set_parameters_callback_ = add_on_set_parameters_callback( + // [this](std::vector parameters) -> rcl_interfaces::msg::SetParametersResult { + // auto result = rcl_interfaces::msg::SetParametersResult(); + // result.successful = true; + // for (auto const & p : parameters) { + // result.successful &= handleParameter(p); + // } + // return result; + // }); } bool V4L2Camera::requestPixelFormat(std::string const & fourcc) { if (fourcc.size() != 4) { - RCLCPP_ERROR(get_logger(), "Invalid pixel format size: must be a 4 
character code (FOURCC)."); + ROS_ERROR("Invalid pixel format size: must be a 4 character code (FOURCC)."); return false; } @@ -400,8 +338,7 @@ bool V4L2Camera::requestPixelFormat(std::string const & fourcc) bool V4L2Camera::requestImageSize(std::vector const & size) { if (size.size() != 2) { - RCLCPP_WARN( - get_logger(), "Invalid image size; expected dimensions: 2, actual: %lu", size.size()); + ROS_WARN("Invalid image size; expected dimensions: 2, actual: %lu", size.size()); return false; } @@ -419,8 +356,8 @@ bool V4L2Camera::requestImageSize(std::vector const & size) bool V4L2Camera::requestTimePerFrame(TimePerFrame const & tpf) { if (tpf.size() != 2) { - RCLCPP_WARN( - get_logger(), "Invalid time per frame; expected dimensions: 2, actual: %lu", tpf.size()); + ROS_WARN( + "Invalid time per frame; expected dimensions: 2, actual: %lu", tpf.size()); return false; } @@ -455,6 +392,7 @@ static unsigned char CLIPVALUE(int val) * [ B ] [ 1.0 2.041 0.002 ] [ V ] * */ + static void YUV2RGB( const unsigned char y, const unsigned char u, const unsigned char v, unsigned char * r, unsigned char * g, unsigned char * b) @@ -503,17 +441,14 @@ static void yuyv2rgb(unsigned char const * YUV, unsigned char * RGB, int NumPixe } } -sensor_msgs::msg::Image::UniquePtr V4L2Camera::convert(sensor_msgs::msg::Image const & img) const +sensor_msgs::ImagePtr V4L2Camera::convert(sensor_msgs::Image& img) { - RCLCPP_DEBUG( - get_logger(), - "Converting: %s -> %s", img.encoding.c_str(), output_encoding_.c_str()); - // TODO(sander): temporary until cv_bridge and image_proc are available in ROS 2 - if (img.encoding == sensor_msgs::image_encodings::YUV422_YUY2 && - output_encoding_ == sensor_msgs::image_encodings::RGB8) + if ((img.encoding == sensor_msgs::image_encodings::YUV422 + || img.encoding == sensor_msgs::image_encodings::YUV422_YUY2) + && output_encoding_ == sensor_msgs::image_encodings::RGB8) { - auto outImg = std::make_unique(); + auto outImg = boost::make_shared(); outImg->width = 
img.width; outImg->height = img.height; outImg->step = img.width * 3; @@ -526,33 +461,33 @@ sensor_msgs::msg::Image::UniquePtr V4L2Camera::convert(sensor_msgs::msg::Image c } return outImg; } else { - RCLCPP_WARN_ONCE( - get_logger(), + ROS_WARN_ONCE( "Conversion not supported yet: %s -> %s", img.encoding.c_str(), output_encoding_.c_str()); return nullptr; } } -bool V4L2Camera::checkCameraInfo( - sensor_msgs::msg::Image const & img, - sensor_msgs::msg::CameraInfo const & ci) +#ifdef ENABLE_CUDA +void cudaErrorCheck(const cudaError_t e) { - return ci.width == img.width && ci.height == img.height; + if (e != cudaSuccess) { + std::stringstream ss; + ss << cudaGetErrorName(e) << " : " << cudaGetErrorString(e); + throw std::runtime_error{ss.str()}; + } } -#ifdef ENABLE_CUDA -sensor_msgs::msg::Image::UniquePtr V4L2Camera::convertOnGpu(sensor_msgs::msg::Image const & img) +sensor_msgs::ImagePtr V4L2Camera::convertOnGpu(sensor_msgs::Image& img) { if ((img.encoding != sensor_msgs::image_encodings::YUV422 && - img.encoding != sensor_msgs::image_encodings::YUV422_YUY2) || + img.encoding != sensor_msgs::image_encodings::YUV422_YUY2) || output_encoding_ != sensor_msgs::image_encodings::RGB8) { - RCLCPP_WARN_ONCE( - get_logger(), + ROS_WARN_ONCE( "Conversion not supported yet: %s -> %s", img.encoding.c_str(), output_encoding_.c_str()); return nullptr; } - auto outImg = std::make_unique(); + auto outImg = boost::make_shared(); outImg->width = img.width; outImg->height = img.height; outImg->step = img.width * 3; @@ -586,8 +521,9 @@ sensor_msgs::msg::Image::UniquePtr V4L2Camera::convertOnGpu(sensor_msgs::msg::Im dst_dev_->step_bytes, roi); } + if (res != NPP_SUCCESS) { - throw std::runtime_error{"NPPI operation failed"}; + ROS_ERROR("NPP Error: %d", res); } cudaErrorCheck(cudaMemcpy2DAsync(static_cast(outImg->data.data()), @@ -603,6 +539,24 @@ sensor_msgs::msg::Image::UniquePtr V4L2Camera::convertOnGpu(sensor_msgs::msg::Im return outImg; } #endif -} // namespace v4l2_camera 
-RCLCPP_COMPONENTS_REGISTER_NODE(v4l2_camera::V4L2Camera) +void V4L2Camera::publishTimer() +{ + this->publish_next_frame_=true; +} + +bool V4L2Camera::checkCameraInfo( + sensor_msgs::Image const & img, + sensor_msgs::CameraInfo const & ci) +{ + return ci.width == img.width && ci.height == img.height; +} + +V4L2Camera::~V4L2Camera() +{ + canceled_.store(true); + if (capture_thread_.joinable()) { + capture_thread_.join(); + } +} +} // namespace v4l2_camera diff --git a/v4l2_camera/src/v4l2_camera_compose_test.cpp b/v4l2_camera/src/v4l2_camera_compose_test.cpp new file mode 100644 index 0000000..61b7b21 --- /dev/null +++ b/v4l2_camera/src/v4l2_camera_compose_test.cpp @@ -0,0 +1,60 @@ +// Copyright 2019 Bold Hearts +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// #include "v4l2_camera/v4l2_camera.hpp" + +// #include + +// class ComposeTest : public rclcpp::Node +// { +// public: +// explicit ComposeTest(rclcpp::NodeOptions const & options) +// : rclcpp::Node{"compose_test", options} +// { +// img_sub_ = +// create_subscription( +// "/image_raw", +// 10, +// [this](sensor_msgs::msg::Image::UniquePtr img) { +// std::stringstream ss; +// ss << "Image message address [RECEIVE]:\t" << img.get(); +// RCLCPP_DEBUG(get_logger(), "%s", ss.str().c_str()); +// }); +// } + +// private: +// rclcpp::Subscription::SharedPtr img_sub_; +// }; + +// int main(int argc, char ** argv) +// { +// rclcpp::init(argc, argv); + +// rclcpp::executors::MultiThreadedExecutor exec{}; + +// auto intra_comms_options = rclcpp::NodeOptions{}.use_intra_process_comms(true); +// auto camera_node = std::make_shared(intra_comms_options); +// auto test_node = std::make_shared(intra_comms_options); + +// exec.add_node(camera_node); +// exec.add_node(test_node); + +// exec.spin(); + +// rclcpp::shutdown(); +// camera_node = nullptr; +// test_node = nullptr; + +// return 0; +// } diff --git a/src/v4l2_camera_device.cpp b/v4l2_camera/src/v4l2_camera_device.cpp similarity index 77% rename from src/v4l2_camera_device.cpp rename to v4l2_camera/src/v4l2_camera_device.cpp index 35414fe..39189bb 100644 --- a/src/v4l2_camera_device.cpp +++ b/v4l2_camera/src/v4l2_camera_device.cpp @@ -14,8 +14,8 @@ #include "v4l2_camera/v4l2_camera_device.hpp" -#include -#include +#include +#include #include #include @@ -32,10 +32,10 @@ #include "v4l2_camera/fourcc.hpp" using v4l2_camera::V4l2CameraDevice; -using sensor_msgs::msg::Image; +using sensor_msgs::Image; -V4l2CameraDevice::V4l2CameraDevice(std::string device, bool use_v4l2_buffer_timestamps, rclcpp::Duration timestamp_offset_duration) -: device_{std::move(device)}, use_v4l2_buffer_timestamps_{use_v4l2_buffer_timestamps}, timestamp_offset_{timestamp_offset_duration} +V4l2CameraDevice::V4l2CameraDevice(std::string device, bool 
use_v4l2_buffer_timestamps, ros::Duration timestamp_offset_duration) +: device_{device}, use_v4l2_buffer_timestamps_{use_v4l2_buffer_timestamps}, timestamp_offset_{timestamp_offset_duration} { } @@ -43,13 +43,13 @@ bool V4l2CameraDevice::open() { // Check if TSC offset applies setTSCOffset(); - + ROS_INFO_STREAM("device" << device_); fd_ = ::open(device_.c_str(), O_RDWR); if (fd_ < 0) { auto msg = std::ostringstream{}; msg << "Failed opening device " << device_ << ": " << strerror(errno) << " (" << errno << ")"; - RCLCPP_ERROR(rclcpp::get_logger("v4l2_camera"), "%s", msg.str().c_str()); + ROS_ERROR("%s", msg.str().c_str()); return false; } @@ -59,28 +59,14 @@ bool V4l2CameraDevice::open() auto canRead = capabilities_.capabilities & V4L2_CAP_READWRITE; auto canStream = capabilities_.capabilities & V4L2_CAP_STREAMING; - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), - "Driver: %s", capabilities_.driver); - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), - "Version: %s", std::to_string(capabilities_.version).c_str()); - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), - "Device: %s", capabilities_.card); - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), - "Location: %s", capabilities_.bus_info); - - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), - "Capabilities:"); - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), - " Read/write: %s", (canRead ? "YES" : "NO")); - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), - " Streaming: %s", (canStream ? "YES" : "NO")); + ROS_INFO("Driver: %s", capabilities_.driver); + ROS_INFO("Version: %s", std::to_string(capabilities_.version).c_str()); + ROS_INFO("Device: %s", capabilities_.card); + ROS_INFO("Location: %s", capabilities_.bus_info); + + ROS_INFO("Capabilities:"); + ROS_INFO(" Read/write: %s", (canRead ? "YES" : "NO")); + ROS_INFO(" Streaming: %s", (canStream ? 
"YES" : "NO")); // Get current data (pixel) format auto formatReq = v4l2_format{}; @@ -88,8 +74,7 @@ bool V4l2CameraDevice::open() ioctl(fd_, VIDIOC_G_FMT, &formatReq); cur_data_format_ = PixelFormat{formatReq.fmt.pix}; - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), + ROS_INFO( "Current pixel format: %s @ %sx%s", FourCC::toString(cur_data_format_.pixelFormat).c_str(), std::to_string(cur_data_format_.width).c_str(), std::to_string(cur_data_format_.height).c_str()); @@ -102,31 +87,27 @@ bool V4l2CameraDevice::open() listControls(); // Log results - RCLCPP_INFO(rclcpp::get_logger("v4l2_camera"), "Available pixel formats: "); + ROS_INFO("Available pixel formats: "); for (auto const & format : image_formats_) { - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), - " %s - %s", FourCC::toString(format.pixelFormat).c_str(), format.description.c_str()); + ROS_INFO(" %s - %s", FourCC::toString(format.pixelFormat).c_str(), format.description.c_str()); } - RCLCPP_INFO(rclcpp::get_logger("v4l2_camera"), "Available controls: "); + ROS_INFO("Available controls: "); for (auto const & control : controls_) { - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), + ROS_INFO( " %s (%s) = %s", control.name.c_str(), std::to_string(static_cast(control.type)).c_str(), std::to_string(getControlValue(control.id)).c_str()); } if (timePerFrameSupported()) { - RCLCPP_INFO(rclcpp::get_logger("v4l2_camera"), "Time-per-frame support: YES"); - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), + ROS_INFO("Time-per-frame support: YES"); + ROS_INFO( " Current time per frame: %d/%d s", capture_parm_.timeperframe.numerator, capture_parm_.timeperframe.denominator); - RCLCPP_INFO(rclcpp::get_logger("v4l2_camera"), " Available intervals:"); + ROS_INFO(" Available intervals:"); for (auto const & kv : frame_intervals_) { auto oss = std::ostringstream{}; oss << FourCC::toString(std::get<0>(kv.first)) << " " << std::get<1>(kv.first) << "x" << @@ -134,11 +115,10 @@ bool V4l2CameraDevice::open() for (auto 
const & i : kv.second) { oss << " " << i.first << "/" << i.second; } - RCLCPP_INFO(rclcpp::get_logger("v4l2_camera"), " %s", oss.str().c_str()); + ROS_INFO(" %s", oss.str().c_str()); } } else { - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), "Time-per-frame support: NO"); + ROS_INFO("Time-per-frame support: NO"); } return true; @@ -146,7 +126,7 @@ bool V4l2CameraDevice::open() bool V4l2CameraDevice::start() { - RCLCPP_INFO(rclcpp::get_logger("v4l2_camera"), "Starting camera"); + ROS_INFO("Starting camera"); if (!initMemoryMapping()) { return false; } @@ -159,8 +139,7 @@ bool V4l2CameraDevice::start() buf.index = buffer.index; if (-1 == ioctl(fd_, VIDIOC_QBUF, &buf)) { - RCLCPP_ERROR( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Buffer failure on capture start: %s (%s)", strerror(errno), std::to_string(errno).c_str()); return false; @@ -170,8 +149,7 @@ bool V4l2CameraDevice::start() // Start stream unsigned type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (-1 == ioctl(fd_, VIDIOC_STREAMON, &type)) { - RCLCPP_ERROR( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Failed stream start: %s (%s)", strerror(errno), std::to_string(errno).c_str()); return false; @@ -181,12 +159,11 @@ bool V4l2CameraDevice::start() bool V4l2CameraDevice::stop() { - RCLCPP_INFO(rclcpp::get_logger("v4l2_camera"), "Stopping camera"); + ROS_INFO("Stopping camera"); // Stop stream unsigned type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (-1 == ioctl(fd_, VIDIOC_STREAMOFF, &type)) { - RCLCPP_ERROR( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Failed stream stop"); return false; } @@ -240,63 +217,62 @@ void V4l2CameraDevice::setTSCOffset() } } -Image::UniquePtr V4l2CameraDevice::capture() +sensor_msgs::ImagePtr V4l2CameraDevice::capture() { auto buf = v4l2_buffer{}; - rclcpp::Time buf_stamp; - + ros::Time buf_stamp; buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; // Dequeue buffer with new image if (-1 == ioctl(fd_, VIDIOC_DQBUF, &buf)) { - RCLCPP_ERROR( - 
rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Error dequeueing buffer: %s (%s)", strerror(errno), std::to_string(errno).c_str()); return nullptr; } - if (use_v4l2_buffer_timestamps_) { - buf_stamp = rclcpp::Time(static_cast(buf.timestamp.tv_sec) * 1e9 - + static_cast(buf.timestamp.tv_usec) * 1e3 - + getTimeOffset() - tsc_offset_); - - } - else { - buf_stamp = rclcpp::Clock{RCL_SYSTEM_TIME}.now(); - } + if (use_v4l2_buffer_timestamps_) { + buf_stamp = ros::Time(static_cast(buf.timestamp.tv_sec) + + static_cast(buf.timestamp.tv_usec) * 1e-6 + + static_cast(getTimeOffset() - tsc_offset_) * 1e-9); + + } + else { + buf_stamp = ros::Time::now(); + } + buf_stamp = buf_stamp + timestamp_offset_; // Requeue buffer to be reused for new captures if (-1 == ioctl(fd_, VIDIOC_QBUF, &buf)) { - RCLCPP_ERROR( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Error re-queueing buffer: %s (%s)", strerror(errno), std::to_string(errno).c_str()); return nullptr; } // Create image object - auto img = std::make_unique(); - img->header.stamp = buf_stamp; - img->width = cur_data_format_.width; - img->height = cur_data_format_.height; - img->step = cur_data_format_.bytesPerLine; + auto img_ptr = boost::make_shared(); + img_ptr->header.stamp = buf_stamp; + img_ptr->width = cur_data_format_.width; + img_ptr->height = cur_data_format_.height; + img_ptr->step = cur_data_format_.bytesPerLine; + if (cur_data_format_.pixelFormat == V4L2_PIX_FMT_YUYV) { - img->encoding = sensor_msgs::image_encodings::YUV422_YUY2; + img_ptr->encoding = sensor_msgs::image_encodings::YUV422_YUY2; } else if (cur_data_format_.pixelFormat == V4L2_PIX_FMT_UYVY) { - img->encoding = sensor_msgs::image_encodings::YUV422; + img_ptr->encoding = sensor_msgs::image_encodings::YUV422; } else if (cur_data_format_.pixelFormat == V4L2_PIX_FMT_GREY) { - img->encoding = sensor_msgs::image_encodings::MONO8; + img_ptr->encoding = sensor_msgs::image_encodings::MONO8; } else { - RCLCPP_WARN(rclcpp::get_logger("v4l2_camera"), "Current 
pixel format is not supported yet"); + ROS_WARN("Current pixel format is not supported yet"); } - img->data.resize(cur_data_format_.imageByteSize); + img_ptr->data.resize(cur_data_format_.imageByteSize); auto const & buffer = buffers_[buf.index]; - std::copy(buffer.start, buffer.start + img->data.size(), img->data.begin()); - return img; + std::copy(buffer.start, buffer.start + img_ptr->data.size(), img_ptr->data.begin()); + return img_ptr; } int32_t V4l2CameraDevice::getControlValue(uint32_t id) @@ -304,8 +280,7 @@ int32_t V4l2CameraDevice::getControlValue(uint32_t id) auto ctrl = v4l2_control{}; ctrl.id = id; if (-1 == ioctl(fd_, VIDIOC_G_CTRL, &ctrl)) { - RCLCPP_ERROR( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Failed getting value for control %s: %s (%s); returning 0!", std::to_string(id).c_str(), strerror(errno), std::to_string(errno).c_str()); return 0; @@ -322,8 +297,7 @@ bool V4l2CameraDevice::setControlValue(uint32_t id, int32_t value) auto control = std::find_if( controls_.begin(), controls_.end(), [id](Control const & c) {return c.id == id;}); - RCLCPP_ERROR( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Failed setting value for control %s to %s: %s (%s)", control->name.c_str(), std::to_string(value).c_str(), strerror(errno), std::to_string(errno).c_str()); return false; @@ -339,21 +313,19 @@ bool V4l2CameraDevice::requestDataFormat(const PixelFormat & format) formatReq.fmt.pix.width = format.width; formatReq.fmt.pix.height = format.height; - RCLCPP_INFO( - rclcpp::get_logger("v4l2_camera"), + ROS_INFO( "Requesting format: %sx%s", std::to_string(format.width).c_str(), std::to_string(format.height).c_str()); // Perform request if (-1 == ioctl(fd_, VIDIOC_S_FMT, &formatReq)) { - RCLCPP_ERROR( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Failed requesting pixel format: %s (%s)", strerror(errno), std::to_string(errno).c_str()); return false; } - RCLCPP_INFO(rclcpp::get_logger("v4l2_camera"), "Success"); + ROS_INFO("Success"); 
cur_data_format_ = PixelFormat{formatReq.fmt.pix}; return true; } @@ -361,8 +333,7 @@ bool V4l2CameraDevice::requestDataFormat(const PixelFormat & format) bool V4l2CameraDevice::requestTimePerFrame(std::pair tpf) { if (!timePerFrameSupported()) { - RCLCPP_ERROR( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Device does not support setting time per frame"); return false; } @@ -376,8 +347,7 @@ bool V4l2CameraDevice::requestTimePerFrame(std::pair tpf) // Perform request if (-1 == ioctl(fd_, VIDIOC_S_PARM, &parmReq)) { - RCLCPP_ERROR( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Failed requesting time per frame: %s (%s)", strerror(errno), std::to_string(errno).c_str()); return false; @@ -386,8 +356,7 @@ bool V4l2CameraDevice::requestTimePerFrame(std::pair tpf) if (parmReq.parm.capture.timeperframe.numerator != tpf.first || parmReq.parm.capture.timeperframe.denominator != tpf.second) { - RCLCPP_WARN( - rclcpp::get_logger("v4l2_camera"), + ROS_WARN( "Requesting time per frame succeeded, but driver overwrote value: %d/%d", parmReq.parm.capture.timeperframe.numerator, parmReq.parm.capture.timeperframe.denominator); @@ -402,8 +371,7 @@ void V4l2CameraDevice::getCaptureParameters() struct v4l2_streamparm streamParm; streamParm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; if (-1 == ioctl(fd_, VIDIOC_G_PARM, &streamParm)) { - RCLCPP_ERROR( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR( "Failed requesting streaming parameters: %s (%s)", strerror(errno), std::to_string(errno).c_str()); return; @@ -435,8 +403,7 @@ void V4l2CameraDevice::listImageSizes() frmSizeEnum.pixel_format = f.pixelFormat; if (-1 == ioctl(fd_, VIDIOC_ENUM_FRAMESIZES, &frmSizeEnum)) { - RCLCPP_ERROR_STREAM( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR_STREAM( "Failed listing frame size " << strerror(errno) << " (" << errno << ")"); continue; } @@ -452,8 +419,7 @@ void V4l2CameraDevice::listImageSizes() image_sizes_[f.pixelFormat] = listContinuousImageSizes(frmSizeEnum); break; default: - 
RCLCPP_WARN_STREAM( - rclcpp::get_logger("v4l2_camera"), + ROS_WARN_STREAM( "Frame size type not supported: " << frmSizeEnum.type); continue; } @@ -499,9 +465,7 @@ V4l2CameraDevice::ImageSizesDescription V4l2CameraDevice::listContinuousImageSiz void V4l2CameraDevice::listFrameIntervals() { if (!timePerFrameSupported()) { - RCLCPP_WARN( - rclcpp::get_logger( - "v4l2_camera"), "Time per frame not supported, cannot list intervals"); + ROS_WARN("Time per frame not supported, cannot list intervals"); return; } @@ -520,15 +484,13 @@ void V4l2CameraDevice::listFrameIntervals() frmIvalEnum.height = s.second; if (-1 == ioctl(fd_, VIDIOC_ENUM_FRAMEINTERVALS, &frmIvalEnum)) { - RCLCPP_ERROR_STREAM( - rclcpp::get_logger("v4l2_camera"), + ROS_ERROR_STREAM( "Failed listing frame interval " << strerror(errno) << " (" << errno << ")"); continue; } if (frmIvalEnum.type != V4L2_FRMIVAL_TYPE_DISCRETE) { - RCLCPP_WARN( - rclcpp::get_logger("v4l2_camera"), + ROS_WARN( "Listing of non-discrete frame intervals is not currently supported"); continue; } @@ -549,8 +511,7 @@ void V4l2CameraDevice::listFrameIntervals() break; default: - RCLCPP_WARN( - rclcpp::get_logger("v4l2_camera"), + ROS_WARN( "Listing of frame intervals for non-discrete image sizes is not currently supported"); } } @@ -611,7 +572,7 @@ bool V4l2CameraDevice::initMemoryMapping() // Didn't get more than 1 buffer if (req.count < 2) { - RCLCPP_ERROR(rclcpp::get_logger("v4l2_camera"), "Insufficient buffer memory"); + ROS_ERROR("Insufficient buffer memory"); return false; } @@ -638,7 +599,7 @@ bool V4l2CameraDevice::initMemoryMapping() fd_, buf.m.offset)); if (MAP_FAILED == buffers_[i].start) { - RCLCPP_ERROR(rclcpp::get_logger("v4l2_camera"), "Failed mapping device memory"); + ROS_ERROR("Failed mapping device memory"); return false; } } diff --git a/src/v4l2_camera_node.cpp b/v4l2_camera/src/v4l2_camera_node.cpp similarity index 80% rename from src/v4l2_camera_node.cpp rename to v4l2_camera/src/v4l2_camera_node.cpp index 
10c6614..78c45af 100644 --- a/src/v4l2_camera_node.cpp +++ b/v4l2_camera/src/v4l2_camera_node.cpp @@ -11,20 +11,22 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +#include +#include #include "v4l2_camera/v4l2_camera.hpp" -#include int main(int argc, char ** argv) { - rclcpp::init(argc, argv); + ros::init(argc, argv, "v4l2_camera"); - auto node = std::make_shared(rclcpp::NodeOptions{}); + ros::NodeHandle nh; + ros::NodeHandle nh_priv("~"); - rclcpp::spin(node); - rclcpp::shutdown(); - node = nullptr; + v4l2_camera::V4L2Camera n(nh, nh_priv); + ros::spin(); + return 0; } diff --git a/v4l2_camera/src/v4l2_camera_nodelet.cpp b/v4l2_camera/src/v4l2_camera_nodelet.cpp new file mode 100644 index 0000000..4a23b6c --- /dev/null +++ b/v4l2_camera/src/v4l2_camera_nodelet.cpp @@ -0,0 +1,29 @@ +#include +#include +#include +#include + +#include "v4l2_camera/v4l2_camera.hpp" +#include "v4l2_camera/v4l2_camera_device.hpp" + +namespace v4l2_camera +{ +class V4L2CameraNodelet : public nodelet::Nodelet +{ + public: + V4L2CameraNodelet(){} + ~V4L2CameraNodelet(){} + + private: + virtual void onInit(void); + std::shared_ptr camera_; +}; + +void V4L2CameraNodelet::onInit() +{ + camera_.reset(new v4l2_camera::V4L2Camera(getNodeHandle(), getPrivateNodeHandle())); +} + +} // namespace v4l2_camera + +PLUGINLIB_EXPORT_CLASS(v4l2_camera::V4L2CameraNodelet, nodelet::Nodelet)