diff --git a/CMakeLists.txt b/CMakeLists.txt index cdc2a4e4..2db24e9d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -31,7 +31,7 @@ find_package(Qt5Core REQUIRED) find_package(Qt5Widgets REQUIRED) find_package(Qt5OpenGL REQUIRED) # Note: Bluefox SDK is not compatible with some components of OpenCV. Take care when enabling more. -find_package(OpenCV REQUIRED COMPONENTS core imgproc imgcodecs videoio) +find_package(OpenCV REQUIRED COMPONENTS core imgproc imgcodecs videoio calib3d) include(src/shared/CMakeLists.txt.inc) @@ -142,6 +142,7 @@ set (SRCS ${SRCS} src/app/gui/maskwidget.cpp src/app/gui/automatedcolorcalibwidget.cpp + src/app/gui/camera_intrinsic_calib_widget.cpp src/app/gui/cameracalibwidget.cpp src/app/gui/colorpicker.cpp src/app/gui/glLUTwidget.cpp @@ -155,6 +156,7 @@ set (SRCS ${SRCS} src/app/plugins/plugin_mask.cpp src/app/plugins/plugin_cameracalib.cpp + src/app/plugins/plugin_camera_intrinsic_calib.cpp src/app/plugins/plugin_colorcalib.cpp src/app/plugins/plugin_colorthreshold.cpp src/app/plugins/plugin_detect_balls.cpp @@ -183,6 +185,7 @@ qt5_wrap_cpp (MOC_SRCS src/app/gui/maskwidget.h src/app/gui/automatedcolorcalibwidget.h + src/app/gui/camera_intrinsic_calib_widget.h src/app/gui/cameracalibwidget.h src/app/gui/glLUTwidget.h src/app/gui/glwidget.h @@ -201,9 +204,12 @@ qt5_wrap_cpp (MOC_SRCS src/app/plugins/plugin_colorcalib.h src/app/plugins/plugin_colorthreshold.h src/app/plugins/plugin_auto_color_calibration.h + src/app/plugins/plugin_camera_intrinsic_calib.h src/app/stacks/multistack_robocup_ssl.h + src/shared/util/camera_parameters.h + ${OPTIONAL_HEADERS} ) diff --git a/src/app/gui/camera_intrinsic_calib_widget.cpp b/src/app/gui/camera_intrinsic_calib_widget.cpp new file mode 100644 index 00000000..bbd0c5fb --- /dev/null +++ b/src/app/gui/camera_intrinsic_calib_widget.cpp @@ -0,0 +1,261 @@ +#include "camera_intrinsic_calib_widget.h" + +#include +#include +#include +#include + +CameraIntrinsicCalibrationWidget::CameraIntrinsicCalibrationWidget(CameraParameters& camera_params) + : camera_params{camera_params} { + auto calibration_steps_layout = new QVBoxLayout; + + // pattern configuration + { + auto pattern_config_layout = new QVBoxLayout; + + // pattern select dropdown + { + auto pattern_selector_label = new QLabel(tr("Pattern type:")); + + pattern_selector = new QComboBox(); + pattern_selector->addItems({"Checkerboard", "Circles", "Asymmetric Circles"}); + + auto hbox = new QHBoxLayout; + hbox->addWidget(pattern_selector_label); + hbox->addWidget(pattern_selector); + + pattern_config_layout->addLayout(hbox); + } + + // pattern size config + { + auto grid_dimensions_label = new QLabel(tr("Grid Dimensions (width x height):")); + + grid_width = new QSpinBox(); + grid_width->setMinimum(2); + grid_width->setValue(camera_params.additional_calibration_information->grid_width->getInt()); + connect(grid_width, SIGNAL(valueChanged(int)), this, SLOT(grid_width_changed(int))); + connect(camera_params.additional_calibration_information->grid_width, SIGNAL(hasChanged(VarType*)), this, + SLOT(grid_width_vartype_changed(VarType*))); + + auto grid_dim_separator_label = new QLabel(tr("x")); + + grid_height = new QSpinBox(); + grid_height->setMinimum(2); + grid_height->setValue(camera_params.additional_calibration_information->grid_height->getInt()); + connect(grid_height, SIGNAL(valueChanged(int)), this, SLOT(grid_height_changed(int))); + connect(camera_params.additional_calibration_information->grid_height, SIGNAL(hasChanged(VarType*)), this, + 
SLOT(grid_height_vartype_changed(VarType*))); + + auto hbox = new QHBoxLayout; + hbox->addWidget(grid_dimensions_label); + hbox->addStretch(); + hbox->addWidget(grid_width); + hbox->addWidget(grid_dim_separator_label); + hbox->addWidget(grid_height); + + pattern_config_layout->addLayout(hbox); + } + + auto pattern_config_groupbox = new QGroupBox(tr("Pattern Configuration")); + pattern_config_groupbox->setLayout(pattern_config_layout); + + calibration_steps_layout->addWidget(pattern_config_groupbox); + } + + // calibration instructions + { + auto calibration_instructions_layout = new QVBoxLayout; + + calibration_instructions_layout->addWidget( + new QLabel(tr("Enable pattern detection here and enable display in the " + "VisualizationPlugin.\n" + "Verify that your pattern is detected properly.\nIf not " + "detected double check the grid size and pattern type " + "and verify that greyscale image has good contrast."))); + + // detect pattern checkbox + { + auto label = new QLabel(tr("Detect Pattern")); + + detect_pattern_checkbox = new QCheckBox(); + + auto hbox = new QHBoxLayout; + hbox->addWidget(label); + hbox->addWidget(detect_pattern_checkbox); + + calibration_instructions_layout->addLayout(hbox); + } + + // do corner subpixel correction checkbox + { + auto label = new QLabel(tr("Do Corner Subpixel Correction:")); + + corner_subpixel_correction_checkbox = new QCheckBox(); + corner_subpixel_correction_checkbox->setChecked(true); + + auto hbox = new QHBoxLayout; + hbox->addWidget(label); + hbox->addWidget(corner_subpixel_correction_checkbox); + + calibration_instructions_layout->addLayout(hbox); + + calibration_instructions_layout->addWidget( + new QLabel(tr("When you can see a chessboard in the image, you can " + "start capturing data.\n" + "After each new sample, " + "a calibration will be done and the " + "calibration error will be shown as RMS.\n" + "Make sure to capture enough images from different " + "poses (like ~30)."))); + + capture_button = new QPushButton(tr("Capture")); + capture_button->setCheckable(true); + connect(capture_button, SIGNAL(clicked()), this, SLOT(updateConfigurationEnabled())); + calibration_instructions_layout->addWidget(capture_button); + + calibrate_button = new QPushButton(tr("Calibrate")); + connect(calibrate_button, SIGNAL(clicked()), this, SLOT(calibrateClicked())); + calibration_instructions_layout->addWidget(calibrate_button); + + calibration_instructions_layout->addWidget( + new QLabel(tr("Images where a chessboard was detected " + "are saved in 'test-data/intrinsic_calibration'.\n" + "You can load all images again to redo or tune " + "the calibration."))); + + load_images_button = new QPushButton(tr("Load saved images")); + connect(load_images_button, SIGNAL(clicked()), this, SLOT(loadImagesClicked())); + calibration_instructions_layout->addWidget(load_images_button); + + reset_model_button = new QPushButton(tr("Reset model")); + connect(reset_model_button, SIGNAL(clicked()), this, SLOT(resetModelClicked())); + calibration_instructions_layout->addWidget(reset_model_button); + } + + // images loaded + { + auto hbox = new QHBoxLayout; + hbox->addWidget(new QLabel(tr("Images loaded: "))); + + images_loaded_label = new QLabel(tr("0 / 0")); + hbox->addWidget(images_loaded_label); + + calibration_instructions_layout->addLayout(hbox); + } + + auto calibration_instructions_groupbox = new QGroupBox(tr("Calibration Instructions")); + calibration_instructions_groupbox->setLayout(calibration_instructions_layout); + + 
calibration_steps_layout->addWidget(calibration_instructions_groupbox); + } + + // capture control buttons + { + auto capture_control_layout = new QVBoxLayout; + auto capture_control_groupbox = new QGroupBox(tr("Calibration Data")); + capture_control_groupbox->setLayout(capture_control_layout); + + // captured data info + { + auto hbox = new QHBoxLayout; + hbox->addWidget(new QLabel(tr("Number of data points: "))); + + num_data_points_label = new QLabel(tr("0")); + hbox->addWidget(num_data_points_label); + + capture_control_layout->addLayout(hbox); + } + + // calibration RMS error + { + auto hbox = new QHBoxLayout; + hbox->addWidget(new QLabel(tr("Calibration RMS: "))); + + rms_label = new QLabel(tr("-")); + hbox->addWidget(rms_label); + + capture_control_layout->addLayout(hbox); + } + + capture_control_layout->addWidget( + new QLabel(tr("The calibration result can be found under \n" + "Camera Calibrator -> Camera Parameters -> Intrinsic Parameters"))); + + capture_control_layout->addSpacing(50); + + // control buttons + { + clear_data_button = new QPushButton(tr("Clear Data")); + connect(clear_data_button, SIGNAL(clicked()), this, SLOT(clearDataClicked())); + + auto hbox = new QHBoxLayout; + hbox->addWidget(clear_data_button); + + capture_control_layout->addLayout(hbox); + } + + calibration_steps_layout->addWidget(capture_control_groupbox); + } + + // push widgets to top + calibration_steps_layout->addStretch(); + + this->setLayout(calibration_steps_layout); +} + +void CameraIntrinsicCalibrationWidget::setNumDataPoints(int n) { num_data_points_label->setText(QString("%1").arg(n)); } + +void CameraIntrinsicCalibrationWidget::clearDataClicked() { + setImagesLoaded(0, 0); + should_clear_data = true; +} + +void CameraIntrinsicCalibrationWidget::calibrateClicked() { + should_calibrate = true; + updateConfigurationEnabled(); +} + +void CameraIntrinsicCalibrationWidget::updateConfigurationEnabled() { + pattern_selector->setEnabled(isConfigurationEnabled()); + grid_width->setEnabled(isConfigurationEnabled()); + grid_height->setEnabled(isConfigurationEnabled()); + clear_data_button->setEnabled(isConfigurationEnabled()); + detect_pattern_checkbox->setEnabled(isConfigurationEnabled()); + corner_subpixel_correction_checkbox->setEnabled(isConfigurationEnabled()); + load_images_button->setEnabled(isConfigurationEnabled()); + calibrate_button->setEnabled(isConfigurationEnabled()); + reset_model_button->setEnabled(isConfigurationEnabled()); +} + +void CameraIntrinsicCalibrationWidget::loadImagesClicked() { + setImagesLoaded(0, 0); + should_load_images = true; + updateConfigurationEnabled(); +} + +void CameraIntrinsicCalibrationWidget::resetModelClicked() const { camera_params.intrinsic_parameters->reset(); } + +void CameraIntrinsicCalibrationWidget::setRms(double rms) { rms_label->setText(QString("%1").arg(rms)); } + +void CameraIntrinsicCalibrationWidget::grid_height_changed(int height) const { + camera_params.additional_calibration_information->grid_height->setInt(height); +} + +void CameraIntrinsicCalibrationWidget::grid_height_vartype_changed(VarType* varType) { + grid_height->setValue(((VarInt*)varType)->getInt()); +} + +void CameraIntrinsicCalibrationWidget::grid_width_changed(int width) const { + camera_params.additional_calibration_information->grid_width->setInt(width); +} + +void CameraIntrinsicCalibrationWidget::grid_width_vartype_changed(VarType* varType) { + grid_width->setValue(((VarInt*)varType)->getInt()); +} + +void CameraIntrinsicCalibrationWidget::setImagesLoaded(int n, int total) { + 
images_loaded_label->setText(QString("%1 / %2").arg(n).arg(total)); +} + +void CameraIntrinsicCalibrationWidget::imagesLoaded() { updateConfigurationEnabled(); } diff --git a/src/app/gui/camera_intrinsic_calib_widget.h b/src/app/gui/camera_intrinsic_calib_widget.h new file mode 100644 index 00000000..c2b36c81 --- /dev/null +++ b/src/app/gui/camera_intrinsic_calib_widget.h @@ -0,0 +1,71 @@ +#ifndef CAMERA_INTRINSIC_CALIB_WIDGET_H +#define CAMERA_INTRINSIC_CALIB_WIDGET_H + +#include + +#include +#include +#include +#include +#include +#include + +class CameraIntrinsicCalibrationWidget : public QWidget { + Q_OBJECT + public: + enum class Pattern : int { CHECKERBOARD = 0, CIRCLES, ASYMMETRIC_CIRCLES }; + + public: + explicit CameraIntrinsicCalibrationWidget(CameraParameters &camera_params); + ~CameraIntrinsicCalibrationWidget() override = default; + + CameraParameters &camera_params; + + protected: + QComboBox *pattern_selector; + QSpinBox *grid_width; + QSpinBox *grid_height; + QCheckBox *detect_pattern_checkbox; + QCheckBox *corner_subpixel_correction_checkbox; + QLabel *num_data_points_label; + QLabel *rms_label; + QLabel *images_loaded_label; + QPushButton *clear_data_button; + QPushButton *capture_button; + QPushButton *calibrate_button; + QPushButton *load_images_button; + QPushButton *reset_model_button; + + public: + bool patternDetectionEnabled() const { return detect_pattern_checkbox->isChecked(); } + bool cornerSubPixCorrectionEnabled() const { return corner_subpixel_correction_checkbox->isChecked(); } + bool isCapturing() const { return capture_button->isChecked(); } + bool isLoadingFiles() const { return should_load_images; } + bool isConfigurationEnabled() const { return !isCapturing() && !isLoadingFiles() && !calibrating; } + void setNumDataPoints(int n); + Pattern getPattern() const { return static_cast(pattern_selector->currentIndex()); } + void setImagesLoaded(int n, int total); + void imagesLoaded(); + + public slots: + void clearDataClicked(); + void calibrateClicked(); + void updateConfigurationEnabled(); + void loadImagesClicked(); + void grid_height_changed(int) const; + void grid_height_vartype_changed(VarType* varType); + void grid_width_changed(int) const; + void grid_width_vartype_changed(VarType* varType); + void resetModelClicked() const; + + public: + void setRms(double rms); + + public: + bool should_clear_data = false; + bool should_load_images = false; + bool should_calibrate = false; + bool calibrating = false; +}; + +#endif /* CAMERA_INTRINSIC_CALIB_WIDGET_H */ diff --git a/src/app/gui/cameracalibwidget.cpp b/src/app/gui/cameracalibwidget.cpp index b9b7fb3f..4e8b913b 100644 --- a/src/app/gui/cameracalibwidget.cpp +++ b/src/app/gui/cameracalibwidget.cpp @@ -34,21 +34,39 @@ CameraCalibrationWidget::CameraCalibrationWidget(CameraParameters &_cp) : camera // The calibration points and the fit button: QGroupBox* calibrationStepsBox = new QGroupBox(tr("Calibration Steps")); - QLabel* globalCameraIdLabel = new QLabel("Global camera id "); - globalCameraId = new QLineEdit(QString::number(camera_parameters.additional_calibration_information->camera_index->getInt())); + auto globalCameraIdLabel = new QLabel("Update control points based on the " + "" + "global camera id: "); + globalCameraIdLabel->setTextFormat(Qt::RichText); + globalCameraIdLabel->setTextInteractionFlags(Qt::TextBrowserInteraction); + globalCameraIdLabel->setOpenExternalLinks(true); + + globalCameraId = new 
QLineEdit(QString::number(camera_parameters.additional_calibration_information->global_camera_id->getInt())); globalCameraId->setValidator(new QIntValidator(0, 7, this)); globalCameraId->setMaximumWidth(30); + connect(globalCameraId, SIGNAL(textChanged(QString)), + this, SLOT(global_camera_id_changed())); + connect(camera_parameters.additional_calibration_information->global_camera_id, SIGNAL(hasChanged(VarType*)), + this, SLOT(global_camera_id_vartype_changed())); + + openCvCalibrationCheckBox = new QCheckBox("Use OpenCV calibration separate intrinsic and extrinsic model"); + openCvCalibrationCheckBox->setChecked(camera_parameters.use_opencv_model->getBool()); + connect(openCvCalibrationCheckBox, SIGNAL(stateChanged(int)), this, SLOT(opencvCalibrationTypeChanged(int))); + connect(camera_parameters.use_opencv_model, SIGNAL(hasChanged(VarType *)), this, SLOT(opencvCalibrationTypeChangedVarType())); + QPushButton* updateControlPointsButton = new QPushButton(tr("Update control points")); connect(updateControlPointsButton, SIGNAL(clicked()), SLOT(is_clicked_update_control_points())); QPushButton* initialCalibrationButton = new QPushButton(tr("Do initial calibration")); connect(initialCalibrationButton, SIGNAL(clicked()), SLOT(is_clicked_initial())); - QPushButton* fullCalibrationButton = new QPushButton(tr("Do full calibration")); + fullCalibrationButton = new QPushButton(tr("Do full calibration")); connect(fullCalibrationButton, SIGNAL(clicked()), SLOT(is_clicked_full())); QPushButton* additionalPointsButton = new QPushButton(tr("Detect additional calibration points")); connect(additionalPointsButton, SIGNAL(clicked()), SLOT(edges_is_clicked())); QPushButton* resetButton = new QPushButton(tr("Reset")); connect(resetButton, SIGNAL(clicked()), SLOT(is_clicked_reset())); - + QGroupBox* calibrationParametersBox = new QGroupBox(tr("Calibration Parameters")); // The slider for the width of the line search corridor: QLabel* widthLabel = new QLabel("Line Search Corridor Width (in mm) "); @@ -59,7 +77,7 @@ CameraCalibrationWidget::CameraCalibrationWidget(CameraParameters &_cp) : camera lineSearchCorridorWidthLabelRight = new QLabel(); lineSearchCorridorWidthLabelRight->setNum(200); connect(lineSearchCorridorWidthSlider, SIGNAL(valueChanged(int)), this, SLOT(line_search_slider_changed(int))); - + QGroupBox* cameraParametersBox = new QGroupBox(tr("Initial Camera Parameters")); // The slider for height control: QLabel* heightLabel = new QLabel("Camera Height (in mm) "); @@ -79,17 +97,17 @@ CameraCalibrationWidget::CameraCalibrationWidget(CameraParameters &_cp) : camera distortionLabelRight = new QLabel(); distortionLabelRight->setNum(1./100. 
* (double)(distortionSlider->value())); connect(distortionSlider, SIGNAL(valueChanged(int)), this, SLOT(distortion_slider_changed(int))); - + // Layout for calibration control: QHBoxLayout *hbox = new QHBoxLayout; hbox->addWidget(globalCameraIdLabel); hbox->addWidget(globalCameraId); hbox->addWidget(updateControlPointsButton); - QGroupBox* globalCameraSelectBox = new QGroupBox(); - globalCameraSelectBox->setLayout(hbox); + hbox->addStretch(1); QVBoxLayout *vbox = new QVBoxLayout; - vbox->addWidget(globalCameraSelectBox); + vbox->addLayout(hbox); + vbox->addWidget(openCvCalibrationCheckBox); vbox->addWidget(initialCalibrationButton); vbox->addWidget(additionalPointsButton); vbox->addWidget(fullCalibrationButton); @@ -111,7 +129,7 @@ CameraCalibrationWidget::CameraCalibrationWidget(CameraParameters &_cp) : camera gridCamera->addWidget(distortionSlider,1,1); gridCamera->addWidget(distortionLabelRight,1,2); cameraParametersBox->setLayout(gridCamera); - + // Overall layout: QVBoxLayout *vbox2 = new QVBoxLayout; vbox2->addWidget(calibrationStepsBox); @@ -152,7 +170,11 @@ void CameraCalibrationWidget::is_clicked_full() void CameraCalibrationWidget::is_clicked_reset() { - camera_parameters.reset(); + if(camera_parameters.use_opencv_model->getBool()) { + camera_parameters.extrinsic_parameters->reset(); + } else { + camera_parameters.reset(); + } set_slider_from_vars(); } @@ -163,8 +185,11 @@ void CameraCalibrationWidget::edges_is_clicked() void CameraCalibrationWidget::set_slider_from_vars() { - cameraHeightSlider->setValue((int)camera_parameters.tz->getDouble()); - distortionSlider->setValue((int)(camera_parameters.distortion->getDouble()*100)); + if(!camera_parameters.use_opencv_model->getBool()) { + cameraHeightSlider->setValue((int)camera_parameters.tz->getDouble()); + distortionSlider->setValue( + (int)(camera_parameters.distortion->getDouble() * 100)); + } lineSearchCorridorWidthSlider->setValue((int)(camera_parameters.additional_calibration_information->line_search_corridor_width->getDouble())); } @@ -186,3 +211,27 @@ void CameraCalibrationWidget::line_search_slider_changed(int val) camera_parameters.additional_calibration_information->line_search_corridor_width->setDouble(val); lineSearchCorridorWidthLabelRight->setNum(val); } + +void CameraCalibrationWidget::global_camera_id_changed() { + camera_parameters.additional_calibration_information->global_camera_id->setInt(globalCameraId->text().toInt()); +} + +void CameraCalibrationWidget::global_camera_id_vartype_changed() { + globalCameraId->setText(QString::number(camera_parameters.additional_calibration_information->global_camera_id->getInt())); +} + +void CameraCalibrationWidget::opencvCalibrationTypeChanged(int state) { + camera_parameters.use_opencv_model->setBool(state == Qt::Checked); + setEnabledBasedOnModel(); +} + +void CameraCalibrationWidget::opencvCalibrationTypeChangedVarType() { + openCvCalibrationCheckBox->setChecked(camera_parameters.use_opencv_model->getBool()); + setEnabledBasedOnModel(); +} + +void CameraCalibrationWidget::setEnabledBasedOnModel() { + bool opencv_model = camera_parameters.use_opencv_model->getBool(); + cameraHeightSlider->setEnabled(!opencv_model); + distortionSlider->setEnabled(!opencv_model); +} diff --git a/src/app/gui/cameracalibwidget.h b/src/app/gui/cameracalibwidget.h index 8d2a98ba..60a77f3f 100644 --- a/src/app/gui/cameracalibwidget.h +++ b/src/app/gui/cameracalibwidget.h @@ -37,16 +37,18 @@ Q_OBJECT void focusInEvent ( QFocusEvent * event ); CameraCalibrationWidget(CameraParameters &cp); 
~CameraCalibrationWidget(); - + CameraParameters& camera_parameters; - + bool getDetectEdges() {return detectEdges;} - + void resetDetectEdges() {detectEdges = false;} - - void set_slider_from_vars(); - + + void set_slider_from_vars(); + protected: + QPushButton* fullCalibrationButton; + QSlider* lineSearchCorridorWidthSlider; QLabel* lineSearchCorridorWidthLabelRight; QSlider* cameraHeightSlider; @@ -54,6 +56,7 @@ Q_OBJECT QSlider* distortionSlider; QLabel* distortionLabelRight; QLineEdit* globalCameraId; + QCheckBox* openCvCalibrationCheckBox; bool detectEdges; public slots: @@ -65,6 +68,13 @@ Q_OBJECT void cameraheight_slider_changed(int val); void distortion_slider_changed(int val); void line_search_slider_changed(int val); + void global_camera_id_changed(); + void global_camera_id_vartype_changed(); + void opencvCalibrationTypeChanged(int val); + void opencvCalibrationTypeChangedVarType(); + +private: + void setEnabledBasedOnModel(); }; #endif diff --git a/src/app/plugins/plugin_camera_intrinsic_calib.cpp b/src/app/plugins/plugin_camera_intrinsic_calib.cpp new file mode 100644 index 00000000..e84f2fe2 --- /dev/null +++ b/src/app/plugins/plugin_camera_intrinsic_calib.cpp @@ -0,0 +1,360 @@ +#include "plugin_camera_intrinsic_calib.h" + +#include + +#include +#include + +#include "conversions_greyscale.h" + +PluginCameraIntrinsicCalibration::PluginCameraIntrinsicCalibration(FrameBuffer *buffer, + CameraParameters &_camera_params) + : VisionPlugin(buffer), + settings(new VarList("Camera Intrinsic Calibration")), + widget(new CameraIntrinsicCalibrationWidget(_camera_params)), + camera_params(_camera_params) { + worker = new PluginCameraIntrinsicCalibrationWorker(_camera_params, widget); + + chessboard_capture_dt = new VarDouble("chessboard capture dT", 0.2); + + auto corner_sub_pixel_list = new VarList("corner sub pixel detection"); + corner_sub_pixel_list->addChild(worker->corner_sub_pixel_windows_size); + corner_sub_pixel_list->addChild(worker->corner_sub_pixel_max_iterations); + corner_sub_pixel_list->addChild(worker->corner_sub_pixel_epsilon); + + settings->addChild(worker->image_storage->image_dir); + settings->addChild(worker->reduced_image_width); + settings->addChild(chessboard_capture_dt); + settings->addChild(worker->corner_diff_sq_threshold); + settings->addChild(corner_sub_pixel_list); + settings->addChild(worker->fixFocalLength); + settings->addChild(worker->fixPrinciplePoint); + settings->addChild(worker->initializeCameraMatrix); + + connect(this, SIGNAL(startLoadImages()), worker, SLOT(loadImages())); + connect(this, SIGNAL(startCalibration()), worker, SLOT(calibrate())); + connect(this, SIGNAL(startSaveImages()), worker->image_storage, SLOT(saveImages())); +} + +PluginCameraIntrinsicCalibration::~PluginCameraIntrinsicCalibration() { + worker->deleteLater(); + delete widget; + delete worker; + delete chessboard_capture_dt; +} + +VarList *PluginCameraIntrinsicCalibration::getSettings() { return settings.get(); } + +std::string PluginCameraIntrinsicCalibration::getName() { return "Camera Intrinsic Calibration"; } + +QWidget *PluginCameraIntrinsicCalibration::getControlWidget() { return static_cast(worker->widget); } + +ProcessResult PluginCameraIntrinsicCalibration::process(FrameData *data, RenderOptions *options) { + (void)options; + + Image *img_calibration; + if ((img_calibration = reinterpret_cast *>(data->map.get("img_calibration"))) == nullptr) { + img_calibration = reinterpret_cast *>(data->map.insert("img_calibration", new Image())); + } + + 
ConversionsGreyscale::cvColor2Grey(data->video, img_calibration); + + Chessboard *chessboard; + if ((chessboard = reinterpret_cast<Chessboard *>(data->map.get("chessboard"))) == nullptr) { + chessboard = reinterpret_cast<Chessboard *>(data->map.insert("chessboard", new Chessboard())); + } + + // cv expects row major order and image stores col major. + // height and width are swapped intentionally! + cv::Mat greyscale_mat(img_calibration->getHeight(), img_calibration->getWidth(), CV_8UC1, img_calibration->getData()); + worker->imageSize = greyscale_mat.size(); + + if (widget->should_load_images) { + widget->should_load_images = false; + emit startLoadImages(); + } + + if (widget->patternDetectionEnabled() || widget->isCapturing()) { + worker->detectChessboard(greyscale_mat, chessboard); + } + + if (widget->isCapturing() && chessboard->pattern_was_found) { + double captureDiff = data->video.getTime() - lastChessboardCaptureFrame; + if (captureDiff < chessboard_capture_dt->getDouble()) { + return ProcessingOk; + } + lastChessboardCaptureFrame = data->video.getTime(); + + bool added = worker->addChessboard(chessboard); + + if (added) { + worker->image_storage->image_save_mutex.lock(); + worker->image_storage->images_to_save.push(greyscale_mat.clone()); + worker->image_storage->image_save_mutex.unlock(); + emit startSaveImages(); + } + } + + if (widget->should_calibrate) { + widget->should_calibrate = false; + emit startCalibration(); + } + + if (widget->should_clear_data) { + widget->should_clear_data = false; + worker->clearData(); + } + + return ProcessingOk; +} + +PluginCameraIntrinsicCalibrationWorker::PluginCameraIntrinsicCalibrationWorker(CameraParameters &_camera_params, + CameraIntrinsicCalibrationWidget *widget) + : widget(widget), camera_params(_camera_params) { + corner_sub_pixel_windows_size = new VarInt("window size", 5, 1); + corner_sub_pixel_max_iterations = new VarInt("max iterations", 30, 1); + corner_sub_pixel_epsilon = new VarDouble("epsilon", 0.1, 1e-10); + corner_diff_sq_threshold = new VarDouble("corner sq_diff threshold", 500); + reduced_image_width = new VarDouble("reduced image width for chessboard detection", 900.0); + fixFocalLength = new VarBool("Fix focal length", true); + fixPrinciplePoint = new VarBool("Fix principle point", true); + initializeCameraMatrix = new VarBool("Initialize camera matrix", true); + + image_storage = new ImageStorage(widget); + + thread = new QThread(); + thread->setObjectName("IntrinsicCalibration"); + moveToThread(thread); + thread->start(); +} + +PluginCameraIntrinsicCalibrationWorker::~PluginCameraIntrinsicCalibrationWorker() { + thread->quit(); + thread->deleteLater(); + + delete image_storage; + delete corner_sub_pixel_windows_size; + delete corner_sub_pixel_epsilon; + delete corner_sub_pixel_max_iterations; + delete corner_diff_sq_threshold; + delete reduced_image_width; + delete fixFocalLength; + delete fixPrinciplePoint; + delete initializeCameraMatrix; +} + +void PluginCameraIntrinsicCalibrationWorker::calibrate() { + calib_mutex.lock(); + widget->calibrating = true; + widget->updateConfigurationEnabled(); + + std::vector<cv::Mat> rvecs; + std::vector<cv::Mat> tvecs; + + int flags = 0; + if (fixFocalLength->getBool()) { + flags |= cv::CALIB_FIX_FOCAL_LENGTH; + } + if (fixPrinciplePoint->getBool()) { + flags |= cv::CALIB_FIX_PRINCIPAL_POINT; + } + if (!initializeCameraMatrix->getBool()) { + flags |= cv::CALIB_USE_INTRINSIC_GUESS; + } + + try { + std::cout << "Start calibrating with " << image_points.size() << " samples" << std::endl; + auto start = 
std::chrono::high_resolution_clock::now(); + + double rms = + cv::calibrateCamera(object_points, image_points, imageSize, camera_params.intrinsic_parameters->camera_mat, + camera_params.intrinsic_parameters->dist_coeffs, rvecs, tvecs, flags); + + auto finish = std::chrono::high_resolution_clock::now(); + std::chrono::duration<double> elapsed = finish - start; + std::cout << "Calibration finished with RMS=" << rms << " in " << elapsed.count() << "s" << std::endl; + camera_params.intrinsic_parameters->updateConfigValues(); + this->widget->setRms(rms); + } catch (cv::Exception &e) { + std::cerr << "calibration failed: " << e.err << std::endl; + this->widget->setRms(-1); + } + + widget->calibrating = false; + widget->updateConfigurationEnabled(); + calib_mutex.unlock(); +} + +bool PluginCameraIntrinsicCalibrationWorker::addChessboard(const Chessboard *chessboard) { + calib_mutex.lock(); + + // Check if there is a similar sample already + for (const auto &img_points : this->image_points) { + double sq_diff_sum = 0; + for (uint i = 0; i < img_points.size(); i++) { + double diff = cv::norm(img_points[i] - chessboard->corners[i]); + sq_diff_sum += diff * diff; + } + double sq_diff = sq_diff_sum / (double)img_points.size(); + if (sq_diff < this->corner_diff_sq_threshold->getDouble()) { + calib_mutex.unlock(); + return false; + } + } + + this->image_points.push_back(chessboard->corners); + + std::vector<cv::Point3f> obj; + for (int y = 0; y < chessboard->pattern_size.height; y++) { + for (int x = 0; x < chessboard->pattern_size.width; x++) { + obj.emplace_back(x, y, 0.0f); + } + } + this->object_points.push_back(obj); + + this->widget->setNumDataPoints((int)this->object_points.size()); + calib_mutex.unlock(); + return true; +} + +void PluginCameraIntrinsicCalibrationWorker::detectChessboard(const cv::Mat &greyscale_mat, + Chessboard *chessboard) const { + chessboard->pattern_size.height = this->camera_params.additional_calibration_information->grid_height->getInt(); + chessboard->pattern_size.width = this->camera_params.additional_calibration_information->grid_width->getInt(); + chessboard->corners.clear(); + + cv::Mat greyscale_mat_low_res; + double scale_factor = min(1.0, reduced_image_width->getDouble() / greyscale_mat.size().width); + cv::resize(greyscale_mat, greyscale_mat_low_res, cv::Size(), scale_factor, scale_factor); + + std::vector<cv::Point2f> corners_low_res; + chessboard->pattern_was_found = this->findPattern(greyscale_mat_low_res, chessboard->pattern_size, corners_low_res); + + for (auto &corner : corners_low_res) { + double x = corner.x / scale_factor; + double y = corner.y / scale_factor; + chessboard->corners.push_back(cv::Point((int) x, (int) y)); + } + + if (chessboard->pattern_was_found && this->widget->cornerSubPixCorrectionEnabled()) { + cv::cornerSubPix( + greyscale_mat, chessboard->corners, + cv::Size(corner_sub_pixel_windows_size->getInt(), corner_sub_pixel_windows_size->getInt()), cv::Size(-1, -1), + cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::MAX_ITER, corner_sub_pixel_max_iterations->getInt(), + corner_sub_pixel_epsilon->getDouble())); + } +} + +bool PluginCameraIntrinsicCalibrationWorker::findPattern(const cv::Mat &image, const cv::Size &pattern_size, + vector<cv::Point2f> &corners) const { + using Pattern = CameraIntrinsicCalibrationWidget::Pattern; + int cb_flags = cv::CALIB_CB_ADAPTIVE_THRESH + cv::CALIB_CB_FAST_CHECK + cv::CALIB_CB_NORMALIZE_IMAGE; + + switch (widget->getPattern()) { + case Pattern::CHECKERBOARD: + return cv::findChessboardCorners(image, pattern_size, corners, cb_flags); + case 
Pattern::CIRCLES: + return cv::findCirclesGrid(image, pattern_size, corners); + case Pattern::ASYMMETRIC_CIRCLES: + return cv::findCirclesGrid(image, pattern_size, corners, cv::CALIB_CB_ASYMMETRIC_GRID); + default: + return false; + } +} + +void PluginCameraIntrinsicCalibrationWorker::loadImages() { + std::vector images; + image_storage->readImages(images); + + int n = 0; + for (cv::Mat &mat : images) { + Chessboard image_chessboard; + detectChessboard(mat, &image_chessboard); + if (image_chessboard.pattern_was_found) { + bool added = addChessboard(&image_chessboard); + if (added) { + std::cout << "Added chessboard" << std::endl; + } else { + std::cout << "Filtered chessboard" << std::endl; + } + } else { + std::cout << "No chessboard detected" << std::endl; + } + n++; + widget->setImagesLoaded(n, (int) images.size()); + } + calibrate(); + widget->imagesLoaded(); +} + +void PluginCameraIntrinsicCalibrationWorker::clearData() { + calib_mutex.lock(); + + image_points.clear(); + object_points.clear(); + + widget->setNumDataPoints((int) object_points.size()); + widget->setRms(0.0); + + calib_mutex.unlock(); +} + +ImageStorage::ImageStorage(CameraIntrinsicCalibrationWidget *widget) : widget(widget) { + image_dir = new VarString("pattern image dir", "test-data/intrinsic_calibration"); + + thread = new QThread(); + thread->setObjectName("IntrinsicCalibrationImageStorage"); + moveToThread(thread); + thread->start(); +} + +ImageStorage::~ImageStorage() { + thread->quit(); + thread->deleteLater(); + delete image_dir; +} + +void ImageStorage::saveImages() { + image_save_mutex.lock(); + if (images_to_save.empty()) { + image_save_mutex.unlock(); + } else { + cv::Mat image = images_to_save.front(); + images_to_save.pop(); + image_save_mutex.unlock(); + saveImage(image); + saveImages(); + } +} + +void ImageStorage::saveImage(cv::Mat &image) const { + long t_now = (long)(GetTimeSec() * 1e9); + QString num = QString::number(t_now); + QString filename = QString(image_dir->getString().c_str()) + "/" + num + ".png"; + cv::imwrite(filename.toStdString(), image); +} + +void ImageStorage::readImages(std::vector &images) const { + DIR *dp; + if ((dp = opendir(image_dir->getString().c_str())) == nullptr) { + std::cerr << "Failed to open directory: " << image_dir->getString() << std::endl; + return; + } + struct dirent *dirp; + std::list imgs_to_load(0); + while ((dirp = readdir(dp))) { + std::string file_name(dirp->d_name); + if (file_name[0] != '.') { // not a hidden file or one of '..' or '.' 
+ imgs_to_load.push_back(image_dir->getString() + "/" + file_name); + } + } + closedir(dp); + + imgs_to_load.sort(); + for (const auto ¤tImage : imgs_to_load) { + std::cout << "Loading " << currentImage << std::endl; + cv::Mat srcImg = imread(currentImage, cv::IMREAD_GRAYSCALE); + images.push_back(srcImg); + widget->setImagesLoaded(0, (int) images.size()); + } +} diff --git a/src/app/plugins/plugin_camera_intrinsic_calib.h b/src/app/plugins/plugin_camera_intrinsic_calib.h new file mode 100644 index 00000000..9c70a693 --- /dev/null +++ b/src/app/plugins/plugin_camera_intrinsic_calib.h @@ -0,0 +1,115 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +class Chessboard { +public: + std::vector corners; + cv::Size pattern_size; + bool pattern_was_found; +}; + +class ImageStorage : public QObject { + Q_OBJECT +public: + explicit ImageStorage(CameraIntrinsicCalibrationWidget *widget); + ~ImageStorage() override; + QThread *thread; + CameraIntrinsicCalibrationWidget *widget; + + std::mutex image_save_mutex; + std::queue images_to_save; + + VarString *image_dir; + + void saveImage(cv::Mat& image) const; + void readImages(std::vector &images) const; + +public slots: + void saveImages(); +}; + +class PluginCameraIntrinsicCalibrationWorker : public QObject { + Q_OBJECT +public: + PluginCameraIntrinsicCalibrationWorker( + CameraParameters &_camera_params, + CameraIntrinsicCalibrationWidget *widget); + ~PluginCameraIntrinsicCalibrationWorker() override; + QThread *thread; + std::mutex calib_mutex; + + CameraIntrinsicCalibrationWidget *widget; + ImageStorage *image_storage; + + std::vector> object_points; + std::vector> image_points; + cv::Size imageSize; + + VarInt *corner_sub_pixel_windows_size; + VarInt *corner_sub_pixel_max_iterations; + VarDouble *corner_sub_pixel_epsilon; + VarDouble *corner_diff_sq_threshold; + VarDouble *reduced_image_width; + VarBool *fixFocalLength; + VarBool *fixPrinciplePoint; + VarBool *initializeCameraMatrix; + + void detectChessboard(const cv::Mat &greyscale_mat, + Chessboard *chessboard) const; + bool findPattern(const cv::Mat &image, const cv::Size &pattern_size, + vector &corners) const; + + bool addChessboard(const Chessboard *chessboard); + + void clearData(); + +public slots: + void loadImages(); + void calibrate(); + +private: + CameraParameters camera_params; +}; + +class PluginCameraIntrinsicCalibration : public VisionPlugin { + Q_OBJECT +protected: + std::unique_ptr settings; + +public: + PluginCameraIntrinsicCalibration(FrameBuffer *_buffer, + CameraParameters &camera_params); + ~PluginCameraIntrinsicCalibration() override; + + QWidget *getControlWidget() override; + + ProcessResult process(FrameData *data, RenderOptions *options) override; + VarList *getSettings() override; + std::string getName() override; + +signals: + void startLoadImages(); + void startCalibration(); + void startSaveImages(); + +private: + PluginCameraIntrinsicCalibrationWorker *worker; + + CameraIntrinsicCalibrationWidget *widget; + CameraParameters camera_params; + + VarDouble *chessboard_capture_dt; + + double lastChessboardCaptureFrame = 0.0; +}; diff --git a/src/app/plugins/plugin_cameracalib.cpp b/src/app/plugins/plugin_cameracalib.cpp index 19234f68..17b5f03e 100644 --- a/src/app/plugins/plugin_cameracalib.cpp +++ b/src/app/plugins/plugin_cameracalib.cpp @@ -275,12 +275,6 @@ void PluginCameraCalibration::detectEdgesOnSingleArc( } } -void PluginCameraCalibration::mouseEvent( QMouseEvent * event, pixelloc 
loc) -{ - (void) event; - (void) loc; -} - void PluginCameraCalibration::keyPressEvent ( QKeyEvent * event ) { (void) event; diff --git a/src/app/plugins/plugin_cameracalib.h b/src/app/plugins/plugin_cameracalib.h index e1e801af..4fc8652b 100644 --- a/src/app/plugins/plugin_cameracalib.h +++ b/src/app/plugins/plugin_cameracalib.h @@ -45,7 +45,6 @@ class PluginCameraCalibration : public VisionPlugin rgbImage* rgb_image; int video_width; int video_height; - void mouseEvent ( QMouseEvent * event, pixelloc loc ); bool doing_drag; VarDouble* drag_x; diff --git a/src/app/plugins/plugin_visualize.cpp b/src/app/plugins/plugin_visualize.cpp index 93e879e6..2f26ede6 100644 --- a/src/app/plugins/plugin_visualize.cpp +++ b/src/app/plugins/plugin_visualize.cpp @@ -19,6 +19,7 @@ */ //======================================================================== #include "plugin_visualize.h" +#include "plugin_camera_intrinsic_calib.h" #include #include #include "convex_hull.h" @@ -41,9 +42,12 @@ PluginVisualize::PluginVisualize( _v_blobs = new VarBool("blobs", true); _v_camera_calibration = new VarBool("camera calibration", true); _v_calibration_result = new VarBool("calibration result", true); + _v_calibration_result_pillars = new VarBool("calibration result pillars", false); + _v_calibration_result_pillars_height = new VarDouble("calibration result pillars height", 150.0); _v_detected_edges = new VarBool("detected edges", false); _v_complete_sobel = new VarBool("complete edge detection", false); _v_complete_sobel->setBool(false); + _v_chessboard = new VarBool("chessboard", false); _v_mask_hull = new VarBool("image mask hull", true); @@ -55,9 +59,12 @@ PluginVisualize::PluginVisualize( _settings->addChild(_v_blobs); _settings->addChild(_v_camera_calibration); _settings->addChild(_v_calibration_result); + _settings->addChild(_v_calibration_result_pillars); + _settings->addChild(_v_calibration_result_pillars_height); _settings->addChild(_v_detected_edges); _settings->addChild(_v_complete_sobel); _settings->addChild(_v_mask_hull); + _settings->addChild(_v_chessboard); _threshold_lut=0; edge_image = 0; temp_grey_image = 0; @@ -114,6 +121,9 @@ void PluginVisualize::DrawCameraImage( vis_ptr[i] = color; } } + if (_v_chessboard->getBool()) { + DrawChessboard(data, vis_frame); + } } void PluginVisualize::DrawThresholdedImage( @@ -173,8 +183,15 @@ void PluginVisualize::DrawCameraCalibration( // Principal point rgb ppoint_draw_color; ppoint_draw_color.set(255, 0, 0); - int x = camera_parameters.principal_point_x->getDouble(); - int y = camera_parameters.principal_point_y->getDouble(); + int x; + int y; + if (camera_parameters.use_opencv_model->getBool()) { + x = (int) camera_parameters.intrinsic_parameters->principal_point_x->getDouble(); + y = (int) camera_parameters.intrinsic_parameters->principal_point_y->getDouble(); + } else { + x = (int) camera_parameters.principal_point_x->getDouble(); + y = (int) camera_parameters.principal_point_y->getDouble(); + } vis_frame->data.drawFatLine(x-15, y-15, x+15, y+15, ppoint_draw_color); vis_frame->data.drawFatLine(x+15, y-15, x-15, y+15, ppoint_draw_color); // Calibration points @@ -220,6 +237,25 @@ void PluginVisualize::DrawCalibrationResult( drawFieldArc(center, arc.radius->getDouble(), arc.a1->getDouble(), arc.a2->getDouble(), steps_per_line, vis_frame); } + + if (_v_calibration_result_pillars->getBool()) { + for (size_t i = 0; i < real_field.field_lines.size(); ++i) { + const FieldLine &line_segment = *(real_field.field_lines[i]); + const GVector::vector3d 
p1z0(line_segment.p1_x->getDouble(), + line_segment.p1_y->getDouble(), 0.0); + const GVector::vector3d<double> p2z0(line_segment.p2_x->getDouble(), + line_segment.p2_y->getDouble(), 0.0); + const GVector::vector3d<double> p1z1(line_segment.p1_x->getDouble(), + line_segment.p1_y->getDouble(), + _v_calibration_result_pillars_height->getDouble()); + const GVector::vector3d<double> p2z1(line_segment.p2_x->getDouble(), + line_segment.p2_y->getDouble(), + _v_calibration_result_pillars_height->getDouble()); + drawFieldLine(p1z0, p1z1, steps_per_line, vis_frame); + drawFieldLine(p2z0, p2z1, steps_per_line, vis_frame); + } + } + real_field.field_markings_mutex.unlock(); } @@ -506,3 +542,19 @@ void PluginVisualize::drawFieldLine( lastInImage = nextInImage; } } + +void PluginVisualize::DrawChessboard(FrameData *data, + VisualizationFrame *vis_frame) { + Chessboard *chessboard; + if ((chessboard = reinterpret_cast<Chessboard *>( + data->map.get("chessboard"))) == nullptr) { + std::cerr << "chessboard_found key missing from data map.\n"; + } + + cv::Mat chessboard_img(vis_frame->data.getHeight(), + vis_frame->data.getWidth(), CV_8UC3, + vis_frame->data.getData()); + + cv::drawChessboardCorners(chessboard_img, chessboard->pattern_size, chessboard->corners, + chessboard->pattern_was_found); +} diff --git a/src/app/plugins/plugin_visualize.h b/src/app/plugins/plugin_visualize.h index 125b1cb5..46b197a2 100644 --- a/src/app/plugins/plugin_visualize.h +++ b/src/app/plugins/plugin_visualize.h @@ -62,9 +62,12 @@ class PluginVisualize : public VisionPlugin VarBool * _v_blobs; VarBool * _v_camera_calibration; VarBool * _v_calibration_result; + VarBool * _v_calibration_result_pillars; + VarDouble * _v_calibration_result_pillars_height; VarBool * _v_complete_sobel; VarBool * _v_detected_edges; VarBool * _v_mask_hull; + VarBool * _v_chessboard; const CameraParameters& camera_parameters; const RoboCupField& real_field; @@ -108,16 +111,18 @@ class PluginVisualize : public VisionPlugin void DrawSearchCorridors(FrameData* data, VisualizationFrame* vis_frame); void DrawMaskHull(FrameData* data, VisualizationFrame* vis_frame); + + static void DrawChessboard(FrameData* data, VisualizationFrame* vis_frame); public: PluginVisualize(FrameBuffer* _buffer, const CameraParameters& camera_params, const RoboCupField& real_field, const ConvexHullImageMask &mask); - ~PluginVisualize(); + ~PluginVisualize() override; void setThresholdingLUT(LUT3D * threshold_lut); - virtual ProcessResult process(FrameData * data, RenderOptions * options); - virtual VarList * getSettings(); - virtual string getName(); + ProcessResult process(FrameData * data, RenderOptions * options) override; + VarList * getSettings() override; + string getName() override; }; #endif diff --git a/src/app/stacks/stack_robocup_ssl.cpp b/src/app/stacks/stack_robocup_ssl.cpp index 4f851834..0706faca 100644 --- a/src/app/stacks/stack_robocup_ssl.cpp +++ b/src/app/stacks/stack_robocup_ssl.cpp @@ -19,6 +19,7 @@ */ //======================================================================== #include "stack_robocup_ssl.h" +#include <plugin_camera_intrinsic_calib.h> StackRoboCupSSL::StackRoboCupSSL( RenderOptions * _opts, @@ -70,6 +71,9 @@ StackRoboCupSSL::StackRoboCupSSL( stack.push_back(new PluginColorThreshold(_fb,lut_yuv, *_image_mask)); + stack.push_back( + new PluginCameraIntrinsicCalibration(_fb, *camera_parameters)); + stack.push_back(new PluginRunlengthEncode(_fb)); stack.push_back(new PluginFindBlobs(_fb,lut_yuv)); diff --git a/src/app/stacks/stack_robocup_ssl.h b/src/app/stacks/stack_robocup_ssl.h index 6ad3a5fb..d200e01d 100644 --- 
a/src/app/stacks/stack_robocup_ssl.h +++ b/src/app/stacks/stack_robocup_ssl.h @@ -24,6 +24,7 @@ #include "visionstack.h" #include "lut3d.h" #include "camera_calibration.h" +#include "camera_parameters.h" #include "field.h" #include "plugin_dvr.h" #include "plugin_colorcalib.h" @@ -84,7 +85,7 @@ class StackRoboCupSSL : public VisionStack { RoboCupSSLServer* ds_udp_server_old, string cam_settings_filename); virtual string getSettingsFileName(); - virtual ~StackRoboCupSSL(); + ~StackRoboCupSSL() override; }; diff --git a/src/shared/CMakeLists.txt.inc b/src/shared/CMakeLists.txt.inc index c90c4ca1..3e55a47d 100644 --- a/src/shared/CMakeLists.txt.inc +++ b/src/shared/CMakeLists.txt.inc @@ -35,7 +35,9 @@ set (SHARED_SRCS ${shared_dir}/util/affinity_manager.cpp ${shared_dir}/util/camera_calibration.cpp + ${shared_dir}/util/camera_parameters.cpp ${shared_dir}/util/conversions.cpp + ${shared_dir}/util/conversions_greyscale.cpp ${shared_dir}/util/global_random.cpp ${shared_dir}/util/image.cpp ${shared_dir}/util/image_io.cpp diff --git a/src/shared/util/camera_calibration.cpp b/src/shared/util/camera_calibration.cpp index 0c546b51..032e8584 100644 --- a/src/shared/util/camera_calibration.cpp +++ b/src/shared/util/camera_calibration.cpp @@ -1,6 +1,5 @@ #include "camera_calibration.h" #include -#include #include #include #include @@ -33,6 +32,10 @@ CameraParameters::CameraParameters(int camera_index_, RoboCupField * field_) : new AdditionalCalibrationInformation(camera_index_, field_); q_rotate180 = Quaternion(0, 0, 1.0,0); + + intrinsic_parameters = new CameraIntrinsicParameters(); + extrinsic_parameters = new CameraExtrinsicParameters(); + use_opencv_model = new VarBool("use openCV camera model", false); } CameraParameters::~CameraParameters() { @@ -50,40 +53,92 @@ CameraParameters::~CameraParameters() { delete additional_calibration_information; } -void CameraParameters::toProtoBuffer(SSL_GeometryCameraCalibration &buffer) const { - buffer.set_focal_length(focal_length->getDouble()); - buffer.set_principal_point_x(principal_point_x->getDouble()); - buffer.set_principal_point_y(principal_point_y->getDouble()); - buffer.set_distortion(distortion->getDouble()); - buffer.set_q0(q0->getDouble()); - buffer.set_q1(q1->getDouble()); - buffer.set_q2(q2->getDouble()); - buffer.set_q3(q3->getDouble()); - buffer.set_tx(tx->getDouble()); - buffer.set_ty(ty->getDouble()); - buffer.set_tz(tz->getDouble()); - buffer.set_camera_id(additional_calibration_information->camera_index->getInt()); +/** + * Convert the rotational vector from extrinsic parameters to a Quaternion. + * Taken from: https://gist.github.com/shubh-agrawal/76754b9bfb0f4143819dbd146d15d4c8 + */ +void CameraParameters::quaternionFromOpenCVCalibration(double Q[]) const +{ + cv::Mat R; + cv::Rodrigues(extrinsic_parameters->rvec, R); + double trace = R.at<double>(0,0) + R.at<double>(1,1) + R.at<double>(2,2); + + if (trace > 0.0) + { + double s = sqrt(trace + 1.0); + Q[3] = (s * 0.5); + s = 0.5 / s; + Q[0] = ((R.at<double>(2,1) - R.at<double>(1,2)) * s); + Q[1] = ((R.at<double>(0,2) - R.at<double>(2,0)) * s); + Q[2] = ((R.at<double>(1,0) - R.at<double>(0,1)) * s); + } + else + { + int i = R.at<double>(0,0) < R.at<double>(1,1) ? (R.at<double>(1,1) < R.at<double>(2,2) ? 2 : 1) : (R.at<double>(0,0) < R.at<double>(2,2) ? 
2 : 0); + int j = (i + 1) % 3; + int k = (i + 2) % 3; - //--Set derived parameters: - //compute camera world coordinates: - Quaternion q; - q.set(q0->getDouble(),q1->getDouble(),q2->getDouble(),q3->getDouble()); - q.invert(); + double s = sqrt(R.at<double>(i, i) - R.at<double>(j,j) - R.at<double>(k,k) + 1.0); + Q[i] = s * 0.5; + s = 0.5 / s; - GVector::vector3d<double> v_in(tx->getDouble(),ty->getDouble(),tz->getDouble()); - v_in=(-(v_in)); + Q[3] = (R.at<double>(k,j) - R.at<double>(j,k)) * s; + Q[j] = (R.at<double>(j,i) + R.at<double>(i,j)) * s; + Q[k] = (R.at<double>(k,i) + R.at<double>(i,k)) * s; + } +} - GVector::vector3d<double> v_out = q.rotateVectorByQuaternion(v_in); - buffer.set_derived_camera_world_tx(v_out.x); - buffer.set_derived_camera_world_ty(v_out.y); - buffer.set_derived_camera_world_tz(v_out.z); +void CameraParameters::toProtoBuffer(SSL_GeometryCameraCalibration &buffer) const { + buffer.set_camera_id(additional_calibration_information->camera_index->getInt()); + if (use_opencv_model->getBool()) { + buffer.set_focal_length((float) (intrinsic_parameters->focal_length_x->getDouble() + intrinsic_parameters->focal_length_y->getDouble()) / 2.0f); + buffer.set_principal_point_x((float) intrinsic_parameters->principal_point_x->getDouble()); + buffer.set_principal_point_y((float) intrinsic_parameters->principal_point_y->getDouble()); + buffer.set_distortion((float) intrinsic_parameters->dist_coeff_k1->getDouble()); + double Q[4]; + quaternionFromOpenCVCalibration(Q); + buffer.set_q0((float) Q[0]); + buffer.set_q1((float) Q[1]); + buffer.set_q2((float) Q[2]); + buffer.set_q3((float) Q[3]); + buffer.set_tx((float) extrinsic_parameters->tvec.at<double>(0, 0)); + buffer.set_ty((float) extrinsic_parameters->tvec.at<double>(0, 1)); + buffer.set_tz((float) extrinsic_parameters->tvec.at<double>(0, 2)); + } else { + buffer.set_focal_length((float) focal_length->getDouble()); + buffer.set_principal_point_x((float) principal_point_x->getDouble()); + buffer.set_principal_point_y((float) principal_point_y->getDouble()); + buffer.set_distortion((float) distortion->getDouble()); + buffer.set_q0((float) q0->getDouble()); + buffer.set_q1((float) q1->getDouble()); + buffer.set_q2((float) q2->getDouble()); + buffer.set_q3((float) q3->getDouble()); + buffer.set_tx((float) tx->getDouble()); + buffer.set_ty((float) ty->getDouble()); + buffer.set_tz((float) tz->getDouble()); + } + GVector::vector3d< double > world = getWorldLocation(); + buffer.set_derived_camera_world_tx(world.x); + buffer.set_derived_camera_world_ty(world.y); + buffer.set_derived_camera_world_tz(world.z); + buffer.set_pixel_image_width(additional_calibration_information->imageWidth->getInt()); buffer.set_pixel_image_height(additional_calibration_information->imageHeight->getInt()); - } -GVector::vector3d< double > CameraParameters::getWorldLocation() { +GVector::vector3d< double > CameraParameters::getWorldLocation() const { + if (use_opencv_model->getBool()) { + cv::Mat R; + cv::Rodrigues(extrinsic_parameters->rvec, R); + R = R.t(); + cv::Mat t = -R * extrinsic_parameters->tvec; + GVector::vector3d<double> v_out; + v_out.x = t.at<double>(0, 0); + v_out.y = t.at<double>(0, 1); + v_out.z = t.at<double>(0, 2); + return v_out; + } Quaternion q; q.set(q0->getDouble(),q1->getDouble(),q2->getDouble(),q3->getDouble()); q.invert(); @@ -94,6 +149,9 @@ GVector::vector3d< double > CameraParameters::getWorldLocation() { } void CameraParameters::addSettingsToList(VarList& list) { + list.addChild(intrinsic_parameters->settings); + list.addChild(extrinsic_parameters->settings); + list.addChild(use_opencv_model); list.addChild(focal_length); list.addChild(principal_point_x); 
list.addChild(principal_point_y); @@ -161,28 +219,40 @@ void CameraParameters::radialDistortion( void CameraParameters::field2image( const GVector::vector3d &p_f, GVector::vector2d &p_i) const { - Quaternion q_field2cam = Quaternion( - q0->getDouble(),q1->getDouble(),q2->getDouble(),q3->getDouble()); - q_field2cam.norm(); - GVector::vector3d translation = GVector::vector3d( - tx->getDouble(),ty->getDouble(),tz->getDouble()); - - // First transform the point from the field into the coordinate system of the - // camera - GVector::vector3d p_c = - q_field2cam.rotateVectorByQuaternion(p_f) + translation; - GVector::vector2d p_un = - GVector::vector2d(p_c.x/p_c.z, p_c.y/p_c.z); - - // Apply distortion - GVector::vector2d p_d; - radialDistortion(p_un,p_d); - // Then project from the camera coordinate system onto the image plane using - // the instrinsic parameters - p_i = focal_length->getDouble() * p_d + - GVector::vector2d(principal_point_x->getDouble(), - principal_point_y->getDouble()); + if(use_opencv_model->getBool()) { + cv::Mat src(1, 3, CV_32FC1); + cv::Mat dst(1, 2, CV_32FC1); + src.ptr(0)->x = p_f.x; + src.ptr(0)->y = p_f.y; + src.ptr(0)->z = p_f.z; + cv::projectPoints(src, extrinsic_parameters->rvec, extrinsic_parameters->tvec, intrinsic_parameters->camera_mat, intrinsic_parameters->dist_coeffs, dst); + p_i.x = dst.at(0); + p_i.y = dst.at(1); + } else { + Quaternion q_field2cam = Quaternion( + q0->getDouble(),q1->getDouble(),q2->getDouble(),q3->getDouble()); + q_field2cam.norm(); + GVector::vector3d translation = GVector::vector3d( + tx->getDouble(),ty->getDouble(),tz->getDouble()); + + // First transform the point from the field into the coordinate system of the + // camera + GVector::vector3d p_c = + q_field2cam.rotateVectorByQuaternion(p_f) + translation; + GVector::vector2d p_un = + GVector::vector2d(p_c.x/p_c.z, p_c.y/p_c.z); + + // Apply distortion + GVector::vector2d p_d; + radialDistortion(p_un,p_d); + + // Then project from the camera coordinate system onto the image plane using + // the instrinsic parameters + p_i = focal_length->getDouble() * p_d + + GVector::vector2d(principal_point_x->getDouble(), + principal_point_y->getDouble()); + } } void CameraParameters::field2image( @@ -223,40 +293,69 @@ void CameraParameters::field2image( void CameraParameters::image2field( GVector::vector3d &p_f, const GVector::vector2d &p_i, double z) const { - // Undo scaling and offset - GVector::vector2d p_d( - (p_i.x - principal_point_x->getDouble()) / focal_length->getDouble(), - (p_i.y - principal_point_y->getDouble()) / focal_length->getDouble()); - - // Compensate for distortion (undistort) - GVector::vector2d p_un; - radialDistortionInv(p_un,p_d); - - // Now we got a ray on the z axis - GVector::vector3d v(p_un.x, p_un.y, 1); - - // Transform this ray into world coordinates - Quaternion q_field2cam = Quaternion( - q0->getDouble(),q1->getDouble(),q2->getDouble(),q3->getDouble()); - q_field2cam.norm(); - GVector::vector3d translation = - GVector::vector3d(tx->getDouble(),ty->getDouble(),tz->getDouble()); - - Quaternion q_field2cam_inv = q_field2cam; - q_field2cam_inv.invert(); - GVector::vector3d v_in_w = - q_field2cam_inv.rotateVectorByQuaternion(v); - GVector::vector3d zero_in_w = - q_field2cam_inv.rotateVectorByQuaternion( - GVector::vector3d(0,0,0) - translation); - - // Compute the the point where the rays intersects the field - double t = GVector::ray_plane_intersect( - GVector::vector3d(0,0,z), GVector::vector3d(0,0,1).norm(), - zero_in_w, v_in_w.norm()); - - // Set p_f - p_f = 
zero_in_w + v_in_w.norm() * t; + if(use_opencv_model->getBool()) { + cv::Mat p_i_mat(3, 1, CV_64FC1); + + p_i_mat.ptr(0)->x = p_i.x; + p_i_mat.ptr(0)->y = p_i.y; + p_i_mat.ptr(0)->z = 1; + + /** + * Calculation is based on: + * https://stackoverflow.com/questions/12299870/computing-x-y-coordinate-3d-from-image-point + */ + + const cv::Mat &tvec = extrinsic_parameters->tvec; + const cv::Mat &rotation_mat_inv = extrinsic_parameters->rotation_mat_inv; + const cv::Mat &camera_mat_inv = intrinsic_parameters->camera_mat_inv; + const cv::Mat &right_side_mat = extrinsic_parameters->right_side_mat; + const cv::Mat left_side_mat = rotation_mat_inv * camera_mat_inv * p_i_mat; + + double right_side_mat_z = right_side_mat.at(2, 0); + double left_side_mat_z = left_side_mat.at(2, 0); + double s = (z + right_side_mat_z) / left_side_mat_z; + + cv::Mat p_f_mat = rotation_mat_inv * (s * camera_mat_inv * p_i_mat - tvec); + + p_f.x = p_f_mat.at(0).x; + p_f.y = p_f_mat.at(0).y; + p_f.z = p_f_mat.at(0).z; + } else { + // Undo scaling and offset + GVector::vector2d p_d( + (p_i.x - principal_point_x->getDouble()) / focal_length->getDouble(), + (p_i.y - principal_point_y->getDouble()) / focal_length->getDouble()); + + // Compensate for distortion (undistort) + GVector::vector2d p_un; + radialDistortionInv(p_un, p_d); + + // Now we got a ray on the z axis + GVector::vector3d v(p_un.x, p_un.y, 1); + + // Transform this ray into world coordinates + Quaternion q_field2cam = Quaternion( + q0->getDouble(),q1->getDouble(),q2->getDouble(),q3->getDouble()); + q_field2cam.norm(); + GVector::vector3d translation = + GVector::vector3d(tx->getDouble(),ty->getDouble(),tz->getDouble()); + + Quaternion q_field2cam_inv = q_field2cam; + q_field2cam_inv.invert(); + GVector::vector3d v_in_w = + q_field2cam_inv.rotateVectorByQuaternion(v); + GVector::vector3d zero_in_w = + q_field2cam_inv.rotateVectorByQuaternion( + GVector::vector3d(0,0,0) - translation); + + // Compute the the point where the rays intersects the field + double t = GVector::ray_plane_intersect( + GVector::vector3d(0,0,z), GVector::vector3d(0,0,1).norm(), + zero_in_w, v_in_w.norm()); + + // Set p_f + p_f = zero_in_w + v_in_w.norm() * t; + } } @@ -336,7 +435,6 @@ void CameraParameters::do_calibration(int cal_type) { std::vector > p_i; AdditionalCalibrationInformation* aci = additional_calibration_information; - for (int i = 0; i < AdditionalCalibrationInformation::kNumControlPoints; ++i) { p_i.push_back(GVector::vector2d( @@ -347,7 +445,11 @@ void CameraParameters::do_calibration(int cal_type) { aci->control_point_field_ys[i]->getDouble(), 0.0)); } - calibrate(p_f, p_i, cal_type); + if(use_opencv_model->getBool()) { + calibrateExtrinsicModel(p_f, p_i, cal_type); + } else { + calibrate(p_f, p_i, cal_type); + } } void CameraParameters::reset() { @@ -364,6 +466,177 @@ void CameraParameters::reset() { q3->resetToDefault(); } +bool intersection(cv::Vec4f l1, cv::Vec4f l2, cv::Point2f &r) +{ + cv::Point2f o1; + cv::Point2f x; + cv::Point2f d1; + cv::Point2f d2; + + o1.x = l1[2]; + o1.y = l1[3]; + x.x = l2[2] - l1[2]; + x.y = l2[3] - l1[3]; + d1.x = l1[0]; + d1.y = l1[1]; + d2.x = l2[0]; + d2.y = l2[1]; + + float cross = d1.x*d2.y - d1.y*d2.x; + if (abs(cross) < 1e-8) + return false; + + double t1 = (x.x * d2.y - x.y * d2.x)/cross; + r = o1 + d1 * t1; + return true; +} + +bool contains(const cv::Rect& rect, const cv::Point2d& pt, double margin) { + return rect.x - margin <= pt.x && pt.x < rect.x + rect.width + margin * 2 + && rect.y - margin <= pt.y && pt.y < rect.y + 
@@ -336,7 +435,6 @@ void CameraParameters::do_calibration(int cal_type) {
   std::vector<GVector::vector2d<double> > p_i;
   AdditionalCalibrationInformation* aci = additional_calibration_information;
-
   for (int i = 0; i < AdditionalCalibrationInformation::kNumControlPoints; ++i) {
     p_i.push_back(GVector::vector2d<double>(
@@ -347,7 +445,11 @@ void CameraParameters::do_calibration(int cal_type) {
         aci->control_point_field_ys[i]->getDouble(), 0.0));
   }
-  calibrate(p_f, p_i, cal_type);
+  if(use_opencv_model->getBool()) {
+    calibrateExtrinsicModel(p_f, p_i, cal_type);
+  } else {
+    calibrate(p_f, p_i, cal_type);
+  }
 }
 
 void CameraParameters::reset() {
@@ -364,6 +466,177 @@ void CameraParameters::reset() {
   q3->resetToDefault();
 }
 
+bool intersection(cv::Vec4f l1, cv::Vec4f l2, cv::Point2f &r)
+{
+  cv::Point2f o1;
+  cv::Point2f x;
+  cv::Point2f d1;
+  cv::Point2f d2;
+
+  o1.x = l1[2];
+  o1.y = l1[3];
+  x.x = l2[2] - l1[2];
+  x.y = l2[3] - l1[3];
+  d1.x = l1[0];
+  d1.y = l1[1];
+  d2.x = l2[0];
+  d2.y = l2[1];
+
+  float cross = d1.x*d2.y - d1.y*d2.x;
+  if (abs(cross) < 1e-8)
+    return false;
+
+  double t1 = (x.x * d2.y - x.y * d2.x)/cross;
+  r = o1 + d1 * t1;
+  return true;
+}
+
+bool contains(const cv::Rect& rect, const cv::Point2d& pt, double margin) {
+  return rect.x - margin <= pt.x && pt.x < rect.x + rect.width + margin * 2
+      && rect.y - margin <= pt.y && pt.y < rect.y + rect.height + margin * 2;
+}
+
+void CameraParameters::calibrateExtrinsicModel(
+    std::vector<GVector::vector3d<double> > &p_f,
+    std::vector<GVector::vector2d<double> > &p_i,
+    int cal_type) {
+
+  std::vector<cv::Point3d> fieldPoints;
+  std::vector<cv::Point2d> imagePoints;
+
+  if (cal_type & FULL_ESTIMATION) {
+    // fitted lines through undistorted calibration points
+    std::vector<cv::Vec4f> lines_img_undistorted;
+    // corresponding lines in field coordinates
+    std::vector<cv::Vec4f> lines_field;
+    // corresponding line starting points
+    std::vector<cv::Point2d> lines_start;
+    // corresponding line ending points
+    std::vector<cv::Point2d> lines_end;
+
+    for (const auto &calibrationData : calibrationSegments) {
+      if (!calibrationData.straightLine) {
+        // only straight lines are supported
+        continue;
+      }
+
+      // collect all valid image points
+      std::vector<cv::Point2f> points_img;
+      for (auto imgPt : calibrationData.imgPts) {
+        bool detected = imgPt.second;
+        if (detected) {
+          auto p_img = imgPt.first;
+          points_img.emplace_back(p_img.x, p_img.y);
+        }
+      }
+
+      // undistort image points (without considering extrinsic calibration)
+      std::vector<cv::Point2f> points_undistorted;
+      cv::undistortPoints(points_img,
+                          points_undistorted,
+                          intrinsic_parameters->camera_mat,
+                          intrinsic_parameters->dist_coeffs);
+
+      // fit a line through the detected calibration image points
+      cv::Vec4f line_img;
+      cv::fitLine(points_undistorted, line_img, cv::DIST_L2, 0, 0.01, 0.01);
+      lines_img_undistorted.push_back(line_img);
+      // create a corresponding line in world coordinates
+      auto dir = (calibrationData.p2 - calibrationData.p1).norm();
+      cv::Vec4f line_field(dir.x, dir.y, calibrationData.p1.x, calibrationData.p1.y);
+      lines_field.push_back(line_field);
+      // add corresponding start and end points
+      lines_start.emplace_back(
+          calibrationData.p1.x,
+          calibrationData.p1.y);
+      lines_end.emplace_back(
+          calibrationData.p2.x,
+          calibrationData.p2.y);
+    }
+
+    std::vector<cv::Point3d> points_intersection_img_undistorted;
+    std::vector<cv::Point3d> points_intersection_field;
+    for (uint i = 0; i < lines_field.size(); i++) {
+      for (uint j = i+1; j < lines_field.size(); j++) {
+        auto line_img1 = lines_img_undistorted[i];
+        auto line_img2 = lines_img_undistorted[j];
+        auto line_field1 = lines_field[i];
+        auto line_field2 = lines_field[j];
+
+        cv::Point2f p_intersection_img_undistorted;
+        cv::Point2f p_intersection_field;
+        bool found_img = intersection(line_img1, line_img2, p_intersection_img_undistorted);
+        bool found_field = intersection(line_field1, line_field2, p_intersection_field);
+
+        cv::Rect2d rect_line1(lines_start[i], lines_end[i]);
+        cv::Rect2d rect_line2(lines_start[j], lines_end[j]);
+        if(found_img && found_field
+            // check if intersection is on line segment
+            && contains(rect_line1, p_intersection_field, 10)
+            && contains(rect_line2, p_intersection_field, 10)) {
+          cv::Point3d p_intersection_img_undistorted_3d(
+              p_intersection_img_undistorted.x,
+              p_intersection_img_undistorted.y, 0);
+          points_intersection_img_undistorted.push_back(
+              p_intersection_img_undistorted_3d);
+          cv::Point3d p_intersection_field_3d(
+              p_intersection_field.x,
+              p_intersection_field.y, 0);
+          points_intersection_field.push_back(p_intersection_field_3d);
+        }
+      }
+    }
+    if (!points_intersection_img_undistorted.empty()) {
+      std::vector<double> zero(3);
+      std::vector<cv::Point2d> points_img_intersect;
+      // project undistorted image points back to image (distort)
+      cv::projectPoints(points_intersection_img_undistorted, zero, zero,
+                        intrinsic_parameters->camera_mat,
+                        intrinsic_parameters->dist_coeffs,
+                        points_img_intersect);
+      fieldPoints = points_intersection_field;
+      imagePoints = points_img_intersect;
+    }
+  } else {
+    // Use
calibration points + for (uint i = 0; i < p_f.size(); i++) { + fieldPoints.emplace_back(p_f[i].x, p_f[i].y, p_f[i].z); + imagePoints.emplace_back(p_i[i].x, p_i[i].y); + } + } + + if (fieldPoints.size() < 4) { + std::cerr << "Not enough calibration points: " << fieldPoints.size() << std::endl; + return; + } + + // solve + cv::solvePnP( + fieldPoints, + imagePoints, + intrinsic_parameters->camera_mat, + intrinsic_parameters->dist_coeffs, + extrinsic_parameters->rvec, + extrinsic_parameters->tvec, + false, + cv::SOLVEPNP_ITERATIVE + ); + + // Update parameters + extrinsic_parameters->updateConfigValues(); + + // validate + std::vector imagePoints_out; + cv::projectPoints(fieldPoints, extrinsic_parameters->rvec, extrinsic_parameters->tvec, intrinsic_parameters->camera_mat, intrinsic_parameters->dist_coeffs, imagePoints_out); + double sum = 0; + for(uint i = 0; i < imagePoints_out.size(); i++) { + auto diff = imagePoints[i] - imagePoints_out[i]; + sum += cv::norm(diff) * cv::norm(diff); + } + double rmse = sqrt(sum/ fieldPoints.size()); + std::cout << "RMSE: " << rmse << std::endl; +} + void CameraParameters::calibrate( std::vector > &p_f, std::vector > &p_i, int cal_type) { @@ -563,7 +836,7 @@ void CameraParameters::calibrate( // the right call at compile time #ifdef EIGEN_WORLD_VERSION // alpha.llt().solve(-beta, &new_p); -- modify 1/15/16 - // -- move to Eigen3 structure - + // -- move to Eigen3 structure - // -- http://eigen.tuxfamily.org/dox/Eigen2ToEigen3.html new_p = alpha.llt().solve(-beta); #else @@ -762,6 +1035,10 @@ CameraParameters::AdditionalCalibrationInformation:: imageWidth->addFlags(VARTYPE_FLAG_NOLOAD_ATTRIBUTES); imageHeight = new VarInt("Image height",0,0); imageHeight->addFlags(VARTYPE_FLAG_NOLOAD_ATTRIBUTES); + grid_width = new VarInt("grid width", 7); + grid_height = new VarInt("grid height", 9); + global_camera_id = new VarInt("global camera id", camera_index_, 0, 7); + global_camera_id->setFlags(VARTYPE_FLAG_HIDDEN); } void CameraParameters::AdditionalCalibrationInformation::updateControlPoints() { @@ -872,4 +1149,7 @@ void CameraParameters::AdditionalCalibrationInformation::addSettingsToList( list.addChild(pointSeparation); list.addChild(imageWidth); list.addChild(imageHeight); + list.addChild(grid_height); + list.addChild(grid_width); + list.addChild(global_camera_id); } diff --git a/src/shared/util/camera_calibration.h b/src/shared/util/camera_calibration.h index ec57ef1b..96a6bce5 100644 --- a/src/shared/util/camera_calibration.h +++ b/src/shared/util/camera_calibration.h @@ -19,21 +19,19 @@ */ //======================================================================== +#ifndef CAMERA_CALIBRATION_H +#define CAMERA_CALIBRATION_H + #include #include #include #include #include "field.h" #include "timer.h" - -#ifndef CAMERA_CALIBRATION_H -#define CAMERA_CALIBRATION_H +#include +#include "camera_parameters.h" #include "messages_robocup_ssl_geometry.pb.h" - -//using namespace Eigen; -//USING_PART_OF_NAMESPACE_EIGEN - /*! 
\class CameraParameters @@ -72,22 +70,25 @@ class CameraParameters Eigen::VectorXd p_alpha; Quaternion q_rotate180; - //Quaternion q_field2cam; - //GVector::vector3d translation; - AdditionalCalibrationInformation* additional_calibration_information; - GVector::vector3d getWorldLocation(); + VarBool* use_opencv_model; + CameraIntrinsicParameters* intrinsic_parameters; + CameraExtrinsicParameters* extrinsic_parameters; + + void quaternionFromOpenCVCalibration(double Q[]) const; + GVector::vector3d getWorldLocation() const; void field2image(const GVector::vector3d &p_f, GVector::vector2d &p_i) const; void image2field(GVector::vector3d< double >& p_f, const GVector::vector2d< double >& p_i, double z) const; void calibrate(std::vector > &p_f, std::vector > &p_i, int cal_type); + void calibrateExtrinsicModel(std::vector > &p_f, std::vector > &p_i, int cal_type); double radialDistortion(double ru) const; //apply radial distortion to (undistorted) radius ru and return distorted radius double radialDistortionInv(double rd) const; //invert radial distortion from (distorted) radius rd and return undistorted radius void radialDistortionInv(GVector::vector2d &pu, const GVector::vector2d &pd) const; - void radialDistortion(const GVector::vector2d pu, GVector::vector2d &pd) const; + void radialDistortion(GVector::vector2d pu, GVector::vector2d &pd) const; double radialDistortion(double ru, double dist) const; - void radialDistortion(const GVector::vector2d pu, GVector::vector2d &pd, double dist) const; + void radialDistortion(GVector::vector2d pu, GVector::vector2d &pd, double dist) const; double calc_chisqr(std::vector > &p_f, std::vector > &p_i, Eigen::VectorXd &p, int); void field2image(GVector::vector3d &p_f, GVector::vector2d &p_i, Eigen::VectorXd &p); @@ -155,6 +156,9 @@ class CameraParameters VarInt* imageWidth; VarInt* imageHeight; + VarInt* grid_width; + VarInt* grid_height; + VarInt *global_camera_id; private: RoboCupField* field; std::vector > diff --git a/src/shared/util/camera_parameters.cpp b/src/shared/util/camera_parameters.cpp new file mode 100644 index 00000000..267f0f8d --- /dev/null +++ b/src/shared/util/camera_parameters.cpp @@ -0,0 +1,186 @@ +#include "camera_parameters.h" + +#include + +CameraIntrinsicParameters::CameraIntrinsicParameters() { + settings = new VarList("Intrinsic Parameters"); + + focal_length_x = new VarDouble("focal_length_x", 1.0); + focal_length_y = new VarDouble("focal_length_y", 1.0); + principal_point_x = new VarDouble("principal_point_x", 0.0); + principal_point_y = new VarDouble("principal_point_y", 0.0); + + dist_coeff_k1 = new VarDouble("dist_coeff_k1", 0.0); + dist_coeff_k2 = new VarDouble("dist_coeff_k2", 0.0); + dist_coeff_p1 = new VarDouble("dist_coeff_p1", 0.0); + dist_coeff_p2 = new VarDouble("dist_coeff_p2", 0.0); + dist_coeff_k3 = new VarDouble("dist_coeff_k3", 0.0); + + settings->addChild(focal_length_x); + settings->addChild(focal_length_y); + settings->addChild(principal_point_x); + settings->addChild(principal_point_y); + settings->addChild(dist_coeff_k1); + settings->addChild(dist_coeff_k2); + settings->addChild(dist_coeff_p1); + settings->addChild(dist_coeff_p2); + settings->addChild(dist_coeff_k3); + + connect(focal_length_x, SIGNAL(hasChanged(VarType*)), this, SLOT(updateCameraMat())); + connect(focal_length_y, SIGNAL(hasChanged(VarType*)), this, SLOT(updateCameraMat())); + connect(principal_point_x, SIGNAL(hasChanged(VarType*)), this, SLOT(updateCameraMat())); + connect(principal_point_y, SIGNAL(hasChanged(VarType*)), this, 
SLOT(updateCameraMat())); + + connect(dist_coeff_k1, SIGNAL(hasChanged(VarType*)), this, SLOT(updateDistCoeffs())); + connect(dist_coeff_k2, SIGNAL(hasChanged(VarType*)), this, SLOT(updateDistCoeffs())); + connect(dist_coeff_p1, SIGNAL(hasChanged(VarType*)), this, SLOT(updateDistCoeffs())); + connect(dist_coeff_p2, SIGNAL(hasChanged(VarType*)), this, SLOT(updateDistCoeffs())); + connect(dist_coeff_k3, SIGNAL(hasChanged(VarType*)), this, SLOT(updateDistCoeffs())); + + camera_mat = cv::Mat::eye(3, 3, CV_64FC1); + dist_coeffs = cv::Mat(5, 1, CV_64FC1, cv::Scalar(0)); + updateCameraMat(); + updateDistCoeffs(); +} + +CameraIntrinsicParameters::~CameraIntrinsicParameters() { + delete focal_length_x; + delete focal_length_y; + delete principal_point_x; + delete principal_point_y; + delete dist_coeff_k1; + delete dist_coeff_k2; + delete dist_coeff_p1; + delete dist_coeff_p2; + delete dist_coeff_k3; + delete settings; +} + +void CameraIntrinsicParameters::updateCameraMat() { + camera_mat.at(0, 0) = focal_length_x->getDouble(); + camera_mat.at(1, 1) = focal_length_y->getDouble(); + camera_mat.at(0, 2) = principal_point_x->getDouble(); + camera_mat.at(1, 2) = principal_point_y->getDouble(); + + camera_mat_inv = camera_mat.inv(); +} + +void CameraIntrinsicParameters::updateDistCoeffs() { + dist_coeffs.at(0, 0) = dist_coeff_k1->getDouble(); + dist_coeffs.at(1, 0) = dist_coeff_k2->getDouble(); + dist_coeffs.at(2, 0) = dist_coeff_p1->getDouble(); + dist_coeffs.at(3, 0) = dist_coeff_p2->getDouble(); + dist_coeffs.at(4, 0) = dist_coeff_k3->getDouble(); +} + +void CameraIntrinsicParameters::updateConfigValues() const { + cv::Mat new_camera_mat(camera_mat); + focal_length_x->setDouble(new_camera_mat.at(0, 0)); + focal_length_y->setDouble(new_camera_mat.at(1, 1)); + principal_point_x->setDouble(new_camera_mat.at(0, 2)); + principal_point_y->setDouble(new_camera_mat.at(1, 2)); + + cv::Mat new_dist_coeffs(dist_coeffs); + dist_coeff_k1->setDouble(new_dist_coeffs.at(0, 0)); + dist_coeff_k2->setDouble(new_dist_coeffs.at(1, 0)); + dist_coeff_p1->setDouble(new_dist_coeffs.at(2, 0)); + dist_coeff_p2->setDouble(new_dist_coeffs.at(3, 0)); + dist_coeff_k3->setDouble(new_dist_coeffs.at(4, 0)); +} + +void CameraIntrinsicParameters::reset() { + focal_length_x->resetToDefault(); + focal_length_y->resetToDefault(); + principal_point_x->resetToDefault(); + principal_point_y->resetToDefault(); + updateCameraMat(); + + dist_coeff_k1->resetToDefault(); + dist_coeff_k2->resetToDefault(); + dist_coeff_p1->resetToDefault(); + dist_coeff_p2->resetToDefault(); + dist_coeff_k3->resetToDefault(); + updateDistCoeffs(); +} + +CameraExtrinsicParameters::CameraExtrinsicParameters() { + settings = new VarList("Extrinsic Parameters"); + rvec_x = new VarDouble("rvec x", 0.0); + rvec_y = new VarDouble("rvec y", 0.0); + rvec_z = new VarDouble("rvec z", 0.0); + tvec_x = new VarDouble("tvec x", 0.0); + tvec_y = new VarDouble("tvec y", 0.0); + tvec_z = new VarDouble("tvec z", 0.0); + + settings->addChild(rvec_x); + settings->addChild(rvec_y); + settings->addChild(rvec_z); + settings->addChild(tvec_x); + settings->addChild(tvec_y); + settings->addChild(tvec_z); + + connect(rvec_x, SIGNAL(hasChanged(VarType*)), this, SLOT(updateRVec())); + connect(rvec_y, SIGNAL(hasChanged(VarType*)), this, SLOT(updateRVec())); + connect(rvec_z, SIGNAL(hasChanged(VarType*)), this, SLOT(updateRVec())); + connect(tvec_x, SIGNAL(hasChanged(VarType*)), this, SLOT(updateTVec())); + connect(tvec_y, SIGNAL(hasChanged(VarType*)), this, SLOT(updateTVec())); + connect(tvec_z, 
SIGNAL(hasChanged(VarType*)), this, SLOT(updateTVec())); + + rvec = cv::Mat(3, 1, CV_64FC1, cv::Scalar(0)); + tvec = cv::Mat(3, 1, CV_64FC1, cv::Scalar(0)); + rotation_mat_inv = cv::Mat(3, 3, CV_64FC1, cv::Scalar(0)); + right_side_mat = cv::Mat(3, 1, CV_64FC1, cv::Scalar(0)); +} + +CameraExtrinsicParameters::~CameraExtrinsicParameters() { + delete rvec_x; + delete rvec_y; + delete rvec_z; + delete tvec_x; + delete tvec_y; + delete tvec_z; + delete settings; +} + +void CameraExtrinsicParameters::updateTVec() { + tvec.at(0, 0) = tvec_x->getDouble(); + tvec.at(0, 1) = tvec_y->getDouble(); + tvec.at(0, 2) = tvec_z->getDouble(); + + right_side_mat = rotation_mat_inv * tvec; +} + +void CameraExtrinsicParameters::updateRVec() { + rvec.at(0, 0) = rvec_x->getDouble(); + rvec.at(0, 1) = rvec_y->getDouble(); + rvec.at(0, 2) = rvec_z->getDouble(); + + cv::Mat rotation_mat(3, 3, cv::DataType::type); + cv::Rodrigues(rvec, rotation_mat); + rotation_mat_inv = rotation_mat.inv(); + right_side_mat = rotation_mat_inv * tvec; +} + +void CameraExtrinsicParameters::updateConfigValues() { + cv::Mat new_tvec = tvec.clone(); + tvec_x->setDouble(new_tvec.at(0, 0)); + tvec_y->setDouble(new_tvec.at(0, 1)); + tvec_z->setDouble(new_tvec.at(0, 2)); + + cv::Mat new_rvec = rvec.clone(); + rvec_x->setDouble(new_rvec.at(0, 0)); + rvec_y->setDouble(new_rvec.at(0, 1)); + rvec_z->setDouble(new_rvec.at(0, 2)); +} + +void CameraExtrinsicParameters::reset() { + tvec_x->resetToDefault(); + tvec_y->resetToDefault(); + tvec_z->resetToDefault(); + updateTVec(); + + rvec_x->resetToDefault(); + rvec_y->resetToDefault(); + rvec_z->resetToDefault(); + updateRVec(); +} diff --git a/src/shared/util/camera_parameters.h b/src/shared/util/camera_parameters.h new file mode 100644 index 00000000..c1d81f6c --- /dev/null +++ b/src/shared/util/camera_parameters.h @@ -0,0 +1,74 @@ +#pragma once + +#include + +#include +#include + +using namespace VarTypes; + +class CameraIntrinsicParameters : public QObject { + Q_OBJECT + + public: + CameraIntrinsicParameters(); + ~CameraIntrinsicParameters() override; + + void updateConfigValues() const; + void reset(); + + VarList *settings; + + cv::Mat camera_mat; + cv::Mat dist_coeffs; + + // derived matrices for faster computation + cv::Mat camera_mat_inv; + + VarDouble *focal_length_x; + VarDouble *focal_length_y; + VarDouble *principal_point_x; + VarDouble *principal_point_y; + + VarDouble *dist_coeff_k1; + VarDouble *dist_coeff_k2; + VarDouble *dist_coeff_p1; + VarDouble *dist_coeff_p2; + VarDouble *dist_coeff_k3; + public slots: + void updateCameraMat(); + void updateDistCoeffs(); +}; + +class CameraExtrinsicParameters : public QObject { + Q_OBJECT + + private: + VarDouble *rvec_x; + VarDouble *rvec_y; + VarDouble *rvec_z; + + VarDouble *tvec_x; + VarDouble *tvec_y; + VarDouble *tvec_z; + + public: + CameraExtrinsicParameters(); + ~CameraExtrinsicParameters() override; + + void updateConfigValues(); + void reset(); + + VarList *settings; + + cv::Mat rvec; + cv::Mat tvec; + + // derived matrices for faster computation + cv::Mat rotation_mat_inv; + cv::Mat right_side_mat; + + public slots: + void updateRVec(); + void updateTVec(); +}; diff --git a/src/shared/util/conversions_greyscale.cpp b/src/shared/util/conversions_greyscale.cpp new file mode 100644 index 00000000..b8bcf6e3 --- /dev/null +++ b/src/shared/util/conversions_greyscale.cpp @@ -0,0 +1,74 @@ +#include "conversions_greyscale.h" + +void ConversionsGreyscale::cvColor2Grey(const RawImage &src, Image *dst) { + // Allocate image if not already 
allocated. Allocate method will do + // nothing if data already allocated for correct width/height. + dst->allocate(src.getWidth(), src.getHeight()); + + switch (src.getColorFormat()) { + case COLOR_RGB8: + cvColor2Grey(src, CV_8UC3, dst, cv::COLOR_RGB2GRAY); + break; + case COLOR_RGBA8: + cvColor2Grey(src, CV_8UC4, dst, cv::COLOR_RGBA2GRAY); + break; + case COLOR_YUV422_UYVY: + cvColor2Grey(src, CV_8UC2, dst, cv::COLOR_YUV2GRAY_UYVY); + break; + case COLOR_YUV422_YUYV: + cvColor2Grey(src, CV_8UC2, dst, cv::COLOR_YUV2GRAY_YUYV); + break; + case COLOR_RGB16: + cvColor2Grey(src, CV_16UC3, dst, cv::COLOR_RGB2GRAY); + break; + case COLOR_RAW16: + [[fallthrough]]; + case COLOR_MONO16: + cv16bit2_8bit(src, dst); + break; + case COLOR_MONO8: + [[fallthrough]]; + case COLOR_RAW8: + // already in the correct format + copyData(src, dst); + break; + case COLOR_YUV411: + [[fallthrough]]; + case COLOR_YUV444: + [[fallthrough]]; + default: + manualColor2Grey(src, dst); + break; + } +} + +void ConversionsGreyscale::cvColor2Grey(const RawImage &src, const int src_data_format, Image *dst, + const cv::ColorConversionCodes conversion_code) { + cv::Mat srcMat(src.getWidth(), src.getHeight(), src_data_format, src.getData()); + cv::Mat dstMat(dst->getWidth(), dst->getHeight(), CV_8UC1, dst->getData()); + cv::cvtColor(srcMat, dstMat, conversion_code); +} + +void ConversionsGreyscale::cv16bit2_8bit(const RawImage &src, Image *dst) { + cv::Mat srcMat(src.getWidth(), src.getHeight(), CV_16UC1, src.getData()); + cv::Mat dstMat(dst->getWidth(), dst->getHeight(), CV_8UC1, dst->getData()); + // convertTo drops higher bits. Need to rescale 16 bit values to + // 8bit range. Should have a scale factor of 1/256. See + // https://stackoverflow.com/a/10420743 + srcMat.convertTo(dstMat, CV_8U, 0.00390625); +} + +void ConversionsGreyscale::manualColor2Grey(const RawImage &src, Image *dst) { + for (int i = 0; i < src.getWidth(); ++i) { + for (int j = 0; j < src.getHeight(); ++j) { + const auto pixel = src.getRgb(i, j); + *(dst->getPixelPointer(i, j)) = (pixel.r + pixel.g + pixel.b) / 3; + } + } +} + +void ConversionsGreyscale::copyData(const RawImage &src, Image *dst) { + cv::Mat srcMat(src.getWidth(), src.getHeight(), CV_8UC1, src.getData()); + cv::Mat dstMat(dst->getWidth(), dst->getHeight(), CV_8UC1, dst->getData()); + srcMat.copyTo(dstMat); +} diff --git a/src/shared/util/conversions_greyscale.h b/src/shared/util/conversions_greyscale.h new file mode 100644 index 00000000..f4d98f3d --- /dev/null +++ b/src/shared/util/conversions_greyscale.h @@ -0,0 +1,19 @@ +#pragma once + +#include + +#include + +#include "colors.h" +#include "rawimage.h" +#include "util.h" + +class ConversionsGreyscale { + public: + static void cvColor2Grey(const RawImage& src, Image* dst); + static void cvColor2Grey(const RawImage& src, int src_data_format, Image* dst, + cv::ColorConversionCodes conversion_code); + static void cv16bit2_8bit(const RawImage& src, Image* dst); + static void manualColor2Grey(const RawImage& src, Image* dst); + static void copyData(const RawImage& src, Image* dst); +}; diff --git a/src/shared/util/image.h b/src/shared/util/image.h index 08565f59..96865bc8 100644 --- a/src/shared/util/image.h +++ b/src/shared/util/image.h @@ -209,6 +209,13 @@ class Image : public ImageInterface void drawLine (int x0, int y0, int x1, int y1 , PIXEL val) { + if(abs((long) x0) > 50000L || + abs((long) y0) > 50000L || + abs((long) x1) > 50000L || + abs((long) y1) > 50000L) { + // sanity check to avoid endless or very long loops + return; + } int 
x, y, dx, dy, sx, sy, ax, ay, decy, decx; x = x0; @@ -263,6 +270,13 @@ class Image : public ImageInterface void drawFatLine (int x0, int y0, int x1, int y1 , PIXEL val) { + if(abs((long) x0) > 50000L || + abs((long) y0) > 50000L || + abs((long) x1) > 50000L || + abs((long) y1) > 50000L) { + // sanity check to avoid endless or very long loops + return; + } int x, y, dx, dy, sx, sy, ax, ay, decy, decx; x = x0; diff --git a/test-data/intrinsic_calibration/.keep b/test-data/intrinsic_calibration/.keep new file mode 100644 index 00000000..e69de29b
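// Why the new |coordinate| > 50000 guard in drawLine()/drawFatLine() matters
// (sketch, not part of the patch): the Bresenham-style loop after the guard
// advances one pixel per iteration along the major axis, so the iteration
// count is roughly max(|x1 - x0|, |y1 - y0|). A bogus endpoint, e.g. from a
// diverging projection of a point behind the camera, can therefore make a
// single line draw spin for billions of iterations. Endpoint values below are
// made-up examples.
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <iostream>

static int64_t bresenhamIterations(int x0, int y0, int x1, int y1) {
  const int64_t dx = std::abs(static_cast<int64_t>(x1) - x0);
  const int64_t dy = std::abs(static_cast<int64_t>(y1) - y0);
  return std::max(dx, dy);  // one pixel step per iteration along the major axis
}

int main() {
  // Sane on-image line: a few hundred iterations.
  std::cout << bresenhamIterations(10, 10, 600, 400) << "\n";           // 590
  // Degenerate endpoint far outside the image: ~2 billion iterations,
  // which is what the early return added above now prevents.
  std::cout << bresenhamIterations(10, 10, 2000000000, 400) << "\n";
  return 0;
}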