Commit 8208ce05 authored by oscar

Commit updates

parent 74fa341d
Pipeline #1042 canceled with stages
out/
.vs/
cmake_minimum_required(VERSION 3.0.2)
project(jfx_vision)
## Compile as C++11, supported in ROS Kinetic and newer
add_compile_options(-std=c++11)
set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
add_subdirectory(yolov5)
add_subdirectory(track)
find_package(OpenCV 3.2.0 REQUIRED)
find_package(CUDA REQUIRED)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
message("embed_platform on")
include_directories(/usr/local/cuda/targets/aarch64-linux/include)
link_directories(/usr/local/cuda/targets/aarch64-linux/lib)
else()
message("embed_platform off")
include_directories(/usr/local/cuda/include /host/home/sata1/hs_data/code/RunOnTensorRT/trt_soft/TensorRT-7.0.0.11/include )
link_directories(/usr/local/cuda/lib64 /host/home/sata1/hs_data/code/RunOnTensorRT/trt_soft/TensorRT-7.0.0.11/lib )
endif()
set(JFX_YOLOV5_PATH yolov5)
set(JFX_COMMON_LIBS_PATH ${PROJECT_SOURCE_DIR}/../jfx_common_libs)
include_directories(${JFX_YOLOV5_PATH})
include_directories(${JFX_YOLOV5_PATH}/modules)
include_directories(${JFX_COMMON_LIBS_PATH}/utils)
include_directories(${JFX_COMMON_LIBS_PATH}/coordinate)
include_directories(${JFX_COMMON_LIBS_PATH}/tracker)
include_directories(${JFX_COMMON_LIBS_PATH}/video_capture)
include_directories(${JFX_COMMON_LIBS_PATH})
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
camera_info_manager
cv_bridge
dynamic_reconfigure
image_transport
message_filters
roscpp
rospy
sensor_msgs
std_srvs
jfx_common_msgs
)
## jfx_common_msgs and message_filters are catkin packages, so they belong under CATKIN_DEPENDS
catkin_package(
CATKIN_DEPENDS
camera_info_manager
cv_bridge
dynamic_reconfigure
image_transport
message_filters
roscpp
rospy
sensor_msgs
std_srvs
jfx_common_msgs
DEPENDS
OpenCV
)
###########
## Build ##
###########
include_directories(
/usr/local/include
src
${catkin_INCLUDE_DIRS}
)
link_directories(/usr/local/lib)
link_directories(/usr/local/cuda/lib64)
link_directories(/usr/lib/aarch64-linux-gnu)
link_directories(/usr/lib/aarch64-linux-gnu/tegra)
file(GLOB_RECURSE COORDINATE_SRC_FILES
${PROJECT_SOURCE_DIR}/../jfx_common_libs/coordinate/*.cpp
)
file(GLOB_RECURSE TRACKER_SRC_FILES
${PROJECT_SOURCE_DIR}/../jfx_common_libs/tracker/*.cpp
)
add_executable(${PROJECT_NAME}_node
src/vision_node.cpp
${COORDINATE_SRC_FILES}
${TRACKER_SRC_FILES}
)
target_link_libraries(${PROJECT_NAME}_node
nvinfer
cudart
yolov5plugins
${catkin_LIBRARIES}
${OpenCV_LIBS}
yaml-cpp
)
add_executable(cv_test
src/test.cpp
${COORDINATE_SRC_FILES}
${TRACKER_SRC_FILES}
)
target_link_libraries(cv_test
${catkin_LIBRARIES}
${OpenCV_LIBS}
yaml-cpp
)
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# catkin_install_python(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
# install(TARGETS ${PROJECT_NAME}_node
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark libraries for installation
## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
# install(TARGETS ${PROJECT_NAME}
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_jfx_vision.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)
<launch>
<arg name="vision_config_path" default="$(find jfx_rvconfig)/param/vision_config.yaml" />
<node pkg="jfx_vision" type="jfx_vision_node" name="jfx_vision_node" output="screen" respawn="true" >
<param name="vision_config_path" value="$(arg vision_config_path)" />
</node>
</launch>
<?xml version="1.0"?>
<package format="2">
<name>jfx_vision</name>
<version>1.0.0</version>
<description>The jfx_vision package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<maintainer email="liguoqing@juefx.com">liguoqing</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>TODO</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/jfx_vision</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<!-- build dependencies -->
<build_depend>std_srvs</build_depend>
<build_depend>roscpp</build_depend>
<build_depend>rospy</build_depend>
<build_depend>cv_bridge</build_depend>
<build_depend>dynamic_reconfigure</build_depend>
<build_depend>image_transport</build_depend>
<build_depend>sensor_msgs</build_depend>
<build_depend>camera_info_manager</build_depend>
<build_depend>message_filters</build_depend>
<build_depend>jfx_common_msgs</build_depend>
<!-- runtime dependencies -->
<exec_depend>std_srvs</exec_depend>
<exec_depend>sensor_msgs</exec_depend>
<exec_depend>image_transport</exec_depend>
<exec_depend>dynamic_reconfigure</exec_depend>
<exec_depend>roscpp</exec_depend>
<exec_depend>rospy</exec_depend>
<exec_depend>cv_bridge</exec_depend>
<exec_depend>camera_info_manager</exec_depend>
<exec_depend>message_filters</exec_depend>
<exec_depend>jfx_common_msgs</exec_depend>
</package>
#pragma once
#include "config.hpp"
#include "utils/single_ton.hpp"
#include "utils/spin_mutex.h"
#include "utils/utils.h"
#include "vision_config.h"
#include <map>
#include <string.h>
#include <string>
#include <vector>
using namespace std;
using namespace jfx;
using namespace jfx_vision;
#pragma pack(1)
typedef struct _tagSTFrame {
uint64_t batchIdx = 0;
uint64_t cameraIdx = 0;
uint64_t ts = 0;
int width = 0;
int height = 0;
uint16_t len = 0;
} STFrame;
typedef struct _tagSTObject {
int32_t type = 0;
int32_t x = 0;
int32_t y = 0;
int32_t width = 0;
int32_t height = 0;
float prob = 0;
} STObject;
#pragma pack()
class FrameSerialize : public Singleton<FrameSerialize> {
public:
FrameSerialize()
{
mFilePath = "/workspace/framedata";
mPCueFile = nullptr;
mCurFileSize = 0;
};
public:
void open()
{
}
void close()
{
if (mPCueFile) {
fclose(mPCueFile);
mPCueFile = nullptr;
mCurFileSize = 0;
}
}
void save(std::map<int, Results*>& result, std::map<int, uint64_t>& tm)
{
if (!mIfSaveFrameData) {
return;
}
std::lock_guard<SpinMutex> lock(mFileWLock);
std::map<int, Results*>::iterator itr = result.begin();
for (; itr != result.end(); ++itr) {
saveOneFrame(itr->second, itr->first, tm[itr->first]);
}
mBatchIdx++;
}
protected:
int saveOneFrame(Results* r, int idx, uint64_t ts)
{
string buf(102400, 0);
char* bufPtr = &buf[0]; // write through a mutable pointer; casting away const from c_str() is UB
char* pos = bufPtr;
STFrame* pFrame = (STFrame*)bufPtr;
pFrame->batchIdx = mBatchIdx;
pFrame->cameraIdx = idx;
pFrame->ts = ts;
pFrame->width = mVisionConfig.cameraParamConfigs_map[idx].roiRect.width;
pFrame->height = mVisionConfig.cameraParamConfigs_map[idx].roiRect.height;
pFrame->len = r->results.size() * sizeof(STObject);
pos += sizeof(STFrame);
std::vector<MyResult>::iterator itr = r->results.begin();
for (; itr != r->results.end(); ++itr) {
MyResult& obj = *itr;
STObject* pObj = (STObject*)pos;
pObj->type = obj.num_class;
pObj->x = obj.location[0];
pObj->y = obj.location[1];
pObj->width = obj.location[2] - obj.location[0];
pObj->height = obj.location[3] - obj.location[1];
pObj->prob = obj.prob;
pos += sizeof(STObject);
}
int len = pos - bufPtr;
saveData(bufPtr, len, "frame3");
return 0;
}
int saveData(const char* pData, int len, const char* type)
{
if (!pData || len <= 0) {
return -1;
}
if (mCurFileSize > 20 * 1024 * 1024) {
if (mPCueFile) {
fclose(mPCueFile);
mPCueFile = nullptr;
mCurFileSize = 0;
}
}
if (!mPCueFile) {
mPCueFile = createFile(type);
}
writeFile(pData, len, 'v');
return 0;
}
FILE* createFile(const char* type)
{
char szFileName[256] = { 0 };
sprintf(szFileName, "%s/%s_%ld.dat", mFilePath.c_str(), type, mills_UTC());
FILE* pFile = fopen(szFileName, "wb");
return pFile;
}
void writeFile(const char* pData, int len, uint8_t type)
{
if (mPCueFile) {
uint64_t tCur = mills_UTC();
const char* tag = "~z";
fwrite(tag, strlen(tag), 1, mPCueFile);
fwrite(&type, sizeof(type), 1, mPCueFile);
fwrite(&tCur, sizeof(tCur), 1, mPCueFile);
fwrite(&len, sizeof(len), 1, mPCueFile);
fwrite(pData, len, 1, mPCueFile);
mCurFileSize += strlen(tag) + sizeof(type) + sizeof(tCur) + sizeof(len) + len;
}
}
public:
// file path
string mFilePath;
// current file handle
FILE* mPCueFile;
// current file write position
uint64_t mCurFileSize;
// batch index
uint64_t mBatchIdx = 0;
SpinMutex mFileWLock;
// whether to dump frame data to disk
bool mIfSaveFrameData = true;
VisionConfig mVisionConfig;
};
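# A minimal reader sketch (hypothetical, Python) for the record stream that
# FrameSerialize::writeFile emits above. Each record is the 2-byte tag "~z",
# a 1-byte type, a uint64 timestamp, an int32 payload length, then the payload:
# one packed STFrame header followed by packed STObject entries. Assumes
# little-endian output, matching the #pragma pack(1) structs.
import struct

def read_frame_records(path):
    with open(path, "rb") as f:
        while True:
            head = f.read(15)  # "~z" + type(u8) + ts(u64) + len(i32)
            if len(head) < 15:
                break
            tag, rec_type, ts, length = struct.unpack("<2sBQi", head)
            assert tag == b"~z"
            payload = f.read(length)
            # STFrame: batchIdx u64, cameraIdx u64, ts u64, width i32, height i32, len u16
            batch, cam, fts, w, h, objlen = struct.unpack_from("<QQQiiH", payload, 0)
            # STObject: type/x/y/width/height i32, prob f32 (24 bytes each)
            objs = [struct.unpack_from("<5if", payload, 34 + i * 24)
                    for i in range(objlen // 24)]
            yield rec_type, ts, (batch, cam, fts, w, h), objs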
#pragma once
#include "ros_image_sync_def.h"
#include "detector.h"
#include "vision_config.h"
#include <chrono>
#include <limits.h>
#include <mutex>
#include <string.h>
#include <yaml-cpp/yaml.h>
using namespace std;
using namespace std::chrono;
namespace jfx_vision {
using namespace message_filters;
using namespace message_filters::sync_policies;
typedef struct _tagSTFrame {
int camIdx = 0;
uint64_t ts = 0;
cv::Mat mat;
} STFrame;
template <typename M0>
class ImageAsyncSubscriber {
typedef Subscriber<M0> ImageSubscriber;
typedef boost::shared_ptr<ImageSubscriber> ImageSubscriberPtr;
public:
ImageAsyncSubscriber(ros::NodeHandle& nh, int cam_size)
{
init(nh, cam_size);
};
~ImageAsyncSubscriber() {};
public:
int init(ros::NodeHandle& nh, int cam_size)
{
this->cam_size = cam_size;
this->frame_cnt = 0;
ROS_INFO("ImageAsyncSubscriber CONSTR: cam_size %d", cam_size);
this->drop_rate = g_visionConfig->globalConf.drop_rate;
ROS_INFO("ImageAsyncSubscriber Init YoloV5 model path:%s", g_visionConfig->globalConf.detect_model_path.c_str());
_detector.reset(new CVDetector(*g_visionConfig));
_detector->Init(nh, g_visionConfig->globalConf.detect_model_path, cam_size);
char szTopic[256] = { 0 };
std::map<int, CameraParamConfig>::iterator itr = g_visionConfig->cameraParamConfigs_map.begin();
for (; itr != g_visionConfig->cameraParamConfigs_map.end(); ++itr) {
if (strstr((char*)itr->second.url.c_str(), "usb:") != nullptr) {
// usb camera
sprintf(szTopic, "/camera_array/cam%d/image_raw", itr->second.idx);
} else {
// ip camera
sprintf(szTopic, "/cameras/%s/cam%d/image_raw", g_visionConfig->globalConf.node_name.c_str(), itr->second.idx);
}
ImageSubscriberPtr image(new ImageSubscriber(nh, szTopic, 10));
cam_image_subs.push_back(image);
image->registerCallback(boost::bind(&ImageAsyncSubscriber::asyncCallback, this, _1));
}
return 0;
}
void asyncCallback(const boost::shared_ptr<M0 const>& msg_img)
{
static uint64_t lastPostTS = ros::Time::now().toNSec() / 1000000;
static uint32_t lastSeq[16] = { 0 };
mtx.lock();
try {
STFrame frame;
frame.camIdx = atoi(msg_img->header.frame_id.c_str());
frame.ts = msg_img->header.stamp.toNSec() / 1000000;
frame.mat = cv_bridge::toCvShare(msg_img, "bgr8")->image.clone();
imgMap[frame.camIdx] = frame;
if (msg_img->header.seq != 0) {
if ((msg_img->header.seq - lastSeq[frame.camIdx]) > 1) {
ROS_WARN("lost image, cameraIdx=%d, ts=%ld, last seq=%d, cur seq=%d", frame.camIdx, frame.ts, msg_img->header.seq, lastSeq[frame.camIdx]);
}
lastSeq[frame.camIdx] = msg_img->header.seq;
if (msg_img->header.seq % 100 == 0) {
ROS_INFO("camera %d frame seq reach %d", frame.camIdx, msg_img->header.seq);
}
}
uint64_t curTS = ros::Time::now().toNSec() / 1000000;
if (this->cam_size <= imgMap.size() || (curTS - lastPostTS) >= 150) {
std::vector<STFrame> imgs;
std::map<uint32_t, STFrame>::iterator itr = imgMap.end();
for (int i = 0; i < this->cam_size; i++) {
itr = imgMap.find(i);
if (itr == imgMap.end()) {
STFrame tmpFrame;
tmpFrame.camIdx = i;
tmpFrame.ts = frame.ts;
int w = g_visionConfig->cameraParamConfigs_map.at(i).coordinate.intrinsics.width;
int h = g_visionConfig->cameraParamConfigs_map.at(i).coordinate.intrinsics.height;
cv::Mat zeroMat = cv::Mat::zeros(h, w, CV_8UC3);
tmpFrame.mat = zeroMat.clone();
imgs.push_back(tmpFrame);
ROS_INFO("push empty image of camera %d", i);
} else {
imgs.push_back(itr->second);
}
}
_detector->Push(imgs);
lastPostTS = curTS;
imgMap.clear();
}
} catch (...) {
ROS_INFO("asyncCallback catch exception");
}
mtx.unlock();
}
private:
int cam_size;
std::vector<ImageSubscriberPtr> cam_image_subs;
std::map<uint32_t, STFrame> imgMap;
std::mutex mtx;
ros::Publisher image_infer_pub;
uint32_t frame_cnt;
uint32_t drop_rate;
time_point<high_resolution_clock> star_clock;
typedef Detector<vector<STFrame>> CVDetector;
boost::shared_ptr<CVDetector> _detector;
};
}
#pragma once
#include "ros_image_sync_def.h"
#include <chrono>
#include <limits.h>
#include "vision_config.h"
#include "detector.h"
#include <yaml-cpp/yaml.h>
using namespace std;
using namespace std::chrono;
namespace jfx_vision {
using namespace message_filters;
using namespace message_filters::sync_policies;
typedef struct _tagSTFrame {
uint64_t ts = 0;
cv::Mat mat;
} STFrame;
template <typename M0>
class MultiCamImageSynchronizer {
typedef Subscriber<M0> ImageSubscriber;
typedef boost::shared_ptr<ImageSubscriber> ImageSubscriberPtr;
typedef Synchronizer<ApproximateTime<M0, M0>> TwoCamerasSynchronizer;
typedef boost::shared_ptr<TwoCamerasSynchronizer> TwoCamerasSynchronizerPtr;
typedef Synchronizer<ApproximateTime<M0, M0, M0>> ThreeCamerasSynchronizer;
typedef boost::shared_ptr<ThreeCamerasSynchronizer> ThreeCamerasSynchronizerPtr;
typedef Synchronizer<ApproximateTime<M0, M0, M0, M0>> FourCamerasSynchronizer;
typedef boost::shared_ptr<FourCamerasSynchronizer> FourCamerasSynchronizerPtr;
public:
MultiCamImageSynchronizer(ros::NodeHandle& nh, int cam_size)
{
// ros::NodeHandle nh("~");
// int cam_size;
// nh.getParam("cam_size", cam_size);
this->cam_size = cam_size;
this->frame_cnt = 0;
ROS_INFO("MultiCamImageSynchronizer CONSTR: cam_size %d", cam_size);
this->drop_rate = g_visionConfig->globalConf.drop_rate;
ROS_INFO("MultiCamImageSynchronizer Init YoloV5 model path:%s", g_visionConfig->globalConf.detect_model_path.c_str());
_detector.reset(new CVDetector(*g_visionConfig));
_detector->Init(nh, g_visionConfig->globalConf.detect_model_path, cam_size);
ImageSubscriberPtr image_0(new ImageSubscriber(nh, "/cameras/cam0/image_raw", 10));
ImageSubscriberPtr image_1(new ImageSubscriber(nh, "/cameras/cam1/image_raw", 10));
ImageSubscriberPtr image_2(new ImageSubscriber(nh, "/cameras/cam2/image_raw", 10));
ImageSubscriberPtr image_3(new ImageSubscriber(nh, "/cameras/cam3/image_raw", 10));
star_clock = high_resolution_clock::now();
switch (cam_size) {
case 1:
cam_image_subs.push_back(image_0);
image_0->registerCallback(boost::bind(&MultiCamImageSynchronizer::syncCallback1, this, _1));
break;
case 2:
cam_image_subs.push_back(image_0);
cam_image_subs.push_back(image_1);
images_sync_2.reset(new TwoCamerasSynchronizer(ApproximateTime<M0, M0>(2), *(image_0.get()), *(image_1.get())));
images_sync_2->registerCallback(boost::bind(&MultiCamImageSynchronizer::syncCallback2, this, _1, _2));
break;
case 3:
cam_image_subs.push_back(image_0);
cam_image_subs.push_back(image_1);
cam_image_subs.push_back(image_2);
images_sync_3.reset(new ThreeCamerasSynchronizer(ApproximateTime<M0, M0, M0>(3), *(image_0.get()), *(image_1.get()), *(image_2.get())));
images_sync_3->registerCallback(boost::bind(&MultiCamImageSynchronizer::syncCallback3, this, _1, _2, _3));
break;
case 4:
cam_image_subs.push_back(image_0);
cam_image_subs.push_back(image_1);
cam_image_subs.push_back(image_2);
cam_image_subs.push_back(image_3);
images_sync_4.reset(new FourCamerasSynchronizer(ApproximateTime<M0, M0, M0, M0>(4), *(image_0.get()), *(image_1.get()), *(image_2.get()), *(image_3.get())));
images_sync_4->registerCallback(boost::bind(&MultiCamImageSynchronizer::syncCallback4, this, _1, _2, _3, _4));
break;
}
};
~MultiCamImageSynchronizer() {};
private:
int cam_size;
std::vector<ImageSubscriberPtr> cam_image_subs;
ros::Publisher image_infer_pub;
TwoCamerasSynchronizerPtr images_sync_2;
ThreeCamerasSynchronizerPtr images_sync_3;
FourCamerasSynchronizerPtr images_sync_4;
uint32_t frame_cnt;
uint32_t drop_rate;
time_point<high_resolution_clock> star_clock;
typedef Detector<vector<STFrame>> CVDetector;
boost::shared_ptr<CVDetector> _detector;
void syncCallback1(const boost::shared_ptr<M0 const>& msg_img)
{
ROS_INFO("syncCallback1");
}
void syncCallback2(const boost::shared_ptr<M0 const>& msg_img0, const boost::shared_ptr<M0 const>& msg_img1)
{
frame_cnt++;
auto fram_time = high_resolution_clock::now();
auto millis = duration_cast<std::chrono::milliseconds>(fram_time - star_clock).count();
float fps = 1000.0 / millis * frame_cnt;
if (frame_cnt == INT_MAX) {
frame_cnt = 0;
star_clock = high_resolution_clock::now();
}
// yolov5 infer
std::vector<STFrame> imgs;
STFrame frm0, frm1;
frm0.ts = msg_img0->header.stamp.toNSec() / 1000000;
frm1.ts = msg_img1->header.stamp.toNSec() / 1000000;
frm0.mat = cv_bridge::toCvShare(msg_img0, "bgr8")->image;
frm1.mat = cv_bridge::toCvShare(msg_img1, "bgr8")->image;
imgs.push_back(frm0);
imgs.push_back(frm1);
_detector->Push(imgs);
}
void syncCallback3(const boost::shared_ptr<M0 const>& msg_img0, const boost::shared_ptr<M0 const>& msg_img1,
const boost::shared_ptr<M0 const>& msg_img2)
{
ROS_INFO("syncCallback3");
}
void syncCallback4(const boost::shared_ptr<M0 const>& msg_img0, const boost::shared_ptr<M0 const>& msg_img1,
const boost::shared_ptr<M0 const>& msg_img2, const boost::shared_ptr<M0 const>& msg_img3)
{
frame_cnt++;
auto fram_time = high_resolution_clock::now();
auto millis = duration_cast<std::chrono::milliseconds>(fram_time - star_clock).count();
float fps = 1000.0 / millis * frame_cnt;
// ROS_INFO("sync FPS:%.1f", fps);
if (frame_cnt == INT_MAX) {
frame_cnt = 0;
star_clock = high_resolution_clock::now();
}
//if ((drop_rate != 0) && (frame_cnt % drop_rate != 1)) {
// return;
//}
// yolov5 infer
std::vector<STFrame> imgs;
STFrame frm0, frm1, frm2, frm3;
frm0.ts = msg_img0->header.stamp.toNSec() / 1000000;
frm1.ts = msg_img1->header.stamp.toNSec() / 1000000;
frm2.ts = msg_img2->header.stamp.toNSec() / 1000000;
frm3.ts = msg_img3->header.stamp.toNSec() / 1000000;
frm0.mat = cv_bridge::toCvShare(msg_img0, "bgr8")->image;
frm1.mat = cv_bridge::toCvShare(msg_img1, "bgr8")->image;
frm2.mat = cv_bridge::toCvShare(msg_img2, "bgr8")->image;
frm3.mat = cv_bridge::toCvShare(msg_img3, "bgr8")->image;
imgs.push_back(frm0);
imgs.push_back(frm1);
imgs.push_back(frm2);
imgs.push_back(frm3);
_detector->Push(imgs);
}
};
}
#pragma once
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <message_filters/synchronizer.h>
#include <message_filters/subscriber.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CompressedImage.h>
#include <cv_bridge/cv_bridge.h>
#include "config.hpp"
#include "transform_uv.h"
#include "coordinate_convert.h"
#include "vision_config.h"
using namespace jfx_vision;
std::unique_ptr<VisionConfig> jfx_vision::g_visionConfig;
void initTransUV(VisionConfig& cvConfig, jfx::TransformUV& uv, int cameraIndex)
{
std::vector<double> f_c_x_y;
std::vector<float> transform_M;
std::vector<double> dist;
f_c_x_y.emplace_back(cvConfig.cameraParamConfigs_map.at(cameraIndex).coordinate.intrinsics.fx);
f_c_x_y.emplace_back(cvConfig.cameraParamConfigs_map.at(cameraIndex).coordinate.intrinsics.cx);
f_c_x_y.emplace_back(cvConfig.cameraParamConfigs_map.at(cameraIndex).coordinate.intrinsics.fy);
f_c_x_y.emplace_back(cvConfig.cameraParamConfigs_map.at(cameraIndex).coordinate.intrinsics.cy);
transform_M = cvConfig.cameraParamConfigs_map.at(cameraIndex).coordinate.transform_M;
dist = cvConfig.cameraParamConfigs_map.at(cameraIndex).coordinate.intrinsics.dist;
printf("fx,cx,fy,cy:%.12f,%.12f,%.12f,%.12f \n",f_c_x_y[0],f_c_x_y[1],f_c_x_y[2],f_c_x_y[3] );
for(int i=0; i<dist.size(); i++) {
printf("%.12f,",dist[i]);
}
uv.Init(f_c_x_y, transform_M, dist);
}
void initCoordCVT(VisionConfig& cvConfig, CoordinateConvert& cvt, int cameraIndex)
{
auto coord = cvConfig.cameraParamConfigs_map.at(cameraIndex).coordinate;
printf("\nhorn_r,rx,ry,horn_v,vx,vy %.12f, %.8f,%.8f,%.12f,%.8f,%.8f\n",coord.horn_r, coord.rx, coord.ry, coord.horn_v, coord.vx, coord.vy);
cvt.Init(coord.horn_r, coord.rx, coord.ry, coord.horn_v, coord.vx, coord.vy);
}
int main() {
std::string config_yaml = "/data/catkin_ws_xishan/src/rosrv2x/jfx_rvconfig/param/vision_config.yaml";
g_visionConfig.reset(new VisionConfig(config_yaml));
// b_center=1566 380 rect=1515 298 1617 380 result=2.76558304,106.13797760 juefx_lat=31.58605464 juefx_lon=120.41351219, crossname=50272, taskid=xianfengchunfengn, Hit_Streak=9 Coast_Cycles=0
// cam:idx2 :center=1566 380 Trans X=2.80015530 Y=106.79021222 Lat=31.58604875,Lon=120.41351181
jfx::TransformUV transUV;
CoordinateConvert coordinatCvt;
//for(int i=0;i<4;i++) {
initTransUV(*g_visionConfig, transUV, 2);
initCoordCVT(*g_visionConfig, coordinatCvt, 2);
vector<float> transf_M = g_visionConfig->cameraParamConfigs_map.at(2).coordinate.transform_M;
for(int i=0; i< transf_M.size();i++) {
printf("%.12f,", transf_M[i]);
}
vector<float> r;
int lx = 1515;
int ly = 298;
int rx = 1617;
int ry = 380;
int bcenter_x = (lx + rx)/ 2;
int bcenter_y = ry;
int ret = transUV.Process(bcenter_x, bcenter_y, r);
if (ret == 0) {
float m_NowX = r[0];
float m_NowY = r[1];
jfx::Array result84;
jfx::Array objPoint = { r[0], r[1] };
coordinatCvt.Dev2WGS84(objPoint, result84, false);
double m_Lat = result84[0];
double m_Lon = result84[1];
printf("\ncenter=%d %d Trans X=%.8f Y=%.8f Lat=%.8f,Lon=%.8f\n", bcenter_x, bcenter_y, m_NowX, m_NowY, m_Lat, m_Lon);
}
//}
}
#include <ros/ros.h>
#include <ros/package.h>
#include "imageAsyncSubscriber.h"
#include <yaml-cpp/yaml.h>
#include <stdio.h>
#include "vision_config.h"
using namespace jfx_vision;
std::unique_ptr<VisionConfig> jfx_vision::g_visionConfig;
int main(int argc, char*argv[]) {
ROS_INFO("START JFX_VISION_NODE");
ros::init(argc, argv, "jfx_vision");
ros::NodeHandle nh("~");
string pkgName = "jfx_rvconfig";
string pkgPath = ros::package::getPath(pkgName);
std::string config_yaml = pkgPath + "/param/vision_config.yaml";
nh.param<std::string>("vision_config_path", config_yaml, pkgPath + "/param/vision_config.yaml");
ROS_INFO("use config file: %s", config_yaml.c_str());
g_visionConfig.reset(new VisionConfig(config_yaml));
int cam_size = g_visionConfig->globalConf.camera_size;
ROS_INFO("cam_size:%d", cam_size);
ImageAsyncSubscriber<sensor_msgs::Image> sync(nh, cam_size);
ros::spin();
return 0;
}
cmake_minimum_required(VERSION 3.0.2)
project(track)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
#!/bin/bash
read_dir(){
for file in `ls -a $1`
do
if [ -d $1"/"$file ]
then
if [[ $file != '.' && $file != '..' ]]
then
read_dir $1"/"$file
fi
else
fn=$1"/"$file
#echo $fn
if [ "${fn##*.}"x = "py"x ]
then
echo $fn
vim +':w ++ff=unix' +':q' "$fn"
fi
fi
done
}
read_dir scripts
sudo apt-get install ros-melodic-jsk-recognition-msgs && sudo apt-get install ros-melodic-jsk-rviz-plugins
glob2
pillow
scikit-learn
scikit-image
scikit-video
matplotlib
filterpy==1.4.5
setuptools
rosdep
rosinstall_generator
wstool
rosinstall
six
vcstools
empy
pyquaternion
easydict
futures
# Author: Xinshuo Weng
# email: xinshuo.weng@gmail.com
import numpy as np, copy
#from numba import jit
from scipy.spatial import ConvexHull
#@jit
def poly_area(x, y):
""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
#@jit
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0, :] - corners[1, :]) ** 2))
b = np.sqrt(np.sum((corners[1, :] - corners[2, :]) ** 2))
c = np.sqrt(np.sum((corners[0, :] - corners[4, :]) ** 2))
return a * b * c
#@jit
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1, p2)
if inter_p is not None:
hull_inter = ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
def computeIntersection():
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s): outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0: return None
return (outputList)
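# Hypothetical usage sketch for polygon_clip/poly_area: two overlapping
# counter-clockwise squares, [0,2]^2 clipped by [1,3]^2, should yield the
# square (1,1)-(2,2) with area 1.0.
def _polygon_clip_example():
    subject = [(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (0.0, 2.0)]  # CCW square
    clip = [(1.0, 1.0), (3.0, 1.0), (3.0, 3.0), (1.0, 3.0)]     # CCW square
    inter = polygon_clip(subject, clip)
    area = poly_area(np.array(inter)[:, 0], np.array(inter)[:, 1])
    return inter, area  # area should come out as 1.0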
def iou3d(corners1, corners2):
''' Compute 3D bounding box IoU, only working for object parallel to ground
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
iou_2d: bird's eye view 2D bounding box IoU
todo (rqi): add more description on corner points' orders.
'''
# corner points are in counter clockwise order
rect1 = [(corners1[i, 0], corners1[i, 2]) for i in range(3, -1, -1)]
rect2 = [(corners2[i, 0], corners2[i, 2]) for i in range(3, -1, -1)]
area1 = poly_area(np.array(rect1)[:, 0], np.array(rect1)[:, 1])
area2 = poly_area(np.array(rect2)[:, 0], np.array(rect2)[:, 1])
# inter_area = shapely_polygon_intersection(rect1, rect2)
_, inter_area = convex_hull_intersection(rect1, rect2)
# try:
# _, inter_area = convex_hull_intersection(rect1, rect2)
# except ValueError:
# inter_area = 0
# except scipy.spatial.qhull.QhullError:
# inter_area = 0
iou_2d = inter_area / (area1 + area2 - inter_area)
ymax = min(corners1[0, 1], corners2[0, 1])
ymin = max(corners1[4, 1], corners2[4, 1])
inter_vol = inter_area * max(0.0, ymax - ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou, iou_2d
#@jit
def roty(t):
''' Rotation about the y-axis. '''
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def convert_3dbox_to_8corner(bbox3d_input):
''' Takes an object's 3D box with the representation of [x,y,z,theta,l,w,h] and
convert it to the 8 corners of the 3D box
Returns:
corners_3d: (8,3) array in in rect camera coord
'''
# compute rotational matrix around yaw axis
bbox3d = copy.copy(bbox3d_input)
R = roty(bbox3d[3])
# 3d bounding box dimensions
l = bbox3d[4]
w = bbox3d[5]
h = bbox3d[6]
# 3d bounding box corners
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
# rotate and translate 3d bounding box
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
# print corners_3d.shape
corners_3d[0, :] = corners_3d[0, :] + bbox3d[0]
corners_3d[1, :] = corners_3d[1, :] + bbox3d[1]
corners_3d[2, :] = corners_3d[2, :] + bbox3d[2]
return np.transpose(corners_3d)
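# Hypothetical worked example for the helpers above: two boxes in
# [x,y,z,theta,l,w,h] form sharing the same height, offset 1 m along x with
# the second slightly narrower, which works out to IoU 0.5 both in 3D and in
# bird's eye view.
def _iou3d_example():
    box_a = np.array([0.0, 1.0, 0.0, 0.0, 4.0, 2.0, 1.5])
    box_b = np.array([1.0, 1.0, 0.0, 0.0, 4.0, 1.6, 1.5])
    iou, iou_2d = iou3d(convert_3dbox_to_8corner(box_a),
                        convert_3dbox_to_8corner(box_b))
    return iou, iou_2d  # both approximately 0.5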
import numpy as np
from juefx_iou import bev_overlap
from juefx_kalman import kalman_update
from juefx_kalman_batch import kalman_update_batch
def assert_type(x):
assert x.dtype == "float32"
def iou3d_gpu(boxes_a, boxes_b):
"""
boxes_a: N * 7
boxes_b: M * 7
[x,y,z,l,w,h,thetha]
"""
m, n = len(boxes_a), len(boxes_b)
bo = np.zeros((m, n)).astype(np.float32)
# height overlap
boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).reshape(-1, 1)
boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).reshape(-1, 1)
boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).reshape(1, -1)
boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).reshape(1, -1)
# bev overlap
max_of_min = np.maximum(boxes_a_height_min, boxes_b_height_min)
min_of_max = np.minimum(boxes_a_height_max, boxes_b_height_max)
overlaps_h = np.maximum(min_of_max - max_of_min, 0)
bev_overlap(m, boxes_a, n, boxes_b, bo)
o3d = bo * overlaps_h # 3 * 3
vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).reshape(-1, 1)
vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).reshape(1, -1)
res = o3d / np.maximum(1e-6, vol_a + vol_b - o3d)
return res
def iou2d_gpu(boxes_a, boxes_b):
"""
boxes_a: N * 7
boxes_b: M * 7
[x,y,z,l,w,h,thetha]
"""
m, n = len(boxes_a), len(boxes_b)
bo = np.zeros((m, n)).astype(np.float32)
bev_overlap(m, boxes_a, n, boxes_b, bo)
area1 = (boxes_a[:, 3] * boxes_a[:, 4]).reshape(-1, 1)
area2 = (boxes_b[:, 3] * boxes_b[:, 4]).reshape(1, -1)
res = bo / np.maximum(1e-6, area1 + area2 - bo)
return res
def kalman_update_gpu(Z, H, R, X, P):
"""
ns = dim_x len of state
no = dim_z len of observation
Z: measurement (dim_z, 1)
H: measurement function (dim_z, dim_x)
E / R: measurement uncertainty / noise, named "R" in python, "E" in CUDA (dim_z, dim_z)
X: estimate (dim_x, 1)
P: uncertainty covariance (dim_x, dim_x)
MAKE SURE ALL INPUTS ARE FLOAT32!
"""
assert X.shape[0] == P.shape[0] == P.shape[1] == H.shape[1] # ns = dim_x
assert Z.shape[0] == R.shape[0] == R.shape[1] == H.shape[0] # no = dim_z
assert X.shape[1] == Z.shape[1] == 1
assert_type(Z)
assert_type(H)
assert_type(R)
assert_type(X)
assert_type(P)
ns = X.shape[0]
no = Z.shape[0]
kalman_update(Z, H, R, X, P, ns, no)
def kalman_update_batch_gpu(Zs, Xs, Ps, bs, ns, no):
"""
ns = dim_x = 10 len of state
no = dim_z = 7 len of observation
Zs: measurement (bs, 7)
Xs: estimate (bs, 10)
Ps: uncertainty covariance (bs, 100)
H: measurement function (dim_z, dim_x)
E / R: measurement uncertainty / noise, named "R" in python, "E" in CUDA (dim_z, dim_z)
MAKE SURE ALL INPUTS ARE FLOAT32!
"""
assert_type(Zs)
assert_type(Xs)
assert_type(Ps)
assert Zs.shape[0] == Ps.shape[0] == Xs.shape[0] == bs
assert Zs.shape[1] == no
assert Xs.shape[1] == ns
assert Ps.shape[1] == ns * ns
# cannot use a reshape or slicing view directly; the kernel needs a contiguous copy:
HXs = Xs[:, :no].copy()
kalman_update_batch(Zs, Xs, Ps, HXs, bs, ns, no)
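# Hypothetical shapes-only usage sketch for the batched update. Requires the
# juefx_kalman_batch CUDA extension imported above; every buffer must be
# float32 and is modified in place by the kernel.
def _kalman_batch_example():
    bs, ns, no = 4, 10, 7
    Zs = np.zeros((bs, no), dtype=np.float32)   # stacked measurements
    Xs = np.zeros((bs, ns), dtype=np.float32)   # stacked state vectors
    Ps = np.tile(np.eye(ns, dtype=np.float32).reshape(1, ns * ns), (bs, 1))
    kalman_update_batch_gpu(Zs, Xs, Ps, bs, ns, no)  # updates Xs and Ps in place
    return Xs, Ps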
#from __future__ import absolute_import, division
import numpy as np
from numpy import dot, zeros, eye, isscalar
from filterpy.common import reshape_z
from gpu_helpers import kalman_update_gpu, kalman_update_batch_gpu
class KalmanFilter_GPU(object):
def __init__(self, dim_x, dim_z, dim_u=0):
if dim_x < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_z < 1:
raise ValueError('dim_z must be 1 or greater')
if dim_u < 0:
raise ValueError('dim_u must be 0 or greater')
self.dt = np.float32
self.dim_x = dim_x
self.dim_z = dim_z
self.dim_u = dim_u
self.x = zeros((dim_x, 1)).astype(self.dt) # state
self.P = eye(dim_x).astype(self.dt) # uncertainty covariance
self.Q = eye(dim_x).astype(self.dt) # process uncertainty
self.B = None # control transition matrix
self.F = eye(dim_x).astype(self.dt) # state transition matrix
self.F_blindspot = eye(dim_x).astype(self.dt)
self.H = zeros((dim_z, dim_x)).astype(self.dt) # measurement function
self.R = eye(dim_z).astype(self.dt) # measurement uncertainty
self._alpha_sq = 1. # fading memory control
self.M = np.zeros((dim_x, dim_z)).astype(self.dt) # process-measurement cross correlation
self.z = np.array([[None]*self.dim_z]).T.astype(self.dt)
# gain and residual are computed during the innovation step. We
# save them so that in case you want to inspect them for various
# purposes
self.K = np.zeros((dim_x, dim_z)).astype(self.dt) # kalman gain
self.y = zeros((dim_z, 1)).astype(self.dt)
self.S = np.zeros((dim_z, dim_z)).astype(self.dt) # system uncertainty
self.SI = np.zeros((dim_z, dim_z)).astype(self.dt) # inverse system uncertainty
def predict(self, blindSpot=None, u=None, B=None, F=None, Q=None):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array, default 0
Optional control vector.
B : np.array(dim_x, dim_u), or None
Optional control transition matrix; a value of None
will cause the filter to use `self.B`.
F : np.array(dim_x, dim_x), or None
Optional state transition matrix; a value of None
will cause the filter to use `self.F`.
Q : np.array(dim_x, dim_x), scalar, or None
Optional process noise matrix; a value of None will cause the
filter to use `self.Q`.
"""
if B is None:
B = self.B
if F is None:
F = self.F
if Q is None:
Q = self.Q
elif isscalar(Q):
Q = eye(self.dim_x) * Q
# x = Fx + Bu
if blindSpot is None:
if B is not None and u is not None:
self.x = dot(F, self.x) + dot(B, u)
else:
self.x = dot(F, self.x)
# P = FPF' + Q
self.P = self._alpha_sq * dot(dot(F, self.P), F.T) + Q
# save prior
self.x_prior = self.x.copy()
self.P_prior = self.P.copy()
elif blindSpot is True:
self.x = dot(self.F_blindspot, self.x)
# P = FPF' + Q
self.P = self._alpha_sq * dot(dot(self.F_blindspot, self.P), self.F_blindspot.T) + Q
# save prior
self.x_prior = self.x.copy()
self.P_prior = self.P.copy()
if self.x[3] >= np.pi: self.x[3] -= np.pi * 2
if self.x[3] < -np.pi: self.x[3] += np.pi * 2
def update(self, z, R=None, H=None):
"""
bs = 1
Zs: measurement (bs, no)
Xs: estimate (bs, ns)
Ps: uncertainty covariance (bs, ns * ns)
self.x : (ns, 1)
self.P : (ns, ns)
"""
bs = 1
ns = 10
no = 7
z = z.astype(self.dt)
self.x = self.x.astype(self.dt)
self.P = self.P.astype(self.dt)
Zs = z.reshape(bs, no)
Xs = self.x.reshape(bs, ns)
Ps = self.P.reshape(bs, ns * ns)
kalman_update_batch_gpu(Zs, Xs, Ps, bs, ns, no)
self.x = Xs.reshape(ns, 1)
self.P = Ps.reshape(ns, ns)
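# Hypothetical usage sketch for KalmanFilter_GPU with the 10-state /
# 7-observation layout the tracker below assumes (F and H are configured by
# the caller). Needs the CUDA kalman extension at runtime.
def _kf_gpu_example():
    kf = KalmanFilter_GPU(dim_x=10, dim_z=7)
    kf.x[:7] = np.arange(7, dtype=np.float32).reshape(7, 1)  # seed the state
    kf.predict()                                             # x = Fx, P = FPF' + Q
    z = np.ones((7, 1), dtype=np.float32)                    # one observation
    kf.update(z)                                             # GPU batched update, bs=1
    return kf.x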
# Author: Xinshuo Weng
# email: xinshuo.weng@gmail.com
import math
import numpy as np
from kalmanFilter_gpu_class import KalmanFilter_GPU
#from filterpy.kalman import KalmanFilter
from sklearn.decomposition import PCA
import time
NUM_FRAME = 5
DIST_THRED = 0.5
def center_rot_y_f(points):
x_list = points[:, 0]
y_list = points[:, 1]
pca = PCA(n_components=2)
pca.fit(points)
PCA(copy=True, n_components=2, whiten=False)
orientation = pca.components_[0]
dx = x_list[-1] - x_list[0]
dy = y_list[-1] - y_list[0]
alpha = np.dot(np.array([dx, dy]), orientation)
if alpha < 0:
orientation = -orientation
center_rot_y = math.atan2(orientation[1], orientation[0])
return center_rot_y
class KalmanBoxTracker(object):
"""
This class represents the internal state of individual tracked objects observed as bbox.
"""
count = 0
def __init__(self, bbox3D, info):
"""
Initialises a tracker using initial bounding box.
"""
# define constant velocity model
self.kf = KalmanFilter_GPU(dim_x=10, dim_z=7)
self.kf.F = np.array([[1, 0, 0, 0, 0, 0, 0, 1, 0, 0], # state transition matrix
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
self.kf.H = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # measurement function,
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0]])
self.kf.F_blindspot = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0], # state transition matrix
[0, 1, 0, 0, 0, 0, 0, 0, 0.3, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0.],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
self.kf.P[:, :] *= 1000.
self.kf.P *= 10.
self.kf.Q[:, :] *= 0.01
self.kf.x[:7] = bbox3D.reshape((7, 1))
self.time_since_update = 0
self.id = KalmanBoxTracker.count
self.create_stamp = info[0]
KalmanBoxTracker.count += 1
self.history = []
self.hits = 1 # number of total hits including the first detection
# one update, one hit
# self.hit_streak = 1 # number of continuing hit considering the first detection
self.first_continuing_hit = 1
self.still_first = True
self.age = 0
self.info = info # other info associated
self.type_history = [info[1]]
self.color_history = [info[-1]]
self.license_plate_number_history = [info[-2]]
self.name_history = [info[-3]]
last_x_y = [self.kf.x[0][0], self.kf.x[1][0]]
self.last_x_y_list = [last_x_y]
self.blind_stay = 0
def correct_angle(self, bbox3D):
"""
correct the angles of self.kf.x and bbox3d
"""
t = time.time()
points = self.last_x_y_list[:]
points = np.array(points)
#points = np.squeeze(points, axis=(2,))
x_list = points[:, 0]
y_list = points[:, 1]
if len(points) >= NUM_FRAME and (abs(x_list[-1] - x_list[0]) > DIST_THRED or abs(y_list[-1] - y_list[0]) > DIST_THRED):
center_rot_y = center_rot_y_f(points)
rot_y = bbox3D[3]
if rot_y > np.pi: rot_y -= int((rot_y + np.pi)/(2*np.pi)) * np.pi * 2
if rot_y < -np.pi: rot_y += int((np.pi - rot_y)/(2*np.pi)) * np.pi * 2
if abs(center_rot_y - rot_y) > np.pi / 2.0 and abs(
center_rot_y - rot_y) < np.pi * 3 / 2.0: # if the angle of two theta is not acute angle
rot_y += np.pi
if rot_y > np.pi: rot_y -= np.pi * 2 # make the theta still in the range
if rot_y < -np.pi: rot_y += np.pi * 2
#bbox3D[3] = rot_y
bbox3D[3] = center_rot_y
t3_0 = time.time() - t
t = time.time()
self.time_since_update = 0
self.history = []
self.hits += 1
if self.still_first:
self.first_continuing_hit += 1 # number of continuing hits the first time
######################### orientation correction
if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
new_theta = bbox3D[3]
if new_theta >= np.pi: new_theta -= np.pi * 2 # make the theta still in the range
if new_theta < -np.pi: new_theta += np.pi * 2
bbox3D[3] = new_theta
predicted_theta = self.kf.x[3]
if abs(new_theta - predicted_theta) > np.pi / 2.0 and abs(
new_theta - predicted_theta) < np.pi * 3 / 2.0: # if the angle of two theta is not acute angle
self.kf.x[3] += np.pi
if self.kf.x[3] > np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
# now the angle is acute: < 90 or > 270, convert the case of > 270 to < 90
if abs(new_theta - self.kf.x[3]) >= np.pi * 3 / 2.0:
if new_theta > 0:
self.kf.x[3] += np.pi * 2
else:
self.kf.x[3] -= np.pi * 2
t3_1 = time.time() - t
return bbox3D, t3_0, t3_1
def assign_info(self, info):
if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
self.info = info
def update_single(self, bbox3D, info):
"""
Updates the state vector with observed bbox.
"""
# 1. correct_angle
bbox3D, t3_0, t3_1 = self.correct_angle(bbox3D)
# TIMING PART 2
s = time.time()
self.kf.update(bbox3D)
t3_2 = time.time() - s
# 3. assign_info
self.assign_info(info)
return t3_0, t3_1, t3_2
def predict(self):
"""
Advances the state vector and returns the predicted bounding box estimate.
"""
if self.time_since_update == 0:
last_x_y = [self.kf.x[0][0], self.kf.x[1][0]]
self.last_x_y_list.append(last_x_y)
if len(self.last_x_y_list) > NUM_FRAME:
del(self.last_x_y_list[0])
self.kf.predict()
if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
self.age += 1
if (self.time_since_update > 0):
# self.hit_streak = 0
self.still_first = False
self.time_since_update += 1
self.history.append(self.kf.x)
return self.kf.x
def get_state(self):
"""
Returns the current bounding box estimate.
"""
return self.kf.x[:7].reshape((7,))
def get_orientation(self):
return self.kf.x[7:].reshape((3,))
import numpy as np
from gpu_helpers import iouhelper
import time
""" CASE 1
boxes_a = np.array([[29.314 ,-0.861 ,-5.335 ,4.368 ,2.089 ,1.549 ,3.205 ,],
[18.732 ,-0.273 ,-5.352 ,4.340 ,1.949 ,1.570 ,3.187 ,],
[17.760 ,5.222 ,-5.467 ,1.588 ,0.623 ,1.451 ,4.944 ,],
[6.391 ,-4.806 ,-5.074 ,4.143 ,1.735 ,1.475 ,1.430 ,],
[74.945 ,-0.090 ,-5.449 ,4.267 ,1.908 ,1.509 ,3.171 ,]])
boxes_b = np.array([[29.866 ,-0.939 ,-5.386 ,4.407 ,2.085 ,1.569 ,3.139 ,],
[19.093 ,-0.195 ,-5.399 ,4.500 ,2.005 ,1.556 ,-3.135 ,],
[6.201 ,-5.699 ,-5.191 ,4.357 ,1.753 ,1.467 ,1.500 ,],
[17.754 ,5.237 ,-5.487 ,1.601 ,0.637 ,1.461 ,-1.378 ,],
[75.868 ,-0.130 ,-5.503 ,4.398 ,1.942 ,1.583 ,-3.103 ,],
])
GT_IOU=[ 0.663, 0.000, 0.000, 0.000, 0.000, 0.000, 0.731, 0.000,
0.000 ,0.000, 0.000, 0.000, 0.000, 0.916 ,0.000, 0.000 ,
0.000 ,0.206, 0.000, 0.000, 0.000, 0.000 ,0.000, 0.000 ,
0.584]
boxes_a = np.array(
)
boxes_b = np.array(
)
"""
boxes_a = np.array(
[[19.872 ,0.187 ,-5.328 ,4.441 ,1.946 ,1.563 ,2.889 ,],
[8.595 ,-18.317 ,-5.176 ,4.383 ,1.849 ,1.500 ,1.546 ,],
[8.332 ,27.013 ,-5.467 ,4.327 ,1.851 ,1.672 ,1.549 ,],
[2.256 ,-35.319 ,-4.886 ,4.322 ,1.954 ,1.544 ,1.555 ,],
[8.548 ,-45.319 ,-4.808 ,4.232 ,1.894 ,1.559 ,1.511 ,],
[14.450 ,-0.980 ,-5.244 ,1.639 ,0.622 ,1.389 ,1.622 ,],
[4.553 ,-51.571 ,-4.589 ,4.390 ,2.075 ,1.679 ,1.519 ,],
[2.559 ,-18.566 ,-4.887 ,4.136 ,1.816 ,1.525 ,1.521 ,],
[16.029 ,10.609 ,-5.247 ,1.634 ,0.669 ,1.606 ,1.565 ,],
[6.779 ,2.571 ,-5.271 ,4.334 ,1.721 ,1.534 ,1.533 ,],
[14.383 ,11.818 ,-5.430 ,1.682 ,0.628 ,1.522 ,1.625 ,],
[7.740 ,48.839 ,-5.484 ,4.227 ,1.742 ,1.514 ,1.512 ,],
]
)
boxes_b = np.array(
[
[7.766 ,-48.235 ,-4.698 ,3.758 ,1.650 ,1.541 ,2.182 ,],
[12.737 ,-4.107 ,-5.239 ,2.009 ,0.846 ,1.422 ,-1.598 ,],
[8.312 ,6.419 ,-5.341 ,4.360 ,1.793 ,1.561 ,-1.548 ,],
[13.368 ,19.624 ,-5.598 ,2.538 ,1.004 ,1.512 ,1.543 ,],
[20.009 ,35.247 ,-5.607 ,3.352 ,1.510 ,1.605 ,-1.623 ,],
[9.319 ,7.766 ,-5.225 ,4.291 ,1.791 ,1.541 ,1.907 ,],
[-0.122 ,-32.848 ,-4.968 ,4.396 ,1.995 ,1.564 ,1.533 ,],
[7.137 ,80.612 ,-5.592 ,4.291 ,1.748 ,1.510 ,1.434 ,],
[19.960 ,0.169 ,-5.338 ,4.382 ,1.906 ,1.520 ,2.921 ,],
[8.536 ,-47.138 ,-4.758 ,4.233 ,1.967 ,1.544 ,1.553 ,],
[8.533 ,-20.912 ,-5.101 ,4.391 ,1.874 ,1.510 ,1.538 ,],
[2.499 ,-21.261 ,-4.893 ,4.224 ,1.892 ,1.506 ,1.498 ,],
]
)
GT_IOU="""\n[
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.895 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000\n
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000\n
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000\n
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000\n
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000\n
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
]"""
# [x,y,z,l,w,h,thetha]
#a = np.random.rand(19, 7)
nb =3
#boxes_a = np.random.rand(nb, 7).astype(np.float32)
#boxes_b = np.random.rand(nb, 7).astype(np.float32)
rt = 1
m, n = len(boxes_a), len(boxes_b)
###########################################################################3 gpu iou
# [x,y,z,l,w,h,thetha]
"""
bo = np.zeros((m, n)).astype(np.float32)
# height overlap
boxes_a_height_max = (boxes_a[:, 2] + boxes_a[:, 5] / 2).reshape(-1, 1)
boxes_a_height_min = (boxes_a[:, 2] - boxes_a[:, 5] / 2).reshape(-1, 1)
boxes_b_height_max = (boxes_b[:, 2] + boxes_b[:, 5] / 2).reshape(1, -1)
boxes_b_height_min = (boxes_b[:, 2] - boxes_b[:, 5] / 2).reshape(1, -1)
# bev overlap
max_of_min = np.maximum(boxes_a_height_min, boxes_b_height_min)
min_of_max = np.minimum(boxes_a_height_max, boxes_b_height_max)
overlaps_h = np.maximum(min_of_max - max_of_min, 0)
#print "boxes_a_height_max: ",boxes_a_height_max.shape, "boxes_b_height_max.shape: ", boxes_b_height_max.shape
#print "max_of_min.shape: ", max_of_min.shape, "min_of_max.shape: ", min_of_max.shape
bev_overlap(m, boxes_a, n, boxes_b, bo)
o3d = bo * overlaps_h # 3 * 3
#print "o3d.shape: ", o3d.shape
vol_a = (boxes_a[:, 3] * boxes_a[:, 4] * boxes_a[:, 5]).reshape(-1, 1)
vol_b = (boxes_b[:, 3] * boxes_b[:, 4] * boxes_b[:, 5]).reshape(1, -1)
res = o3d / np.maximum(1e-6, vol_a + vol_b - o3d)
#print "self gpu : ", res.shape
"""
a = time.time()
for _ in range(rt):
res = iouhelper(boxes_a,boxes_b)
print "gpu time : ", time.time() - a
###################################################3 corner version
from bbox_utils import convert_3dbox_to_8corner, iou3d
reorder = [0, 1, 2, 6,3, 4, 5]
# [x,y,z,theta,l,w,h]
dets = boxes_a[:, reorder]
trks = boxes_b[:, reorder]
dets = [convert_3dbox_to_8corner(det_tmp) for det_tmp in dets]
dets = np.stack(dets, axis=0)
trks = [convert_3dbox_to_8corner(trk_tmp) for trk_tmp in trks]
trks = np.stack(trks, axis=0)
iou_matrix = np.zeros((len(dets), len(trks)), dtype=np.float32)
hm = np.zeros((len(dets), len(trks)), dtype=np.float32)
bo_old = np.zeros((len(dets), len(trks)), dtype=np.float32)
a = time.time()
for _ in range(rt):
for d, det in enumerate(dets):
for t, trk in enumerate(trks):
iou_matrix[d, t] = iou3d(det, trk)[0]
print "old time: " ,time.time() - a
##################################################### log
print "old iou : ", iou_matrix.shape
print "self res: ############################### self res:\n", res
print "old res: ############################### old res:\n ", iou_matrix
#GT_IOU = np.array(GT_IOU).reshape(m,n)
print "GT res: ###############################:\n", GT_IOU
"""
cm = res == iou_matrix
print "compare: ", cm.all()
#print "self bo \n", bo
print "old bo \n", bo_old
#print "self h: \n", overlaps_h
print "old: h\n", hm
cmh = overlaps_h == hm
#print "compare h: ", cmh.all()
"""
# Author: Xinshuo Weng
# email: xinshuo.weng@gmail.com
import yaml
from easydict import EasyDict as edict
def Config(filename):
listfile1 = open(filename, 'r')
listfile2 = open(filename, 'r')
cfg = edict(yaml.safe_load(listfile1))
settings_show = listfile2.read().splitlines()
listfile1.close()
listfile2.close()
return cfg, settings_show
#coding:utf-8
import sys
import math
#sys.path.append("./venv/Lib")
import gaussian
import numpy as np
def wgs84_angle_cal(lat1,lon1,lat2,lon2):
p1 = np.array([lat1, lon1])
p11 = gaussian.Convert_v2(p1)
#print(gaussian.Convert_v2(p1))
p2 = np.array([lat2, lon2])
p22 = gaussian.Convert_v2(p2)
#print(gaussian.Convert_v2(p2))
x = abs(p22[0] - p11[0])
y = abs(p22[1] - p11[1])
#print(math.sqrt(x * x + y * y))
tmp_angle = math.atan(y / x) * 57.29578 if x != 0 else 0.0 # the x == 0 branches below handle this case
#print(tmp_angle)
if p22[0] > p11[0] and p22[1] > p11[1]:
angle = 90 - tmp_angle
elif p22[0] > p11[0] and p22[1] < p11[1]:
angle = 90 + tmp_angle
elif p22[0] < p11[0] and p22[1] > p11[1]:
angle = 270 + tmp_angle
elif p22[0] < p11[0] and p22[1] < p11[1]:
angle = 270 - tmp_angle
elif p22[0] == p11[0] and p22[1] < p11[1]:
angle = 180
elif p22[0] < p11[0] and p22[1] == p11[1]:
angle = 270
elif p22[0] > p11[0] and p22[1] == p11[1]:
angle = 90
else:
angle = 0
return angle
def xy_angle_cal(x1,y1,x2,y2):
x = abs(x2 - x1)
y = abs(y2 - y1)
#print(math.sqrt(x * x + y * y))
tmp_angle = math.atan(y / x) * 57.29578 if x != 0 else 0.0 # the x2 == x1 branches below handle this case
#print(tmp_angle)
if x2 > x1 and y2 > y1:
angle = 90 - tmp_angle
elif x2 > x1 and y2 < y1:
angle = 90 + tmp_angle
elif x2 < x1 and y2 > y1:
angle = 270 + tmp_angle
elif x2 < x1 and y2 < y1:
angle = 270 - tmp_angle
elif x2 == x1 and y2 < y1:
angle = 180
elif x2 < x1 and y2 == y1:
angle = 270
elif x2 > x1 and y2 == y1:
angle = 90
else:
angle = 0
return angle
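# Sanity-check sketch (hypothetical values): headings are measured clockwise
# from the +y axis, so (0,0)->(1,1) is 45 deg and (0,0)->(1,0) is 90 deg.
def _xy_angle_example():
    assert abs(xy_angle_cal(0.0, 0.0, 1.0, 1.0) - 45.0) < 1e-3
    assert abs(xy_angle_cal(0.0, 0.0, 1.0, 0.0) - 90.0) < 1e-6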
def car_yaw_cal(base_angle,car_yaw):
tmp_angle = 0.0
car_angle = car_yaw / math.pi * 180.0
if car_angle >= 0:
tmp_angle = base_angle - car_angle
else:
tmp_angle = base_angle + abs(car_angle)
return (tmp_angle+360.0)%360.0
def compute_yaw(car_yaw_in_lidar):
'''
lat1 = 31.2808039
lon1 = 121.1876594
p1 = np.array([lat1, lon1])
p11 = gaussian.Convert_v2(p1)
lidar_x = p11[0]
lidar_y = p11[1]
R = [[6.61568761e-01, -7.49847829e-01, 7.42486399e-03],
[7.49874949e-01, 6.61577821e-01, -1.51043758e-03],
[-3.77952680e-03, 6.56697899e-03, 9.99971211e-01]]
#take two points on the x axis to calibrate the absolute heading of the lidar x axis
tmp_loc1 = [30,0,0]
tmp_loc2 = [50,0,0]
tmp_xyz1 = np.dot(R,tmp_loc1)
tmp_xyz2 = np.dot(R, tmp_loc2)
'''
#compute the absolute heading of the lidar x axis
#lidar_x_angle = (xy_angle_cal(tmp_xyz1[0],tmp_xyz1[1],tmp_xyz2[0],tmp_xyz2[1]) + 90.0)%360.0
lidar_x_angle = 88.35201485622395
#print("ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ the lidar_x_angle is:",lidar_x_angle)
#convert the heading in the local coordinate frame to an absolute heading
yaw = car_yaw_cal(lidar_x_angle,car_yaw_in_lidar)
#print("the car yaw is:",yaw)
return yaw
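# Hypothetical example: with the hard-coded lidar x-axis heading above
# (88.35201485622395 deg), a target pointing along the lidar +x axis
# (yaw 0 in the lidar frame) gets exactly that absolute heading.
def _compute_yaw_example():
    return compute_yaw(0.0)  # ~88.352 deg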
if __name__ == "__main__":
lat1 = 31.2840452
lon1 = 121.1597165
p1 = np.array([lat1, lon1])
p11 = gaussian.Convert_v2(p1)
lidar_x = p11[0]
lidar_y = p11[1]
R = [[-0.330806583166, -0.941249847412, 0.067936882377],
[0.943696200848, -0.329787790775, 0.026026830077],
[-0.002092991956, 0.072721630335, 0.997349977493]]
#take two points on the x axis to calibrate the absolute heading of the lidar x axis
tmp_loc1 = [30,0,0]
tmp_loc2 = [50,0,0]
tmp_xyz1 = np.dot(R,tmp_loc1)
tmp_xyz2 = np.dot(R, tmp_loc2)
#compute the absolute heading of the lidar x axis
lidar_x_angle = (xy_angle_cal(tmp_xyz1[0],tmp_xyz1[1],tmp_xyz2[0],tmp_xyz2[1]) + 90.0)%360.0
print(lidar_x_angle)
import math
import numpy as np
import wgs84
from rotations import degree2rad, rad2degree
ZoneOffset = 1e6
ZoneWidth = 3
EastOffset = 500000.0
def Convert_v2(inLonLat): # BL --> XY
Zone = (int)((inLonLat[1] + 1.5) / ZoneWidth)
L0 = Zone * ZoneWidth
dL = inLonLat[1] - L0
dLRadian = degree2rad(dL)
latRadian = degree2rad(inLonLat[0])
X = wgs84.GetX(latRadian)
N = wgs84.GetN(latRadian)
t = math.tan(latRadian)
t2 = t * t
sinB = math.sin(latRadian)
cosB = math.cos(latRadian)
ita2 = wgs84.ep2 * cosB * cosB
temp = dLRadian * cosB
temp2 = temp * temp
yPart1 = 0.5 * N * temp * temp * t
yPart2 = N * t * (5 - t2 + 9 * ita2 + 4 * ita2 * ita2) * temp2 * temp2 / 24
yPart3 = temp2 * temp2 * temp2 * N * t * (61 - 58 * t2 + t2 * t2) / 720
y = X + yPart1 + yPart2 + yPart3
xPart1 = N * temp
xPart2 = (1 - t2 + ita2) * N * temp * temp2 / 6
xPart3 = (5 - 18 * t2 + t2 * t2 + 14 * ita2 - 58 * ita2 * t2) * N * temp * temp2 * temp2 / 120
x = Zone * ZoneOffset + EastOffset + xPart1 + xPart2 + xPart3
projPt = np.array([x, y])
return projPt
def Inverse_v2(inXY): # XY --> BL
Zone = (int)(inXY[0] / ZoneOffset)
L0 = Zone * ZoneWidth
L0_radian = degree2rad(L0)
X0 = Zone * ZoneOffset + EastOffset
Y0 = 0
x = inXY[0] - X0
y = inXY[1] - Y0
Br = wgs84.GetLatByX(y)#
cosB = math.cos(Br)
secB = 1 / cosB
ita2 = wgs84.ep2 * cosB * cosB
t = math.tan(Br)
t2 = t * t
V = wgs84.GetV(Br)
V2 = V * V
N = wgs84.c / V
M = N/V/V
D = x/N
tmp3 = (1 + 2 * t2 + ita2) * D * D * D / 6
tmp5 = (5 + 28 * t2 + 24 * t2 * t2 + 6 * ita2 + 8 * ita2 * t2) * D * D * D * D * D / 120
l = secB * (D - tmp3 + tmp5)
L_radian = L0_radian + l
tmp2 = D * D / 2
tmp4 = (5 + 3 * t2 + ita2 - 9 * ita2 * t2) * D * D * D * D / 24
tmp6 = (61 + 90 * t2 + 45 * t2 * t2) * D * D * D * D * D * D / 720
B_radian = Br - t * V2 * (tmp2 - tmp4 + tmp6)
B = rad2degree(B_radian)
L = rad2degree(L_radian)
outLonLat = np.array([B, L])
return outLonLat
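# Hypothetical round-trip check for the Gauss-Krueger conversion above:
# projecting with Convert_v2 and inverting with Inverse_v2 should reproduce
# the input lat/lon to sub-metre precision.
def _projection_roundtrip_example():
    bl = np.array([31.2808039, 121.1876594])  # lat, lon in degrees
    xy = Convert_v2(bl)                       # zone-prefixed easting, northing
    back = Inverse_v2(xy)                     # back to [lat, lon]
    return np.abs(back - bl).max()            # expected to be tiny (~1e-9 deg)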
import numpy as np
import math
import cv2
ZoneOffset = 1e6
ZoneWidth = 3
EastOffset = 500000.0
wgs84_ep2 = 0.0067394967407662064
wgs84_c = 6399593.6257536924
a0 = 6367449.1457686741
a2 = 32077.017223574985
a4 = 67.330398573595
a6 = 0.13188597734903185
def GetLatByX(X):
Bfi0 = X / a0
Bfi1 = 0
num = 1
minAngle = math.pi * 1e-9
minus_minAngle = 0 - minAngle
while (num == 1):
num = 0
sinB = math.sin(Bfi0)
sinB2 = sinB * sinB
cosB = math.cos(Bfi0)
FBfi = 0 - sinB * cosB *((a2 - a4 + a6) + sinB2 * (2 * a4 - 16 * a6 / 3) + sinB2 * sinB2 * a6 * 16 / 3)
Bfi1 = (X - FBfi) / a0
deltaB = Bfi1 - Bfi0
if deltaB < minus_minAngle or deltaB > minAngle:
num = 1
Bfi0 = Bfi1
Bf = Bfi1
return Bf
def GetV(lat):
cosB = math.cos(lat)
V = math.sqrt(1 + wgs84_ep2 * cosB * cosB)
return V
def degree2rad(angle=None, list=None):
if not (list is None):
newlist = np.zeros(len(list))
i = 0
for angle in list:
newlist[i] = angle * np.pi / 180.0
i += 1
return newlist
else:
return angle * np.pi / 180.0
def rad2degree(rad=None, list=None):
if not (list is None):
newlist = np.zeros(len(list))
i = 0
for rad in list:
newlist[i] = rad * 180.0 / np.pi
i += 1
return newlist
else:
return rad * 180.0 / np.pi
def Inverse_v2(inXY): # XY --> BL
Zone = (int)(inXY[0] / ZoneOffset)
L0 = Zone * ZoneWidth
L0_radian = degree2rad(L0)
X0 = Zone * ZoneOffset + EastOffset
Y0 = 0
x = inXY[0] - X0
y = inXY[1] - Y0
Br = GetLatByX(y)#
cosB = math.cos(Br)
secB = 1 / cosB
ita2 = wgs84_ep2 * cosB * cosB
t = math.tan(Br)
t2 = t * t
V = GetV(Br)
V2 = V * V
N = wgs84_c / V
M = N/V/V
D = x/N
tmp3 = (1 + 2 * t2 + ita2) * D * D * D / 6
tmp5 = (5 + 28 * t2 + 24 * t2 * t2 + 6 * ita2 + 8 * ita2 * t2) * D * D * D * D * D / 120
l = secB * (D - tmp3 + tmp5)
L_radian = L0_radian + l
tmp2 = D * D / 2
tmp4 = (5 + 3 * t2 + ita2 - 9 * ita2 * t2) * D * D * D * D / 24
tmp6 = (61 + 90 * t2 + 45 * t2 * t2) * D * D * D * D * D * D / 720
B_radian = Br - t * V2 * (tmp2 - tmp4 + tmp6)
B = rad2degree(B_radian)
L = rad2degree(L_radian)
outLonLat = np.array([B, L])
return outLonLat
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
def my_compute_box_3d_jit(corners_3d, center, heading, size, kitti2origin):
# P' = T * R * S * p
'''
x_corners = np.array([-1,1,1,-1,-1,1,1,-1])
y_corners = np.array([1,1,-1,-1,1,1,-1,-1])
z_corners = np.array([1,1,1,1,-1,-1,-1,-1])
tmp = np.vstack((x_corners, y_corners, z_corners)) / 2.0
corners_3d = np.ones([4,8])
corners_3d[:3,:] = tmp
'''
S = np.diag([size[0],size[1],size[2],1])
rot = rotz(heading)
Trans = np.zeros([4,4])
Trans[:3,:3] = rot
Trans[0,3] = center[0]
Trans[1,3] = center[1]
Trans[2,3] = center[2]
tmp4x4 = np.dot(S,corners_3d)
world_corners_3d = np.dot(Trans,tmp4x4)
'''
kitti2origin = [[-1.42763000e-05, 9.99987195e-01, -5.06064089e-03, -5.06064089e-03],
[-9.99984083e-01, 1.42763000e-05, 5.64201068e-03, 5.64201068e-03],
[5.64201068e-03, 5.06064089e-03, 9.99971279e-01, 9.99971279e-01],
[ 0 , 0 , 0 , 1 ]]
'''
world_corners_3d = np.dot(kitti2origin,world_corners_3d)
return np.transpose(world_corners_3d[:3,:])
def get_loc(boxes_3d, car_type, Trans, kitti2origin):
'''
Trans = np.array([[ 6.61568761e-01, -7.49847829e-01, 7.42486399e-03, 4.06130966e+07],
[ 7.49874949e-01, 6.61577821e-01, -1.51043758e-03, 3.46271626e+06],
[-3.77952680e-03, 6.56697899e-03, 9.99971211e-01, 1.87295623e+01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
'''
corners_3d_in = np.array(
[[ -0.5 , 0.5 , 0.5 , -0.5 , -0.5 , 0.5 , 0.5 , -0.5 , ],
[ 0.5 , 0.5 , -0.5 , -0.5 , 0.5 , 0.5 , -0.5 , -0.5 , ],
[ 0.5 , 0.5 , 0.5 , 0.5 , -0.5 , -0.5 , -0.5 , -0.5 , ],
[ 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , ]]
)
#corners_3d = my_compute_box_3d(boxes_3d[:3],boxes_3d[6],boxes_3d[3:6])
corners_3d = my_compute_box_3d_jit(corners_3d_in, boxes_3d[:3],boxes_3d[6],boxes_3d[3:6], kitti2origin)
#adjust bbox
#new_corners_3d = adjust_bbox(boxes_3d,corners_3d)
head_pnt = (corners_3d[1] + corners_3d[2]) / 2.0
tail_pnt = (corners_3d[0] + corners_3d[3]) / 2.0
inter_len = 0.413
total_len = 4.6
'''
if car_type == 3:
inter_len = 1.35
total_len = 12.42
if car_type == 5:
inter_len = 2.25
total_len = 4.5
'''
lidar_loc = (tail_pnt - head_pnt) * inter_len / (1.0*total_len) + head_pnt
tmp_loc = np.ones([4])
tmp_loc[:3] = lidar_loc[:3]
world_loc = np.dot(Trans,tmp_loc)
out_BL = Inverse_v2(world_loc[:2])
#center point transfer
tmp_center_loc = np.ones([4])
tmp_center_loc[:3] = (corners_3d[0] + corners_3d[6]) / 2.0
world_center_loc = np.dot(Trans,tmp_center_loc)
out_center_BL = Inverse_v2(world_center_loc[:2])
return lidar_loc, out_BL, out_center_BL
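# Usage sketch (assumed layout, matching the indexing above:
# boxes_3d = [cx, cy, cz, size_x, size_y, size_z, heading]):
#   lidar_loc, out_BL, out_center_BL = get_loc(boxes_3d, car_type, Trans, kitti2origin)
#   # lidar_loc: reference point in the lidar frame;
#   # out_BL / out_center_BL: [lat, lon] pairs from Inverse_v2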
def get_world_loc(boxes_3d, Trans, kitti2origin):
'''
Trans = np.array([[ 6.61568761e-01, -7.49847829e-01, 7.42486399e-03, 4.06130966e+07],
[ 7.49874949e-01, 6.61577821e-01, -1.51043758e-03, 3.46271626e+06],
[-3.77952680e-03, 6.56697899e-03, 9.99971211e-01, 1.87295623e+01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
'''
corners_3d_in = np.array(
[[ -0.5 , 0.5 , 0.5 , -0.5 , -0.5 , 0.5 , 0.5 , -0.5 , ],
[ 0.5 , 0.5 , -0.5 , -0.5 , 0.5 , 0.5 , -0.5 , -0.5 , ],
[ 0.5 , 0.5 , 0.5 , 0.5 , -0.5 , -0.5 , -0.5 , -0.5 , ],
[ 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , ]]
)
#corners_3d = my_compute_box_3d(boxes_3d[:3],boxes_3d[6],boxes_3d[3:6])
corners_3d = my_compute_box_3d_jit(corners_3d_in, boxes_3d[:3],boxes_3d[6],boxes_3d[3:6], kitti2origin)
#center point transfer
center_loc = np.ones([4])
center_loc[:3] = (corners_3d[0] + corners_3d[6]) / 2.0
world_center_loc = np.dot(Trans, center_loc)
return world_center_loc[:2]
def get_camera_world_loc(u, v, cam_trans, cam_intrinsics, cam_dist):
#undistort (u, v)
    # cv2.undistortPoints expects an N x 1 x 2 float array; P=cam_intrinsics
    # re-projects the normalized result back to pixel coordinates (the fifth
    # positional argument is the rectification R, not P)
    pts = np.array([[[u, v]]], dtype=np.float64)
    undistort_pts = cv2.undistortPoints(pts, cam_intrinsics, cam_dist, P=cam_intrinsics)
    u, v = undistort_pts[0][0][0], undistort_pts[0][0][1]
#(u, v) to (y, x)
loc_camera = np.array([u, v, 1])
xy = np.dot(cam_trans, loc_camera.T)
xy = xy / xy[2]
    # in-plane correction of pi / 90 rad (2 degrees), presumably a calibration term
    rot_xy = [[math.cos(math.pi / 90), -math.sin(math.pi / 90)],
              [math.sin(math.pi / 90), math.cos(math.pi / 90)]]
    xy = np.dot(rot_xy, xy[:2])
    # axis swap plus a fixed -3.14 offset, presumably empirical calibration
    return xy[1], -xy[0] - 3.14
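# Minimal self-test sketch with hypothetical calibration values; the real
# intrinsics, distortion coefficients and image-to-ground homography come
# from this node's configuration:
if __name__ == "__main__":
    K = np.array([[1000.0, 0.0, 960.0],
                  [0.0, 1000.0, 540.0],
                  [0.0, 0.0, 1.0]])
    dist = np.zeros(5)   # k1, k2, p1, p2, k3
    H = np.eye(3)        # placeholder homography
    print(get_camera_world_loc(960.0, 540.0, H, K, dist))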
# -*- coding:utf-8 -*-
import hashlib
import json
import traceback
import logging
import threading
import requests
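# get_current_millisecond() is called below but never defined or imported in
# this file; a minimal sketch of the assumed helper (epoch milliseconds):
import time
def get_current_millisecond():
    return int(time.time() * 1000)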
def upload_thread(msgs, ipc_code, url):
    threading.Thread(target=upload_data, args=[msgs, ipc_code, url]).start()
def upload_data(msgs, ipc_code, url):
request_data = {
"devNo": ipc_code,
"mecNo": ipc_code,
"objs": []
}
    for msg in msgs:
        request_data["objs"].append({
            "heading": msg.rot_y,
            "id": msg.obj_id,
            "lat": msg.Lat,
            "lon": msg.Long,
            "name": msg.name,
            "speed": msg.speed,      # assumption: carried on msg (an undefined `node` was referenced here)
            "time": get_current_millisecond(),
            "type": msg.obj_type     # same assumption as speed
        })
request_json_pure(url, request_data, http_timeout=1)
def request_json_pure(http_url, data, http_timeout=10):
"""
http json 协议传输 无校验
:param http_url:
:param data:
:param http_timeout:
:return:
"""
header_data = {
"Content-Type": "application/json",
'Connection': 'keep-alive'
}
data_json = json.dumps(data, ensure_ascii=False, separators=(',', ':'))
print(data_json)
s = requests.session()
response = s.post(http_url, data=data_json, headers=header_data, timeout=http_timeout)
result = response.text
print(result)
response.close()
result = json.loads(result)
return result
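# Usage sketch (hypothetical endpoint; every msg object is expected to expose
# rot_y, obj_id, Lat, Long, name, speed and obj_type):
#   upload_thread(msgs, ipc_code="ipc-001", url="http://127.0.0.1:8080/objs")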
import numpy as np
from gaussian import Convert_v2, Inverse_v2
from numba import jit
def my_compute_box_3d(center, heading, size):
# P' = T * R * S * p
x_corners = [-1,1,1,-1,-1,1,1,-1]
y_corners = [1,1,-1,-1,1,1,-1,-1]
z_corners = [1,1,1,1,-1,-1,-1,-1]
tmp = np.vstack([x_corners, y_corners, z_corners]) / 2.0
corners_3d = np.ones([4,8])
corners_3d[:3,:] = tmp
S = np.diag([size[0],size[1],size[2],1])
rot = rotz(heading)
Trans = np.zeros([4,4])
Trans[:3,:3] = rot
Trans[0,3] = center[0]
Trans[1,3] = center[1]
Trans[2,3] = center[2]
#Trans[2,3] = center[2] - size[2]/2
tmp4x4 = np.dot(S,corners_3d)
world_corners_3d = np.dot(Trans,tmp4x4)
"""
PREVIOUS:
kitti2origin = [[-0.001081 , 0.99956106, -0.02960616 ,-0.02960616],
[-0.99733779 , 0.001081 , 0.07291202 ,0.07291202],
[ 0.07291202 , 0.02960616 , 0.99689885 , 0.99689885],
[ 0. , 0. , 0. , 1. ]]
"""
kitti2origin = [[-0.0821106 , 0.99618338 ,-0.02960616, -0.02960616],
[-0.9941427 ,-0.07977548, 0.07291202 , 0.07291202],
[ 0.0702719 , 0.0354196 , 0.99689885, 0.99689885],
[ 0 , 0 , 0 , 1 ]]
kitti2origin = np.array(kitti2origin)
world_corners_3d = np.dot(kitti2origin,world_corners_3d)
return np.transpose(world_corners_3d[:3,:])
@jit
def my_compute_box_3d_jit(corners_3d, center, heading, size):
# P' = T * R * S * p
'''
x_corners = np.array([-1,1,1,-1,-1,1,1,-1])
y_corners = np.array([1,1,-1,-1,1,1,-1,-1])
z_corners = np.array([1,1,1,1,-1,-1,-1,-1])
tmp = np.vstack((x_corners, y_corners, z_corners)) / 2.0
corners_3d = np.ones([4,8])
corners_3d[:3,:] = tmp
'''
S = np.diag([size[0],size[1],size[2],1])
rot = rotz(heading)
Trans = np.zeros([4,4])
Trans[:3,:3] = rot
Trans[0,3] = center[0]
Trans[1,3] = center[1]
Trans[2,3] = center[2]
tmp4x4 = np.dot(S,corners_3d)
world_corners_3d = np.dot(Trans,tmp4x4)
kitti2origin = np.array([[-0.0821106 , 0.99618338 ,-0.02960616, -0.02960616],
[-0.9941427 ,-0.07977548, 0.07291202 , 0.07291202],
[ 0.0702719 , 0.0354196 , 0.99689885, 0.99689885],
[ 0 , 0 , 0 , 1 ]])
world_corners_3d = np.dot(kitti2origin,world_corners_3d)
return np.transpose(world_corners_3d[:3,:])
@jit
def rotz(t):
"""Rotation about the z-axis."""
c = np.cos(t)
s = np.sin(t)
return np.array([[c, -s, 0],
[s, c, 0],
[0, 0, 1]])
def get_T():
h = 19.34
rot = [[-0.330806583166, -0.941249847412, 0.067936882377],
[0.943696200848, -0.329787790775, 0.026026830077],
[-0.002092991956, 0.072721630335 ,0.997349977493]]
rot = np.array(rot)
BL = [31.2840452,121.1597165]
XY = Convert_v2(BL)
# print(XY)
Trans = np.zeros([4,4])
Trans[:3,:3] = rot
Trans[0,3] = XY[0]
Trans[1,3] = XY[1]
Trans[2,3] = h
Trans[3,3] = 1
return Trans
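# Usage sketch: Trans from get_T() maps homogeneous lidar points into Gauss
# plane coordinates, and Inverse_v2 then recovers latitude/longitude, which
# is exactly what get_loc below does:
#   world = np.dot(get_T(), np.array([x, y, z, 1.0]))
#   lat, lon = Inverse_v2(world[:2])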
def get_loc(boxes_3d, car_type):
Trans = np.array([[-3.30806583e-01, -9.41249847e-01 , 6.79368824e-02 , 4.06104317e+07],
[ 9.43696201e-01, -3.29787791e-01 , 2.60268301e-02 , 3.46304736e+06],
[-2.09299196e-03, 7.27216303e-02 , 9.97349977e-01 , 1.93400000e+01],
[ 0.00000000e+00, 0.00000000e+00 , 0.00000000e+00 , 1.00000000e+00]])
corners_3d_in = np.array(
[[ -0.5 , 0.5 , 0.5 , -0.5 , -0.5 , 0.5 , 0.5 , -0.5 , ],
[ 0.5 , 0.5 , -0.5 , -0.5 , 0.5 , 0.5 , -0.5 , -0.5 , ],
[ 0.5 , 0.5 , 0.5 , 0.5 , -0.5 , -0.5 , -0.5 , -0.5 , ],
[ 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , 1.0 , ]]
)
#corners_3d = my_compute_box_3d(boxes_3d[:3],boxes_3d[6],boxes_3d[3:6])
corners_3d = my_compute_box_3d_jit(corners_3d_in, boxes_3d[:3],boxes_3d[6],boxes_3d[3:6])
#adjust bbox
#new_corners_3d = adjust_bbox(boxes_3d,corners_3d)
head_pnt = (corners_3d[1] + corners_3d[2]) / 2.0
tail_pnt = (corners_3d[0] + corners_3d[3]) / 2.0
inter_len = 1.5
total_len = 6.0
if car_type == 3:
inter_len = 1.35
total_len = 12.42
if car_type == 5:
inter_len = 2.25
total_len = 4.5
if car_type == 1:
inter_len = 2.3
total_len = 4.6
lidar_loc = (tail_pnt - head_pnt) * inter_len / (1.0*total_len) + head_pnt
#print(head_pnt, lidar_loc, tail_pnt)
#Trans = get_T()
tmp_loc = np.ones([4])
tmp_loc[:3] = lidar_loc[:3]
world_loc = np.dot(Trans,tmp_loc)
#print(world_loc)
out_BL = Inverse_v2(world_loc[:2])
#center point transfer
# Trans = get_T()
tmp_center_loc = np.ones([4])
tmp_center_loc[:3] = boxes_3d[:3]
world_center_loc = np.dot(Trans,tmp_center_loc)
out_center_BL = Inverse_v2(world_center_loc[:2])
return lidar_loc, out_BL, out_center_BL
import math
import numpy as np
import wgs84
from rotations import degree2rad, rad2degree
from numba import jit
ZoneOffset = 1e6
ZoneWidth = 3
EastOffset = 500000.0
def Convert_v2(inLonLat): # BL --> XY
    Zone = int((inLonLat[1] + 1.5) / ZoneWidth)  # +1.5 = half the 3-degree zone width
L0 = Zone * ZoneWidth
dL = inLonLat[1] - L0
dLRadian = degree2rad(dL)
latRadian = degree2rad(inLonLat[0])
X = wgs84.GetX(latRadian)
N = wgs84.GetN(latRadian)
t = math.tan(latRadian)
t2 = t * t
sinB = math.sin(latRadian)
cosB = math.cos(latRadian)
ita2 = wgs84.ep2 * cosB * cosB
temp = dLRadian * cosB
temp2 = temp * temp
yPart1 = 0.5 * N * temp * temp * t
yPart2 = N * t * (5 - t2 + 9 * ita2 + 4 * ita2 * ita2) * temp2 * temp2 / 24
yPart3 = temp2 * temp2 * temp2 * N * t * (61 - 58 * t2 + t2 * t2) / 720
y = X + yPart1 + yPart2 + yPart3
xPart1 = N * temp
xPart2 = (1 - t2 + ita2) * N * temp * temp2 / 6
xPart3 = (5 - 18 * t2 + t2 * t2 + 14 * ita2 - 58 * ita2 * t2) * N * temp * temp2 * temp2 / 120
x = Zone * ZoneOffset + EastOffset + xPart1 + xPart2 + xPart3
projPt = np.array([x, y])
return projPt
@jit
def Inverse_v2(inXY): # XY --> BL
    Zone = int(inXY[0] / ZoneOffset)
L0 = Zone * ZoneWidth
L0_radian = degree2rad(L0)
X0 = Zone * ZoneOffset + EastOffset
Y0 = 0
x = inXY[0] - X0
y = inXY[1] - Y0
    Br = wgs84.GetLatByX(y)  # footpoint latitude from the northing
cosB = math.cos(Br)
secB = 1 / cosB
ita2 = wgs84.ep2 * cosB * cosB
t = math.tan(Br)
t2 = t * t
V = wgs84.GetV(Br)
V2 = V * V
N = wgs84.c / V
M = N/V/V
D = x/N
tmp3 = (1 + 2 * t2 + ita2) * D * D * D / 6
tmp5 = (5 + 28 * t2 + 24 * t2 * t2 + 6 * ita2 + 8 * ita2 * t2) * D * D * D * D * D / 120
l = secB * (D - tmp3 + tmp5)
L_radian = L0_radian + l
tmp2 = D * D / 2
tmp4 = (5 + 3 * t2 + ita2 - 9 * ita2 * t2) * D * D * D * D / 24
tmp6 = (61 + 90 * t2 + 45 * t2 * t2) * D * D * D * D * D * D / 720
B_radian = Br - t * V2 * (tmp2 - tmp4 + tmp6)
B = rad2degree(B_radian)
L = rad2degree(L_radian)
outLonLat = np.array([B, L])
return outLonLat
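# Round-trip sketch: Convert_v2 and Inverse_v2 are mutual inverses up to the
# truncation error of the series expansions (reference point borrowed from
# get_T elsewhere in this package):
if __name__ == "__main__":
    bl = np.array([31.2840452, 121.1597165])
    xy = Convert_v2(bl)
    print(bl, xy, Inverse_v2(xy))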
import numpy as np
from gaussian import Convert_v2, Inverse_v2
def get_T():
h = 19.34
rot = [[-0.330806583166, -0.941249847412, 0.067936882377],
[0.943696200848, -0.329787790775, 0.026026830077],
[-0.002092991956, 0.072721630335 ,0.997349977493]]
rot = np.array(rot)
BL = [31.2840452,121.1597165]
XY = Convert_v2(BL)
# print(XY)
Trans = np.zeros([4,4])
Trans[:3,:3] = rot
Trans[0,3] = XY[0]
Trans[1,3] = XY[1]
Trans[2,3] = h
Trans[3,3] = 1
return Trans
print(get_T())
import math
import numpy as np
from rotations import degree2rad, rad2degree
from numba import jit
a = 6378137.0
f = 0.003352810664
b = 6356752.3142499477
c = 6399593.6257536924
e2 = 0.006694379988651241
ep2 = 0.0067394967407662064
m0 = 6335439.3273023246
m2 = 63617.757378010137
m4 = 532.35180239277622
m6 = 4.1577261283373916
m8 = 0.031312573415813519
a0 = 6367449.1457686741
a2 = 32077.017223574985
a4 = 67.330398573595
a6 = 0.13188597734903185
a8 = 0.00024462947981104312
#class Wgs84:
def GetN(radB):
cosB = math.cos(radB)
V = math.sqrt(1 + ep2 * cosB * cosB)
N = c / V
return N
def GetX(radB):
sinB = math.sin(radB)
cosB = math.cos(radB)
al = a0 * radB
sc = sinB * cosB
ss = sinB * sinB
X = al - sc *((a2 - a4 + a6) + (2 * a4 - a6 * 16 / 3) * ss + a6 * 16 * ss * ss /3)
return X
@jit
def GetLatByX(X):
Bfi0 = X / a0
Bfi1 = 0
num = 1
minAngle = math.pi * 1e-9
menus_minAngle = 0 - minAngle
    # fixed-point iteration for the latitude whose meridian arc length is X
    while num == 1:
num = 0
sinB = math.sin(Bfi0)
sinB2 = sinB * sinB
cosB = math.cos(Bfi0)
FBfi = 0 - sinB * cosB *((a2 - a4 + a6) + sinB2 * (2 * a4 - 16 * a6 / 3) + sinB2 * sinB2 * a6 * 16 / 3)
Bfi1 = (X - FBfi) / a0
deltaB = Bfi1 - Bfi0
if deltaB < menus_minAngle or deltaB > minAngle:
num = 1
Bfi0 = Bfi1
Bf = Bfi1
return Bf
@jit
def GetV(lat):
cosB = math.cos(lat)
V = math.sqrt(1 + ep2 * cosB * cosB)
return V
def BLH2ECEF(ptBLH):
cosB = math.cos(degree2rad(ptBLH[0]))
sinB = math.sin(degree2rad(ptBLH[0]))
cosL = math.cos(degree2rad(ptBLH[1]))
sinL = math.sin(degree2rad(ptBLH[1]))
N = GetN(degree2rad(ptBLH[0]))
X = (N + ptBLH[2]) * cosB * cosL
Y = (N + ptBLH[2]) * cosB * sinL
Z = (N * (1 - e2) + ptBLH[2]) * sinB
ptXYZ = np.array([X, Y, Z])
return ptXYZ
def ECEF2BLH(ptXYZ):
L_radian = math.atan(ptXYZ[1] / ptXYZ[0])
if L_radian < 0:
L_radian += math.pi
L = rad2degree(L_radian)
sqrtXY = math.sqrt(ptXYZ[0] * ptXYZ[0] + ptXYZ[1] * ptXYZ[1])
t0 = ptXYZ[2] / sqrtXY
p = c * e2 / sqrtXY
k = 1+ep2
ti = t0
ti1 = 0
loop = 0
while loop < 10000:
loop += 1
ti1 = t0 + p * ti / math.sqrt( k + ti * ti)
if math.fabs(ti1 - ti) < 1e-12:
break
ti = ti1
B_radian = math.atan(ti1)
N = GetN(B_radian)
H = sqrtXY / math.cos(B_radian) - N
B = rad2degree(B_radian)
geopoint = np.array([B, L, H])
return geopoint
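# Round-trip sketch: BLH2ECEF followed by ECEF2BLH should reproduce the input
# to within the 1e-12 iteration tolerance used above:
#   blh = np.array([31.2840452, 121.1597165, 19.34])
#   blh_back = ECEF2BLH(BLH2ECEF(blh))   # approximately equal to blh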
def GetR_ENU2ECEF(radianL, radianB):
sinB = math.sin(radianB)
cosB = math.cos(radianB)
sinL = math.sin(radianL)
cosL = math.cos(radianL)
R4 = np.array([[-sinL, -sinB * cosL, cosB * cosL],
[cosL, -sinB * sinL, cosB * sinL],
[0, cosB, sinB]])
return R4
def GetAngleDif(distance):
angleRafian = distance / a
return rad2degree(angleRafian)
import math
import numpy as np
from rotations import degree2rad, rad2degree
a = 6378137.0
f = 0.003352810664
b = 6356752.3142499477
c = 6399593.6257536924
e2 = 0.006694379988651241
ep2 = 0.0067394967407662064
m0 = 6335439.3273023246
m2 = 63617.757378010137
m4 = 532.35180239277622
m6 = 4.1577261283373916
m8 = 0.031312573415813519
a0 = 6367449.1457686741
a2 = 32077.017223574985
a4 = 67.330398573595
a6 = 0.13188597734903185
a8 = 0.00024462947981104312
#class Wgs84:
def GetN(radB):
cosB = math.cos(radB)
V = math.sqrt(1 + ep2 * cosB * cosB)
N = c / V
return N
def GetX(radB):
sinB = math.sin(radB)
cosB = math.cos(radB)
al = a0 * radB
sc = sinB * cosB
ss = sinB * sinB
X = al - sc *((a2 - a4 + a6) + (2 * a4 - a6 * 16 / 3) * ss + a6 * 16 * ss * ss /3)
return X
def GetLatByX(X):
Bfi0 = X / a0
Bfi1 = 0
num = 1
minAngle = math.pi * 1e-9
menus_minAngle = 0 - minAngle
    # fixed-point iteration for the latitude whose meridian arc length is X
    while num == 1:
num = 0
sinB = math.sin(Bfi0)
sinB2 = sinB * sinB
cosB = math.cos(Bfi0)
FBfi = 0 - sinB * cosB *((a2 - a4 + a6) + sinB2 * (2 * a4 - 16 * a6 / 3) + sinB2 * sinB2 * a6 * 16 / 3)
Bfi1 = (X - FBfi) / a0
deltaB = Bfi1 - Bfi0
if deltaB < menus_minAngle or deltaB > minAngle:
num = 1
Bfi0 = Bfi1
Bf = Bfi1
return Bf
def GetV(lat):
cosB = math.cos(lat)
V = math.sqrt(1 + ep2 * cosB * cosB)
return V
def BLH2ECEF(ptBLH):
cosB = math.cos(degree2rad(ptBLH[0]))
sinB = math.sin(degree2rad(ptBLH[0]))
cosL = math.cos(degree2rad(ptBLH[1]))
sinL = math.sin(degree2rad(ptBLH[1]))
N = GetN(degree2rad(ptBLH[0]))
X = (N + ptBLH[2]) * cosB * cosL
Y = (N + ptBLH[2]) * cosB * sinL
Z = (N * (1 - e2) + ptBLH[2]) * sinB
ptXYZ = np.array([X, Y, Z])
return ptXYZ
def ECEF2BLH(ptXYZ):
L_radian = math.atan(ptXYZ[1] / ptXYZ[0])
if L_radian < 0:
L_radian += math.pi
L = rad2degree(L_radian)
sqrtXY = math.sqrt(ptXYZ[0] * ptXYZ[0] + ptXYZ[1] * ptXYZ[1])
t0 = ptXYZ[2] / sqrtXY
p = c * e2 / sqrtXY
k = 1+ep2
ti = t0
ti1 = 0
loop = 0
while loop < 10000:
loop += 1
ti1 = t0 + p * ti / math.sqrt( k + ti * ti)
if math.fabs(ti1 - ti) < 1e-12:
break
ti = ti1
B_radian = math.atan(ti1)
N = GetN(B_radian)
H = sqrtXY / math.cos(B_radian) - N
B = rad2degree(B_radian)
geopoint = np.array([B, L, H])
return geopoint
def GetR_ENU2ECEF(radianL, radianB):
sinB = math.sin(radianB)
cosB = math.cos(radianB)
sinL = math.sin(radianL)
cosL = math.cos(radianL)
R4 = np.array([[-sinL, -sinB * cosL, cosB * cosL],
[cosL, -sinB * sinL, cosB * sinL],
[0, cosB, sinB]])
return R4
def GetAngleDif(distance):
angleRafian = distance / a
return rad2degree(angleRafian)
#include "config.hpp"