Commit c85a7034 authored by oscar

Commit the .py file

parent 47642d18
# This file:
# 1. loads point cloud data from the dataset
# 2. removes the ground points from the point cloud
# 3. extracts clusters from the remaining points
import numpy as np
import os
import struct
from sklearn import cluster, datasets, mixture
from itertools import cycle, islice
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time
import random
import open3d
import math
from scipy.spatial import KDTree

def augment(xyzs):
    # Append a homogeneous coordinate: [x, y, z] -> [x, y, z, 1]
    axyz = np.ones((len(xyzs), 4))
    # axyz[:, :3] = xyzs  # vectorized form; the loop below also accepts N*4 input
    xyzs_len = len(xyzs)
    for ii in range(xyzs_len):
        axyz[ii, :3] = xyzs[ii][:3]
    return axyz

def estimate(xyzs):
    # Fit a plane through the first three sampled points via SVD:
    # the last right-singular vector gives [a, b, c, d] with ax + by + cz + d = 0
    axyz = augment(xyzs[:3])
    return np.linalg.svd(axyz)[-1][-1, :]


def is_inlier(coeffs, xyz, threshold):
    # Point-to-plane distance compared against the threshold
    err = np.abs(coeffs.dot(augment([xyz]).T)) / ((coeffs[0]**2 + coeffs[1]**2 + coeffs[2]**2) ** 0.5)
    return err < threshold

def run_ransac(data, estimate, is_inlier, sample_size, goal_inliers, max_iterations, stop_at_goal=True, random_seed=None):
    best_ic = 0
    best_model = None
    best_seg = None
    random.seed(random_seed)
    # random.sample cannot deal with "data" being a numpy array
    data = list(data)
    for i in range(max_iterations):
        # seg marks every point as non-ground (1) until it is found to be an inlier (0)
        seg = np.ones((len(data)), dtype=np.uint8)
        s = random.sample(data, int(sample_size))
        m = estimate(s)
        ic = 0
        for j in range(len(data)):
            if is_inlier(m, data[j]):
                seg[j] = 0
                ic += 1
        print(s)
        print('estimate:', m)
        print('# inliers:', ic)
        if ic > best_ic:
            best_ic = ic
            best_model = m
            best_seg = seg
            if ic > goal_inliers and stop_at_goal:
                break
    print('took iterations:', i + 1, 'best model:', best_model, 'explains:', best_ic)
    return best_model, best_ic, best_seg

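# A minimal usage sketch (not in the original commit): shows how run_ransac plugs in
# estimate() and is_inlier() to fit a plane to synthetic noisy points. The synthetic
# data, the 0.2 m threshold and the iteration budget are illustrative assumptions.
def _demo_run_ransac():
    rng = np.random.default_rng(0)
    xy = rng.uniform(-10, 10, size=(500, 2))
    z = 0.05 * rng.standard_normal(500)          # points roughly on the plane z = 0
    pts = np.column_stack([xy, z])
    model, n_inliers, seg = run_ransac(
        pts, estimate, lambda m, p: is_inlier(m, p, 0.2),
        sample_size=3, goal_inliers=len(pts) * 0.8, max_iterations=20)
    print('plane [a, b, c, d]:', model, 'inliers:', n_inliers)
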
def define_area(point1, point2, point3):
    """
    Normal vector: n = {A, B, C}
    A point in space: p = {x0, y0, z0}
    Point-normal form: A(x-x0) + B(y-y0) + C(z-z0) = Ax + By + Cz - (Ax0 + By0 + Cz0)
    https://wenku.baidu.com/view/12b44129af45b307e87197e1.html
    :param point1:
    :param point2:
    :param point3:
    :return: (Ax, By, Cz, D) such that Ax + By + Cz + D = 0
    """
    point1 = np.asarray(point1)
    point2 = np.asarray(point2)
    point3 = np.asarray(point3)
    AB = np.asmatrix(point2 - point1)
    AC = np.asmatrix(point3 - point1)
    N = np.cross(AB, AC)  # cross product gives the plane normal
    # Ax + By + Cz
    Ax = N[0, 0]
    By = N[0, 1]
    Cz = N[0, 2]
    D = -(Ax * point1[0] + By * point1[1] + Cz * point1[2])
    return Ax, By, Cz, D


def point2area_distance(point1, point2, point3, point4):
    """
    :param point1: first point on the plane (3-D)
    :param point2: second point on the plane
    :param point3: third point on the plane
    :param point4: query point
    :return: distance from point4 to the plane through point1/2/3
    """
    Ax, By, Cz, D = define_area(point1, point2, point3)
    mod_d = Ax * point4[0] + By * point4[1] + Cz * point4[2] + D
    mod_area = np.sqrt(np.sum(np.square([Ax, By, Cz])))
    d = abs(mod_d) / mod_area
    return d

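# A small worked example (not in the original commit), assuming the two functions above:
# the plane through (0,0,0), (1,0,0), (0,1,0) is z = 0, so the point (0, 0, 5)
# should be exactly 5.0 away from it.
def _demo_point2area_distance():
    plane = define_area([0, 0, 0], [1, 0, 0], [0, 1, 0])
    print('plane (A, B, C, D):', plane)   # expected (0, 0, 1, 0)
    d = point2area_distance([0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 5])
    print('distance:', d)                 # expected 5.0
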
# Function: read a point cloud from a KITTI-style .bin file
# Input:
#     path: file path
# Output:
#     point cloud array
def read_ww_bin(path):
    '''
    :param path:
    :return: N*4 point array (x, y, z, intensity)
    '''
    points = np.fromfile(
        path, dtype=np.float32,
        count=-1).reshape([-1, 4])
    return points

# Function: remove the ground points from a point cloud
# Input:
#     data: one full frame of points
# Output:
#     segmented_cloud: the point cloud with the ground points removed
def ground_segmentation(data):
    # Assignment 1
    # --- begin ---
    print(data.shape)
    n = len(data)
    max_iterations = 100
    goal_inliers = n * 0.8
    start_time = time.time()
    m, b, seg = run_ransac(data, estimate, lambda x, y: is_inlier(x, y, 0.2), 3, goal_inliers, max_iterations)
    a, b, c, d = m
    end_time = time.time()
    print('ransac time: ', end_time - start_time)
    tmp_ind = np.where(seg > 0)          # indices of non-ground points
    print('non-ground points:', len(tmp_ind[0]))
    segmented_cloud = data[tmp_ind]
    # --- end ---
    # Visualize ground (red) vs. non-ground (blue) points
    axis_pcd = open3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0])
    colors_base = [[1, 0, 0], [0, 0, 1]]
    print(seg)
    colors = [colors_base[tmp] for tmp in list(seg)]
    test_pcd = open3d.geometry.PointCloud()
    test_pcd.points = open3d.utility.Vector3dVector(data[:, :3])
    test_pcd.colors = open3d.utility.Vector3dVector(colors)
    open3d.visualization.draw_geometries([test_pcd] + [axis_pcd], window_name="Open3D2")
    print('origin data points num:', data.shape[0])
    print('segmented data points num:', segmented_cloud.shape[0])
    return segmented_cloud, m

def dist(t1, t2):
    dis = np.linalg.norm(t1 - t2)
    return dis

# Function: voxel-grid filtering of a point cloud
# Input:
#     point_cloud: input point cloud
#     leaf_size: voxel size
def voxel_filter(point_cloud, leaf_size):
    filtered_points = []
    # Assignment 3
    # --- begin ---
    pc = np.array(point_cloud)[:, :3]  # convert to a numpy array
    min_arr = pc.min(0)
    max_arr = pc.max(0)
    Dx = (max_arr[0] - min_arr[0]) / leaf_size
    Dy = (max_arr[1] - min_arr[1]) / leaf_size
    Dz = (max_arr[2] - min_arr[2]) / leaf_size
    pnt_num = pc.shape[0]
    h_arr = []
    for ii in range(pnt_num):
        hx = np.floor((pc[ii][0] - min_arr[0]) / leaf_size)
        hy = np.floor((pc[ii][1] - min_arr[1]) / leaf_size)
        hz = np.floor((pc[ii][2] - min_arr[2]) / leaf_size)
        h = hx + hy * Dx + hz * Dx * Dy
        h_arr.append(h)
    # sort the points by voxel index
    h_arr = np.array(h_arr)
    v_ind = np.argsort(h_arr)
    h_sort = h_arr[v_ind]
    voxel_ind_beg = 0
    for ii in range(len(h_sort) - 1):
        if h_sort[ii] == h_sort[ii + 1]:
            continue
        else:
            point_index = v_ind[voxel_ind_beg:(ii + 1)]
            # centroid
            filtered_points.append(np.mean(pc[point_index], axis=0))
            # random sample
            # filtered_points.append(pc[point_index[np.random.randint(0, len(point_index))]])
            voxel_ind_beg = ii + 1
    # emit the last voxel
    filtered_points.append(np.mean(pc[v_ind[voxel_ind_beg:]], axis=0))
    # print(len(filtered_points))
    # --- end ---
    # convert the result to an array and return it
    filtered_points = np.array(filtered_points, dtype=np.float64)
    return filtered_points

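# A minimal usage sketch (not in the original commit): downsample a random cloud
# with the voxel filter above. The cloud size and the 0.5 m leaf size are assumptions.
def _demo_voxel_filter():
    rng = np.random.default_rng(0)
    cloud = rng.uniform(0, 10, size=(10000, 3))     # random points in a 10 m cube
    down = voxel_filter(cloud, 0.5)
    print('before:', cloud.shape[0], 'after:', down.shape[0])
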
def MyDBSCAN1(D, eps, MinPts):
    labels = [0] * len(D)
    # C is the ID of the current cluster.
    C = 0
    for P in range(0, len(D)):
        if not (labels[P] == 0):
            continue
        # find the eps-neighborhood of point P in D
        NeighborPts = regionQuery(D, P, eps)
        # check whether the neighborhood contains at least MinPts points
        if len(NeighborPts) < MinPts:
            labels[P] = -1
        else:
            # P is a core point: grow a new cluster from it
            C += 1
            growCluster(D, labels, P, NeighborPts, C, eps, MinPts)
    # All data has been clustered!
    return labels

def growCluster(D, labels, P, NeighborPts, C, eps, MinPts):
    """
    Grow cluster C from the seed point P.
    Parameters:
        D - the dataset
        labels - the label vector
        P - index of the seed point
        NeighborPts - all neighbors of P
        C - label of the current cluster
        eps - distance threshold
        MinPts - minimum number of neighbors
    """
    # assign the current cluster label to P
    labels[P] = C
    i = 0
    while i < len(NeighborPts):
        Pn = NeighborPts[i]
        # if Pn was marked as noise, claim it for this cluster
        if labels[Pn] == -1:
            labels[Pn] = C
        elif labels[Pn] == 0:
            labels[Pn] = C
            # find all neighbors of Pn
            PnNeighborPts = regionQuery(D, Pn, eps)
            # if Pn is a core point,
            # append its neighbors to the FIFO queue
            if len(PnNeighborPts) >= MinPts:
                NeighborPts = NeighborPts + PnNeighborPts
        i += 1

def regionQuery(D, P, eps):
    # brute-force search for all points within eps of point P
    neighbors = []
    for Pn in range(0, len(D)):
        if np.linalg.norm(D[P] - D[Pn]) < eps:
            neighbors.append(Pn)
    return neighbors

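# A minimal usage sketch (not in the original commit): run the brute-force DBSCAN
# above on two tiny, well-separated 2-D blobs. The eps/MinPts values are assumptions.
def _demo_mydbscan1():
    pts = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1], [0.1, 0.1],
                    [5.0, 5.0], [5.1, 5.0], [5.0, 5.1], [5.1, 5.1],
                    [10.0, 0.0]])                    # last point should end up as noise
    labels = MyDBSCAN1(pts, eps=0.5, MinPts=3)
    print(labels)                                    # expected [1, 1, 1, 1, 2, 2, 2, 2, -1]
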
# The visitlist class keeps track of which points have been visited:
#     unvisitedlist - points not yet visited
#     visitedlist - points already visited
#     unvisitednum - number of points still unvisited
class visitlist:
    def __init__(self, count=0):
        self.unvisitedlist = [i for i in range(count)]
        self.visitedlist = list()
        self.unvisitednum = count

    def visit(self, pointId):
        self.visitedlist.append(pointId)
        self.unvisitedlist.remove(pointId)
        self.unvisitednum -= 1

def MyDBSCAN2(dataSet, eps, minPts):
    # dataSet is an ndarray; its first dimension is the number of points
    nPoints = dataSet.shape[0]
    # mark all points as unvisited,
    # implemented here with the visitlist class
    vPoints = visitlist(count=nPoints)
    # initialize the cluster label list C; k is the current cluster label
    k = -1
    C = [-1 for i in range(nPoints)]
    # build a KD-tree for radius queries with radius eps
    kd = KDTree(dataSet)
    while vPoints.unvisitednum > 0:
        # pick a random unvisited point p
        p = random.choice(vPoints.unvisitedlist)
        # mark p as visited
        vPoints.visit(p)
        # N is the list of points in the eps-neighborhood of p
        N = kd.query_ball_point(dataSet[p], eps)
        # if the neighborhood of p contains at least minPts points
        if len(N) >= minPts:
            # start a new cluster k and add p to it
            # (C is a label list, so just assign the label at index p)
            k += 1
            C[p] = k
            # expand the cluster through the neighborhood of p
            for p1 in N:
                # if neighbor p1 is unvisited
                if p1 in vPoints.unvisitedlist:
                    # mark p1 as visited
                    vPoints.visit(p1)
                    M = kd.query_ball_point(dataSet[p1], eps)
                    if len(M) >= minPts:
                        for i in M:
                            if i not in N:
                                N.append(i)
                    # if p1 is not yet a member of any cluster, add it to cluster k
                    if C[p1] == -1:
                        C[p1] = k
        # otherwise mark p as noise
        else:
            C[p] = -1
    # stop when no point is left unvisited
    return C

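# A minimal usage sketch (not in the original commit): the KD-tree based DBSCAN
# should group the same toy data as MyDBSCAN1 (cluster labels start at 0 here and
# noise stays -1). The eps/minPts values are assumptions.
def _demo_mydbscan2():
    pts = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1], [0.1, 0.1],
                    [5.0, 5.0], [5.1, 5.0], [5.0, 5.1], [5.1, 5.1],
                    [10.0, 0.0]])
    labels = MyDBSCAN2(pts, eps=0.5, minPts=3)
    print(labels)                                    # e.g. [0, 0, 0, 0, 1, 1, 1, 1, -1]
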
# Function: extract clusters from the point cloud
# Input:
#     data: point cloud (with the ground removed)
# Output:
#     clusters_index: 1-D array with the cluster id of every point
def clustering(data):
    # Assignment 2
    # --- begin ---
    start_time = time.time()
    # first voxel-downsample the non-ground points
    print(len(data))
    filtered_cloud = voxel_filter(data, 0.5)
    print(len(filtered_cloud))
    clusters_index = MyDBSCAN2(filtered_cloud, 1.0, 4)
    end_time = time.time()
    print('dbscan time: ', end_time - start_time)
    # --- end ---
    return filtered_cloud, clusters_index

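# A minimal usage sketch (not in the original commit): the full pipeline of ground
# removal, clustering and visualization. The '000000.bin' path is a placeholder for a
# KITTI-style frame; plot_clusters2 is defined further below in this file.
def _demo_clustering_pipeline():
    points = read_ww_bin('000000.bin')               # placeholder file path
    non_ground, plane_model = ground_segmentation(points[:, :3])
    cloud, idx = clustering(non_ground)
    plot_clusters2(cloud, idx)
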
# Function: display the clustered point cloud, one color per cluster
# Input:
#     data: point cloud data (with the ground removed)
#     cluster_index: 1-D array with the cluster id of every point
def plot_clusters(data, cluster_index):
    ax = plt.figure().add_subplot(111, projection='3d')
    colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
                                         '#f781bf', '#a65628', '#984ea3',
                                         '#999999', '#e41a1c', '#dede00']),
                                  int(max(cluster_index) + 1))))
    # black is appended for noise points (cluster id -1)
    colors = np.append(colors, ["#000000"])
    ax.scatter(data[:, 0], data[:, 1], data[:, 2], s=2, color=colors[cluster_index])
    plt.show()

# Function: display the clustered point cloud with Open3D, one color per cluster
# Input:
#     data: point cloud data (with the ground removed)
#     cluster_index: 1-D array with the cluster id of every point
def plot_clusters2(data, cluster_index):
    colors_base = [[255, 0, 0], [0, 255, 0], [0, 0, 255],
                   [255, 255, 0], [128, 128, 0], [0, 128, 128],
                   [128, 0, 0], [0, 0, 128], [128, 0, 128]]
    colors = []
    n_colors = len(colors_base)
    for ii in range(len(data)):
        if cluster_index[ii] == -1:
            colors.append([0, 0, 0])          # noise points in black
        else:
            colors.append(colors_base[cluster_index[ii] % n_colors])
    colors = np.array(colors) / 255.0         # Open3D expects colors in [0, 1]
    test_pcd = open3d.geometry.PointCloud()
    test_pcd.points = open3d.utility.Vector3dVector(data)
    test_pcd.colors = open3d.utility.Vector3dVector(colors)
    open3d.visualization.draw_geometries([test_pcd], window_name="Open3D2")

# Parse a pcd point-cloud file as text.
# During parsing the original lidar frame is converted to a new virtual lidar frame:
#     ln(x) = -l(y); ln(y) = l(x); ln(z) = l(z)
#     intensity = intensity / 255.0
def parse_pandarmind_pcd(pandar_pcd_path):
    '''
    pandar_fp = open(pandar_pcd_path, 'r')
    origin_data = []
    pc = []
    for line in pandar_fp.readlines()[11:]:
        # TODO cur lidar -> virtual kitti lidar
        x = float(line.split(' ')[1]) * -1 + 5   # shift the origin 5 m behind the wire fence to recover removed objects
        y = float(line.split(' ')[0]) * 1 + 22   # shift the origin 22 m to the right
        z = float(line.split(' ')[2])
        intensity = int(line.split(' ')[3])
        origin_data.append([x, y, z, intensity / 255.0])
        pc.append([x, y, z])
    origin_data = np.array(origin_data, dtype=np.float32)
    pc = np.array(pc, dtype=np.float32)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(pc)
    return origin_data, pcd
    '''
    pcd = open3d.io.read_point_cloud(pandar_pcd_path)
    xyz = np.array(pcd.points)
    print(xyz.shape)
    # 2021-01-14-15-08-26
    # b = np.where((xyz[:,0] >= -75) & (xyz[:,0] <= 31) & \
    #              (xyz[:,1] >= -127) & (xyz[:,1] <= -23) & \
    #              (xyz[:,2] >= -7) & (xyz[:,1] <= -1) )
    # 2021-01-14-15-35-58
    # b = np.where((xyz[:,0] >= -70) & (xyz[:,0] <= 70) & \
    #              (xyz[:,1] >= -60) & (xyz[:,1] <= 10) & \
    #              (xyz[:,2] >= -7) & (xyz[:,1] <= 1) )
    # 2021-01-14-16-01-54
    # b = np.where((xyz[:,0] >= -30) & (xyz[:,0] <= 30) & \
    #              (xyz[:,1] >= -100) & (xyz[:,1] <= -10) & \
    #              (xyz[:,2] >= -7) & (xyz[:,1] <= 1) )
    # futong_dongxiangxi
    # b = np.where((xyz[:,0] >= -30) & (xyz[:,0] <= 18) & \
    #              (xyz[:,1] >= -120) & (xyz[:,1] <= -10) & \
    #              (xyz[:,2] >= -10) & (xyz[:,1] <= 1) )
    # futong_xixiangdong
    # b = np.where((xyz[:,0] >= -4) & (xyz[:,0] <= 10) & \
    #              (xyz[:,1] >= -70) & (xyz[:,1] <= -10) & \
    #              (xyz[:,2] >= -10) & (xyz[:,1] <= 1) )
    # N1_1
    '''
    b = np.where((xyz[:,0] >= 22) & (xyz[:,0] <= 45) & \
                 (xyz[:,1] >= -4) & (xyz[:,1] <= 8) & \
                 (xyz[:,2] >= -4) & (xyz[:,1] <= -1) )
    xyz = xyz[b]
    xyzi = np.zeros([xyz.shape[0],4])
    xyzi[:,:3] = xyz[:,:]
    '''
    # crop a region of interest (note: the last term in each clause compares
    # xyz[:,1] again, which looks like it may have been intended as xyz[:,2])
    b = np.where(((xyz[:, 0] >= -5.84) & (xyz[:, 0] <= 7.39) & \
                  (xyz[:, 1] >= -81.9) & (xyz[:, 1] <= 0) & \
                  (xyz[:, 2] >= -7) & (xyz[:, 1] <= -1)) | (
                  (xyz[:, 0] >= -80.76) & (xyz[:, 0] <= 95.19) & \
                  (xyz[:, 1] >= -11) & (xyz[:, 1] <= 0) & \
                  (xyz[:, 2] >= -7) & (xyz[:, 1] <= -1)
                 ))
    xyz = xyz[b]
    xyzi = np.zeros([xyz.shape[0], 4])
    xyzi[:, :3] = xyz[:, :]
    '''
    xyzi[:,0] = xyz[:,0] - 15
    xyzi[:,1] = xyz[:,1] - 7
    xyzi[:,2] = xyz[:,2]
    pcd.points = o3d.utility.Vector3dVector(xyzi[:,:3])
    '''
    return xyzi, pcd

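# A minimal sketch (not in the original commit) of the frame conversion described in
# the comment above parse_pandarmind_pcd: ln(x) = -l(y), ln(y) = l(x), ln(z) = l(z),
# with intensity scaled to [0, 1]. The N*4 (x, y, z, intensity) layout is an assumption.
def _to_virtual_lidar_frame(points_xyzi):
    out = np.empty_like(points_xyzi, dtype=np.float32)
    out[:, 0] = -points_xyzi[:, 1]            # ln(x) = -l(y)
    out[:, 1] = points_xyzi[:, 0]             # ln(y) = l(x)
    out[:, 2] = points_xyzi[:, 2]             # ln(z) = l(z)
    out[:, 3] = points_xyzi[:, 3] / 255.0     # normalize intensity
    return out
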
def main():
    root_dir = 'C:\\hs_data\\carcity\\0720\\'  # dataset path
    # root_dir = '/Users/haoshuang/Documents/tmp/M1/'  # dataset path
    cat = os.listdir(root_dir)
    iteration_num = len(cat)
    for i in range(iteration_num):
        filename = os.path.join(root_dir, cat[i])
        print('clustering pointcloud file:', filename)
        # origin_points = read_ww_bin(filename)
        xyzi, pcd = parse_pandarmind_pcd(filename)
        segmented_points, ground_model = ground_segmentation(data=xyzi[:, :3])


if __name__ == '__main__':
    main()