Commit b29f50b

cuda(histlib): cuda library from icp-flow project.
1 parent 3d53932 commit b29f50b

7 files changed

Lines changed: 257 additions & 0 deletions

assets/cuda/README.md

Lines changed: 2 additions & 0 deletions
@@ -5,6 +5,8 @@ Faster our code in CUDA.
 
 - chamfer3D: 3D chamfer distance between two point clouds, by Qingwen Zhang while she was working on SeFlow.
 - mmcv: directly from mmcv, not our code.
+- mmdet: only Python files, no need to compile.
+- histlib: from Yancong's [ICP-Flow](https://github.com/yanconglin/ICP-Flow) project.
 
 ---

assets/cuda/histlib/hist.cu

Lines changed: 90 additions & 0 deletions
@@ -0,0 +1,90 @@
#include <vector>
#include "hist_cuda_core.cuh"

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>

// author: Charles Shang
// https://github.com/torch/cunn/blob/master/lib/THCUNN/generic/SpatialConvolutionMM.cu

at::Tensor
hist_cuda(const at::Tensor &X, const at::Tensor &Y,
          const float min_x, const float min_y, const float min_z,
          const float max_x, const float max_y, const float max_z,
          const int len_x, const int len_y, const int len_z,
          const int mini_batch)
{
    AT_ASSERTM(X.is_contiguous(), "input tensor has to be contiguous");
    AT_ASSERTM(Y.is_contiguous(), "input tensor has to be contiguous");

    AT_ASSERTM(X.type().is_cuda(), "input must be a CUDA tensor");
    AT_ASSERTM(Y.type().is_cuda(), "input must be a CUDA tensor");

    const int batch = X.size(0);
    const int num_X = X.size(1);
    const int dim   = X.size(2);
    const int num_Y = Y.size(1);

    AT_ASSERTM((X.size(0) == Y.size(0)), "batch_X (%d) != batch_Y (%d).", X.size(0), Y.size(0));
    AT_ASSERTM((X.size(2) == Y.size(2)), "dim_X (%d) != dim_Y (%d).", X.size(2), Y.size(2));

    AT_ASSERTM((dim == 4), "dim (%d) != 4; 3 for (x, y, z); 1 for indicator: padded or not.", dim);

    auto bins = at::zeros({batch, len_x, len_y, len_z}, X.options());

    // Process the batch in chunks of mini_batch elements, so a single kernel
    // launch never covers more than mini_batch * num_X * num_Y pairs.
    int iters = batch / mini_batch;
    if (batch % mini_batch != 0)
    {
        iters += 1;
    }

    for (int i = 0; i < iters; ++i)
    {
        // The last chunk may be smaller than mini_batch.
        int mini_batch_ = mini_batch;
        if ((i + 1) * mini_batch > batch)
        {
            mini_batch_ = batch - i * mini_batch;
        }
        AT_DISPATCH_FLOATING_TYPES(X.type(), "hist_cuda_core", ([&] {
            hist_cuda_core(at::cuda::getCurrentCUDAStream(),
                           X.data<scalar_t>() + i * mini_batch * num_X * dim,
                           Y.data<scalar_t>() + i * mini_batch * num_Y * dim,
                           mini_batch_, dim, num_X, num_Y,
                           min_x, min_y, min_z,
                           max_x, max_y, max_z,
                           len_x, len_y, len_z,
                           bins.data<scalar_t>() + i * mini_batch * len_x * len_y * len_z);
        }));
    }

    return bins;
}
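As a reading aid: the function above counts, per batch element, how many pairwise offsets X[b, i, :3] - Y[b, j, :3] between valid points land in each cell of a (len_x, len_y, len_z) grid over [min, max) per axis. A slow pure-PyTorch sketch of that semantics, useful for cross-checking the kernel; the name hist_reference and its argument packing are my reconstruction, not code from this commit:

```python
import torch

def hist_reference(X, Y, mins, maxs, lens):
    """Slow PyTorch restatement of hist_cuda's semantics, for verification only.

    X: (B, N, 4) and Y: (B, M, 4); channel 3 > 0 marks valid (non-padded)
    points. Returns (B, len_x, len_y, len_z) counts of pairwise differences
    X[b, i, :3] - Y[b, j, :3] that fall inside [mins, maxs) on every axis.
    """
    bins = torch.zeros(X.shape[0], *lens, dtype=X.dtype, device=X.device)
    lo = torch.tensor(mins, dtype=X.dtype, device=X.device)
    hi = torch.tensor(maxs, dtype=X.dtype, device=X.device)
    n = torch.tensor(lens, dtype=X.dtype, device=X.device)
    for b in range(X.shape[0]):
        xv = X[b][X[b, :, 3] > 0][:, :3]          # valid points of X
        yv = Y[b][Y[b, :, 3] > 0][:, :3]          # valid points of Y
        d = (xv[:, None, :] - yv[None, :, :]).reshape(-1, 3)
        d = d[((d >= lo) & (d < hi)).all(dim=1)]  # keep in-range pairs
        idx = torch.floor((d - lo) / (hi - lo) * n).long()  # [) binning
        flat = (idx[:, 0] * lens[1] + idx[:, 1]) * lens[2] + idx[:, 2]
        bins[b].view(-1).scatter_add_(
            0, flat, torch.ones_like(flat, dtype=bins.dtype))
    return bins
```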

assets/cuda/histlib/hist.h

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
#pragma once
#include <torch/extension.h>

at::Tensor
hist(const at::Tensor &X, const at::Tensor &Y,
     const float min_x, const float min_y, const float min_z,
     const float max_x, const float max_y, const float max_z,
     const int len_x, const int len_y, const int len_z,
     const int mini_batch);
assets/cuda/histlib/hist_cuda.cpp

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
#include "hist.h"
#include "hist_cuda.h"

at::Tensor
hist(const at::Tensor &X, const at::Tensor &Y,
     const float min_x, const float min_y, const float min_z,
     const float max_x, const float max_y, const float max_z,
     const int len_x, const int len_y, const int len_z,
     const int mini_batch)
{
    // Dispatch to the CUDA implementation; there is no CPU fallback.
    if (X.type().is_cuda() && Y.type().is_cuda())
    {
        return hist_cuda(X, Y,
                         min_x, min_y, min_z,
                         max_x, max_y, max_z,
                         len_x, len_y, len_z,
                         mini_batch);
    }
    AT_ERROR("Not implemented on the CPU");
}

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("hist", &hist, "hist");
}

assets/cuda/histlib/hist_cuda.h

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
#pragma once
#include <torch/extension.h>

at::Tensor
hist_cuda(const at::Tensor &X, const at::Tensor &Y,
          const float min_x, const float min_y, const float min_z,
          const float max_x, const float max_y, const float max_z,
          const int len_x, const int len_y, const int len_z,
          const int mini_batch);
assets/cuda/histlib/hist_cuda_core.cuh

Lines changed: 100 additions & 0 deletions
@@ -0,0 +1,100 @@
#include <cstdio>
#include <algorithm>
#include <cstring>

#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>

#include <THC/THCAtomics.cuh>  // atomicAdd overloads for the dispatched scalar types

// Grid-stride loop over all n work items.
#define CUDA_KERNEL_LOOP(i, n)                          \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
         i < (n);                                       \
         i += blockDim.x * gridDim.x)

const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N)
{
    return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

template <typename scalar_t>
__global__ void hist_cuda_kernel(const int n,
                                 const scalar_t* X,
                                 const scalar_t* Y,
                                 const int batch, const int dim,
                                 const int num_X, const int num_Y,
                                 const float min_x, const float min_y, const float min_z,
                                 const float max_x, const float max_y, const float max_z,
                                 const int len_x, const int len_y, const int len_z,
                                 scalar_t* bins)
{
    CUDA_KERNEL_LOOP(index, n)
    {
        // One thread per (b, i, j) pair; launched over batch * num_X * num_Y.
        const int b = index / num_X / num_Y % batch;
        const int i = index / num_Y % num_X;
        const int j = index % num_Y;

        // Channel 3 is the indicator: > 0 means a real point, else padding.
        scalar_t flag_x = X[b*num_X*dim + i*dim + 3];
        scalar_t flag_y = Y[b*num_Y*dim + j*dim + 3];
        if (flag_x > 0.0 && flag_y > 0.0)
        {
            scalar_t val_x = X[b*num_X*dim + i*dim + 0] - Y[b*num_Y*dim + j*dim + 0];
            scalar_t val_y = X[b*num_X*dim + i*dim + 1] - Y[b*num_Y*dim + j*dim + 1];
            scalar_t val_z = X[b*num_X*dim + i*dim + 2] - Y[b*num_Y*dim + j*dim + 2];
            if (val_x >= min_x && val_x < max_x && val_y >= min_y && val_y < max_y && val_z >= min_z && val_z < max_z)
            {
                // [): left included; right excluded.
                int p_x = __float2int_rd((val_x - min_x) / (max_x - min_x) * __int2float_rd(len_x));
                int p_y = __float2int_rd((val_y - min_y) / (max_y - min_y) * __int2float_rd(len_y));
                int p_z = __float2int_rd((val_z - min_z) / (max_z - min_z) * __int2float_rd(len_z));

                // Row-major flat index into the (batch, len_x, len_y, len_z) bins.
                int bin_id = b*len_x*len_y*len_z + p_x*len_y*len_z + p_y*len_z + p_z;
                atomicAdd(bins + bin_id, 1);
            }
        }
    }
}

template <typename scalar_t>
void hist_cuda_core(cudaStream_t stream,
                    const scalar_t* X, const scalar_t* Y,
                    const int batch, const int dim,
                    const int num_X, const int num_Y,
                    const float min_x, const float min_y, const float min_z,
                    const float max_x, const float max_y, const float max_z,
                    const int len_x, const int len_y, const int len_z,
                    scalar_t* bins)
{
    const int num_kernels = batch * num_X * num_Y;

    hist_cuda_kernel<scalar_t>
        <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>(
            num_kernels,
            X, Y,
            batch, dim,
            num_X, num_Y,
            min_x, min_y, min_z,
            max_x, max_y, max_z,
            len_x, len_y, len_z,
            bins);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        printf("error in hist_cuda_core: %s\n", cudaGetErrorString(err));
    }
}
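In the kernel, each in-range offset is floored into a per-axis bin, and (b, p_x, p_y, p_z) is flattened row-major into bin_id; atomicAdd makes concurrent increments from threads hitting the same bin safe. A tiny NumPy restatement of that index arithmetic, for illustration only (function names are mine, not from the commit):

```python
import numpy as np

def bin_index(val, vmin, vmax, nbins):
    # [) binning: mirrors __float2int_rd((val - min) / (max - min) * len).
    return int(np.floor((val - vmin) / (vmax - vmin) * nbins))

def flat_bin_id(b, px, py, pz, len_x, len_y, len_z):
    # Row-major flattening, identical to
    # b*len_x*len_y*len_z + px*len_y*len_z + py*len_z + pz in the kernel.
    return ((b * len_x + px) * len_y + py) * len_z + pz
```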

assets/cuda/histlib/setup.py

Lines changed: 15 additions & 0 deletions
@@ -0,0 +1,15 @@
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='hist',
    ext_modules=[
        CUDAExtension('hist', [
            # Resolve sources relative to this file; the .cpp must be named xxx_cuda.cpp.
            "/".join(__file__.split('/')[:-1] + ['hist_cuda.cpp']),
            "/".join(__file__.split('/')[:-1] + ['hist.cu']),
        ]),
    ],
    cmdclass={
        'build_ext': BuildExtension
    },
    version='1.0.1')
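For orientation, building and calling the extension would look roughly like the sketch below. The bound name hist.hist comes from the PYBIND11_MODULE in hist_cuda.cpp; the bounds, bin counts, and mini_batch value here are arbitrary illustration, not values from this commit:

```python
# Build first, from assets/cuda/histlib:
#   python setup.py build_ext --inplace
import torch
import hist  # the compiled extension defined in setup.py

B, N, M = 2, 1024, 1024
X = torch.rand(B, N, 4, device="cuda").contiguous()
Y = torch.rand(B, M, 4, device="cuda").contiguous()
X[..., 3] = 1.0  # channel 3 is the valid/padded indicator; mark all points valid
Y[..., 3] = 1.0

# Histogram pairwise offsets over [-1, 1)^3 with 20 bins per axis,
# one batch element per kernel launch (mini_batch = 1, an arbitrary choice).
bins = hist.hist(X, Y,
                 -1.0, -1.0, -1.0,  # min_x, min_y, min_z
                 1.0, 1.0, 1.0,     # max_x, max_y, max_z
                 20, 20, 20,        # len_x, len_y, len_z
                 1)                 # mini_batch
print(bins.shape)  # torch.Size([2, 20, 20, 20])
```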
