# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import unittest
from MinkowskiEngine import (
SparseTensor,
MinkowskiInstanceNorm,
MinkowskiInstanceNormFunction,
)
from utils.gradcheck import gradcheck
from tests.python.common import data_loader
class TestNormalization(unittest.TestCase):
def test_inst_norm(self):
in_channels = 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
input = SparseTensor(feats, coords)
input.F.requires_grad_()
norm = MinkowskiInstanceNorm(num_features=in_channels).double()
out = norm(input)
print(out)
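        # Gradcheck the raw autograd function to verify analytical gradients
        # against numerical gradients.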
fn = MinkowskiInstanceNormFunction()
self.assertTrue(
gradcheck(
fn, (input.F, input.coordinate_map_key, None, input.coordinate_manager)
)
)
    def test_inst_norm_gpu(self):
        if not torch.cuda.is_available():
            return
        in_channels = 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
device = torch.device("cuda")
input = SparseTensor(feats, coords, device=device)
input.F.requires_grad_()
norm = MinkowskiInstanceNorm(num_features=in_channels).to(device).double()
out = norm(input)
print(out)
fn = MinkowskiInstanceNormFunction()
self.assertTrue(
gradcheck(
fn, (input.F, input.coordinate_map_key, None, input.coordinate_manager)
)
)
if __name__ == "__main__":
unittest.main()
| MinkowskiEngine-master | tests/python/norm.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import unittest
from MinkowskiEngine import SparseTensor, MinkowskiConvolution, \
MinkowskiConvolutionTranspose
import MinkowskiEngine as ME
from tests.python.common import data_loader
def get_random_coords(dimension=2, tensor_stride=2):
torch.manual_seed(0)
# Create random coordinates with tensor stride == 2
coords = torch.rand(10, dimension + 1)
coords[:, :dimension] *= 5 # random coords
coords[:, -1] *= 2 # random batch index
coords = coords.floor().int()
coords = ME.utils.sparse_quantize(coords)
coords[:, :dimension] *= tensor_stride # make the tensor stride 2
return coords, tensor_stride
class TestConvolution(unittest.TestCase):
def test(self):
print(f"{self.__class__.__name__}: test")
in_channels, out_channels, D = 2, 3, 2
coords, feats, labels = data_loader(in_channels, batch_size=2)
# Create random coordinates with tensor stride == 2
out_coords, tensor_stride = get_random_coords()
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords=coords)
cm = input.coords_man
print(cm._get_coords_key(1))
conv = MinkowskiConvolution(
in_channels,
out_channels,
kernel_size=3,
stride=1,
bias=False,
dimension=D).double()
print('Initial input: ', input)
print('Specified output coords: ', out_coords)
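        # Evaluate the convolution only at the user-specified output coordinates.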
output = conv(input, out_coords)
# To specify the tensor stride
out_coords_key = cm.create_coords_key(out_coords, tensor_stride=2)
output = conv(input, out_coords_key)
print('Conv output: ', output)
output.F.sum().backward()
print(input.F.grad)
def test_tr(self):
print(f"{self.__class__.__name__}: test_tr")
in_channels, out_channels, D = 2, 3, 2
coords, feats, labels = data_loader(in_channels, batch_size=2)
# tensor stride must be at least 2 for convolution transpose with stride 2
coords[:, :2] *= 2
out_coords = torch.rand(10, 3)
out_coords[:, :2] *= 10 # random coords
out_coords[:, 2] *= 2 # random batch index
out_coords = out_coords.floor().int()
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords=coords, tensor_stride=2)
cm = input.coords_man
print(cm._get_coords_key(2))
conv_tr = MinkowskiConvolutionTranspose(
in_channels,
out_channels,
kernel_size=3,
stride=2,
bias=False,
dimension=D).double()
print('Initial input: ', input)
print('Specified output coords: ', out_coords)
output = conv_tr(input, out_coords)
print('Conv output: ', output)
output.F.sum().backward()
print(input.F.grad)
if __name__ == '__main__':
unittest.main()
| MinkowskiEngine-master | tests/python/conv_on_coords.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import unittest
import MinkowskiEngineBackend._C as _C
from MinkowskiEngine import (
SparseTensor,
MinkowskiConvolution,
MinkowskiConvolutionTranspose,
MinkowskiPruning,
MinkowskiPruningFunction,
)
from utils.gradcheck import gradcheck
from tests.python.common import data_loader
class TestPruning(unittest.TestCase):
def test(self):
in_channels = 2
coords, feats, labels = data_loader(in_channels, batch_size=1)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords)
use_feat = torch.rand(feats.size(0)) < 0.5
pruning = MinkowskiPruning()
output = pruning(input, use_feat)
print(input)
print(use_feat)
print(output)
# Check backward
fn = MinkowskiPruningFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
use_feat,
input.coordinate_map_key,
output.coordinate_map_key,
input.coordinate_manager,
),
)
)
    def test_device_forward(self):
        if not torch.cuda.is_available():
            return
        in_channels = 2
coords, feats, labels = data_loader(in_channels, batch_size=1)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords, device="cuda")
use_feat = torch.rand(feats.size(0)) < 0.5
pruning = MinkowskiPruning()
output = pruning(input, use_feat.cuda())
print(input)
print(use_feat)
print(output)
def test_empty(self):
in_channels = 2
coords, feats, labels = data_loader(in_channels, batch_size=1)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords)
use_feat = torch.BoolTensor(len(input))
use_feat.zero_()
pruning = MinkowskiPruning()
output = pruning(input, use_feat)
print(input)
print(use_feat)
print(output)
# Check backward
fn = MinkowskiPruningFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
use_feat,
input.coordinate_map_key,
output.coordinate_map_key,
input.coordinate_manager,
),
)
)
def test_pruning(self):
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels, batch_size=1)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords)
use_feat = torch.rand(feats.size(0)) < 0.5
pruning = MinkowskiPruning()
output = pruning(input, use_feat)
print(input)
print(use_feat)
print(output)
# Check backward
fn = MinkowskiPruningFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
use_feat,
input.coordinate_map_key,
output.coordinate_map_key,
input.coordinate_manager,
),
)
)
    def test_device(self):
        if not torch.cuda.is_available():
            return
        in_channels, D = 2, 2
        device = torch.device("cuda")
coords, feats, labels = data_loader(in_channels, batch_size=1)
feats = feats.double()
feats.requires_grad_()
use_feat = (torch.rand(feats.size(0)) < 0.5).to(device)
pruning = MinkowskiPruning()
input = SparseTensor(feats, coords, device=device)
output = pruning(input, use_feat)
print(input)
print(output)
fn = MinkowskiPruningFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
use_feat,
input.coordinate_map_key,
output.coordinate_map_key,
input.coordinate_manager,
),
)
)
def test_with_convtr(self):
channels, D = [2, 3, 4], 2
coords, feats, labels = data_loader(channels[0], batch_size=1)
feats = feats.double()
feats.requires_grad_()
# Create a sparse tensor with large tensor strides for upsampling
start_tensor_stride = 4
input = SparseTensor(
feats, coords * start_tensor_stride, tensor_stride=start_tensor_stride,
)
conv_tr1 = MinkowskiConvolutionTranspose(
channels[0],
channels[1],
kernel_size=3,
stride=2,
generate_new_coords=True,
dimension=D,
).double()
conv1 = MinkowskiConvolution(
channels[1], channels[1], kernel_size=3, dimension=D
).double()
conv_tr2 = MinkowskiConvolutionTranspose(
channels[1],
channels[2],
kernel_size=3,
stride=2,
generate_new_coords=True,
dimension=D,
).double()
conv2 = MinkowskiConvolution(
channels[2], channels[2], kernel_size=3, dimension=D
).double()
pruning = MinkowskiPruning()
out1 = conv_tr1(input)
self.assertTrue(torch.prod(torch.abs(out1.F) > 0).item() == 1)
out1 = conv1(out1)
use_feat = torch.rand(len(out1)) < 0.5
out1 = pruning(out1, use_feat)
out2 = conv_tr2(out1)
self.assertTrue(torch.prod(torch.abs(out2.F) > 0).item() == 1)
use_feat = torch.rand(len(out2)) < 0.5
out2 = pruning(out2, use_feat)
out2 = conv2(out2)
print(out2)
out2.F.sum().backward()
# Check gradient flow
print(input.F.grad)
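if __name__ == "__main__":
    unittest.main()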
| MinkowskiEngine-master | tests/python/pruning.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
| MinkowskiEngine-master | tests/python/__init__.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import unittest
import torch
from MinkowskiEngine import SparseTensor, MinkowskiConvolution, MinkowskiAlgorithm
from tests.python.common import data_loader
class TestKernelMap(unittest.TestCase):
def test_kernelmap_gpu(self):
print(f"{self.__class__.__name__}: test_kernelmap_gpu")
if not torch.cuda.is_available():
return
in_channels, out_channels, D = 2, 3, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(
feats,
coordinates=coords,
minkowski_algorithm=MinkowskiAlgorithm.SPEED_OPTIMIZED,
device="cuda",
)
# Initialize context
conv = (
MinkowskiConvolution(
in_channels,
out_channels,
kernel_size=3,
stride=2,
bias=True,
dimension=D,
)
.double()
.cuda()
)
output = conv(input)
iC = input.C.cpu().numpy()
oC = output.C.cpu().numpy()
print(iC)
print(oC)
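        # Retrieve the in->out index pairs for every kernel offset, mapping the
        # tensor-stride-1 coordinate map to the tensor-stride-2 coordinate map.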
kernel_maps = output.coordinate_manager.kernel_map(
1,
2,
stride=2,
kernel_size=3,
)
for kernel_index, in_out_map in kernel_maps.items():
for i, o in zip(in_out_map[0], in_out_map[1]):
print(kernel_index, iC[i], "->", oC[o])
self.assertTrue(sum(len(in_map[0]) for k, in_map in kernel_maps.items()) == 16)
def test_kernelmap(self):
print(f"{self.__class__.__name__}: test_kernelmap")
in_channels, out_channels, D = 2, 3, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coordinates=coords)
# Initialize context
conv = MinkowskiConvolution(
in_channels,
out_channels,
kernel_size=3,
stride=2,
bias=True,
dimension=D,
).double()
output = conv(input)
iC = input.C.numpy()
oC = output.C.numpy()
print(iC)
print(oC)
kernel_maps = output.coordinate_manager.kernel_map(
1, 2, stride=2, kernel_size=3
)
for kernel_index, in_out_map in kernel_maps.items():
for i, o in zip(in_out_map[0], in_out_map[1]):
print(kernel_index, iC[i], "->", oC[o])
self.assertTrue(sum(len(in_map[0]) for k, in_map in kernel_maps.items()) == 16)
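if __name__ == "__main__":
    unittest.main()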
| MinkowskiEngine-master | tests/python/kernel_map.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import argparse
import numpy as np
from urllib.request import urlretrieve
try:
import open3d as o3d
except ImportError:
raise ImportError("Please install open3d with `pip install open3d`.")
import torch
import MinkowskiEngine as ME
from MinkowskiCommon import convert_to_int_list
from examples.common import Timer
# Check if the weights and file exist and download
if not os.path.isfile("1.ply"):
print("Downloading a room ply file...")
urlretrieve("http://cvgl.stanford.edu/data2/minkowskiengine/1.ply", "1.ply")
parser = argparse.ArgumentParser()
parser.add_argument("--file_name", type=str, default="1.ply")
parser.add_argument("--voxel_size", type=float, default=0.02)
parser.add_argument("--batch_size", type=int, default=1)
parser.add_argument("--max_kernel_size", type=int, default=7)
def quantize(coordinates):
D = coordinates.size(1) - 1
coordinate_manager = ME.CoordinateManager(
D=D, coordinate_map_type=ME.CoordinateMapType.CPU
)
coordinate_map_key = ME.CoordinateMapKey(convert_to_int_list(1, D), "")
key, (unique_map, inverse_map) = coordinate_manager.insert_and_map(
coordinates, *coordinate_map_key.get_key()
)
return unique_map, inverse_map
def load_file(file_name, voxel_size):
pcd = o3d.io.read_point_cloud(file_name)
coords = torch.from_numpy(np.array(pcd.points))
feats = torch.from_numpy(np.array(pcd.colors)).float()
quantized_coords = torch.floor(coords / voxel_size).int()
inds, inverse_inds = quantize(quantized_coords)
return quantized_coords[inds], feats[inds], pcd
def generate_input_sparse_tensor(file_name, voxel_size=0.05, batch_size=1):
# Create a batch, this process is done in a data loader during training in parallel.
batch = [load_file(file_name, voxel_size),] * batch_size
    coordinates_, features_, pcds = list(zip(*batch))
    coordinates, features = ME.utils.sparse_collate(coordinates_, features_)
# Normalize features and create a sparse tensor
return features, coordinates
if __name__ == "__main__":
config = parser.parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define a model and load the weights
all_convs = {}
for k in range(3, config.max_kernel_size + 1, 2):
for in_ch in [3, 8, 16, 32, 64, 128]:
for out_ch in [16, 32, 64, 128, 256]:
all_convs[(k, in_ch, out_ch)] = ME.MinkowskiConvolution(
in_channels=in_ch,
out_channels=out_ch,
kernel_size=k,
stride=2,
dimension=3,
).to(device)
# Measure time
print("Initialization time")
features, coordinates = generate_input_sparse_tensor(
config.file_name, voxel_size=config.voxel_size, batch_size=config.batch_size
)
timer = Timer()
for i in range(20):
timer.tic()
sinput = ME.SparseTensor(
features.to(device), coordinates=coordinates.to(device)
)
timer.toc()
print(f"{timer.min_time:.12f} for initialization of {len(sinput)} voxels")
print("Forward")
for k, conv in all_convs.items():
timer = Timer()
features = torch.rand(len(coordinates), k[1]).to(device)
# Feed-forward pass and get the prediction
for i in range(20):
sinput = ME.SparseTensor(
features.to(device), coordinates=coordinates.to(device)
)
timer.tic()
soutput = conv(sinput)
timer.toc()
print(
f"{timer.min_time:.12f} for {k} strided convolution with {len(sinput)} voxel"
)
print("Backward")
sinput = ME.SparseTensor(
features.to(device), coordinates=coordinates.to(device)
)
for k, conv in all_convs.items():
timer = Timer()
sinput._F = torch.rand(len(sinput), k[1]).to(device)
soutput = conv(sinput)
loss = soutput.F.sum()
        # Backward pass timing; retain the graph so backward() can be called repeatedly
        for i in range(20):
            timer.tic()
            loss.backward(retain_graph=True)
timer.toc()
print(
f"{timer.min_time:.12f} for {k} strided convolution with {len(sinput)} voxel"
)
| MinkowskiEngine-master | tests/python/strided_conv.py |
# Copyright (c) 2020-2021 NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import unittest
import open3d as o3d
import numpy as np
import os
from urllib.request import urlretrieve
import torch
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine import SparseTensor
from MinkowskiEngine.utils import summary, batched_coordinates
class StackUNet(ME.MinkowskiNetwork):
def __init__(self, in_nchannel, out_nchannel, D):
ME.MinkowskiNetwork.__init__(self, D)
channels = [in_nchannel, 16, 32]
self.net = nn.Sequential(
ME.MinkowskiStackSum(
ME.MinkowskiConvolution(
channels[0],
channels[1],
kernel_size=3,
stride=1,
dimension=D,
),
nn.Sequential(
ME.MinkowskiConvolution(
channels[0],
channels[1],
kernel_size=3,
stride=2,
dimension=D,
),
ME.MinkowskiStackSum(
nn.Identity(),
nn.Sequential(
ME.MinkowskiConvolution(
channels[1],
channels[2],
kernel_size=3,
stride=2,
dimension=D,
),
ME.MinkowskiConvolutionTranspose(
channels[2],
channels[1],
kernel_size=3,
stride=1,
dimension=D,
),
ME.MinkowskiPoolingTranspose(
kernel_size=2, stride=2, dimension=D
),
),
),
ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D),
),
),
ME.MinkowskiToFeature(),
nn.Linear(channels[1], out_nchannel, bias=True),
)
def forward(self, x):
return self.net(x)
class TestSummary(unittest.TestCase):
def setUp(self):
file_name, voxel_size = "1.ply", 0.02
self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.net = StackUNet(3, 20, D=3).to(self.device)
if not os.path.isfile(file_name):
print('Downloading an example pointcloud...')
urlretrieve("https://bit.ly/3c2iLhg", file_name)
pcd = o3d.io.read_point_cloud(file_name)
coords = np.array(pcd.points)
colors = np.array(pcd.colors)
self.sinput = SparseTensor(
features=torch.from_numpy(colors).float(),
coordinates=batched_coordinates([coords / voxel_size], dtype=torch.float32),
device=self.device,
)
def test(self):
summary(self.net, self.sinput)
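if __name__ == "__main__":
    unittest.main()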
| MinkowskiEngine-master | tests/python/summary.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import numpy as np
import torch
import MinkowskiEngine as ME
from urllib.request import urlretrieve
if not os.path.isfile("1.ply"):
urlretrieve("http://cvgl.stanford.edu/data2/minkowskiengine/1.ply", "1.ply")
def load_file(file_name):
try:
import open3d as o3d
except ImportError:
raise ImportError("Please install open3d with `pip install open3d`.")
pcd = o3d.io.read_point_cloud(file_name)
coords = np.array(pcd.points)
colors = np.array(pcd.colors)
return coords, colors, pcd
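# Convert ASCII art into 2D coordinates: one (row, col) pair per non-space character.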
def get_coords(data):
coords = []
for i, row in enumerate(data):
for j, col in enumerate(row):
if col != " ":
coords.append([i, j])
return np.array(coords)
def data_loader(
nchannel=3,
max_label=5,
is_classification=True,
seed=-1,
batch_size=2,
dtype=torch.float32,
):
if seed >= 0:
torch.manual_seed(seed)
data = [" X ", " X X ", " XXXXX "]
# Generate coordinates
coords = [get_coords(data) for i in range(batch_size)]
coords = ME.utils.batched_coordinates(coords)
# features and labels
N = len(coords)
feats = torch.arange(N * nchannel).view(N, nchannel).to(dtype)
label = (torch.rand(batch_size if is_classification else N) * max_label).long()
return coords, feats, label
| MinkowskiEngine-master | tests/python/common.py |
# Copyright (c) 2021 NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import unittest
import numpy as np
import torch
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine import MinkowskiStackCat, MinkowskiStackSum
from MinkowskiEngine.utils import batched_coordinates
from utils.gradcheck import gradcheck
from tests.python.common import data_loader, load_file
class TestStack(unittest.TestCase):
    def test_sum(self):
        if not torch.cuda.is_available():
            return
        coords, colors, pcd = load_file("1.ply")
        device = "cuda"
D = 3
batch_size = 16
voxel_size = 0.02
channels = [3, 64, 128]
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
in_feats = torch.rand(len(bcoords), 3).to(0)
layer = MinkowskiStackSum(
ME.MinkowskiConvolution(
channels[0],
channels[1],
kernel_size=3,
stride=1,
dimension=3,
),
nn.Sequential(
ME.MinkowskiConvolution(
channels[0],
channels[1],
kernel_size=3,
stride=2,
dimension=3,
),
ME.MinkowskiStackSum(
nn.Identity(),
nn.Sequential(
ME.MinkowskiConvolution(
channels[1],
channels[2],
kernel_size=3,
stride=2,
dimension=3,
),
ME.MinkowskiConvolutionTranspose(
channels[2],
channels[1],
kernel_size=3,
stride=1,
dimension=3,
),
ME.MinkowskiPoolingTranspose(
kernel_size=2, stride=2, dimension=D
),
),
),
ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D),
),
).cuda()
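        # Repeated forward passes stress-test coordinate map construction and GPU memory reuse.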
for i in range(1000):
torch.cuda.empty_cache()
sinput = ME.SparseTensor(in_feats, coordinates=bcoords, device=device)
layer(sinput)
    def test_baseline(self):
        if not torch.cuda.is_available():
            return
        coords, colors, pcd = load_file("1.ply")
        device = "cuda"
D = 3
batch_size = 16
voxel_size = 0.02
channels = [3, 64, 128]
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
in_feats = torch.rand(len(bcoords), 3).to(0)
layer = nn.Sequential(
ME.MinkowskiConvolution(
channels[0],
channels[1],
kernel_size=3,
stride=1,
dimension=3,
),
ME.MinkowskiConvolution(
channels[1],
channels[2],
kernel_size=3,
stride=2,
dimension=3,
),
ME.MinkowskiConvolutionTranspose(
channels[2],
channels[1],
kernel_size=3,
stride=1,
dimension=3,
),
).cuda()
for i in range(1000):
torch.cuda.empty_cache()
sinput = ME.SparseTensor(in_feats, coordinates=bcoords, device=device)
layer(sinput)
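if __name__ == "__main__":
    unittest.main()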
| MinkowskiEngine-master | tests/python/stack.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import unittest
import numpy as np
import torch
from MinkowskiEngine import (
SparseTensor,
SparseTensorOperationMode,
SparseTensorQuantizationMode,
set_sparse_tensor_operation_mode,
clear_global_coordinate_manager,
is_cuda_available,
)
from MinkowskiEngine.utils import batched_coordinates, sparse_quantize, sparse_collate
from tests.python.common import data_loader, load_file
class SparseTensorTestCase(unittest.TestCase):
def test(self):
print(f"{self.__class__.__name__}: test SparseTensor")
coords, feats, labels = data_loader(nchannel=2)
input = SparseTensor(feats, coordinates=coords)
print(input)
def test_empty(self):
print(f"{self.__class__.__name__}: test_empty SparseTensor")
feats = torch.FloatTensor(0, 16)
coords = torch.IntTensor(0, 4)
input = SparseTensor(feats, coordinates=coords)
print(input)
def test_tensor_stride(self):
print(f"{self.__class__.__name__}: test_tensor_stride SparseTensor")
feats = torch.FloatTensor(4, 16)
coords = torch.IntTensor(
[[0, 4, 2, 1], [0, 4, 0, 0], [0, 4, 4, 4], [0, 4, 4, 7]]
)
print(coords)
input = SparseTensor(feats, coordinates=coords, tensor_stride=4)
self.assertEqual(input.tensor_stride, [4, 4, 4])
print(input)
def test_force_creation(self):
print(f"{self.__class__.__name__}: test_force_creation")
coords, feats, labels = data_loader(nchannel=2)
input1 = SparseTensor(feats, coordinates=coords)
input2 = SparseTensor(
feats, coordinates=coords, coordinate_manager=input1.coordinate_manager
)
print(input1.coordinate_map_key, input2.coordinate_map_key)
def test_device(self):
print(f"{self.__class__.__name__}: test_device SparseTensor")
if not is_cuda_available():
return
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
SparseTensor(feats.to(0), coords.to(0))
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T.to(0)
st = SparseTensor(feats, coords, device=feats.device)
print(st)
def test_device_unique(self):
print(f"{self.__class__.__name__}: test_device_unique SparseTensor")
if not is_cuda_available():
return
coords = torch.IntTensor(
[[0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1], [1, 2]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
SparseTensor(feats.to(0), coords.to(0))
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T.to(0)
st = SparseTensor(feats, coords, device=feats.device)
print(st)
def test_device2(self):
print(f"{self.__class__.__name__}: test_device2 SparseTensor")
if not is_cuda_available():
return
        coordinates = np.random.rand(8192, 3) * 200
quant_coordinates, quant_features = sparse_quantize(coordinates, coordinates)
bcoords, bfeats = sparse_collate([quant_coordinates], [quant_features])
bcoords, bfeats = bcoords.cuda(), bfeats.cuda()
print(bcoords, bfeats)
SparseTensor(bfeats, bcoords)
def test_quantization(self):
print(f"{self.__class__.__name__}: test_quantization")
coords, feats, labels = data_loader(nchannel=2)
# create duplicate coords
coords[0] = coords[1]
coords[2] = coords[3]
input = SparseTensor(feats, coordinates=coords)
self.assertTrue(len(input) == len(coords) - 2)
input = SparseTensor(
feats,
coordinates=coords,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
)
self.assertTrue(len(coords) == 16)
self.assertTrue(len(input) == 14)
# 1D
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
# 0.5, 2.5, 5.5, 7
sinput = SparseTensor(
coordinates=coords,
features=feats,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
)
self.assertTrue(len(sinput) == 4)
self.assertTrue(0.5 in sinput.features)
self.assertTrue(2.5 in sinput.features)
self.assertTrue(5.5 in sinput.features)
self.assertTrue(7 in sinput.features)
self.assertTrue(len(sinput.slice(sinput)) == len(coords))
    def test_quantization_gpu(self):
        print(f"{self.__class__.__name__}: test_quantization_gpu")
        if not is_cuda_available():
            return
        coords, feats, labels = data_loader(nchannel=2)
# create duplicate coords
coords[0] = coords[1]
coords[2] = coords[3]
input = SparseTensor(feats, coordinates=coords)
self.assertTrue(len(input) == len(coords) - 2)
input = SparseTensor(
feats,
coordinates=coords,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
device="cuda",
)
self.assertTrue(len(coords) == 16)
self.assertTrue(len(input) == 14)
print(input)
# 1D
coords = torch.IntTensor(
[[0, 1], [0, 1], [0, 2], [0, 2], [1, 0], [1, 0], [1, 1]]
)
feats = torch.FloatTensor([[0, 1, 2, 3, 5, 6, 7]]).T
# 0.5, 2.5, 5.5, 7
sinput = SparseTensor(
coordinates=coords,
features=feats,
quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
device="cuda",
)
print(sinput)
self.assertTrue(len(sinput) == 4)
self.assertTrue(0.5 in sinput.features)
self.assertTrue(2.5 in sinput.features)
self.assertTrue(5.5 in sinput.features)
self.assertTrue(7 in sinput.features)
self.assertTrue(len(sinput.slice(sinput)) == len(coords))
def test_extraction(self):
print(f"{self.__class__.__name__}: test_extraction")
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(feats, coords)
C0 = X.coordinates_at(0)
F0 = X.features_at(0)
self.assertTrue(0 in C0)
self.assertTrue(1 in C0)
self.assertTrue(2 in C0)
self.assertTrue(1.1 in F0)
self.assertTrue(2.1 in F0)
self.assertTrue(3.1 in F0)
CC0, FC0 = X.coordinates_and_features_at(0)
self.assertTrue((C0 == CC0).all())
self.assertTrue((F0 == FC0).all())
coords, feats = X.decomposed_coordinates_and_features
for c, f in zip(coords, feats):
self.assertEqual(c.numel(), f.numel())
print(c, f)
self.assertEqual(len(coords[0]), 3)
self.assertEqual(len(coords[1]), 0)
self.assertEqual(len(coords[2]), 2)
if not is_cuda_available():
return
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(feats, coords, device=0)
coords, feats = X.decomposed_coordinates_and_features
for c, f in zip(coords, feats):
self.assertEqual(c.numel(), f.numel())
print(c, f)
self.assertEqual(len(coords[0]), 3)
self.assertEqual(len(coords[1]), 0)
self.assertEqual(len(coords[2]), 2)
def test_features_at_coordinates(self):
print(f"{self.__class__.__name__}: test_features_at_coordinates")
coords = torch.IntTensor([[0, 0], [0, 1], [0, 2], [2, 0], [2, 2]])
feats = torch.FloatTensor([[1.1, 2.1, 3.1, 4.1, 5.1]]).t()
X = SparseTensor(features=feats, coordinates=coords)
feats = X.features_at_coordinates(
torch.FloatTensor([[0, 0], [0, 1], [0, 2], [2, 2], [0, 0], [0, 0.5]])
).flatten()
self.assertTrue(feats[0] == 1.1)
self.assertTrue(feats[3] == 5.1)
self.assertTrue(feats[4] == 1.1)
def test_decomposition(self):
print(f"{self.__class__.__name__}: test_decomposition")
coords, colors, pcd = load_file("1.ply")
colors = torch.from_numpy(colors)
for batch_size in [1, 5, 10, 20, 40]:
for voxel_size in [0.02]:
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
feats = torch.cat([colors for b in range(batch_size)], 0)
sinput = SparseTensor(feats, bcoords)
(
decomposed_coords,
decomposed_feats,
) = sinput.decomposed_coordinates_and_features
print([len(c) for c in decomposed_coords])
print([len(f) for f in decomposed_feats])
self.assertEqual(len(decomposed_coords), batch_size)
self.assertEqual(len(decomposed_feats), batch_size)
def test_decomposition_gpu(self):
print(f"{self.__class__.__name__}: test_decomposition_gpu")
if not torch.cuda.is_available():
return
coords, colors, pcd = load_file("1.ply")
colors = torch.from_numpy(colors)
for batch_size in [5, 10, 20, 40]:
for voxel_size in [0.02]:
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
feats = torch.cat([colors for b in range(batch_size)], 0)
sinput = SparseTensor(feats.to(0), bcoords.to(0))
(
decomposed_coords,
decomposed_feats,
) = sinput.decomposed_coordinates_and_features
print([len(c) for c in decomposed_coords])
print([len(f) for f in decomposed_feats])
self.assertEqual(len(decomposed_coords), batch_size)
self.assertEqual(len(decomposed_feats), batch_size)
def test_operation_mode(self):
print(f"{self.__class__.__name__}: test_operation_mode")
# Set to use the global sparse tensor coords manager by default
set_sparse_tensor_operation_mode(
SparseTensorOperationMode.SHARE_COORDINATE_MANAGER
)
coords, feats, labels = data_loader(nchannel=2)
# Create a sparse tensor on two different coordinates.
A = SparseTensor(torch.rand(feats.shape), coordinates=coords)
B = SparseTensor(
torch.rand(4, 2),
coordinates=torch.IntTensor([[0, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]]),
)
self.assertTrue(A.coordinate_manager == B.coordinate_manager)
A.requires_grad_(True)
B.requires_grad_(True)
C = A + B
C.F.sum().backward()
self.assertTrue(torch.all(A.F.grad == 1).item())
self.assertTrue(torch.all(B.F.grad == 1).item())
C = A - B
C = A * B
C = A / B
# Inplace
A.requires_grad_(False)
D = SparseTensor(
torch.rand(feats.shape),
coordinate_map_key=A.coordinate_map_key,
coordinate_manager=A.coordinate_manager,
)
A -= D
A *= D
A /= D
clear_global_coordinate_manager()
set_sparse_tensor_operation_mode(
SparseTensorOperationMode.SEPARATE_COORDINATE_MANAGER
)
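if __name__ == "__main__":
    unittest.main()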
| MinkowskiEngine-master | tests/python/sparse_tensor.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import unittest
from MinkowskiEngine import SparseTensor, MinkowskiChannelwiseConvolution
import MinkowskiEngine as ME
from tests.python.common import data_loader
def get_random_coords(dimension=2, tensor_stride=2):
torch.manual_seed(0)
# Create random coordinates with tensor stride == 2
coords = torch.rand(10, dimension + 1)
coords[:, :dimension] *= 5 # random coords
coords[:, -1] *= 2 # random batch index
coords = coords.floor().int()
coords = ME.utils.sparse_quantize(coords)
coords[:, :dimension] *= tensor_stride # make the tensor stride 2
return coords, tensor_stride
class TestConvolution(unittest.TestCase):
def test(self):
print(f"{self.__class__.__name__}: test")
in_channels, D = 3, 2
coords, feats, labels = data_loader(in_channels, batch_size=2)
# Create random coordinates with tensor stride == 2
out_coords, tensor_stride = get_random_coords()
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords=coords)
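        # Channelwise (depthwise) convolution: each channel gets its own kernel,
        # so the channel count is unchanged.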
conv = MinkowskiChannelwiseConvolution(
in_channels,
kernel_size=3,
stride=1,
bias=False,
dimension=D).double()
print('Initial input: ', input)
output = conv(input)
print('Conv output: ', output)
output.F.sum().backward()
print(input.F.grad)
def test_gpu(self):
print(f"{self.__class__.__name__}: test_gpu")
if not torch.cuda.is_available():
return
device = torch.device('cuda')
in_channels, D = 3, 2
coords, feats, labels = data_loader(in_channels, batch_size=2)
# Create random coordinates with tensor stride == 2
out_coords, tensor_stride = get_random_coords()
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords=coords).to(device)
conv = MinkowskiChannelwiseConvolution(
in_channels,
kernel_size=3,
stride=1,
bias=False,
dimension=D).double().to(device)
print('Initial input: ', input)
output = conv(input)
print('Conv output: ', output)
if __name__ == '__main__':
unittest.main()
| MinkowskiEngine-master | tests/python/chwise_conv.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import unittest
from MinkowskiEngine import (
SparseTensor,
TensorField,
MinkowskiConvolution,
MinkowskiLocalPoolingFunction,
MinkowskiSumPooling,
MinkowskiAvgPooling,
MinkowskiMaxPooling,
MinkowskiLocalPoolingTransposeFunction,
MinkowskiPoolingTranspose,
MinkowskiGlobalPoolingFunction,
MinkowskiGlobalPooling,
MinkowskiGlobalSumPooling,
MinkowskiGlobalAvgPooling,
MinkowskiGlobalMaxPooling,
)
from utils.gradcheck import gradcheck
from tests.python.common import data_loader
class TestLocalMaxPooling(unittest.TestCase):
def test_gpu(self):
if not torch.cuda.is_available():
return
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coordinates=coords)
pool = MinkowskiMaxPooling(kernel_size=3, stride=2, dimension=D)
output = pool(input)
print(output)
if not torch.cuda.is_available():
return
input = SparseTensor(feats, coordinates=coords, device=0)
output = pool(input)
print(output)
# Check backward
fn = MinkowskiLocalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
pool.kernel_generator,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
def test(self):
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coordinates=coords)
pool = MinkowskiMaxPooling(kernel_size=3, stride=2, dimension=D)
output = pool(input)
print(output)
# Check backward
fn = MinkowskiLocalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
pool.kernel_generator,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
class TestLocalSumPooling(unittest.TestCase):
def test_sumpooling(self):
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords)
pool = MinkowskiSumPooling(kernel_size=3, stride=2, dimension=D)
output = pool(input)
print(output)
# Check backward
fn = MinkowskiLocalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
pool.kernel_generator,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
        if not torch.cuda.is_available():
            return
        input = SparseTensor(feats, coords, device=0)
output = pool(input)
print(output)
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
pool.kernel_generator,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
def test_poolmap(self):
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords)
pool = MinkowskiSumPooling(kernel_size=2, stride=2, dimension=D)
output = pool(input)
print(output)
# Check backward
fn = MinkowskiLocalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
pool.kernel_generator,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
        if not torch.cuda.is_available():
            return
        input = SparseTensor(feats, coords, device=0)
output = pool(input)
print(output)
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
pool.kernel_generator,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
class TestLocalAvgPooling(unittest.TestCase):
def test_gpu(self):
if not torch.cuda.is_available():
return
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coordinates=coords)
pool = MinkowskiAvgPooling(kernel_size=3, stride=2, dimension=D)
output = pool(input)
print(output)
if not torch.cuda.is_available():
return
input = SparseTensor(feats, coordinates=coords, device=0)
output = pool(input)
print(output)
# Check backward
fn = MinkowskiLocalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
pool.kernel_generator,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
def test(self):
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coordinates=coords)
pool = MinkowskiAvgPooling(kernel_size=3, stride=2, dimension=D)
output = pool(input)
print(output)
# Check backward
fn = MinkowskiLocalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
pool.kernel_generator,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
class TestPoolingTranspose(unittest.TestCase):
def test_unpool(self):
in_channels, out_channels, D = 2, 3, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
input = SparseTensor(feats, coords)
conv = MinkowskiConvolution(
in_channels, out_channels, kernel_size=3, stride=2, dimension=D
)
conv = conv.double()
unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
input = conv(input)
output = unpool(input)
print(output)
# Check backward
fn = MinkowskiLocalPoolingTransposeFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
unpool.pooling_mode,
unpool.kernel_generator,
input.coordinate_map_key,
None,
input.coordinate_manager,
),
)
)
def test_unpool_gpu(self):
if not torch.cuda.is_available():
return
in_channels, out_channels, D = 2, 3, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
input = SparseTensor(feats, coords)
conv = MinkowskiConvolution(
in_channels, out_channels, kernel_size=3, stride=2, dimension=D
)
conv = conv.double()
unpool = MinkowskiPoolingTranspose(kernel_size=3, stride=2, dimension=D)
input = conv(input)
output = unpool(input)
print(output)
# Check backward
fn = MinkowskiLocalPoolingTransposeFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
unpool.pooling_mode,
unpool.kernel_generator,
input.coordinate_map_key,
None,
input.coordinate_manager,
),
)
)
with torch.cuda.device(0):
conv = conv.to("cuda")
input = SparseTensor(feats, coords, device="cuda")
input = conv(input)
input.requires_grad_()
output = unpool(input)
print(output)
# Check backward
self.assertTrue(
gradcheck(
fn,
(
input.F,
unpool.pooling_mode,
unpool.kernel_generator,
input.coordinate_map_key,
None,
input.coordinate_manager,
),
)
)
class TestGlobalAvgPooling(unittest.TestCase):
def test_batch_size1(self):
if not torch.cuda.is_available():
return
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels, batch_size=1)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coordinates=coords)
pool = MinkowskiGlobalAvgPooling()
output = pool(input)
print(output)
if not torch.cuda.is_available():
return
input = SparseTensor(feats, coordinates=coords, device=0)
output = pool(input)
print(output)
# Check backward
fn = MinkowskiGlobalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
def test_gpu(self):
if not torch.cuda.is_available():
return
in_channels = 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coordinates=coords)
pool = MinkowskiGlobalAvgPooling()
output = pool(input)
print(output)
if not torch.cuda.is_available():
return
input = SparseTensor(feats, coordinates=coords, device=0)
output = pool(input)
print(output)
# Check backward
fn = MinkowskiGlobalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
def test(self):
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords)
pool = MinkowskiGlobalAvgPooling()
output = pool(input)
print(output)
# Check backward
fn = MinkowskiGlobalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
class TestGlobalMaxPooling(unittest.TestCase):
def test_batch_size(self):
if not torch.cuda.is_available():
return
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels, batch_size=1)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coordinates=coords)
pool = MinkowskiGlobalMaxPooling()
output = pool(input)
print(output)
output.F.sum().backward()
if not torch.cuda.is_available():
return
input = SparseTensor(feats, coordinates=coords, device="cuda")
output = pool(input)
print(output)
output.F.sum().backward()
def test_gpu(self):
if not torch.cuda.is_available():
return
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coordinates=coords)
pool = MinkowskiGlobalMaxPooling()
output = pool(input)
print(output)
if not torch.cuda.is_available():
return
input = SparseTensor(feats, coordinates=coords, device=0)
output = pool(input)
print(output)
# Check backward
fn = MinkowskiGlobalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
def test(self):
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = SparseTensor(feats, coords)
        pool = MinkowskiGlobalMaxPooling()
output = pool(input)
print(output)
# Check backward
fn = MinkowskiGlobalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
input.coordinate_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
def test_field(self):
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
feats = feats.double()
feats.requires_grad_()
input = TensorField(feats, coords)
pool = MinkowskiGlobalMaxPooling()
output = pool(input)
print(output)
# Check backward
fn = MinkowskiGlobalPoolingFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
input.coordinate_field_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
if not torch.cuda.is_available():
return
input = TensorField(feats, coords, device="cuda")
output = pool(input)
print(output)
# Check backward
self.assertTrue(
gradcheck(
fn,
(
input.F,
pool.pooling_mode,
input.coordinate_field_map_key,
output.coordinate_map_key,
input._manager,
),
)
)
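if __name__ == "__main__":
    unittest.main()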
| MinkowskiEngine-master | tests/python/pool.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import unittest
from MinkowskiEngine import (
SparseTensor,
MinkowskiGlobalSumPooling,
MinkowskiBroadcastFunction,
MinkowskiBroadcastAddition,
MinkowskiBroadcastMultiplication,
MinkowskiBroadcast,
MinkowskiBroadcastConcatenation,
BroadcastMode,
)
from utils.gradcheck import gradcheck
from tests.python.common import data_loader
class TestBroadcast(unittest.TestCase):
def test_broadcast_gpu(self):
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
coords, feats_glob, labels = data_loader(in_channels)
feats = feats.double()
feats_glob = feats_glob.double()
feats.requires_grad_()
feats_glob.requires_grad_()
input = SparseTensor(feats, coords)
pool = MinkowskiGlobalSumPooling()
input_glob = pool(input).detach()
input_glob.F.requires_grad_()
broadcast_add = MinkowskiBroadcastAddition()
broadcast_mul = MinkowskiBroadcastMultiplication()
broadcast_cat = MinkowskiBroadcastConcatenation()
cpu_add = broadcast_add(input, input_glob)
cpu_mul = broadcast_mul(input, input_glob)
cpu_cat = broadcast_cat(input, input_glob)
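        # Broadcast ops combine each point feature with its batch's pooled
        # global feature: addition and multiplication keep the channel
        # count, while concatenation appends the global feature and doubles
        # the channels. CPU results are kept to compare against GPU below.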
# Check backward
fn = MinkowskiBroadcastFunction()
device = torch.device("cuda")
input = SparseTensor(feats, coords, device=device)
input_glob = pool(input).detach()
gpu_add = broadcast_add(input, input_glob)
gpu_mul = broadcast_mul(input, input_glob)
gpu_cat = broadcast_cat(input, input_glob)
        self.assertTrue(torch.allclose(gpu_add.F.cpu(), cpu_add.F, atol=1e-5))
        self.assertTrue(torch.allclose(gpu_mul.F.cpu(), cpu_mul.F, atol=1e-5))
        self.assertTrue(torch.allclose(gpu_cat.F.cpu(), cpu_cat.F, atol=1e-5))
self.assertTrue(
gradcheck(
fn,
(
input.F,
input_glob.F,
broadcast_add.operation_type,
input.coordinate_map_key,
input_glob.coordinate_map_key,
input.coordinate_manager,
),
)
)
self.assertTrue(
gradcheck(
fn,
(
input.F,
input_glob.F,
broadcast_mul.operation_type,
input.coordinate_map_key,
input_glob.coordinate_map_key,
input.coordinate_manager,
),
)
)
def test_broadcast(self):
in_channels, D = 2, 2
coords, feats, labels = data_loader(in_channels)
coords, feats_glob, labels = data_loader(in_channels)
feats = feats.double()
feats_glob = feats_glob.double()
feats.requires_grad_()
feats_glob.requires_grad_()
input = SparseTensor(feats, coords)
pool = MinkowskiGlobalSumPooling()
input_glob = pool(input).detach()
input_glob.requires_grad_()
broadcast = MinkowskiBroadcast()
broadcast_cat = MinkowskiBroadcastConcatenation()
broadcast_add = MinkowskiBroadcastAddition()
broadcast_mul = MinkowskiBroadcastMultiplication()
output = broadcast(input, input_glob)
print(output)
output = broadcast_cat(input, input_glob)
print(output)
output = broadcast_add(input, input_glob)
print(output)
output = broadcast_mul(input, input_glob)
print(output)
# Check backward
fn = MinkowskiBroadcastFunction()
self.assertTrue(
gradcheck(
fn,
(
input.F,
input_glob.F,
broadcast_add.operation_type,
input.coordinate_map_key,
input_glob.coordinate_map_key,
input.coordinate_manager,
),
)
)
self.assertTrue(
gradcheck(
fn,
(
input.F,
input_glob.F,
broadcast_mul.operation_type,
input.coordinate_map_key,
input_glob.coordinate_map_key,
input.coordinate_manager,
),
)
)
if __name__ == "__main__":
unittest.main()
| MinkowskiEngine-master | tests/python/broadcast.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import unittest
from MinkowskiEngine import spmm, MinkowskiSPMMFunction, MinkowskiSPMMAverageFunction
from utils.gradcheck import gradcheck
class TestSPMM(unittest.TestCase):
def test_spmm(self):
rows = torch.Tensor([0, 0, 1, 1]).int()
cols = torch.Tensor([0, 1, 2, 3]).int()
vals = torch.ones(4).double()
size = [2, 4]
mat = torch.rand(4, 3).double()
mat.requires_grad_()
out = spmm(rows, cols, vals, size, mat, is_sorted=False)
print(out)
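        # rows/cols/vals encode a 2x4 COO matrix with ones at the listed
        # positions, so out = A @ mat reduces to row gathers:
        # out[0] == mat[0] + mat[1] and out[1] == mat[2] + mat[3].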
rows = rows.cuda()
cols = cols.cuda()
vals = vals.cuda()
mat = mat.cuda()
out = spmm(rows, cols, vals, size, mat, is_sorted=False)
print(out)
def test_spmm_sorted(self):
rows = torch.Tensor([0, 0, 1, 1]).int()
cols = torch.Tensor([0, 1, 2, 3]).int()
vals = torch.ones(4).double()
size = [2, 4]
mat = torch.rand(4, 3).double()
mat.requires_grad_()
out = spmm(rows, cols, vals, size, mat, is_sorted=True)
print(out)
rows = rows.cuda()
cols = cols.cuda()
vals = vals.cuda()
mat = mat.cuda()
out = spmm(rows, cols, vals, size, mat, is_sorted=True)
print(out)
def test(self):
rows = torch.Tensor([0, 0, 1, 1]).int()
cols = torch.Tensor([0, 1, 2, 3]).int()
vals = torch.ones(4).double()
size = [2, 4]
mat = torch.rand(4, 3).double()
mat.requires_grad_()
spmm_fn = MinkowskiSPMMFunction()
out = spmm_fn.apply(rows, cols, vals, size, mat)
print(out)
loss = out.sum()
loss.backward()
print(mat.grad)
self.assertTrue(gradcheck(spmm_fn, (rows, cols, vals, size, mat)))
rows = rows.cuda()
cols = cols.cuda()
vals = vals.cuda()
mat = mat.cuda()
mat.requires_grad_()
out = spmm_fn.apply(rows, cols, vals, size, mat)
print(out)
loss = out.sum()
loss.backward()
print(mat.grad)
self.assertTrue(gradcheck(spmm_fn, (rows, cols, vals, size, mat)))
def test_average(self):
rows = torch.Tensor([0, 0, 1, 1]).int()
cols = torch.Tensor([0, 1, 2, 3]).int()
size = [2, 4]
mat = torch.rand(4, 3).double()
mat.requires_grad_()
spmm_fn = MinkowskiSPMMAverageFunction()
out = spmm_fn.apply(rows, cols, size, mat)
print(out)
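        # The average variant normalizes each output row by its nonzero
        # count; with two entries per row, out[0] should equal
        # (mat[0] + mat[1]) / 2.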
loss = out.sum()
loss.backward()
print(mat.grad)
self.assertTrue(gradcheck(spmm_fn, (rows, cols, size, mat)))
rows = rows.cuda()
cols = cols.cuda()
mat = mat.cuda()
mat.requires_grad_()
out = spmm_fn.apply(rows, cols, size, mat)
print(out)
loss = out.sum()
loss.backward()
print(mat.grad)
self.assertTrue(gradcheck(spmm_fn, (rows, cols, size, mat)))
def test_dtype(self):
rows = torch.Tensor([0, 0, 1, 1]).float()
cols = torch.Tensor([0, 1, 2, 3]).double()
vals = torch.ones(4).double()
size = [2, 4]
mat = torch.rand(4, 3).double()
mat.requires_grad_()
spmm_fn = MinkowskiSPMMFunction()
out = spmm_fn.apply(rows, cols, vals, size, mat)
print(out)
if not torch.cuda.is_available():
return
rows = torch.cuda.IntTensor([0, 0, 1, 1])
cols = torch.cuda.IntTensor([0, 1, 2, 3])
vals = torch.ones(4).double().to(0)
size = [2, 4]
mat = mat.to(0)
mat.requires_grad_()
out = spmm_fn.apply(rows, cols, vals, size, mat)
print(out)
| MinkowskiEngine-master | tests/python/spmm.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import unittest
import MinkowskiEngine as ME
from MinkowskiEngine import SparseTensor, MinkowskiUnion
class TestUnion(unittest.TestCase):
def test_union(self):
coords1 = torch.IntTensor([[0, 0], [0, 1]])
coords2 = torch.IntTensor([[0, 1], [1, 1]])
feats1 = torch.DoubleTensor([[1], [2]])
feats2 = torch.DoubleTensor([[3], [4]])
union = MinkowskiUnion()
input1 = SparseTensor(
coordinates=ME.utils.batched_coordinates([coords1]), features=feats1
)
input2 = SparseTensor(
coordinates=ME.utils.batched_coordinates([coords2]),
features=feats2,
coordinate_manager=input1.coordinate_manager, # Must use same coords manager
)
input1.requires_grad_()
input2.requires_grad_()
output = union(input1, input2)
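        # The inputs overlap only at coordinate [0, 1], so the union holds
        # three unique coordinates and the overlapping features are summed:
        # 2 + 3 = 5, as the assertions below verify.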
print(output)
self.assertTrue(len(output) == 3)
self.assertTrue(5 in output.F)
output.F.sum().backward()
# Grad of sum feature is 1.
self.assertTrue(torch.prod(input1.F.grad) == 1)
self.assertTrue(torch.prod(input2.F.grad) == 1)
def test_union_gpu(self):
device = torch.device("cuda")
coords1 = torch.IntTensor([[0, 0], [0, 1]])
coords2 = torch.IntTensor([[0, 1], [1, 1]])
feats1 = torch.DoubleTensor([[1], [2]])
feats2 = torch.DoubleTensor([[3], [4]])
union = MinkowskiUnion()
input1 = SparseTensor(feats1, coords1, device=device, requires_grad=True)
input2 = SparseTensor(
feats2,
coords2,
device=device,
coordinate_manager=input1.coordinate_manager,
requires_grad=True,
)
output_gpu = union(input1, input2)
output_gpu.F.sum().backward()
print(output_gpu)
self.assertTrue(len(output_gpu) == 3)
self.assertTrue(1 in output_gpu.F)
self.assertTrue(5 in output_gpu.F)
self.assertTrue(4 in output_gpu.F)
if __name__ == "__main__":
unittest.main()
| MinkowskiEngine-master | tests/python/union.py |
import numpy as np
import unittest
import time
import torch
import MinkowskiEngineTest._C as _C
from utils import load_file, batched_coordinates
from gradcheck import gradcheck
class ConvolutionTestCase(unittest.TestCase):
def test(self):
IC, OC = 3, 5
coordinates = torch.IntTensor([[0, 1, -1], [0, 2, 1]])
in_features = torch.rand(len(coordinates), IC)
manager = _C.CoordinateMapManager()
in_key, unique_inverse_map = manager.insert_and_map(coordinates, [1, 1], "")
kernel_size = [3, 3]
kernel_stride = [2, 2]
kernel_dilation = [1, 1]
out_key = _C.CoordinateMapKey(3)
# size, in, out
kernel = torch.rand(9, IC, OC)
out_features = _C.ConvolutionForwardCPU(
in_features,
kernel,
kernel_size,
kernel_stride,
kernel_dilation,
_C.RegionType.HYPER_CUBE,
torch.IntTensor(),
in_key,
out_key,
manager,
)
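        # out_key was created empty above; the forward call is expected to
        # build the stride-2 output coordinate map and fill the key in, so
        # out_features has one row per output coordinate and OC channels.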
print(in_features, out_features)
def test_backward(self):
IC, OC = 3, 5
coordinates = torch.IntTensor([[0, 1, -1], [0, 2, 1]])
in_features = torch.rand(len(coordinates), IC)
manager = _C.CoordinateMapManager()
in_key, unique_inverse_map = manager.insert_and_map(coordinates, [1, 1], "")
kernel_size = [3, 3]
kernel_stride = [2, 2]
kernel_dilation = [1, 1]
out_key = _C.CoordinateMapKey(3)
# size, in, out
kernel = torch.rand(9, IC, OC)
out_features = _C.ConvolutionForwardCPU(
in_features,
kernel,
kernel_size,
kernel_stride,
kernel_dilation,
_C.RegionType.HYPER_CUBE,
torch.IntTensor(),
in_key,
out_key,
manager,
)
out_feat_grad = torch.rand_like(out_features)
in_feat_grad, kernel_grad = _C.ConvolutionBackwardCPU(
in_features,
out_feat_grad,
kernel,
kernel_size,
kernel_stride,
kernel_dilation,
_C.RegionType.HYPER_CUBE,
torch.IntTensor(),
in_key,
out_key,
manager,
)
def test_pcd(self):
IC, OC = 3, 16
coords, colors, pcd = load_file("1.ply")
kernel_size = [3, 3, 3]
kernel_stride = [2, 2, 2]
kernel_dilation = [1, 1, 1]
# size, in, out
kernel = torch.rand(27, IC, OC)
for batch_size in [1, 5, 10, 20, 40]:
for voxel_size in [0.05, 0.035, 0.02]:
min_time = 100000
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
tcolors = torch.from_numpy(colors).float()
bcolors = torch.cat([tcolors for i in range(batch_size)])
for i in range(10):
manager = _C.CoordinateMapManager()
# batch insert
in_key, (unique_map, inverse_map) = manager.insert_and_map(
bcoords, [1, 1, 1], ""
)
ucolors = bcolors[unique_map.long()]
out_key = in_key
stime = time.time()
out_features = _C.ConvolutionForwardCPU(
ucolors,
kernel,
kernel_size,
kernel_stride,
kernel_dilation,
_C.RegionType.HYPER_CUBE,
torch.IntTensor(),
in_key,
out_key,
manager,
)
min_time = min(time.time() - stime, min_time)
print(f"{batch_size}\t{voxel_size}\t{manager.size(in_key)}\t{min_time}")
def test_pcd2(self):
IC, OC = 3, 16
coords, colors, pcd = load_file("1.ply")
kernel_size = [3, 3, 3]
kernel_stride = [2, 2, 2]
kernel_dilation = [1, 1, 1]
for IC in [3, 8, 16, 32, 64, 128]:
for OC in [16, 32, 64, 128, 256]:
# size, in, out
kernel = torch.rand(np.prod(kernel_size), IC, OC)
for batch_size in [1]:
for voxel_size in [0.02]:
min_time = 100000
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates(
[dcoords for i in range(batch_size)]
)
for i in range(10):
manager = _C.CoordinateMapManager()
# batch insert
in_key, (unique_map, inverse_map) = manager.insert_and_map(
bcoords, [1, 1, 1], ""
)
in_feats = torch.rand(manager.size(in_key), IC)
out_key = _C.CoordinateMapKey(4)
stime = time.time()
out_features = _C.ConvolutionForwardCPU(
in_feats,
kernel,
kernel_size,
kernel_stride,
kernel_dilation,
_C.RegionType.HYPER_CUBE,
torch.IntTensor(),
in_key,
out_key,
manager,
)
min_time = min(time.time() - stime, min_time)
print(
f"{batch_size}\t{manager.size(in_key)}\t{manager.size(out_key)}\t{IC}\t{OC}\t{min_time}"
)
| MinkowskiEngine-master | tests/cpp/convolution_cpu_test.py |
import numpy as np
import unittest
import time
import torch
import MinkowskiEngineTest._C
from utils import load_file, batched_coordinates
class KernelRegionTestCase(unittest.TestCase):
def test(self):
coordinates = torch.IntTensor(
[[0, 1, -1], [0, 1, 0], [0, 1, 1], [0, 2, -1], [0, 2, 0], [0, 2, 1]]
)
kernel_size = torch.IntTensor([3, 3])
(in_maps, out_maps), N, t = MinkowskiEngineTest._C.kernel_map_test(
coordinates, coordinates, kernel_size
)
def test2(self):
coordinates = torch.IntTensor([[0, 1, -1], [0, 2, 1]])
kernel_size = torch.IntTensor([3, 3])
regions = MinkowskiEngineTest._C.region_iterator_test(coordinates, kernel_size)
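        # The iterator emits the full 3x3 neighborhood of each coordinate,
        # starting at coordinate - (kernel_size - 1) / 2 and varying the
        # first spatial dimension fastest, exactly the ordering the
        # assertions below spell out.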
self.assertEqual(
len(regions), len(coordinates) * torch.prod(kernel_size).item()
)
self.assertEqual(regions[0], [0, 0, -2])
self.assertEqual(regions[1], [0, 1, -2])
self.assertEqual(regions[2], [0, 2, -2])
self.assertEqual(regions[3], [0, 0, -1])
self.assertEqual(regions[4], [0, 1, -1])
self.assertEqual(regions[5], [0, 2, -1])
self.assertEqual(regions[6], [0, 0, 0])
self.assertEqual(regions[7], [0, 1, 0])
self.assertEqual(regions[8], [0, 2, 0])
def test_even(self):
coordinates = torch.IntTensor([[0, 1, -1], [0, 2, 1]])
kernel_size = torch.IntTensor([3, 2])
regions = MinkowskiEngineTest._C.region_iterator_test(coordinates, kernel_size)
self.assertEqual(
len(regions), len(coordinates) * torch.prod(kernel_size).item()
)
self.assertEqual(regions[0], [0, 0, -1])
self.assertEqual(regions[1], [0, 1, -1])
self.assertEqual(regions[2], [0, 2, -1])
self.assertEqual(regions[3], [0, 0, 0])
self.assertEqual(regions[4], [0, 1, 0])
self.assertEqual(regions[5], [0, 2, 0])
def test_even3(self):
coordinates = torch.IntTensor([[0, 1, -1, 3], [0, 2, 1, -2]])
kernel_size = torch.IntTensor([3, 2, 2])
regions = MinkowskiEngineTest._C.region_iterator_test(coordinates, kernel_size)
self.assertEqual(
len(regions), len(coordinates) * torch.prod(kernel_size).item()
)
self.assertEqual(regions[0], [0, 0, -1, 3])
self.assertEqual(regions[1], [0, 1, -1, 3])
self.assertEqual(regions[2], [0, 2, -1, 3])
self.assertEqual(regions[3], [0, 0, 0, 3])
self.assertEqual(regions[4], [0, 1, 0, 3])
self.assertEqual(regions[5], [0, 2, 0, 3])
self.assertEqual(regions[6], [0, 0, -1, 4])
self.assertEqual(regions[7], [0, 1, -1, 4])
self.assertEqual(regions[8], [0, 2, -1, 4])
self.assertEqual(regions[9], [0, 0, 0, 4])
self.assertEqual(regions[10], [0, 1, 0, 4])
self.assertEqual(regions[11], [0, 2, 0, 4])
def test_kernel_map1(self):
in_coordinates = torch.IntTensor([[0, 1, -1], [0, 2, 1]])
out_coordinates = torch.IntTensor([[0, 1, -1], [0, 2, 1], [1, 2, 1]])
kernel_size = torch.IntTensor([1, 1])
(in_maps, out_maps), num, t = MinkowskiEngineTest._C.kernel_map_test(
in_coordinates, out_coordinates, kernel_size
)
self.assertEqual(in_maps[0], [0, 1])
self.assertEqual(out_maps[0], [0, 1])
def test_kernel_map(self):
in_coordinates = torch.IntTensor([[0, 1, -1], [0, 2, 1]])
out_coordinates = torch.IntTensor([[0, 1, 0], [0, 1, 2], [1, 2, 1]])
kernel_size = torch.IntTensor([3, 3])
kernel_map, num, t = MinkowskiEngineTest._C.kernel_map_test(
in_coordinates, out_coordinates, kernel_size
)
in_maps = kernel_map[0]
out_maps = kernel_map[1]
self.assertEqual(len(in_maps), torch.prod(kernel_size).item())
self.assertEqual(in_maps[1], [0])
self.assertEqual(out_maps[1], [0])
self.assertEqual(in_maps[2], [1])
self.assertEqual(out_maps[2], [1])
def test_pcd(self):
coords, colors, pcd = load_file("1.ply")
kernel_size = torch.IntTensor([3, 3, 3])
for batch_size in [1, 5, 10, 20, 40]:
for voxel_size in [0.05, 0.035, 0.02]:
min_time = 100000
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
for i in range(10):
kernel_map, num, t = MinkowskiEngineTest._C.kernel_map_test(
bcoords, bcoords, kernel_size
)
min_time = min(t, min_time)
num_kernels = np.sum([len(a) for a in kernel_map[0]])
print(f"{batch_size}\t{voxel_size}\t{num}\t{num_kernels}\t{min_time}")
| MinkowskiEngine-master | tests/cpp/kernel_region_cpu_test.py |
import numpy as np
import unittest
import time
import torch
import MinkowskiEngineTest._C as _C
from utils import load_file, batched_coordinates
class ConvolutionTestCase(unittest.TestCase):
def test_stride(self):
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]])
key, manager, map_inverse_map = _C.coordinate_map_manager_test(coordinates, "")
unique_map, inverse_map = map_inverse_map
stride = [2]
key = _C.coordinate_map_manager_stride(manager, key, stride)
print(key)
def test_pcd(self):
coords, colors, pcd = load_file("1.ply")
kernel_size = torch.IntTensor([3, 3, 3])
for batch_size in [1, 5, 10, 20, 40]:
for voxel_size in [0.05, 0.035, 0.02]:
min_time = 100000
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
for i in range(10):
                    kernel_map, num, t = _C.kernel_map_test(
bcoords, bcoords, kernel_size
)
min_time = min(t, min_time)
num_kernels = np.sum([len(a) for a in kernel_map[0]])
print(f"{batch_size}\t{voxel_size}\t{num}\t{num_kernels}\t{min_time}")
| MinkowskiEngine-master | tests/cpp/convolution_cpu.py |
import numpy as np
import unittest
import time
import torch
import MinkowskiEngineTest._C
from utils import load_file, batched_coordinates
class CoordinateMapTestCase(unittest.TestCase):
def test_batch_insert(self):
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]])
num, _ = MinkowskiEngineTest._C.coordinate_map_batch_insert_test(coordinates)
self.assertEqual(num, 3)
def test_inverse_map(self):
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]])
(
mapping_inverse_mapping,
            _,  # elapsed time (unused); avoids shadowing the time module
) = MinkowskiEngineTest._C.coordinate_map_inverse_test(coordinates)
mapping, inverse_mapping = mapping_inverse_mapping
self.assertTrue(
torch.all(coordinates == coordinates[mapping][inverse_mapping])
)
def test_pcd_insert(self):
coords, colors, pcd = load_file("1.ply")
BATCH_SIZE = 1
voxel_size = 0.02
bcoords = [np.floor(coords / voxel_size) for i in range(BATCH_SIZE)]
bcoords = batched_coordinates(bcoords)
num, t = MinkowskiEngineTest._C.coordinate_map_batch_insert_test(bcoords)
self.assertEqual(num, 161890)
for batch_size in [1, 2, 4, 8, 16, 20, 40, 80, 160, 320]:
for voxel_size in [0.02]:
min_time = 1000
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
for i in range(10):
s = time.time()
num, t = MinkowskiEngineTest._C.coordinate_map_batch_insert_test(
bcoords
)
min_time = min(time.time() - s, min_time)
print(f"{len(bcoords)}\t{num}\t{min_time}\t{t}")
def test_batch_find(self):
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]])
queries = torch.IntTensor([[-1, 1], [1, 2], [2, 3], [2, 3], [0, 0]])
(
valid_query_index,
query_value,
) = MinkowskiEngineTest._C.coordinate_map_batch_find_test(coordinates, queries)
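        # Queries [-1, 1] and [0, 0] are absent from the map, so only three
        # valid query indices survive; the duplicate [2, 3] queries both
        # resolve to the same stored row (value 2).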
self.assertEqual(len(valid_query_index), len(query_value))
self.assertEqual(len(valid_query_index), 3)
self.assertEqual(valid_query_index[0], 1)
self.assertEqual(valid_query_index[1], 2)
self.assertEqual(valid_query_index[2], 3)
self.assertEqual(query_value[0], 1)
self.assertEqual(query_value[1], 2)
self.assertEqual(query_value[2], 2)
def test_stride(self):
coordinates = torch.IntTensor([[0, 1], [0, 2], [0, 3], [0, 3]])
stride = [1]
with self.assertRaises(TypeError):
MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)
stride = torch.IntTensor([-1])
with self.assertRaises(RuntimeError):
MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)
stride = torch.IntTensor([1, 1])
with self.assertRaises(RuntimeError):
MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)
stride = torch.IntTensor([2])
map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(
coordinates, stride
)
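        # Striding floors each coordinate onto the stride grid:
        # {1, 2, 3} -> {0, 2, 2} with stride 2, leaving two unique entries.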
self.assertEqual(map_size, 2)
self.assertEqual(tensor_stride, [2])
coordinates = torch.IntTensor(
[[0, 1, 1], [0, 2, 1], [0, 1, 0], [1, 0, 3], [1, 0, 2]]
)
stride = torch.IntTensor([1])
with self.assertRaises(RuntimeError):
MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)
coordinates = torch.IntTensor(
[[0, 1, 1], [0, 2, 1], [0, 1, 0], [1, 0, 3], [1, 0, 2]]
)
stride = torch.IntTensor([1, 1])
map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(
coordinates, stride
)
self.assertEqual(map_size, 5)
self.assertEqual(tensor_stride, [1, 1])
stride = torch.IntTensor([2, 1])
map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(
coordinates, stride
)
self.assertEqual(map_size, 5)
self.assertEqual(tensor_stride, [2, 1])
stride = torch.IntTensor([4, 4])
map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(
coordinates, stride
)
self.assertEqual(map_size, 2)
self.assertEqual(tensor_stride, [4, 4])
coordinates = torch.IntTensor([[0, -1], [0, -2], [0, 1], [0, 0]])
stride = torch.IntTensor([2])
map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(
coordinates, stride
)
self.assertEqual(map_size, 2)
self.assertEqual(tensor_stride, [2])
| MinkowskiEngine-master | tests/cpp/coordinate_map_cpu_test.py |
import unittest
import torch
import MinkowskiEngineTest._C as _C
class CoordinateMapManagerTestCase(unittest.TestCase):
def test(self):
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]])
key, manager, map_inverse_map = _C.coordinate_map_manager_test(coordinates, "")
unique_map, inverse_map = map_inverse_map
self.assertTrue(
torch.all(coordinates[unique_map.long()][inverse_map.long()] == coordinates)
)
def test_stride(self):
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]])
key, manager, map_inverse_map = _C.coordinate_map_manager_test(coordinates, "")
unique_map, inverse_map = map_inverse_map
stride = [2]
key = _C.coordinate_map_manager_stride(manager, key, stride)
print(key)
def test_kernel_map(self):
coordinates = torch.IntTensor([[0, 1], [0, 2], [1, 2], [1, 3]])
manager = _C.CoordinateMapManager()
key, (unique_map, inverse_map) = manager.insert_and_map(coordinates, [1], "1")
key2, (unique_map2, inverse_map2) = manager.insert_and_map(
coordinates, [1], "2"
)
print(key, key2)
self.assertTrue(
torch.all(coordinates[unique_map.long()][inverse_map.long()] == coordinates)
)
in_maps, out_maps = _C.coordinate_map_manager_kernel_map(
manager, key, key2, [3]
)
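        # in_maps[k] / out_maps[k] list the input/output row indices linked
        # by kernel offset k; with identical in/out maps and kernel size 3,
        # every coordinate maps at least to itself at the center offset.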
print(in_maps)
print(out_maps)
| MinkowskiEngine-master | tests/cpp/coordinate_map_manager_cpu_test.py |
import unittest
import torch
import MinkowskiEngineTest._C
class CoordinateMapKeyTestCase(unittest.TestCase):
def test(self):
MinkowskiEngineTest._C.coordinate_map_key_test()
key = MinkowskiEngineTest._C.CoordinateMapKey([3, 4, 5], "")
print(key.__repr__())
self.assertEqual([3, 4, 5], key.get_tensor_stride())
self.assertEqual(4, key.get_coordinate_size())
self.assertEqual(([3, 4, 5], ''), key.get_key())
    def test_update(self):
MinkowskiEngineTest._C.coordinate_map_key_test()
key = MinkowskiEngineTest._C.CoordinateMapKey(3)
print(key.__repr__())
MinkowskiEngineTest._C.coordinate_map_key_update(key, [2, 3], "test")
print(key.__repr__())
self.assertEqual(([2, 3], "test"), key.get_key())
with self.assertRaises(RuntimeError):
MinkowskiEngineTest._C.coordinate_map_key_update(key, [2, 3, 4], "")
| MinkowskiEngine-master | tests/cpp/coordinate_map_key_test.py |
| MinkowskiEngine-master | tests/cpp/__init__.py |
import unittest
import torch
import MinkowskiEngineTest._C
class TypeTestCase(unittest.TestCase):
def test(self):
MinkowskiEngineTest._C.type_test()
        self.assertTrue(True)
| MinkowskiEngine-master | tests/cpp/type_test.py |
import unittest
import torch
import MinkowskiEngineTest._C
class CoordinateTestCase(unittest.TestCase):
def test_check(self):
coordinates = torch.FloatTensor([2, 3])
with self.assertRaises(RuntimeError):
MinkowskiEngineTest._C.coordinate_test(coordinates)
coordinates = torch.IntTensor([2, 3])
with self.assertRaises(RuntimeError):
MinkowskiEngineTest._C.coordinate_test(coordinates)
def test(self):
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]])
self.assertEqual(MinkowskiEngineTest._C.coordinate_test(coordinates), 3)
| MinkowskiEngine-master | tests/cpp/coordinate_test.py |
import sys
from sys import argv, platform
import torch.cuda
import os
import subprocess
from setuptools import setup
import unittest
from pathlib import Path
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME
def run_command(*args):
subprocess.check_call(args)
def _argparse(pattern, argv, is_flag=True):
if is_flag:
found = pattern in argv
if found:
argv.remove(pattern)
return found, argv
else:
arr = [arg for arg in argv if pattern in arg]
if len(arr) == 0: # not found
return False, argv
else:
assert "=" in arr[0], f"{arr[0]} requires a value."
argv.remove(arr[0])
return arr[0].split("=")[1], argv
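# _argparse strips the custom options out of sys.argv before setuptools
# parses the rest. An illustrative invocation (command and flags assumed):
#   python setup.py build_ext --inplace --test=coordinate_map_cpu --nodebug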
SOURCE_SETS = {
"convolution_cpu": [
CppExtension,
["convolution_test.cpp"],
["math_functions.cpp", "coordinate_map_manager.cpp", "convolution_cpu.cpp"],
["-DCPU_ONLY"],
],
"convolution_gpu": [
CUDAExtension,
["convolution_test.cu"],
[
"math_functions.cpp",
"coordinate_map_manager.cu",
"convolution_gpu.cu",
"coordinate_map_gpu.cu",
"convolution_kernel.cu",
],
[],
],
"coordinate_map_manager_cpu": [
CppExtension,
["coordinate_map_manager_cpu_test.cpp"],
["coordinate_map_manager.cpp"],
["-DCPU_ONLY"],
],
"coordinate_map_manager_gpu": [
CUDAExtension,
["coordinate_map_manager_gpu_test.cu"],
["coordinate_map_manager.cu", "coordinate_map_gpu.cu"],
[],
],
"coordinate_map_key": [CppExtension, ["coordinate_map_key_test.cpp"], [], [],],
"coordinate_map_cpu": [CppExtension, ["coordinate_map_cpu_test.cpp"], [], [],],
"coordinate_map_gpu": [
CUDAExtension,
["coordinate_map_gpu_test.cu"],
["coordinate_map_gpu.cu"],
[],
],
"coordinate": [CppExtension, ["coordinate_test.cpp"], [], []],
"kernel_region_cpu": [CppExtension, ["kernel_region_cpu_test.cpp"], [], []],
"kernel_region_gpu": [
CUDAExtension,
["kernel_region_gpu_test.cu"],
["coordinate_map_gpu.cu"],
[],
],
"type": [CppExtension, ["type_test.cpp"], [], []],
}
test_target, argv = _argparse("--test", argv, False)
no_debug, argv = _argparse("--nodebug", argv)
USE_NINJA = os.getenv("USE_NINJA") != "0"  # ninja is on unless USE_NINJA=0
HERE = Path(os.path.dirname(__file__)).absolute()
SRC_PATH = HERE.parent.parent / "src"
CXX = os.environ["CXX"]
assert test_target in SOURCE_SETS.keys()
if sys.platform == "win32":
vc_version = os.getenv("VCToolsVersion", "")
if vc_version.startswith("14.16."):
CXX_FLAGS = ["/sdl"]
else:
CXX_FLAGS = ["/sdl", "/permissive-"]
else:
CXX_FLAGS = ["-fopenmp"]
NVCC_FLAGS = [f"-ccbin={CXX}", "--extended-lambda"]
if not no_debug:
CXX_FLAGS += ["-g", "-DDEBUG"]
NVCC_FLAGS += ["-g", "-DDEBUG"]
else:
CXX_FLAGS += ["-O3"]
NVCC_FLAGS += ["-O3"]
Extension = SOURCE_SETS[test_target][0]
CURR_TEST_FILES = SOURCE_SETS[test_target][1:3]
ARGS = SOURCE_SETS[test_target][3]
CXX_FLAGS += ARGS
NVCC_FLAGS += ARGS
ext_modules = [
Extension(
name="MinkowskiEngineTest._C",
# ["type_test.cpp", "],
sources=[
*[str(HERE / test_file) for test_file in CURR_TEST_FILES[0]],
*[str(SRC_PATH / src_file) for src_file in CURR_TEST_FILES[1]],
],
extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS,},
libraries=["openblas"],
),
]
# if torch.cuda.is_available() and CUDA_HOME is not None:
# extension = CUDAExtension(
# 'torch_test_cpp_extension.cuda', [
# 'cuda_extension.cpp',
# 'cuda_extension_kernel.cu',
# 'cuda_extension_kernel2.cu',
# ],
# extra_compile_args={'cxx': CXX_FLAGS,
# 'nvcc': ['-O2']})
# ext_modules.append(extension)
setup(
name="MinkowskiEngineTest",
packages=[],
ext_modules=ext_modules,
include_dirs=[
str(SRC_PATH),
str(SRC_PATH / "3rdparty"),
os.path.join(CUDA_HOME, "include"),
],
test_suite="setup.suite",
cmdclass={"build_ext": BuildExtension},
)
| MinkowskiEngine-master | tests/cpp/setup.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import numpy as np
import collections
from urllib.request import urlretrieve
import torch
try:
import open3d as o3d
except ImportError:
raise ImportError("Please install open3d with `pip install open3d`.")
if not os.path.isfile("1.ply"):
urlretrieve("https://bit.ly/3c2iLhg", "1.ply")
def load_file(file_name):
pcd = o3d.io.read_point_cloud(file_name)
coords = np.array(pcd.points)
colors = np.array(pcd.colors)
return coords, colors, pcd
def batched_coordinates(coords, dtype=torch.int32, device=None):
r"""Create a `ME.SparseTensor` coordinates from a sequence of coordinates
Given a list of either numpy or pytorch tensor coordinates, return the
batched coordinates suitable for `ME.SparseTensor`.
Args:
:attr:`coords` (a sequence of `torch.Tensor` or `numpy.ndarray`): a
list of coordinates.
:attr:`dtype`: torch data type of the return tensor. torch.int32 by default.
Returns:
    :attr:`batched_coordinates` (`torch.Tensor`): a batched coordinates tensor.
.. warning::
From v0.4, the batch index will be prepended before all coordinates.
"""
assert isinstance(
coords, collections.abc.Sequence
), "The coordinates must be a sequence."
assert np.array(
[cs.ndim == 2 for cs in coords]
).all(), "All coordinates must be in a 2D array."
D = np.unique(np.array([cs.shape[1] for cs in coords]))
assert len(D) == 1, f"Dimension of the array mismatch. All dimensions: {D}"
D = D[0]
if device is None:
        if isinstance(coords[0], torch.Tensor):
device = coords[0].device
else:
device = "cpu"
assert dtype in [
torch.int32,
torch.float32,
], "Only torch.int32, torch.float32 supported for coordinates."
    # Create the batched coordinates tensor
N = np.array([len(cs) for cs in coords]).sum()
bcoords = torch.zeros((N, D + 1), dtype=dtype, device=device) # uninitialized
s = 0
for b, cs in enumerate(coords):
if dtype == torch.int32:
if isinstance(cs, np.ndarray):
cs = torch.from_numpy(np.floor(cs))
elif not (
isinstance(cs, torch.IntTensor) or isinstance(cs, torch.LongTensor)
):
cs = cs.floor()
cs = cs.int()
else:
if isinstance(cs, np.ndarray):
cs = torch.from_numpy(cs)
cn = len(cs)
# BATCH_FIRST:
bcoords[s : s + cn, 1:] = cs
bcoords[s : s + cn, 0] = b
s += cn
return bcoords | MinkowskiEngine-master | tests/cpp/utils.py |
import numpy as np
import unittest
import time
import torch
import MinkowskiEngineTest._C
from utils import load_file, batched_coordinates
class KernelRegionTestCase(unittest.TestCase):
def test(self):
coordinates = torch.IntTensor([[0, 1, -1], [0, 2, 1]]).cuda()
kernel_size = torch.IntTensor([3, 3])
regions = MinkowskiEngineTest._C.region_iterator_test(coordinates, kernel_size)
regions = regions.cpu().tolist()
self.assertEqual(
len(regions), len(coordinates) * torch.prod(kernel_size).item()
)
self.assertEqual(regions[0], [0, 0, -2])
self.assertEqual(regions[1], [0, 1, -2])
self.assertEqual(regions[2], [0, 2, -2])
self.assertEqual(regions[3], [0, 0, -1])
self.assertEqual(regions[4], [0, 1, -1])
self.assertEqual(regions[5], [0, 2, -1])
self.assertEqual(regions[6], [0, 0, 0])
self.assertEqual(regions[7], [0, 1, 0])
self.assertEqual(regions[8], [0, 2, 0])
def test_even3(self):
coordinates = torch.IntTensor([[0, 1, -1, 3], [0, 2, 1, -2]]).cuda()
kernel_size = torch.IntTensor([3, 2, 2])
regions = MinkowskiEngineTest._C.region_iterator_test(coordinates, kernel_size)
regions = regions.cpu().tolist()
self.assertEqual(
len(regions), len(coordinates) * torch.prod(kernel_size).item()
)
self.assertEqual(regions[0], [0, 0, -1, 3])
self.assertEqual(regions[1], [0, 1, -1, 3])
self.assertEqual(regions[2], [0, 2, -1, 3])
self.assertEqual(regions[3], [0, 0, 0, 3])
self.assertEqual(regions[4], [0, 1, 0, 3])
self.assertEqual(regions[5], [0, 2, 0, 3])
self.assertEqual(regions[6], [0, 0, -1, 4])
self.assertEqual(regions[7], [0, 1, -1, 4])
self.assertEqual(regions[8], [0, 2, -1, 4])
self.assertEqual(regions[9], [0, 0, 0, 4])
self.assertEqual(regions[10], [0, 1, 0, 4])
self.assertEqual(regions[11], [0, 2, 0, 4])
def test_kernel_map(self):
in_coordinates = torch.IntTensor([[0, 1, -1], [0, 2, 1]]).cuda()
out_coordinates = torch.IntTensor([[0, 1, 0], [0, 1, 2], [1, 2, 1]]).cuda()
kernel_size = torch.IntTensor([3, 3])
kernel_map, num, t = MinkowskiEngineTest._C.kernel_map_test(
in_coordinates, out_coordinates, kernel_size, 50, 16,
)
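        # The two trailing arguments (50, 16) appear to be GPU hash-map
        # parameters: the first is the occupancy (test_pcd2 below sweeps
        # it), the second looks like a thread or bucket size hint.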
in_maps = kernel_map[0]
out_maps = kernel_map[1]
self.assertEqual(len(in_maps), torch.prod(kernel_size).item())
print(in_maps)
print(out_maps)
def test_kernel_map2(self):
in_coordinates = torch.IntTensor([[0, 1], [0, 2], [0, 3], [0, 4]]).cuda()
out_coordinates = torch.IntTensor([[0, 1], [0, 2], [0, 3], [0, 4]]).cuda()
kernel_size = torch.IntTensor([3])
kernel_map, num, t = MinkowskiEngineTest._C.kernel_map_test(
in_coordinates, out_coordinates, kernel_size, 50, 16
)
in_maps = kernel_map[0]
out_maps = kernel_map[1]
self.assertEqual(len(in_maps), torch.prod(kernel_size).item())
print(in_maps)
print(out_maps)
self.assertEqual(len(in_maps), torch.prod(kernel_size).item())
def test_pcd(self):
coords, colors, pcd = load_file("1.ply")
kernel_size = torch.IntTensor([3, 3, 3])
dcoords = torch.from_numpy(np.floor(coords / 0.02)).int()
bcoords = batched_coordinates([dcoords]).to(0)
kernel_map, num, t = MinkowskiEngineTest._C.kernel_map_test(
bcoords, bcoords, kernel_size, 50, 128,
)
num_kernels = np.sum([len(a) for a in kernel_map[0]])
print(f"{num}\t{num_kernels}\t{t}")
def test_pcd2(self):
coords, colors, pcd = load_file("1.ply")
kernel_size = torch.IntTensor([3, 3, 3])
for occupancy in [50]:
for batch_size in [1, 5, 10, 20, 40]:
for voxel_size in [0.05, 0.035, 0.02]:
min_time = 100000
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates(
[dcoords for i in range(batch_size)]
).to(0)
for i in range(10):
kernel_map, num, t = MinkowskiEngineTest._C.kernel_map_test(
bcoords, bcoords, kernel_size, occupancy, 128,
)
min_time = min(t, min_time)
num_kernels = np.sum([len(a) for a in kernel_map[0]])
print(
f"{occupancy}\t{batch_size}\t{voxel_size}\t{num}\t{num_kernels}\t{min_time}"
)
| MinkowskiEngine-master | tests/cpp/kernel_region_gpu_test.py |
import numpy as np
import unittest
import time
import torch
import MinkowskiEngineTest._C as _C
from utils import load_file, batched_coordinates
from gradcheck import gradcheck
class ConvolutionTestCase(unittest.TestCase):
def test(self):
D, IC, OC = 2, 3, 5
coordinates = torch.IntTensor([[0, 1], [0, 2]]).to(0)
in_features = torch.rand(len(coordinates), IC).to(0)
manager = _C.CoordinateMapManager()
in_key, unique_inverse_map = manager.insert_and_map(coordinates, [1], "")
kernel_size = [3]
kernel_stride = [2]
kernel_dilation = [1]
out_key = _C.CoordinateMapKey(D)
# size, in, out
kernel = torch.rand(3, IC, OC).to(0)
out_features = _C.ConvolutionForwardGPU(
in_features,
kernel,
kernel_size,
kernel_stride,
kernel_dilation,
_C.RegionType.HYPER_CUBE,
torch.IntTensor().to(0),
in_key,
out_key,
manager,
)
print(in_features, out_features)
def test_backward(self):
IC, OC = 3, 5
coordinates = torch.IntTensor([[0, 1, -1], [0, 2, 1]]).to(0)
in_features = torch.rand(len(coordinates), IC).to(0)
manager = _C.CoordinateMapManager()
in_key, unique_inverse_map = manager.insert_and_map(coordinates, [1, 1], "")
kernel_size = [3, 3]
kernel_stride = [2, 2]
kernel_dilation = [1, 1]
out_key = _C.CoordinateMapKey(3)
# size, in, out
kernel = torch.rand(9, IC, OC).to(0)
out_features = _C.ConvolutionForwardGPU(
in_features,
kernel,
kernel_size,
kernel_stride,
kernel_dilation,
_C.RegionType.HYPER_CUBE,
torch.IntTensor().to(0),
in_key,
out_key,
manager,
)
out_feat_grad = torch.rand_like(out_features)
in_feat_grad, kernel_grad = _C.ConvolutionBackwardGPU(
in_features,
out_feat_grad,
kernel,
kernel_size,
kernel_stride,
kernel_dilation,
_C.RegionType.HYPER_CUBE,
torch.IntTensor().to(0),
in_key,
out_key,
manager,
)
print(in_feat_grad, kernel_grad)
def test_pcd(self):
IC, OC = 3, 16
coords, colors, pcd = load_file("1.ply")
kernel_size = [3, 3, 3]
kernel_stride = [2, 2, 2]
kernel_dilation = [1, 1, 1]
# size, in, out
kernel = torch.rand(np.prod(kernel_size), IC, OC).to(0)
for batch_size in [1, 5, 10, 20, 40]:
for voxel_size in [0.05, 0.035, 0.02]:
min_time = 100000
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
for i in range(10):
manager = _C.CoordinateMapManager()
# batch insert
in_key, (unique_map, inverse_map) = manager.insert_and_map(
bcoords.to(0), [1, 1, 1], ""
)
in_feats = torch.rand(manager.size(in_key), IC).to(0)
out_key = _C.CoordinateMapKey(4)
stime = time.time()
out_features = _C.ConvolutionForwardGPU(
in_feats,
kernel,
kernel_size,
kernel_stride,
kernel_dilation,
_C.RegionType.HYPER_CUBE,
torch.IntTensor(),
in_key,
out_key,
manager,
)
min_time = min(time.time() - stime, min_time)
print(
f"{batch_size}\t{manager.size(in_key)}\t{manager.size(out_key)}\t{min_time}"
)
def test_pcd2(self):
IC, OC = 128, 128
coords, colors, pcd = load_file("1.ply")
kernel_size = [3, 3, 3]
kernel_stride = [2, 2, 2]
kernel_dilation = [1, 1, 1]
for IC in [3, 8, 16, 32, 64, 128]:
for OC in [16, 32, 64, 128, 256]:
# size, in, out
kernel = torch.rand(np.prod(kernel_size), IC, OC).to(0)
for batch_size in [1]:
for voxel_size in [0.02]:
min_time = 100000
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates(
[dcoords for i in range(batch_size)]
)
for i in range(10):
manager = _C.CoordinateMapManager()
# batch insert
in_key, (unique_map, inverse_map) = manager.insert_and_map(
bcoords.to(0), [1, 1, 1], ""
)
in_feats = torch.rand(manager.size(in_key), IC).to(0)
out_key = _C.CoordinateMapKey(4)
stime = time.time()
out_features = _C.ConvolutionForwardGPU(
in_feats,
kernel,
kernel_size,
kernel_stride,
kernel_dilation,
_C.RegionType.HYPER_CUBE,
torch.IntTensor(),
in_key,
out_key,
manager,
)
min_time = min(time.time() - stime, min_time)
print(
f"{batch_size}\t{manager.size(in_key)}\t{manager.size(out_key)}\t{IC}\t{OC}\t{min_time}"
)
| MinkowskiEngine-master | tests/cpp/convolution_gpu_test.py |
import numpy as np
import unittest
import time
import torch
import MinkowskiEngineTest._C
from utils import load_file, batched_coordinates
class CoordinateMapTestCase(unittest.TestCase):
def test_batch_insert(self):
assert torch.cuda.is_available()
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]]).to(0)
num, _ = MinkowskiEngineTest._C.coordinate_map_batch_insert_test(coordinates)
self.assertEqual(num, 3)
def test_mapping(self):
assert torch.cuda.is_available()
coordinates = torch.IntTensor(
[[0, 1], [1, 2], [2, 3], [2, 3], [3, 2], [3, 2]]
).to(0)
(
mapping,
inverse_mapping,
) = MinkowskiEngineTest._C.coordinate_map_inverse_map_test(coordinates)
print(mapping)
print(inverse_mapping)
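        # The six input rows contain four unique coordinates; re-indexing
        # the unique rows with inverse_mapping must reproduce the original
        # input, which the assertions below verify.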
self.assertEqual(len(mapping), 4)
self.assertTrue(
torch.all(
coordinates[mapping.long()][inverse_mapping.long()] == coordinates
)
)
def test_pcd_insert(self):
coords, colors, pcd = load_file("1.ply")
BATCH_SIZE = 1
voxel_size = 0.02
bcoords = [np.floor(coords / voxel_size) for i in range(BATCH_SIZE)]
bcoords = batched_coordinates(bcoords).to(0)
num, _ = MinkowskiEngineTest._C.coordinate_map_batch_insert_test(bcoords)
self.assertEqual(num, 161890)
for batch_size in [1, 2, 4, 8, 16, 20, 40, 80, 160, 320]:
for voxel_size in [0.02]:
py_min_time = 1000
dcoords = torch.from_numpy(np.floor(coords / voxel_size)).int()
bcoords = batched_coordinates([dcoords for i in range(batch_size)])
for i in range(10):
s = time.time()
bcoords = bcoords.to(0)
(
num,
cpp_time,
) = MinkowskiEngineTest._C.coordinate_map_batch_insert_test(bcoords)
py_min_time = min(time.time() - s, py_min_time)
print(f"{len(bcoords)}\t{num}\t{py_min_time}\t{cpp_time}")
def test_batch_find(self):
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]]).to(0)
queries = torch.IntTensor([[-1, 1], [1, 2], [2, 3], [2, 3], [0, 0]]).to(0)
(
valid_query_index,
query_value,
) = MinkowskiEngineTest._C.coordinate_map_batch_find_test(coordinates, queries)
self.assertEqual(len(valid_query_index), len(query_value))
self.assertEqual(len(valid_query_index), 3)
self.assertEqual(valid_query_index[0], 1)
self.assertEqual(valid_query_index[1], 2)
self.assertEqual(valid_query_index[2], 3)
self.assertEqual(query_value[0], 1)
self.assertEqual(query_value[1], 2)
self.assertEqual(query_value[2], 2)
def test_stride(self):
coordinates = torch.IntTensor([[0, 1], [0, 2], [0, 3], [0, 3]]).to(0)
stride = [1]
with self.assertRaises(TypeError):
MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)
stride = torch.IntTensor([-1])
with self.assertRaises(RuntimeError):
MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)
stride = torch.IntTensor([1, 1])
with self.assertRaises(RuntimeError):
MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)
stride = torch.IntTensor([2])
map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(
coordinates, stride
)
self.assertEqual(map_size, 2)
self.assertEqual(tensor_stride, [2])
coordinates = torch.IntTensor(
[[0, 1, 1], [0, 2, 1], [0, 1, 0], [1, 0, 3], [1, 0, 2]]
)
stride = torch.IntTensor([1])
with self.assertRaises(RuntimeError):
MinkowskiEngineTest._C.coordinate_map_stride_test(coordinates, stride)
coordinates = torch.IntTensor(
[[0, 1, 1], [0, 2, 1], [0, 1, 0], [1, 0, 3], [1, 0, 2]]
).to(0)
stride = torch.IntTensor([1, 1])
map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(
coordinates, stride
)
self.assertEqual(map_size, 5)
self.assertEqual(tensor_stride, [1, 1])
stride = torch.IntTensor([2, 1])
map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(
coordinates, stride
)
self.assertEqual(map_size, 5)
self.assertEqual(tensor_stride, [2, 1])
stride = torch.IntTensor([4, 4])
map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(
coordinates, stride
)
self.assertEqual(map_size, 2)
self.assertEqual(tensor_stride, [4, 4])
coordinates = torch.IntTensor([[0, -1], [0, -2], [0, 1], [0, 0]]).to(0)
stride = torch.IntTensor([2])
map_size, tensor_stride = MinkowskiEngineTest._C.coordinate_map_stride_test(
coordinates, stride
)
self.assertEqual(map_size, 2)
self.assertEqual(tensor_stride, [2])
| MinkowskiEngine-master | tests/cpp/coordinate_map_gpu_test.py |
import unittest
import torch
import MinkowskiEngineTest._C as _C
class CoordinateMapManagerTestCase(unittest.TestCase):
def test(self):
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]]).to(0)
key, manager, map_inverse_map = _C.coordinate_map_manager_test(coordinates, "")
unique_map, inverse_map = map_inverse_map
self.assertTrue(
torch.all(coordinates[unique_map.long()][inverse_map.long()] == coordinates)
)
def test_stride(self):
coordinates = torch.IntTensor([[0, 1], [1, 2], [2, 3], [2, 3]]).to(0)
key, manager, map_inverse_map = _C.coordinate_map_manager_test(coordinates, "")
unique_map, inverse_map = map_inverse_map
stride = [2]
key = _C.coordinate_map_manager_stride(manager, key, stride)
print(key)
def test_collision(self):
coordinates = torch.IntTensor([[0, 1], [0, 1], [1, 2], [1, 2]]).to(0)
manager = _C.CoordinateMapManager()
key, (unique_map, inverse_map) = manager.insert_and_map(coordinates, [1], "1")
key2, (unique_map2, inverse_map2) = manager.insert_and_map(
coordinates, [1], "2"
)
print(unique_map, inverse_map)
in_maps, out_maps = _C.coordinate_map_manager_kernel_map(
manager, key, key2, [3]
)
print(in_maps)
print(out_maps)
def test_kernel_map(self):
coordinates = torch.IntTensor([[0, 1], [0, 2], [1, 2], [1, 3]]).to(0)
manager = _C.CoordinateMapManager()
key, (unique_map, inverse_map) = manager.insert_and_map(coordinates, [1], "1")
key2, (unique_map2, inverse_map2) = manager.insert_and_map(
coordinates, [1], "2"
)
print(key, key2)
self.assertTrue(
torch.all(coordinates[unique_map.long()][inverse_map.long()] == coordinates)
)
in_maps, out_maps = _C.coordinate_map_manager_kernel_map(
manager, key, key2, [3]
)
print(in_maps)
print(out_maps)
| MinkowskiEngine-master | tests/cpp/coordinate_map_manager_gpu_test.py |
import os
import sys
# import pkg_resources
# pkg_resources.require('MinkowskiEngine==0.4.2a1')
import MinkowskiEngine as ME
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../../'))
# autodoc_mock_imports = ['MinkowskiEngine.examples']
# -- Project information -----------------------------------------------------
project = 'MinkowskiEngine'
copyright = '2020, Chris Choy'
author = 'Chris Choy'
# The short X.Y version
version = ME.__version__
# The full version, including alpha/beta/rc tags
release = ME.__version__
github_doc_root = 'https://github.com/NVIDIA/MinkowskiEngine'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_markdown_tables',
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'recommonmark',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'README.md', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {
# 'github_user': 'NVIDIA',
# 'github_repo': 'MinkowskiEngine',
# 'github_banner': True
# }
html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'MinkowskiEngineDoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MinkowskiEngine.tex', 'MinkowskiEngine Documentation',
'Chris Choy', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'minkowskiengine', 'MinkowskiEngine Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MinkowskiEngine', 'MinkowskiEngine Documentation',
author, 'MinkowskiEngine', 'The generalized sparse convolution library.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| MinkowskiEngine-master | docs/conf.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import argparse
import numpy as np
from urllib.request import urlretrieve
try:
import open3d as o3d
except ImportError:
raise ImportError('Please install open3d with `pip install open3d`.')
import torch
import MinkowskiEngine as ME
from examples.minkunet import MinkUNet34C
# Check if the weights and file exist and download
if not os.path.isfile('weights.pth'):
print('Downloading weights...')
urlretrieve("https://bit.ly/2O4dZrz", "weights.pth")
if not os.path.isfile("1.ply"):
print('Downloading an example pointcloud...')
urlretrieve("https://bit.ly/3c2iLhg", "1.ply")
parser = argparse.ArgumentParser()
parser.add_argument('--file_name', type=str, default='1.ply')
parser.add_argument('--weights', type=str, default='weights.pth')
parser.add_argument('--use_cpu', action='store_true')
CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table',
'door', 'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink',
'bathtub', 'otherfurniture')
VALID_CLASS_IDS = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39
]
SCANNET_COLOR_MAP = {
0: (0., 0., 0.),
1: (174., 199., 232.),
2: (152., 223., 138.),
3: (31., 119., 180.),
4: (255., 187., 120.),
5: (188., 189., 34.),
6: (140., 86., 75.),
7: (255., 152., 150.),
8: (214., 39., 40.),
9: (197., 176., 213.),
10: (148., 103., 189.),
11: (196., 156., 148.),
12: (23., 190., 207.),
14: (247., 182., 210.),
15: (66., 188., 102.),
16: (219., 219., 141.),
17: (140., 57., 197.),
18: (202., 185., 52.),
19: (51., 176., 203.),
20: (200., 54., 131.),
21: (92., 193., 61.),
22: (78., 71., 183.),
23: (172., 114., 82.),
24: (255., 127., 14.),
25: (91., 163., 138.),
26: (153., 98., 156.),
27: (140., 153., 101.),
28: (158., 218., 229.),
29: (100., 125., 154.),
30: (178., 127., 135.),
32: (146., 111., 194.),
33: (44., 160., 44.),
34: (112., 128., 144.),
35: (96., 207., 209.),
36: (227., 119., 194.),
37: (213., 92., 176.),
38: (94., 106., 211.),
39: (82., 84., 163.),
40: (100., 85., 144.),
}
def load_file(file_name):
pcd = o3d.io.read_point_cloud(file_name)
coords = np.array(pcd.points)
colors = np.array(pcd.colors)
return coords, colors, pcd
def normalize_color(color: torch.Tensor, is_color_in_range_0_255: bool = False) -> torch.Tensor:
r"""
Convert color in range [0, 1] to [-0.5, 0.5]. If the color is in range [0,
255], use the argument `is_color_in_range_0_255=True`.
`color` (torch.Tensor): Nx3 color feature matrix
    `is_color_in_range_0_255` (bool): if True, the input color is assumed to be in [0, 255] and is first rescaled to [0, 1].
"""
if is_color_in_range_0_255:
color /= 255
color -= 0.5
return color.float()
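# Illustrative sketch (not part of the original pipeline): the function above
# maps [0, 1] colors to [-0.5, 0.5], and [0, 255] colors are first rescaled
# down to [0, 1]. The sample values below are arbitrary.
def _demo_normalize_color():
    colors_01 = torch.tensor([[0.0, 0.5, 1.0]])
    colors_255 = torch.tensor([[0.0, 127.5, 255.0]])
    # Both calls return [[-0.5, 0.0, 0.5]]
    print(normalize_color(colors_01))
    print(normalize_color(colors_255, is_color_in_range_0_255=True))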
if __name__ == '__main__':
config = parser.parse_args()
device = torch.device('cuda' if (
torch.cuda.is_available() and not config.use_cpu) else 'cpu')
print(f"Using {device}")
# Define a model and load the weights
model = MinkUNet34C(3, 20).to(device)
model_dict = torch.load(config.weights)
model.load_state_dict(model_dict)
model.eval()
coords, colors, pcd = load_file(config.file_name)
# Measure time
with torch.no_grad():
voxel_size = 0.02
# Feed-forward pass and get the prediction
in_field = ME.TensorField(
features=normalize_color(torch.from_numpy(colors)),
coordinates=ME.utils.batched_coordinates([coords / voxel_size], dtype=torch.float32),
quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
minkowski_algorithm=ME.MinkowskiAlgorithm.SPEED_OPTIMIZED,
device=device,
)
# Convert to a sparse tensor
sinput = in_field.sparse()
# Output sparse tensor
soutput = model(sinput)
# get the prediction on the input tensor field
out_field = soutput.slice(in_field)
logits = out_field.F
_, pred = logits.max(1)
pred = pred.cpu().numpy()
# Create a point cloud file
pred_pcd = o3d.geometry.PointCloud()
# Map color
colors = np.array([SCANNET_COLOR_MAP[VALID_CLASS_IDS[l]] for l in pred])
pred_pcd.points = o3d.utility.Vector3dVector(coords)
pred_pcd.colors = o3d.utility.Vector3dVector(colors / 255)
pred_pcd.estimate_normals()
# Move the original point cloud
pcd.points = o3d.utility.Vector3dVector(
np.array(pcd.points) + np.array([0, 5, 0]))
# Visualize the input point cloud and the prediction
o3d.visualization.draw_geometries([pcd, pred_pcd])
| MinkowskiEngine-master | examples/indoor.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import torch.nn as nn
from torch.optim import SGD
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
from examples.resnet import ResNetBase
class MinkUNetBase(ResNetBase):
BLOCK = None
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
INIT_DIM = 32
OUT_TENSOR_STRIDE = 1
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling
# initialize_coords
def __init__(self, in_channels, out_channels, D=3):
ResNetBase.__init__(self, in_channels, out_channels, D)
def network_initialization(self, in_channels, out_channels, D):
        # Output of the first conv is concatenated to conv6
self.inplanes = self.INIT_DIM
self.conv0p1s1 = ME.MinkowskiConvolution(
in_channels, self.inplanes, kernel_size=5, dimension=D)
self.bn0 = ME.MinkowskiBatchNorm(self.inplanes)
self.conv1p1s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
self.block1 = self._make_layer(self.BLOCK, self.PLANES[0],
self.LAYERS[0])
self.conv2p2s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn2 = ME.MinkowskiBatchNorm(self.inplanes)
self.block2 = self._make_layer(self.BLOCK, self.PLANES[1],
self.LAYERS[1])
self.conv3p4s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn3 = ME.MinkowskiBatchNorm(self.inplanes)
self.block3 = self._make_layer(self.BLOCK, self.PLANES[2],
self.LAYERS[2])
self.conv4p8s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn4 = ME.MinkowskiBatchNorm(self.inplanes)
self.block4 = self._make_layer(self.BLOCK, self.PLANES[3],
self.LAYERS[3])
self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[4], kernel_size=2, stride=2, dimension=D)
self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4])
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(self.BLOCK, self.PLANES[4],
self.LAYERS[4])
self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[5], kernel_size=2, stride=2, dimension=D)
self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5])
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(self.BLOCK, self.PLANES[5],
self.LAYERS[5])
self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[6], kernel_size=2, stride=2, dimension=D)
self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6])
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(self.BLOCK, self.PLANES[6],
self.LAYERS[6])
self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[7], kernel_size=2, stride=2, dimension=D)
self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7])
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(self.BLOCK, self.PLANES[7],
self.LAYERS[7])
self.final = ME.MinkowskiConvolution(
self.PLANES[7] * self.BLOCK.expansion,
out_channels,
kernel_size=1,
bias=True,
dimension=D)
self.relu = ME.MinkowskiReLU(inplace=True)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# tensor_stride=16
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# tensor_stride=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = ME.cat(out, out_b3p8)
out = self.block5(out)
# tensor_stride=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = ME.cat(out, out_b2p4)
out = self.block6(out)
# tensor_stride=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = ME.cat(out, out_b1p2)
out = self.block7(out)
# tensor_stride=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = ME.cat(out, out_p1)
out = self.block8(out)
return self.final(out)
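# Illustrative sketch (not part of the original model): each ME.cat above
# concatenates decoder features with the encoder features at the same tensor
# stride, so the next block consumes PLANES[dec] + PLANES[enc] * expansion
# channels. This helper reproduces that bookkeeping with plain integers;
# expansion=1 assumes a BasicBlock-style block.
def _decoder_in_channels(planes, init_dim=32, expansion=1):
    skips = (planes[2], planes[1], planes[0])
    in_channels = [planes[4 + i] + skips[i] * expansion for i in range(3)]
    in_channels.append(planes[7] + init_dim)  # last cat uses the stem output
    return in_channels

# e.g. _decoder_in_channels(MinkUNetBase.PLANES) == [384, 192, 128, 128]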
class MinkUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class MinkUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class MinkUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class MinkUNet14A(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet14B(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet14C(MinkUNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class MinkUNet14D(MinkUNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet18A(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet18B(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet18D(MinkUNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet34A(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class MinkUNet34B(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class MinkUNet34C(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
if __name__ == '__main__':
from tests.python.common import data_loader
# loss and network
criterion = nn.CrossEntropyLoss()
net = MinkUNet14A(in_channels=3, out_channels=5, D=2)
print(net)
# a data loader must return a tuple of coords, features, and labels.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)
optimizer = SGD(net.parameters(), lr=1e-2)
for i in range(10):
optimizer.zero_grad()
# Get new data
coords, feat, label = data_loader(is_classification=False)
input = ME.SparseTensor(feat, coordinates=coords, device=device)
label = label.to(device)
# Forward
output = net(input)
# Loss
loss = criterion(output.F, label)
print('Iteration: ', i, ', Loss: ', loss.item())
# Gradient
loss.backward()
optimizer.step()
# Saving and loading a network
torch.save(net.state_dict(), 'test.pth')
net.load_state_dict(torch.load('test.pth'))
| MinkowskiEngine-master | examples/minkunet.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import MinkowskiEngine as ME
from tests.python.common import data_loader
def get_random_coords(dimension=2, tensor_stride=2):
torch.manual_seed(0)
# Create random coordinates with tensor stride == 2
coords = torch.rand(10, dimension + 1)
    coords[:, 0] *= 2  # random batch index (first column in ME >= 0.5)
    coords[:, 1:] *= 5  # random spatial coordinates
coords = coords.floor().int()
coords = ME.utils.sparse_quantize(coords)
    coords[:, 1:] *= tensor_stride  # make the tensor stride 2
return coords, tensor_stride
def print_sparse_tensor(tensor):
for c, f in zip(tensor.C.numpy(), tensor.F.detach().numpy()):
print(f"Coordinate {c} : Feature {f}")
def conv():
in_channels, out_channels, D = 2, 3, 2
coords, feats, labels = data_loader(in_channels, batch_size=1)
# Convolution
input = ME.SparseTensor(features=feats, coordinates=coords)
conv = ME.MinkowskiConvolution(
in_channels,
out_channels,
kernel_size=3,
stride=2,
bias=False,
dimension=D)
output = conv(input)
print('Input:')
print_sparse_tensor(input)
print('Output:')
print_sparse_tensor(output)
# Convolution transpose and generate new coordinates
strided_coords, tensor_stride = get_random_coords()
input = ME.SparseTensor(
        features=torch.rand(len(strided_coords), in_channels),
coordinates=strided_coords,
tensor_stride=tensor_stride)
conv_tr = ME.MinkowskiConvolutionTranspose(
in_channels,
out_channels,
kernel_size=3,
stride=2,
bias=False,
dimension=D)
output = conv_tr(input)
print('\nInput:')
print_sparse_tensor(input)
print('Convolution Transpose Output:')
print_sparse_tensor(output)
def conv_on_coords():
in_channels, out_channels, D = 2, 3, 2
coords, feats, labels = data_loader(in_channels, batch_size=1)
# Create input with tensor stride == 4
strided_coords4, tensor_stride4 = get_random_coords(tensor_stride=4)
strided_coords2, tensor_stride2 = get_random_coords(tensor_stride=2)
input = ME.SparseTensor(
        features=torch.rand(len(strided_coords4), in_channels),
coordinates=strided_coords4,
tensor_stride=tensor_stride4)
cm = input.coordinate_manager
# Convolution transpose and generate new coordinates
conv_tr = ME.MinkowskiConvolutionTranspose(
in_channels,
out_channels,
kernel_size=3,
stride=2,
bias=False,
dimension=D)
pool_tr = ME.MinkowskiPoolingTranspose(
kernel_size=2,
stride=2,
dimension=D)
    # If no coordinates are defined at the output tensor stride, they will be created
# tensor stride 4 -> conv_tr with stride 2 -> tensor stride 2
output1 = conv_tr(input)
# output1 = pool_tr(input)
# convolution on the specified coords
output2 = conv_tr(input, coords)
# output2 = pool_tr(input, coords)
# convolution on the specified coords with tensor stride == 2
coords_key, _ = cm.insert_and_map(strided_coords2, tensor_stride=2)
output3 = conv_tr(input, coords_key)
# output3 = pool_tr(input, coords_key)
# convolution on the coordinates of a sparse tensor
output4 = conv_tr(input, output1)
# output4 = pool_tr(input, output1)
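# Illustrative sketch (pure Python, no engine calls): a stride-s convolution
# multiplies the tensor stride by s and a stride-s transposed convolution
# divides it by s, which is the bookkeeping behind the comment
# "tensor stride 4 -> conv_tr with stride 2 -> tensor stride 2" above.
def _trace_tensor_stride(initial_stride, ops):
    stride = initial_stride
    for kind, s in ops:
        stride = stride * s if kind == "conv" else stride // s
    return stride

# _trace_tensor_stride(4, [("conv_tr", 2)]) == 2
# _trace_tensor_stride(1, [("conv", 2), ("conv", 2), ("conv_tr", 2)]) == 2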
if __name__ == '__main__':
conv()
conv_on_coords()
| MinkowskiEngine-master | examples/convolution.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import sys
import subprocess
import argparse
import logging
import glob
import numpy as np
from time import time
import urllib
# Must be imported before large libs
try:
import open3d as o3d
except ImportError:
raise ImportError(
"Please install open3d and scipy with `pip install open3d scipy`."
)
import torch
import torch.nn as nn
import torch.utils.data
import torch.optim as optim
from torch.utils.data.sampler import Sampler
import MinkowskiEngine as ME
class InfSampler(Sampler):
"""Samples elements randomly, without replacement.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, data_source, shuffle=False):
self.data_source = data_source
self.shuffle = shuffle
self.reset_permutation()
    def reset_permutation(self):
        n = len(self.data_source)
        # Use a tensor in both branches so that .tolist() below always works;
        # torch.arange keeps sequential order when shuffling is disabled.
        perm = torch.randperm(n) if self.shuffle else torch.arange(n)
        self._perm = perm.tolist()
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return len(self.data_source)
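# Illustrative sketch (not part of the training pipeline): InfSampler never
# raises StopIteration; the permutation is rebuilt whenever it is exhausted,
# so more indices than len(data_source) can be drawn. The dummy data source
# below is arbitrary.
def _demo_inf_sampler():
    sampler = InfSampler(list(range(3)), shuffle=True)
    it = iter(sampler)
    return [next(it) for _ in range(7)]  # 7 > 3 samples, no StopIteration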
def resample_mesh(mesh_cad, density=1):
"""
https://chrischoy.github.io/research/barycentric-coordinate-for-mesh-sampling/
Samples point cloud on the surface of the model defined as vectices and
faces. This function uses vectorized operations so fast at the cost of some
memory.
param mesh_cad: low-polygon triangle mesh in o3d.geometry.TriangleMesh
param density: density of the point cloud per unit area
param return_numpy: return numpy format or open3d pointcloud format
return resampled point cloud
Reference :
[1] Barycentric coordinate system
\begin{align}
P = (1 - \sqrt{r_1})A + \sqrt{r_1} (1 - r_2) B + \sqrt{r_1} r_2 C
\end{align}
"""
faces = np.array(mesh_cad.triangles).astype(int)
vertices = np.array(mesh_cad.vertices)
vec_cross = np.cross(
vertices[faces[:, 0], :] - vertices[faces[:, 2], :],
vertices[faces[:, 1], :] - vertices[faces[:, 2], :],
)
face_areas = np.sqrt(np.sum(vec_cross ** 2, 1))
n_samples = (np.sum(face_areas) * density).astype(int)
# face_areas = face_areas / np.sum(face_areas)
# Sample exactly n_samples. First, oversample points and remove redundant
# Bug fix by Yangyan ([email protected])
n_samples_per_face = np.ceil(density * face_areas).astype(int)
floor_num = np.sum(n_samples_per_face) - n_samples
if floor_num > 0:
indices = np.where(n_samples_per_face > 0)[0]
floor_indices = np.random.choice(indices, floor_num, replace=True)
n_samples_per_face[floor_indices] -= 1
n_samples = np.sum(n_samples_per_face)
# Create a vector that contains the face indices
sample_face_idx = np.zeros((n_samples,), dtype=int)
acc = 0
for face_idx, _n_sample in enumerate(n_samples_per_face):
sample_face_idx[acc : acc + _n_sample] = face_idx
acc += _n_sample
r = np.random.rand(n_samples, 2)
A = vertices[faces[sample_face_idx, 0], :]
B = vertices[faces[sample_face_idx, 1], :]
C = vertices[faces[sample_face_idx, 2], :]
P = (
(1 - np.sqrt(r[:, 0:1])) * A
+ np.sqrt(r[:, 0:1]) * (1 - r[:, 1:]) * B
+ np.sqrt(r[:, 0:1]) * r[:, 1:] * C
)
return P
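# Illustrative sketch of the barycentric formula in the docstring above:
# P = (1 - sqrt(r1)) A + sqrt(r1) (1 - r2) B + sqrt(r1) r2 C samples points
# uniformly over triangle ABC. The toy 2D triangle and sample count are
# arbitrary; the assertion checks that the weights form a convex combination.
def _demo_barycentric_sampling(n=1000):
    A, B, C = np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([0.0, 1.0])
    r = np.random.rand(n, 2)
    w1 = 1 - np.sqrt(r[:, 0:1])
    w2 = np.sqrt(r[:, 0:1]) * (1 - r[:, 1:])
    w3 = np.sqrt(r[:, 0:1]) * r[:, 1:]
    assert np.allclose(w1 + w2 + w3, 1.0)
    return w1 * A + w2 * B + w3 * C  # (n, 2) points inside the triangle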
M = np.array(
[
[0.80656762, -0.5868724, -0.07091862],
[0.3770505, 0.418344, 0.82632997],
[-0.45528188, -0.6932309, 0.55870326],
]
)
assert [
    int(v) for v in o3d.__version__.split(".")[:2]
] >= [0, 8], f"Requires open3d version >= 0.8, the current version is {o3d.__version__}"
if not os.path.exists("ModelNet40"):
logging.info("Downloading the pruned ModelNet40 dataset...")
subprocess.run(["sh", "./examples/download_modelnet40.sh"])
###############################################################################
# Utility functions
###############################################################################
def PointCloud(points, colors=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
if colors is not None:
pcd.colors = o3d.utility.Vector3dVector(colors)
return pcd
def collate_pointcloud_fn(list_data):
coords, feats, labels = list(zip(*list_data))
# Concatenate all lists
return {
"coords": coords,
"xyzs": [torch.from_numpy(feat).float() for feat in feats],
"labels": torch.LongTensor(labels),
}
class ModelNet40Dataset(torch.utils.data.Dataset):
def __init__(self, phase, transform=None, config=None):
self.phase = phase
self.files = []
self.cache = {}
self.data_objects = []
self.transform = transform
self.resolution = config.resolution
self.last_cache_percent = 0
self.root = "./ModelNet40"
fnames = glob.glob(os.path.join(self.root, "chair/train/*.off"))
fnames = sorted([os.path.relpath(fname, self.root) for fname in fnames])
self.files = fnames
assert len(self.files) > 0, "No file loaded"
logging.info(
f"Loading the subset {phase} from {self.root} with {len(self.files)} files"
)
self.density = 30000
# Ignore warnings in obj loader
o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
mesh_file = os.path.join(self.root, self.files[idx])
if idx in self.cache:
xyz = self.cache[idx]
else:
            # Load the mesh, normalize it into the unit cube, and oversample its surface
assert os.path.exists(mesh_file)
pcd = o3d.io.read_triangle_mesh(mesh_file)
# Normalize to fit the mesh inside a unit cube while preserving aspect ratio
vertices = np.asarray(pcd.vertices)
vmax = vertices.max(0, keepdims=True)
vmin = vertices.min(0, keepdims=True)
pcd.vertices = o3d.utility.Vector3dVector(
(vertices - vmin) / (vmax - vmin).max()
)
# Oversample points and copy
xyz = resample_mesh(pcd, density=self.density)
self.cache[idx] = xyz
cache_percent = int((len(self.cache) / len(self)) * 100)
if (
cache_percent > 0
and cache_percent % 10 == 0
and cache_percent != self.last_cache_percent
):
logging.info(
f"Cached {self.phase}: {len(self.cache)} / {len(self)}: {cache_percent}%"
)
self.last_cache_percent = cache_percent
# Use color or other features if available
feats = np.ones((len(xyz), 1))
if len(xyz) < 1000:
logging.info(
f"Skipping {mesh_file}: does not have sufficient CAD sampling density after resampling: {len(xyz)}."
)
return None
if self.transform:
xyz, feats = self.transform(xyz, feats)
# Get coords
xyz = xyz * self.resolution
coords, inds = ME.utils.sparse_quantize(xyz, return_index=True)
return (coords, xyz[inds], idx)
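# Illustrative sketch mirroring the __getitem__ call above: sparse_quantize
# with return_index=True keeps one point per occupied voxel and returns the
# surviving indices, so xyz[inds] stays aligned with the quantized
# coordinates. The random point cloud below is arbitrary.
def _demo_sparse_quantize(resolution=128):
    xyz = np.random.rand(1000, 3) * resolution
    coords, inds = ME.utils.sparse_quantize(xyz, return_index=True)
    assert len(coords) == len(inds) <= len(xyz)
    return coords, xyz[inds]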
def make_data_loader(
phase, augment_data, batch_size, shuffle, num_workers, repeat, config
):
dset = ModelNet40Dataset(phase, config=config)
args = {
"batch_size": batch_size,
"num_workers": num_workers,
"collate_fn": collate_pointcloud_fn,
"pin_memory": False,
"drop_last": False,
}
if repeat:
args["sampler"] = InfSampler(dset, shuffle)
else:
args["shuffle"] = shuffle
loader = torch.utils.data.DataLoader(dset, **args)
return loader
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split(".")[0] + " %(asctime)s %(message)s",
datefmt="%m/%d %H:%M:%S",
handlers=[ch],
)
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=128)
parser.add_argument("--max_iter", type=int, default=30000)
parser.add_argument("--val_freq", type=int, default=1000)
parser.add_argument("--batch_size", default=16, type=int)
parser.add_argument("--lr", default=1e-2, type=float)
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--weight_decay", type=float, default=1e-4)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--stat_freq", type=int, default=50)
parser.add_argument("--weights", type=str, default="modelnet_reconstruction.pth")
parser.add_argument("--load_optimizer", type=str, default="true")
parser.add_argument("--eval", action="store_true")
parser.add_argument("--max_visualization", type=int, default=4)
###############################################################################
# End of utility functions
###############################################################################
class GenerativeNet(nn.Module):
CHANNELS = [1024, 512, 256, 128, 64, 32, 16]
def __init__(self, resolution, in_nchannel=512):
nn.Module.__init__(self)
self.resolution = resolution
# Input sparse tensor must have tensor stride 128.
ch = self.CHANNELS
# Block 1
self.block1 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
in_nchannel, ch[0], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[0]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[0], ch[0], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[0]),
ME.MinkowskiELU(),
ME.MinkowskiGenerativeConvolutionTranspose(
ch[0], ch[1], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[1]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[1], ch[1], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[1]),
ME.MinkowskiELU(),
)
self.block1_cls = ME.MinkowskiConvolution(
ch[1], 1, kernel_size=1, bias=True, dimension=3
)
# Block 2
self.block2 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[1], ch[2], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[2]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[2], ch[2], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[2]),
ME.MinkowskiELU(),
)
self.block2_cls = ME.MinkowskiConvolution(
ch[2], 1, kernel_size=1, bias=True, dimension=3
)
# Block 3
self.block3 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[2], ch[3], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[3]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[3], ch[3], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[3]),
ME.MinkowskiELU(),
)
self.block3_cls = ME.MinkowskiConvolution(
ch[3], 1, kernel_size=1, bias=True, dimension=3
)
# Block 4
self.block4 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[3], ch[4], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[4]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[4], ch[4], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[4]),
ME.MinkowskiELU(),
)
self.block4_cls = ME.MinkowskiConvolution(
ch[4], 1, kernel_size=1, bias=True, dimension=3
)
# Block 5
self.block5 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[4], ch[5], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[5]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[5], ch[5], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[5]),
ME.MinkowskiELU(),
)
self.block5_cls = ME.MinkowskiConvolution(
ch[5], 1, kernel_size=1, bias=True, dimension=3
)
# Block 6
self.block6 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[5], ch[6], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[6]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[6], ch[6], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[6]),
ME.MinkowskiELU(),
)
self.block6_cls = ME.MinkowskiConvolution(
ch[6], 1, kernel_size=1, bias=True, dimension=3
)
# pruning
self.pruning = ME.MinkowskiPruning()
@torch.no_grad()
def get_target(self, out, target_key, kernel_size=1):
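        # Mark as positive every voxel in `out` whose coordinate coincides
        # with a ground-truth coordinate: stride the target key down to the
        # output resolution, then read the kernel map to find which output
        # rows touch it.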
target = torch.zeros(len(out), dtype=torch.bool, device=out.device)
cm = out.coordinate_manager
strided_target_key = cm.stride(
target_key,
out.tensor_stride[0],
)
kernel_map = cm.kernel_map(
out.coordinate_map_key,
strided_target_key,
kernel_size=kernel_size,
region_type=1,
)
for k, curr_in in kernel_map.items():
target[curr_in[0].long()] = 1
return target
def valid_batch_map(self, batch_map):
for b in batch_map:
if len(b) == 0:
return False
return True
def forward(self, z, target_key):
out_cls, targets = [], []
# Block1
out1 = self.block1(z)
out1_cls = self.block1_cls(out1)
target = self.get_target(out1, target_key)
targets.append(target)
out_cls.append(out1_cls)
keep1 = (out1_cls.F > 0).squeeze()
# If training, force target shape generation, use net.eval() to disable
if self.training:
keep1 += target
# Remove voxels 32
out1 = self.pruning(out1, keep1)
# Block 2
out2 = self.block2(out1)
out2_cls = self.block2_cls(out2)
target = self.get_target(out2, target_key)
targets.append(target)
out_cls.append(out2_cls)
keep2 = (out2_cls.F > 0).squeeze()
if self.training:
keep2 += target
# Remove voxels 16
out2 = self.pruning(out2, keep2)
# Block 3
out3 = self.block3(out2)
out3_cls = self.block3_cls(out3)
target = self.get_target(out3, target_key)
targets.append(target)
out_cls.append(out3_cls)
keep3 = (out3_cls.F > 0).squeeze()
if self.training:
keep3 += target
# Remove voxels 8
out3 = self.pruning(out3, keep3)
# Block 4
out4 = self.block4(out3)
out4_cls = self.block4_cls(out4)
target = self.get_target(out4, target_key)
targets.append(target)
out_cls.append(out4_cls)
keep4 = (out4_cls.F > 0).squeeze()
if self.training:
keep4 += target
# Remove voxels 4
out4 = self.pruning(out4, keep4)
# Block 5
out5 = self.block5(out4)
out5_cls = self.block5_cls(out5)
target = self.get_target(out5, target_key)
targets.append(target)
out_cls.append(out5_cls)
keep5 = (out5_cls.F > 0).squeeze()
if self.training:
keep5 += target
# Remove voxels 2
out5 = self.pruning(out5, keep5)
        # Block 6
out6 = self.block6(out5)
out6_cls = self.block6_cls(out6)
target = self.get_target(out6, target_key)
targets.append(target)
out_cls.append(out6_cls)
keep6 = (out6_cls.F > 0).squeeze()
# Last layer does not require keep
# if self.training:
# keep6 += target
# Remove voxels 1
out6 = self.pruning(out6, keep6)
return out_cls, targets, out6
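# Illustrative sketch (plain tensors, no sparse ops): at every level above,
# a voxel survives pruning when its occupancy logit is positive; during
# training the ground-truth-occupied voxels are kept as well so gradients
# reach the deeper blocks. This mirrors the `keep += target` updates above.
def _demo_keep_mask(training=True):
    logits = torch.tensor([[1.2], [-0.3], [-2.0]])
    target = torch.tensor([False, True, False])
    keep = (logits > 0).squeeze()
    if training:
        keep = keep | target  # boolean OR, same effect as `+=` on bool tensors
    return keep  # tensor([True, True, False]) when training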
def train(net, dataloader, device, config):
in_nchannel = len(dataloader.dataset)
optimizer = optim.SGD(
net.parameters(),
lr=config.lr,
momentum=config.momentum,
weight_decay=config.weight_decay,
)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, 0.95)
crit = nn.BCEWithLogitsLoss()
net.train()
train_iter = iter(dataloader)
# val_iter = iter(val_dataloader)
logging.info(f"LR: {scheduler.get_lr()}")
for i in range(config.max_iter):
s = time()
        data_dict = next(train_iter)
d = time() - s
optimizer.zero_grad()
init_coords = torch.zeros((config.batch_size, 4), dtype=torch.int)
init_coords[:, 0] = torch.arange(config.batch_size)
in_feat = torch.zeros((config.batch_size, in_nchannel))
in_feat[torch.arange(config.batch_size), data_dict["labels"]] = 1
sin = ME.SparseTensor(
features=in_feat,
coordinates=init_coords,
tensor_stride=config.resolution,
device=device,
)
# Generate target sparse tensor
cm = sin.coordinate_manager
target_key, _ = cm.insert_and_map(
ME.utils.batched_coordinates(data_dict["xyzs"]).to(device),
string_id="target",
)
# Generate from a dense tensor
out_cls, targets, sout = net(sin, target_key)
num_layers, loss = len(out_cls), 0
losses = []
for out_cl, target in zip(out_cls, targets):
curr_loss = crit(out_cl.F.squeeze(), target.type(out_cl.F.dtype).to(device))
losses.append(curr_loss.item())
loss += curr_loss / num_layers
loss.backward()
optimizer.step()
t = time() - s
if i % config.stat_freq == 0:
logging.info(
f"Iter: {i}, Loss: {loss.item():.3e}, Depths: {len(out_cls)} Data Loading Time: {d:.3e}, Tot Time: {t:.3e}"
)
if i % config.val_freq == 0 and i > 0:
torch.save(
{
"state_dict": net.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"curr_iter": i,
},
config.weights,
)
scheduler.step()
logging.info(f"LR: {scheduler.get_lr()}")
net.train()
def visualize(net, dataloader, device, config):
in_nchannel = len(dataloader.dataset)
net.eval()
crit = nn.BCEWithLogitsLoss()
n_vis = 0
for data_dict in dataloader:
init_coords = torch.zeros((config.batch_size, 4), dtype=torch.int)
init_coords[:, 0] = torch.arange(config.batch_size)
in_feat = torch.zeros((config.batch_size, in_nchannel))
in_feat[torch.arange(config.batch_size), data_dict["labels"]] = 1
sin = ME.SparseTensor(
features=in_feat,
coordinates=init_coords,
tensor_stride=config.resolution,
device=device,
)
# Generate target sparse tensor
cm = sin.coordinate_manager
target_key, _ = cm.insert_and_map(
ME.utils.batched_coordinates(data_dict["xyzs"]).to(device),
string_id="target",
)
# Generate from a dense tensor
out_cls, targets, sout = net(sin, target_key)
num_layers, loss = len(out_cls), 0
for out_cl, target in zip(out_cls, targets):
loss += (
crit(out_cl.F.squeeze(), target.type(out_cl.F.dtype).to(device))
/ num_layers
)
batch_coords, batch_feats = sout.decomposed_coordinates_and_features
for b, (coords, feats) in enumerate(zip(batch_coords, batch_feats)):
pcd = PointCloud(coords.cpu())
pcd.estimate_normals()
pcd.translate([0.6 * config.resolution, 0, 0])
pcd.rotate(M)
opcd = PointCloud(data_dict["xyzs"][b])
opcd.translate([-0.6 * config.resolution, 0, 0])
opcd.estimate_normals()
opcd.rotate(M)
o3d.visualization.draw_geometries([pcd, opcd])
n_vis += 1
if n_vis > config.max_visualization:
return
if __name__ == "__main__":
config = parser.parse_args()
logging.info(config)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataloader = make_data_loader(
"val",
augment_data=True,
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers,
repeat=True,
config=config,
)
in_nchannel = len(dataloader.dataset)
net = GenerativeNet(config.resolution, in_nchannel=in_nchannel)
net.to(device)
logging.info(net)
if not config.eval:
train(net, dataloader, device, config)
else:
if not os.path.exists(config.weights):
logging.info(f"Downloaing pretrained weights. This might take a while...")
urllib.request.urlretrieve(
"https://bit.ly/36d9m1n", filename=config.weights
)
logging.info(f"Loading weights from {config.weights}")
checkpoint = torch.load(config.weights)
net.load_state_dict(checkpoint["state_dict"])
visualize(net, dataloader, device, config)
| MinkowskiEngine-master | examples/reconstruction.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import sys
import subprocess
import argparse
import logging
import numpy as np
from time import time
import urllib
# Must be imported before large libs
try:
import open3d as o3d
except ImportError:
raise ImportError("Please install open3d with `pip install open3d`.")
import torch
import torch.nn as nn
import torch.utils.data
import torch.optim as optim
import MinkowskiEngine as ME
from examples.reconstruction import ModelNet40Dataset, InfSampler
M = np.array(
[
[0.80656762, -0.5868724, -0.07091862],
[0.3770505, 0.418344, 0.82632997],
[-0.45528188, -0.6932309, 0.55870326],
]
)
assert [
    int(v) for v in o3d.__version__.split(".")[:2]
] >= [0, 8], f"Requires open3d version >= 0.8, the current version is {o3d.__version__}"
if not os.path.exists("ModelNet40"):
logging.info("Downloading the pruned ModelNet40 dataset...")
subprocess.run(["sh", "./examples/download_modelnet40.sh"])
###############################################################################
# Utility functions
###############################################################################
def PointCloud(points, colors=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
if colors is not None:
pcd.colors = o3d.utility.Vector3dVector(colors)
return pcd
class CollationAndTransformation:
def __init__(self, resolution):
self.resolution = resolution
def random_crop(self, coords_list):
crop_coords_list = []
for coords in coords_list:
sel = coords[:, 0] < self.resolution / 3
crop_coords_list.append(coords[sel])
return crop_coords_list
def __call__(self, list_data):
coords, feats, labels = list(zip(*list_data))
coords = self.random_crop(coords)
# Concatenate all lists
return {
"coords": ME.utils.batched_coordinates(coords),
"xyzs": [torch.from_numpy(feat).float() for feat in feats],
"cropped_coords": coords,
"labels": torch.LongTensor(labels),
}
def make_data_loader(
phase, augment_data, batch_size, shuffle, num_workers, repeat, config
):
dset = ModelNet40Dataset(phase, config=config)
args = {
"batch_size": batch_size,
"num_workers": num_workers,
"collate_fn": CollationAndTransformation(config.resolution),
"pin_memory": False,
"drop_last": False,
}
if repeat:
args["sampler"] = InfSampler(dset, shuffle)
else:
args["shuffle"] = shuffle
loader = torch.utils.data.DataLoader(dset, **args)
return loader
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format="%(asctime)s %(message)s",
datefmt="%m/%d %H:%M:%S",
handlers=[ch],
)
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=128)
parser.add_argument("--max_iter", type=int, default=30000)
parser.add_argument("--val_freq", type=int, default=1000)
parser.add_argument("--batch_size", default=16, type=int)
parser.add_argument("--lr", default=1e-2, type=float)
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--weight_decay", type=float, default=1e-4)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--stat_freq", type=int, default=50)
parser.add_argument("--weights", type=str, default="modelnet_completion.pth")
parser.add_argument("--load_optimizer", type=str, default="true")
parser.add_argument("--eval", action="store_true")
parser.add_argument("--max_visualization", type=int, default=4)
###############################################################################
# End of utility functions
###############################################################################
class CompletionNet(nn.Module):
ENC_CHANNELS = [16, 32, 64, 128, 256, 512, 1024]
DEC_CHANNELS = [16, 32, 64, 128, 256, 512, 1024]
def __init__(self, resolution, in_nchannel=512):
nn.Module.__init__(self)
self.resolution = resolution
# Input sparse tensor must have tensor stride 128.
enc_ch = self.ENC_CHANNELS
dec_ch = self.DEC_CHANNELS
# Encoder
self.enc_block_s1 = nn.Sequential(
ME.MinkowskiConvolution(1, enc_ch[0], kernel_size=3, stride=1, dimension=3),
ME.MinkowskiBatchNorm(enc_ch[0]),
ME.MinkowskiELU(),
)
self.enc_block_s1s2 = nn.Sequential(
ME.MinkowskiConvolution(
enc_ch[0], enc_ch[1], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(enc_ch[1]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(enc_ch[1], enc_ch[1], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(enc_ch[1]),
ME.MinkowskiELU(),
)
self.enc_block_s2s4 = nn.Sequential(
ME.MinkowskiConvolution(
enc_ch[1], enc_ch[2], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(enc_ch[2]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(enc_ch[2], enc_ch[2], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(enc_ch[2]),
ME.MinkowskiELU(),
)
self.enc_block_s4s8 = nn.Sequential(
ME.MinkowskiConvolution(
enc_ch[2], enc_ch[3], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(enc_ch[3]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(enc_ch[3], enc_ch[3], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(enc_ch[3]),
ME.MinkowskiELU(),
)
self.enc_block_s8s16 = nn.Sequential(
ME.MinkowskiConvolution(
enc_ch[3], enc_ch[4], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(enc_ch[4]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(enc_ch[4], enc_ch[4], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(enc_ch[4]),
ME.MinkowskiELU(),
)
self.enc_block_s16s32 = nn.Sequential(
ME.MinkowskiConvolution(
enc_ch[4], enc_ch[5], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(enc_ch[5]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(enc_ch[5], enc_ch[5], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(enc_ch[5]),
ME.MinkowskiELU(),
)
self.enc_block_s32s64 = nn.Sequential(
ME.MinkowskiConvolution(
enc_ch[5], enc_ch[6], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(enc_ch[6]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(enc_ch[6], enc_ch[6], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(enc_ch[6]),
ME.MinkowskiELU(),
)
# Decoder
self.dec_block_s64s32 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
enc_ch[6],
dec_ch[5],
kernel_size=4,
stride=2,
dimension=3,
),
ME.MinkowskiBatchNorm(dec_ch[5]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(dec_ch[5], dec_ch[5], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(dec_ch[5]),
ME.MinkowskiELU(),
)
self.dec_s32_cls = ME.MinkowskiConvolution(
dec_ch[5], 1, kernel_size=1, bias=True, dimension=3
)
self.dec_block_s32s16 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
enc_ch[5],
dec_ch[4],
kernel_size=2,
stride=2,
dimension=3,
),
ME.MinkowskiBatchNorm(dec_ch[4]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(dec_ch[4], dec_ch[4], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(dec_ch[4]),
ME.MinkowskiELU(),
)
self.dec_s16_cls = ME.MinkowskiConvolution(
dec_ch[4], 1, kernel_size=1, bias=True, dimension=3
)
self.dec_block_s16s8 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
dec_ch[4],
dec_ch[3],
kernel_size=2,
stride=2,
dimension=3,
),
ME.MinkowskiBatchNorm(dec_ch[3]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(dec_ch[3], dec_ch[3], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(dec_ch[3]),
ME.MinkowskiELU(),
)
self.dec_s8_cls = ME.MinkowskiConvolution(
dec_ch[3], 1, kernel_size=1, bias=True, dimension=3
)
self.dec_block_s8s4 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
dec_ch[3],
dec_ch[2],
kernel_size=2,
stride=2,
dimension=3,
),
ME.MinkowskiBatchNorm(dec_ch[2]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(dec_ch[2], dec_ch[2], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(dec_ch[2]),
ME.MinkowskiELU(),
)
self.dec_s4_cls = ME.MinkowskiConvolution(
dec_ch[2], 1, kernel_size=1, bias=True, dimension=3
)
self.dec_block_s4s2 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
dec_ch[2],
dec_ch[1],
kernel_size=2,
stride=2,
dimension=3,
),
ME.MinkowskiBatchNorm(dec_ch[1]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(dec_ch[1], dec_ch[1], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(dec_ch[1]),
ME.MinkowskiELU(),
)
self.dec_s2_cls = ME.MinkowskiConvolution(
dec_ch[1], 1, kernel_size=1, bias=True, dimension=3
)
self.dec_block_s2s1 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
dec_ch[1],
dec_ch[0],
kernel_size=2,
stride=2,
dimension=3,
),
ME.MinkowskiBatchNorm(dec_ch[0]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(dec_ch[0], dec_ch[0], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(dec_ch[0]),
ME.MinkowskiELU(),
)
self.dec_s1_cls = ME.MinkowskiConvolution(
dec_ch[0], 1, kernel_size=1, bias=True, dimension=3
)
# pruning
self.pruning = ME.MinkowskiPruning()
def get_target(self, out, target_key, kernel_size=1):
with torch.no_grad():
target = torch.zeros(len(out), dtype=torch.bool, device=out.device)
cm = out.coordinate_manager
strided_target_key = cm.stride(
target_key, out.tensor_stride[0],
)
kernel_map = cm.kernel_map(
out.coordinate_map_key,
strided_target_key,
kernel_size=kernel_size,
region_type=1,
)
for k, curr_in in kernel_map.items():
target[curr_in[0].long()] = 1
return target
def valid_batch_map(self, batch_map):
for b in batch_map:
if len(b) == 0:
return False
return True
def forward(self, partial_in, target_key):
out_cls, targets = [], []
enc_s1 = self.enc_block_s1(partial_in)
enc_s2 = self.enc_block_s1s2(enc_s1)
enc_s4 = self.enc_block_s2s4(enc_s2)
enc_s8 = self.enc_block_s4s8(enc_s4)
enc_s16 = self.enc_block_s8s16(enc_s8)
enc_s32 = self.enc_block_s16s32(enc_s16)
enc_s64 = self.enc_block_s32s64(enc_s32)
##################################################
# Decoder 64 -> 32
##################################################
dec_s32 = self.dec_block_s64s32(enc_s64)
# Add encoder features
dec_s32 = dec_s32 + enc_s32
dec_s32_cls = self.dec_s32_cls(dec_s32)
keep_s32 = (dec_s32_cls.F > 0).squeeze()
target = self.get_target(dec_s32, target_key)
targets.append(target)
out_cls.append(dec_s32_cls)
if self.training:
keep_s32 += target
# Remove voxels s32
dec_s32 = self.pruning(dec_s32, keep_s32)
##################################################
# Decoder 32 -> 16
##################################################
dec_s16 = self.dec_block_s32s16(dec_s32)
# Add encoder features
dec_s16 = dec_s16 + enc_s16
dec_s16_cls = self.dec_s16_cls(dec_s16)
keep_s16 = (dec_s16_cls.F > 0).squeeze()
target = self.get_target(dec_s16, target_key)
targets.append(target)
out_cls.append(dec_s16_cls)
if self.training:
keep_s16 += target
# Remove voxels s16
dec_s16 = self.pruning(dec_s16, keep_s16)
##################################################
# Decoder 16 -> 8
##################################################
dec_s8 = self.dec_block_s16s8(dec_s16)
# Add encoder features
dec_s8 = dec_s8 + enc_s8
dec_s8_cls = self.dec_s8_cls(dec_s8)
target = self.get_target(dec_s8, target_key)
targets.append(target)
out_cls.append(dec_s8_cls)
keep_s8 = (dec_s8_cls.F > 0).squeeze()
if self.training:
keep_s8 += target
        # Remove voxels s8
dec_s8 = self.pruning(dec_s8, keep_s8)
##################################################
# Decoder 8 -> 4
##################################################
dec_s4 = self.dec_block_s8s4(dec_s8)
# Add encoder features
dec_s4 = dec_s4 + enc_s4
dec_s4_cls = self.dec_s4_cls(dec_s4)
target = self.get_target(dec_s4, target_key)
targets.append(target)
out_cls.append(dec_s4_cls)
keep_s4 = (dec_s4_cls.F > 0).squeeze()
if self.training:
keep_s4 += target
# Remove voxels s4
dec_s4 = self.pruning(dec_s4, keep_s4)
##################################################
# Decoder 4 -> 2
##################################################
dec_s2 = self.dec_block_s4s2(dec_s4)
# Add encoder features
dec_s2 = dec_s2 + enc_s2
dec_s2_cls = self.dec_s2_cls(dec_s2)
target = self.get_target(dec_s2, target_key)
targets.append(target)
out_cls.append(dec_s2_cls)
keep_s2 = (dec_s2_cls.F > 0).squeeze()
if self.training:
keep_s2 += target
# Remove voxels s2
dec_s2 = self.pruning(dec_s2, keep_s2)
##################################################
# Decoder 2 -> 1
##################################################
        dec_s1 = self.dec_block_s2s1(dec_s2)
        # Add encoder features, then classify once at full resolution
        dec_s1 = dec_s1 + enc_s1
        dec_s1_cls = self.dec_s1_cls(dec_s1)
target = self.get_target(dec_s1, target_key)
targets.append(target)
out_cls.append(dec_s1_cls)
keep_s1 = (dec_s1_cls.F > 0).squeeze()
# Last layer does not require adding the target
# if self.training:
# keep_s1 += target
# Remove voxels s1
dec_s1 = self.pruning(dec_s1, keep_s1)
return out_cls, targets, dec_s1
def train(net, dataloader, device, config):
optimizer = optim.SGD(
net.parameters(),
lr=config.lr,
momentum=config.momentum,
weight_decay=config.weight_decay,
)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, 0.95)
crit = nn.BCEWithLogitsLoss()
net.train()
train_iter = iter(dataloader)
# val_iter = iter(val_dataloader)
logging.info(f"LR: {scheduler.get_lr()}")
for i in range(config.max_iter):
s = time()
        data_dict = next(train_iter)
d = time() - s
optimizer.zero_grad()
in_feat = torch.ones((len(data_dict["coords"]), 1))
sin = ME.SparseTensor(
features=in_feat,
coordinates=data_dict["coords"],
device=device,
)
# Generate target sparse tensor
cm = sin.coordinate_manager
target_key, _ = cm.insert_and_map(
ME.utils.batched_coordinates(data_dict["xyzs"]).to(device),
string_id="target",
)
# Generate from a dense tensor
out_cls, targets, sout = net(sin, target_key)
num_layers, loss = len(out_cls), 0
losses = []
for out_cl, target in zip(out_cls, targets):
curr_loss = crit(out_cl.F.squeeze(), target.type(out_cl.F.dtype).to(device))
losses.append(curr_loss.item())
loss += curr_loss / num_layers
loss.backward()
optimizer.step()
t = time() - s
if i % config.stat_freq == 0:
logging.info(
f"Iter: {i}, Loss: {loss.item():.3e}, Data Loading Time: {d:.3e}, Tot Time: {t:.3e}"
)
if i % config.val_freq == 0 and i > 0:
torch.save(
{
"state_dict": net.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"curr_iter": i,
},
config.weights,
)
scheduler.step()
logging.info(f"LR: {scheduler.get_lr()}")
net.train()
def visualize(net, dataloader, device, config):
net.eval()
crit = nn.BCEWithLogitsLoss()
n_vis = 0
for data_dict in dataloader:
in_feat = torch.ones((len(data_dict["coords"]), 1))
        sin = ME.SparseTensor(
            features=in_feat,
            coordinates=data_dict["coords"],
            device=device,
        )
        # Generate target sparse tensor
        cm = sin.coordinate_manager
        target_key, _ = cm.insert_and_map(
            ME.utils.batched_coordinates(data_dict["xyzs"]).to(device),
            string_id="target",
        )
# Generate from a dense tensor
out_cls, targets, sout = net(sin, target_key)
num_layers, loss = len(out_cls), 0
for out_cl, target in zip(out_cls, targets):
loss += (
crit(out_cl.F.squeeze(), target.type(out_cl.F.dtype).to(device))
/ num_layers
)
batch_coords, batch_feats = sout.decomposed_coordinates_and_features
for b, (coords, feats) in enumerate(zip(batch_coords, batch_feats)):
            pcd = PointCloud(coords.cpu())
pcd.estimate_normals()
pcd.translate([0.6 * config.resolution, 0, 0])
pcd.rotate(M, np.array([[0.0], [0.0], [0.0]]))
opcd = PointCloud(data_dict["cropped_coords"][b])
opcd.translate([-0.6 * config.resolution, 0, 0])
opcd.estimate_normals()
opcd.rotate(M, np.array([[0.0], [0.0], [0.0]]))
o3d.visualization.draw_geometries([pcd, opcd])
n_vis += 1
if n_vis > config.max_visualization:
return
if __name__ == "__main__":
config = parser.parse_args()
logging.info(config)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataloader = make_data_loader(
"val",
augment_data=True,
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers,
repeat=True,
config=config,
)
in_nchannel = len(dataloader.dataset)
net = CompletionNet(config.resolution, in_nchannel=in_nchannel)
net.to(device)
logging.info(net)
if not config.eval:
train(net, dataloader, device, config)
else:
if not os.path.exists(config.weights):
logging.info(f"Downloaing pretrained weights. This might take a while...")
urllib.request.urlretrieve(
"https://bit.ly/36d9m1n", filename=config.weights
)
logging.info(f"Loading weights from {config.weights}")
checkpoint = torch.load(config.weights)
net.load_state_dict(checkpoint["state_dict"])
visualize(net, dataloader, device, config)
| MinkowskiEngine-master | examples/completion.py |
# Copyright (c) NVIDIA Corporation.
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import argparse
import numpy as np
from urllib.request import urlretrieve
try:
import open3d as o3d
except ImportError:
raise ImportError(
"Please install requirements with `pip install open3d pytorch_lightning`."
)
try:
from pytorch_lightning.core import LightningModule
from pytorch_lightning import Trainer
except ImportError:
raise ImportError(
"Please install requirements with `pip install open3d pytorch_lightning`."
)
import torch
import torch.nn as nn
from torch.optim import SGD
from torch.utils.data import Dataset, DataLoader
import MinkowskiEngine as ME
if not os.path.isfile("1.ply"):
urlretrieve("http://cvgl.stanford.edu/data2/minkowskiengine/1.ply", "1.ply")
parser = argparse.ArgumentParser()
parser.add_argument("--file_name", type=str, default="1.ply")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--max_ngpu", type=int, default=2)
def minkowski_collate_fn(list_data):
r"""
Collation function for MinkowskiEngine.SparseTensor that creates batched
    coordinates given a list of dictionaries.
"""
coordinates_batch, features_batch, labels_batch = ME.utils.sparse_collate(
[d["coordinates"] for d in list_data],
[d["features"] for d in list_data],
[d["labels"] for d in list_data],
dtype=torch.float32,
)
return {
"coordinates": coordinates_batch,
"features": features_batch,
"labels": labels_batch,
}
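# Illustrative sketch (arbitrary dummy samples): the collate function above
# prepends a batch-index column to the coordinates via ME.utils.sparse_collate,
# so two samples of 4 points each become one (8, 1 + 3) coordinate matrix.
def _demo_minkowski_collate():
    sample = {
        "coordinates": torch.zeros(4, 3),
        "features": torch.ones(4, 3),
        "labels": torch.zeros(4),
    }
    batch = minkowski_collate_fn([sample, sample])
    assert batch["coordinates"].shape == (8, 4)
    return batch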
class DummyNetwork(nn.Module):
def __init__(self, in_channels, out_channels, D=3):
nn.Module.__init__(self)
self.net = nn.Sequential(
ME.MinkowskiConvolution(in_channels, 32, 3, dimension=D),
ME.MinkowskiBatchNorm(32),
ME.MinkowskiReLU(),
ME.MinkowskiConvolution(32, 64, 3, stride=2, dimension=D),
ME.MinkowskiBatchNorm(64),
ME.MinkowskiReLU(),
ME.MinkowskiConvolutionTranspose(64, 32, 3, stride=2, dimension=D),
ME.MinkowskiBatchNorm(32),
ME.MinkowskiReLU(),
ME.MinkowskiConvolution(32, out_channels, kernel_size=1, dimension=D),
)
def forward(self, x):
return self.net(x)
class DummyDataset(Dataset):
def __init__(self, phase, dummy_file="1.ply", voxel_size=0.05):
self.CACHE = {}
self.phase = phase # do something for a real dataset.
self.voxel_size = voxel_size # in meter
self.filenames = [dummy_file] * 100
def __len__(self):
return len(self.filenames)
def __getitem__(self, i):
filename = self.filenames[i]
if filename not in self.CACHE:
pcd = o3d.io.read_point_cloud(filename)
self.CACHE[filename] = pcd
pcd = self.CACHE[filename]
quantized_coords, feats = ME.utils.sparse_quantize(
np.array(pcd.points, dtype=np.float32),
np.array(pcd.colors, dtype=np.float32),
quantization_size=self.voxel_size,
)
random_labels = torch.zeros(len(feats))
return {
"coordinates": quantized_coords,
"features": feats,
"labels": random_labels,
}
class MinkowskiSegmentationModule(LightningModule):
r"""
Segmentation Module for MinkowskiEngine.
"""
def __init__(
self,
model,
optimizer_name="SGD",
lr=1e-3,
weight_decay=1e-5,
voxel_size=0.05,
batch_size=12,
val_batch_size=6,
train_num_workers=4,
val_num_workers=2,
):
super().__init__()
for name, value in vars().items():
if name != "self":
setattr(self, name, value)
self.criterion = nn.CrossEntropyLoss()
def train_dataloader(self):
return DataLoader(
DummyDataset("train", voxel_size=self.voxel_size),
batch_size=self.batch_size,
collate_fn=minkowski_collate_fn,
shuffle=True,
)
def val_dataloader(self):
return DataLoader(
DummyDataset("val", voxel_size=self.voxel_size),
batch_size=self.val_batch_size,
collate_fn=minkowski_collate_fn,
)
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
stensor = ME.SparseTensor(
coordinates=batch["coordinates"], features=batch["features"]
)
        # Must clear the CUDA cache at regular intervals
if self.global_step % 10 == 0:
torch.cuda.empty_cache()
return self.criterion(self(stensor).F, batch["labels"].long())
def validation_step(self, batch, batch_idx):
stensor = ME.SparseTensor(
coordinates=batch["coordinates"], features=batch["features"]
)
return self.criterion(self(stensor).F, batch["labels"].long())
def configure_optimizers(self):
return SGD(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
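# Illustrative sketch of the argument-capturing idiom in __init__ above:
# vars() inside a function returns its local namespace, so every constructor
# argument except `self` can be copied onto the instance in one loop instead
# of one line per hyperparameter. The class below is a toy, not part of the
# training code.
class _CaptureArgsExample:
    def __init__(self, lr=1e-3, batch_size=12):
        for name, value in vars().items():
            if name != "self":
                setattr(self, name, value)

# _CaptureArgsExample(lr=0.1).lr == 0.1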
if __name__ == "__main__":
pa = argparse.ArgumentParser()
pa.add_argument("--max_epochs", type=int, default=100, help="Max epochs")
pa.add_argument("--lr", type=float, default=1e-2, help="Learning rate")
pa.add_argument("--batch_size", type=int, default=2, help="batch size per GPU")
pa.add_argument("--ngpus", type=int, default=1, help="num_gpus")
args = pa.parse_args()
num_devices = min(args.ngpus, torch.cuda.device_count())
print(f"Testing {num_devices} GPUs.")
# Training
model = DummyNetwork(3, 20, D=3)
if args.ngpus > 1:
model = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(model)
pl_module = MinkowskiSegmentationModule(model, lr=args.lr)
trainer = Trainer(max_epochs=args.max_epochs, gpus=num_devices, accelerator="ddp")
trainer.fit(pl_module)
| MinkowskiEngine-master | examples/multigpu_lightning.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import MinkowskiEngine as ME
import MinkowskiEngine.MinkowskiFunctional as MF
from tests.python.common import data_loader
class UNet(ME.MinkowskiNetwork):
def __init__(self, in_nchannel, out_nchannel, D):
super(UNet, self).__init__(D)
self.block1 = torch.nn.Sequential(
ME.MinkowskiConvolution(
in_channels=in_nchannel,
out_channels=8,
kernel_size=3,
stride=1,
dimension=D),
ME.MinkowskiBatchNorm(8))
self.block2 = torch.nn.Sequential(
ME.MinkowskiConvolution(
in_channels=8,
out_channels=16,
kernel_size=3,
stride=2,
dimension=D),
ME.MinkowskiBatchNorm(16),
)
self.block3 = torch.nn.Sequential(
ME.MinkowskiConvolution(
in_channels=16,
out_channels=32,
kernel_size=3,
stride=2,
dimension=D),
ME.MinkowskiBatchNorm(32))
self.block3_tr = torch.nn.Sequential(
ME.MinkowskiConvolutionTranspose(
in_channels=32,
out_channels=16,
kernel_size=3,
stride=2,
dimension=D),
ME.MinkowskiBatchNorm(16))
self.block2_tr = torch.nn.Sequential(
ME.MinkowskiConvolutionTranspose(
in_channels=32,
out_channels=16,
kernel_size=3,
stride=2,
dimension=D),
ME.MinkowskiBatchNorm(16))
self.conv1_tr = ME.MinkowskiConvolution(
in_channels=24,
out_channels=out_nchannel,
kernel_size=1,
stride=1,
dimension=D)
def forward(self, x):
out_s1 = self.block1(x)
out = MF.relu(out_s1)
out_s2 = self.block2(out)
out = MF.relu(out_s2)
out_s4 = self.block3(out)
out = MF.relu(out_s4)
out = MF.relu(self.block3_tr(out))
out = ME.cat(out, out_s2)
out = MF.relu(self.block2_tr(out))
out = ME.cat(out, out_s1)
return self.conv1_tr(out)
if __name__ == '__main__':
# loss and network
net = UNet(3, 5, D=2)
print(net)
# a data loader must return a tuple of coords, features, and labels.
coords, feat, label = data_loader()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)
input = ME.SparseTensor(feat, coords, device=device)
# Forward
output = net(input)
| MinkowskiEngine-master | examples/unet.py |
MinkowskiEngine-master | examples/__init__.py |
|
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import argparse
import sklearn.metrics as metrics
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.nn.functional as F
import MinkowskiEngine as ME
from examples.pointnet import (
PointNet,
MinkowskiPointNet,
CoordinateTransformation,
ModelNet40H5,
stack_collate_fn,
minkowski_collate_fn,
)
from examples.common import seed_all
parser = argparse.ArgumentParser()
parser.add_argument("--voxel_size", type=float, default=0.05)
parser.add_argument("--max_steps", type=int, default=100000)
parser.add_argument("--val_freq", type=int, default=1000)
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--lr", default=1e-1, type=float)
parser.add_argument("--weight_decay", type=float, default=1e-4)
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--stat_freq", type=int, default=100)
parser.add_argument("--weights", type=str, default="modelnet.pth")
parser.add_argument("--seed", type=int, default=777)
parser.add_argument("--translation", type=float, default=0.2)
parser.add_argument("--test_translation", type=float, default=0.0)
parser.add_argument(
"--network",
type=str,
choices=["pointnet", "minkpointnet", "minkfcnn", "minksplatfcnn"],
default="minkfcnn",
)
class MinkowskiFCNN(ME.MinkowskiNetwork):
def __init__(
self,
in_channel,
out_channel,
embedding_channel=1024,
channels=(32, 48, 64, 96, 128),
D=3,
):
ME.MinkowskiNetwork.__init__(self, D)
self.network_initialization(
in_channel,
out_channel,
channels=channels,
embedding_channel=embedding_channel,
kernel_size=3,
D=D,
)
self.weight_initialization()
def get_mlp_block(self, in_channel, out_channel):
return nn.Sequential(
ME.MinkowskiLinear(in_channel, out_channel, bias=False),
ME.MinkowskiBatchNorm(out_channel),
ME.MinkowskiLeakyReLU(),
)
def get_conv_block(self, in_channel, out_channel, kernel_size, stride):
return nn.Sequential(
ME.MinkowskiConvolution(
in_channel,
out_channel,
kernel_size=kernel_size,
stride=stride,
dimension=self.D,
),
ME.MinkowskiBatchNorm(out_channel),
ME.MinkowskiLeakyReLU(),
)
def network_initialization(
self,
in_channel,
out_channel,
channels,
embedding_channel,
kernel_size,
D=3,
):
self.mlp1 = self.get_mlp_block(in_channel, channels[0])
self.conv1 = self.get_conv_block(
channels[0],
channels[1],
kernel_size=kernel_size,
stride=1,
)
self.conv2 = self.get_conv_block(
channels[1],
channels[2],
kernel_size=kernel_size,
stride=2,
)
self.conv3 = self.get_conv_block(
channels[2],
channels[3],
kernel_size=kernel_size,
stride=2,
)
self.conv4 = self.get_conv_block(
channels[3],
channels[4],
kernel_size=kernel_size,
stride=2,
)
self.conv5 = nn.Sequential(
self.get_conv_block(
channels[1] + channels[2] + channels[3] + channels[4],
embedding_channel // 4,
kernel_size=3,
stride=2,
),
self.get_conv_block(
embedding_channel // 4,
embedding_channel // 2,
kernel_size=3,
stride=2,
),
self.get_conv_block(
embedding_channel // 2,
embedding_channel,
kernel_size=3,
stride=2,
),
)
self.pool = ME.MinkowskiMaxPooling(kernel_size=3, stride=2, dimension=D)
self.global_max_pool = ME.MinkowskiGlobalMaxPooling()
self.global_avg_pool = ME.MinkowskiGlobalAvgPooling()
self.final = nn.Sequential(
self.get_mlp_block(embedding_channel * 2, 512),
ME.MinkowskiDropout(),
self.get_mlp_block(512, 512),
ME.MinkowskiLinear(512, out_channel, bias=True),
)
        # Dev note: a variant with no dropout, a 256-dim final linear, and avg pooling reached 92%
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiConvolution):
ME.utils.kaiming_normal_(m.kernel, mode="fan_out", nonlinearity="relu")
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def forward(self, x: ME.TensorField):
x = self.mlp1(x)
y = x.sparse()
y = self.conv1(y)
y1 = self.pool(y)
y = self.conv2(y1)
y2 = self.pool(y)
y = self.conv3(y2)
y3 = self.pool(y)
y = self.conv4(y3)
y4 = self.pool(y)
x1 = y1.slice(x)
x2 = y2.slice(x)
x3 = y3.slice(x)
x4 = y4.slice(x)
x = ME.cat(x1, x2, x3, x4)
y = self.conv5(x.sparse())
x1 = self.global_max_pool(y)
x2 = self.global_avg_pool(y)
return self.final(ME.cat(x1, x2)).F
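# Informal note on the TensorField round trip above: x.sparse() quantizes the
# field into a SparseTensor, and each yK.slice(x) maps voxel features back onto
# the original field points, so x1..x4 share the field's point set and can be
# concatenated with ME.cat before re-voxelizing for conv5.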
class GlobalMaxAvgPool(torch.nn.Module):
def __init__(self):
torch.nn.Module.__init__(self)
self.global_max_pool = ME.MinkowskiGlobalMaxPooling()
self.global_avg_pool = ME.MinkowskiGlobalAvgPooling()
def forward(self, tensor):
x = self.global_max_pool(tensor)
y = self.global_avg_pool(tensor)
return ME.cat(x, y)
class MinkowskiSplatFCNN(MinkowskiFCNN):
def __init__(
self,
in_channel,
out_channel,
embedding_channel=1024,
channels=(32, 48, 64, 96, 128),
D=3,
):
MinkowskiFCNN.__init__(
self, in_channel, out_channel, embedding_channel, channels, D
)
def forward(self, x: ME.TensorField):
x = self.mlp1(x)
y = x.splat()
y = self.conv1(y)
y1 = self.pool(y)
y = self.conv2(y1)
y2 = self.pool(y)
y = self.conv3(y2)
y3 = self.pool(y)
y = self.conv4(y3)
y4 = self.pool(y)
x1 = y1.interpolate(x)
x2 = y2.interpolate(x)
x3 = y3.interpolate(x)
x4 = y4.interpolate(x)
x = ME.cat(x1, x2, x3, x4)
y = self.conv5(x.sparse())
x1 = self.global_max_pool(y)
x2 = self.global_avg_pool(y)
return self.final(ME.cat(x1, x2)).F
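# Informal note: compared to MinkowskiFCNN.forward, splat() scatters field
# features onto neighboring voxels instead of quantizing with sparse(), and
# interpolate() is the matching map from voxels back to field points.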
STR2NETWORK = dict(
pointnet=PointNet,
minkpointnet=MinkowskiPointNet,
minkfcnn=MinkowskiFCNN,
minksplatfcnn=MinkowskiSplatFCNN,
)
def create_input_batch(batch, is_minknet, device="cuda", quantization_size=0.05):
if is_minknet:
batch["coordinates"][:, 1:] = batch["coordinates"][:, 1:] / quantization_size
return ME.TensorField(
coordinates=batch["coordinates"],
features=batch["features"],
device=device,
)
else:
return batch["coordinates"].permute(0, 2, 1).to(device)
class CoordinateTranslation:
def __init__(self, translation):
self.trans = translation
def __call__(self, coords):
if self.trans > 0:
coords += np.random.uniform(low=-self.trans, high=self.trans, size=[1, 3])
return coords
def make_data_loader(phase, is_minknet, config):
assert phase in ["train", "val", "test"]
is_train = phase == "train"
dataset = ModelNet40H5(
phase=phase,
transform=CoordinateTransformation(trans=config.translation)
if is_train
else CoordinateTranslation(config.test_translation),
data_root="modelnet40_ply_hdf5_2048",
)
return DataLoader(
dataset,
num_workers=config.num_workers,
shuffle=is_train,
collate_fn=minkowski_collate_fn if is_minknet else stack_collate_fn,
batch_size=config.batch_size,
)
def test(net, device, config, phase="val"):
is_minknet = isinstance(net, ME.MinkowskiNetwork)
    data_loader = make_data_loader(
        phase,
        is_minknet,
        config=config,
    )
net.eval()
labels, preds = [], []
with torch.no_grad():
for batch in data_loader:
input = create_input_batch(
batch,
is_minknet,
device=device,
quantization_size=config.voxel_size,
)
logit = net(input)
pred = torch.argmax(logit, 1)
labels.append(batch["labels"].cpu().numpy())
preds.append(pred.cpu().numpy())
torch.cuda.empty_cache()
return metrics.accuracy_score(np.concatenate(labels), np.concatenate(preds))
def criterion(pred, labels, smoothing=True):
"""Calculate cross entropy loss, apply label smoothing if needed."""
labels = labels.contiguous().view(-1)
if smoothing:
eps = 0.2
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, labels.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(pred, labels, reduction="mean")
return loss
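# Worked example of the smoothing above: with eps=0.2 and n_class=5, a label of
# 0 becomes the soft target [0.8, 0.05, 0.05, 0.05, 0.05] (0.2 / 4 = 0.05 per
# wrong class), and the loss is the mean cross entropy between these targets
# and the log-softmax predictions.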
def train(net, device, config):
is_minknet = isinstance(net, ME.MinkowskiNetwork)
optimizer = optim.SGD(
net.parameters(),
lr=config.lr,
momentum=0.9,
weight_decay=config.weight_decay,
)
scheduler = optim.lr_scheduler.CosineAnnealingLR(
optimizer,
T_max=config.max_steps,
)
print(optimizer)
print(scheduler)
train_iter = iter(make_data_loader("train", is_minknet, config))
best_metric = 0
net.train()
for i in range(config.max_steps):
optimizer.zero_grad()
        try:
            data_dict = next(train_iter)
        except StopIteration:
            train_iter = iter(make_data_loader("train", is_minknet, config))
            data_dict = next(train_iter)
input = create_input_batch(
data_dict, is_minknet, device=device, quantization_size=config.voxel_size
)
logit = net(input)
loss = criterion(logit, data_dict["labels"].to(device))
loss.backward()
optimizer.step()
scheduler.step()
torch.cuda.empty_cache()
if i % config.stat_freq == 0:
print(f"Iter: {i}, Loss: {loss.item():.3e}")
if i % config.val_freq == 0 and i > 0:
torch.save(
{
"state_dict": net.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"curr_iter": i,
},
config.weights,
)
accuracy = test(net, device, config, phase="val")
if best_metric < accuracy:
best_metric = accuracy
print(f"Validation accuracy: {accuracy}. Best accuracy: {best_metric}")
net.train()
if __name__ == "__main__":
config = parser.parse_args()
seed_all(config.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("===================ModelNet40 Dataset===================")
print(f"Training with translation {config.translation}")
print(f"Evaluating with translation {config.test_translation}")
print("=============================================\n\n")
net = STR2NETWORK[config.network](
in_channel=3, out_channel=40, embedding_channel=1024
).to(device)
print("===================Network===================")
print(net)
print("=============================================\n\n")
train(net, device, config)
accuracy = test(net, device, config, phase="test")
print(f"Test accuracy: {accuracy}")
| MinkowskiEngine-master | examples/classification_modelnet40.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import numpy as np
import random
import time
import torch
from torch.utils.data.sampler import Sampler
class Timer(object):
"""A simple timer."""
def __init__(self):
self.reset()
def reset(self):
self.total_time = 0
self.calls = 0
self.start_time = 0
self.diff = 0
        self.average_time = 0
        self.min_time = np.inf
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=False):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if self.diff < self.min_time:
self.min_time = self.diff
if average:
return self.average_time
else:
return self.diff
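# Typical usage (sketch):
#   timer = Timer()
#   for _ in range(n):
#       timer.tic()
#       do_work()
#       timer.toc()
#   print(timer.average_time, timer.min_time)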
class InfSampler(Sampler):
"""Samples elements randomly, without replacement.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, data_source, shuffle=False):
self.data_source = data_source
self.shuffle = shuffle
self.reset_permutation()
    def reset_permutation(self):
        n = len(self.data_source)
        perm = torch.randperm(n) if self.shuffle else torch.arange(n)
        self._perm = perm.tolist()
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return len(self.data_source)
def seed_all(random_seed):
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
torch.cuda.manual_seed_all(random_seed)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
np.random.seed(random_seed)
random.seed(random_seed)
| MinkowskiEngine-master | examples/common.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
from urllib.request import urlretrieve
import numpy as np
import torch
import torch.nn as nn
from torch.optim import SGD
try:
import open3d as o3d
except ImportError:
raise ImportError("Please install open3d with `pip install open3d`.")
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
if not os.path.isfile("1.ply"):
    print('Downloading an example point cloud...')
urlretrieve("https://bit.ly/3c2iLhg", "1.ply")
def load_file(file_name):
pcd = o3d.io.read_point_cloud(file_name)
coords = np.array(pcd.points)
colors = np.array(pcd.colors)
return coords, colors, pcd
class ResNetBase(nn.Module):
BLOCK = None
LAYERS = ()
INIT_DIM = 64
PLANES = (64, 128, 256, 512)
def __init__(self, in_channels, out_channels, D=3):
nn.Module.__init__(self)
self.D = D
assert self.BLOCK is not None
self.network_initialization(in_channels, out_channels, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, D):
self.inplanes = self.INIT_DIM
self.conv1 = nn.Sequential(
ME.MinkowskiConvolution(
in_channels, self.inplanes, kernel_size=3, stride=2, dimension=D
),
ME.MinkowskiInstanceNorm(self.inplanes),
ME.MinkowskiReLU(inplace=True),
ME.MinkowskiMaxPooling(kernel_size=2, stride=2, dimension=D),
)
self.layer1 = self._make_layer(
self.BLOCK, self.PLANES[0], self.LAYERS[0], stride=2
)
self.layer2 = self._make_layer(
self.BLOCK, self.PLANES[1], self.LAYERS[1], stride=2
)
self.layer3 = self._make_layer(
self.BLOCK, self.PLANES[2], self.LAYERS[2], stride=2
)
self.layer4 = self._make_layer(
self.BLOCK, self.PLANES[3], self.LAYERS[3], stride=2
)
self.conv5 = nn.Sequential(
ME.MinkowskiDropout(),
ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=3, stride=3, dimension=D
),
ME.MinkowskiInstanceNorm(self.inplanes),
ME.MinkowskiGELU(),
)
self.glob_pool = ME.MinkowskiGlobalMaxPooling()
self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiConvolution):
ME.utils.kaiming_normal_(m.kernel, mode="fan_out", nonlinearity="relu")
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilation=1, bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
ME.MinkowskiConvolution(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
dimension=self.D,
),
ME.MinkowskiBatchNorm(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
dimension=self.D,
)
)
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes, planes, stride=1, dilation=dilation, dimension=self.D
)
)
return nn.Sequential(*layers)
def forward(self, x: ME.SparseTensor):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.conv5(x)
x = self.glob_pool(x)
return self.final(x)
class ResNet14(ResNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1)
class ResNet18(ResNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2)
class ResNet34(ResNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3)
class ResNet50(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3)
class ResNet101(ResNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3)
class ResFieldNetBase(ResNetBase):
def network_initialization(self, in_channels, out_channels, D):
field_ch = 32
field_ch2 = 64
self.field_network = nn.Sequential(
ME.MinkowskiSinusoidal(in_channels, field_ch),
ME.MinkowskiBatchNorm(field_ch),
ME.MinkowskiReLU(inplace=True),
ME.MinkowskiLinear(field_ch, field_ch),
ME.MinkowskiBatchNorm(field_ch),
ME.MinkowskiReLU(inplace=True),
ME.MinkowskiToSparseTensor(),
)
self.field_network2 = nn.Sequential(
ME.MinkowskiSinusoidal(field_ch + in_channels, field_ch2),
ME.MinkowskiBatchNorm(field_ch2),
ME.MinkowskiReLU(inplace=True),
ME.MinkowskiLinear(field_ch2, field_ch2),
ME.MinkowskiBatchNorm(field_ch2),
ME.MinkowskiReLU(inplace=True),
ME.MinkowskiToSparseTensor(),
)
ResNetBase.network_initialization(self, field_ch2, out_channels, D)
def forward(self, x: ME.TensorField):
otensor = self.field_network(x)
otensor2 = self.field_network2(otensor.cat_slice(x))
return ResNetBase.forward(self, otensor2)
class ResFieldNet14(ResFieldNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1)
class ResFieldNet18(ResFieldNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2)
class ResFieldNet34(ResFieldNetBase):
BLOCK = BasicBlock
LAYERS = (3, 4, 6, 3)
class ResFieldNet50(ResFieldNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 6, 3)
class ResFieldNet101(ResFieldNetBase):
BLOCK = Bottleneck
LAYERS = (3, 4, 23, 3)
if __name__ == "__main__":
# loss and network
voxel_size = 0.02
N_labels = 10
criterion = nn.CrossEntropyLoss()
net = ResNet14(in_channels=3, out_channels=N_labels, D=3)
print(net)
# a data loader must return a tuple of coords, features, and labels.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = net.to(device)
optimizer = SGD(net.parameters(), lr=1e-2)
coords, colors, pcd = load_file("1.ply")
coords = torch.from_numpy(coords)
# Get new data
coordinates = ME.utils.batched_coordinates(
[coords / voxel_size, coords / 2 / voxel_size, coords / 4 / voxel_size],
dtype=torch.float32,
)
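    # The three scaled copies above form a batch of 3, matching the
    # three-element dummy_label used in the loss below.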
features = torch.rand((len(coordinates), 3), device=device)
for i in range(10):
optimizer.zero_grad()
input = ME.SparseTensor(features, coordinates, device=device)
dummy_label = torch.randint(0, N_labels, (3,), device=device)
# Forward
output = net(input)
# Loss
loss = criterion(output.F, dummy_label)
print("Iteration: ", i, ", Loss: ", loss.item())
# Gradient
loss.backward()
optimizer.step()
# Saving and loading a network
torch.save(net.state_dict(), "test.pth")
net.load_state_dict(torch.load("test.pth"))
| MinkowskiEngine-master | examples/resnet.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import torch.nn as nn
from torch.optim import SGD
import MinkowskiEngine as ME
from tests.python.common import data_loader
class ExampleNetwork(ME.MinkowskiNetwork):
def __init__(self, in_feat, out_feat, D):
super(ExampleNetwork, self).__init__(D)
self.net = nn.Sequential(
ME.MinkowskiConvolution(
in_channels=in_feat,
out_channels=64,
kernel_size=3,
stride=2,
dilation=1,
bias=False,
dimension=D), ME.MinkowskiBatchNorm(64), ME.MinkowskiReLU(),
ME.MinkowskiConvolution(
in_channels=64,
out_channels=128,
kernel_size=3,
stride=2,
dimension=D), ME.MinkowskiBatchNorm(128), ME.MinkowskiReLU(),
ME.MinkowskiGlobalPooling(),
ME.MinkowskiLinear(128, out_feat))
def forward(self, x):
return self.net(x)
if __name__ == '__main__':
# loss and network
criterion = nn.CrossEntropyLoss()
net = ExampleNetwork(in_feat=3, out_feat=5, D=2)
print(net)
# a data loader must return a tuple of coords, features, and labels.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)
optimizer = SGD(net.parameters(), lr=1e-1)
for i in range(10):
optimizer.zero_grad()
# Get new data
coords, feat, label = data_loader()
input = ME.SparseTensor(feat, coords, device=device)
label = label.to(device)
# Forward
output = net(input)
# Loss
loss = criterion(output.F, label)
print('Iteration: ', i, ', Loss: ', loss.item())
# Gradient
loss.backward()
optimizer.step()
# Saving and loading a network
torch.save(net.state_dict(), 'test.pth')
net.load_state_dict(torch.load('test.pth'))
| MinkowskiEngine-master | examples/example.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import torch.nn as nn
import MinkowskiEngine as ME
import MinkowskiEngine.MinkowskiFunctional as MF
from tests.python.common import data_loader
class StackUNet(ME.MinkowskiNetwork):
def __init__(self, in_nchannel, out_nchannel, D):
ME.MinkowskiNetwork.__init__(self, D)
channels = [in_nchannel, 16, 32]
self.net = nn.Sequential(
ME.MinkowskiStackSum(
ME.MinkowskiConvolution(
channels[0],
channels[1],
kernel_size=3,
stride=1,
dimension=D,
),
nn.Sequential(
ME.MinkowskiConvolution(
channels[0],
channels[1],
kernel_size=3,
stride=2,
dimension=D,
),
ME.MinkowskiStackSum(
nn.Identity(),
nn.Sequential(
ME.MinkowskiConvolution(
channels[1],
channels[2],
kernel_size=3,
stride=2,
dimension=D,
),
ME.MinkowskiConvolutionTranspose(
channels[2],
channels[1],
kernel_size=3,
stride=1,
dimension=D,
),
ME.MinkowskiPoolingTranspose(
kernel_size=2, stride=2, dimension=D
),
),
),
ME.MinkowskiPoolingTranspose(kernel_size=2, stride=2, dimension=D),
),
),
ME.MinkowskiToFeature(),
nn.Linear(channels[1], out_nchannel, bias=True),
)
def forward(self, x):
return self.net(x)
if __name__ == "__main__":
# loss and network
net = StackUNet(3, 5, D=2)
print(net)
# a data loader must return a tuple of coords, features, and labels.
coords, feat, label = data_loader()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = net.to(device)
input = ME.SparseTensor(feat, coords, device=device)
# Forward
output = net(input)
| MinkowskiEngine-master | examples/stack_unet.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import random
import numpy as np
import glob
try:
import h5py
except ImportError:
print("Install h5py with `pip install h5py`")
import subprocess
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import MinkowskiEngine as ME
def minkowski_collate_fn(list_data):
coordinates_batch, features_batch, labels_batch = ME.utils.sparse_collate(
[d["coordinates"] for d in list_data],
[d["features"] for d in list_data],
[d["label"] for d in list_data],
dtype=torch.float32,
)
return {
"coordinates": coordinates_batch,
"features": features_batch,
"labels": labels_batch,
}
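# Informal shape note: sparse_collate prepends a batch index, so for B samples
# with N_i points each, "coordinates" is [sum(N_i), 1 + 3] with column 0 holding
# the sample index, and "features"/"labels" are concatenated along dim 0.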
def stack_collate_fn(list_data):
coordinates_batch, features_batch, labels_batch = (
torch.stack([d["coordinates"] for d in list_data]),
torch.stack([d["features"] for d in list_data]),
torch.cat([d["label"] for d in list_data]),
)
return {
"coordinates": coordinates_batch,
"features": features_batch,
"labels": labels_batch,
}
class PointNet(nn.Module):
def __init__(self, in_channel, out_channel, embedding_channel=1024):
super(PointNet, self).__init__()
self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.conv3 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.conv4 = nn.Conv1d(64, 128, kernel_size=1, bias=False)
self.conv5 = nn.Conv1d(128, embedding_channel, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
self.bn3 = nn.BatchNorm1d(64)
self.bn4 = nn.BatchNorm1d(128)
self.bn5 = nn.BatchNorm1d(embedding_channel)
self.linear1 = nn.Linear(embedding_channel, 512, bias=False)
self.bn6 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout()
self.linear2 = nn.Linear(512, out_channel, bias=True)
def forward(self, x: torch.Tensor):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = F.relu(self.bn4(self.conv4(x)))
x = F.relu(self.bn5(self.conv5(x)))
x = F.adaptive_max_pool1d(x, 1).squeeze()
x = F.relu(self.bn6(self.linear1(x)))
x = self.dp1(x)
x = self.linear2(x)
return x
# MinkowskiNet implementation of a pointnet.
#
# This network allows the number of points per batch to be arbitrary. For
# instance batch index 0 could have 500 points, batch index 1 could have 1000
# points.
class MinkowskiPointNet(ME.MinkowskiNetwork):
def __init__(self, in_channel, out_channel, embedding_channel=1024, dimension=3):
ME.MinkowskiNetwork.__init__(self, dimension)
self.conv1 = nn.Sequential(
ME.MinkowskiLinear(3, 64, bias=False),
ME.MinkowskiBatchNorm(64),
ME.MinkowskiReLU(),
)
self.conv2 = nn.Sequential(
ME.MinkowskiLinear(64, 64, bias=False),
ME.MinkowskiBatchNorm(64),
ME.MinkowskiReLU(),
)
self.conv3 = nn.Sequential(
ME.MinkowskiLinear(64, 64, bias=False),
ME.MinkowskiBatchNorm(64),
ME.MinkowskiReLU(),
)
self.conv4 = nn.Sequential(
ME.MinkowskiLinear(64, 128, bias=False),
ME.MinkowskiBatchNorm(128),
ME.MinkowskiReLU(),
)
self.conv5 = nn.Sequential(
ME.MinkowskiLinear(128, embedding_channel, bias=False),
ME.MinkowskiBatchNorm(embedding_channel),
ME.MinkowskiReLU(),
)
self.max_pool = ME.MinkowskiGlobalMaxPooling()
self.linear1 = nn.Sequential(
ME.MinkowskiLinear(embedding_channel, 512, bias=False),
ME.MinkowskiBatchNorm(512),
ME.MinkowskiReLU(),
)
self.dp1 = ME.MinkowskiDropout()
self.linear2 = ME.MinkowskiLinear(512, out_channel, bias=True)
def forward(self, x: ME.TensorField):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.max_pool(x)
x = self.linear1(x)
x = self.dp1(x)
return self.linear2(x).F
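# A minimal sketch (informal, random data) of the arbitrary-points-per-batch
# property noted above; shapes are illustrative assumptions:
#
#   samples = [
#       {"coordinates": torch.rand(500, 3), "features": torch.rand(500, 3),
#        "label": torch.tensor([0])},
#       {"coordinates": torch.rand(1000, 3), "features": torch.rand(1000, 3),
#        "label": torch.tensor([1])},
#   ]
#   batch = minkowski_collate_fn(samples)
#   net = MinkowskiPointNet(in_channel=3, out_channel=20)
#   logits = net(ME.TensorField(coordinates=batch["coordinates"],
#                               features=batch["features"]))  # [2, 20]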
class CoordinateTransformation:
def __init__(self, scale_range=(0.9, 1.1), trans=0.25, jitter=0.025, clip=0.05):
self.scale_range = scale_range
self.trans = trans
self.jitter = jitter
self.clip = clip
def __call__(self, coords):
if random.random() < 0.9:
coords *= np.random.uniform(
low=self.scale_range[0], high=self.scale_range[1], size=[1, 3]
)
if random.random() < 0.9:
coords += np.random.uniform(low=-self.trans, high=self.trans, size=[1, 3])
if random.random() < 0.7:
coords += np.clip(
self.jitter * (np.random.rand(len(coords), 3) - 0.5),
-self.clip,
self.clip,
)
return coords
def __repr__(self):
return f"Transformation(scale={self.scale_range}, translation={self.trans}, jitter={self.jitter})"
def download_modelnet40_dataset():
if not os.path.exists("modelnet40_ply_hdf5_2048.zip"):
print("Downloading the 2k downsampled ModelNet40 dataset...")
subprocess.run(
[
"wget",
"--no-check-certificate",
"https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip",
]
)
subprocess.run(["unzip", "modelnet40_ply_hdf5_2048.zip"])
class ModelNet40H5(Dataset):
def __init__(
self,
phase: str,
data_root: str = "modelnet40h5",
transform=None,
num_points=2048,
):
Dataset.__init__(self)
download_modelnet40_dataset()
phase = "test" if phase in ["val", "test"] else "train"
self.data, self.label = self.load_data(data_root, phase)
self.transform = transform
self.phase = phase
self.num_points = num_points
def load_data(self, data_root, phase):
data, labels = [], []
assert os.path.exists(data_root), f"{data_root} does not exist"
files = glob.glob(os.path.join(data_root, "ply_data_%s*.h5" % phase))
assert len(files) > 0, "No files found"
for h5_name in files:
            with h5py.File(h5_name, "r") as f:
data.extend(f["data"][:].astype("float32"))
labels.extend(f["label"][:].astype("int64"))
data = np.stack(data, axis=0)
labels = np.stack(labels, axis=0)
return data, labels
def __getitem__(self, i: int) -> dict:
xyz = self.data[i]
if self.phase == "train":
np.random.shuffle(xyz)
if len(xyz) > self.num_points:
xyz = xyz[: self.num_points]
if self.transform is not None:
xyz = self.transform(xyz)
label = self.label[i]
xyz = torch.from_numpy(xyz)
label = torch.from_numpy(label)
return {
"coordinates": xyz.to(torch.float32),
"features": xyz.to(torch.float32),
"label": label,
}
def __len__(self):
return self.data.shape[0]
def __repr__(self):
return f"ModelNet40H5(phase={self.phase}, length={len(self)}, transform={self.transform})"
if __name__ == "__main__":
dataset = ModelNet40H5(phase="train", data_root="modelnet40_ply_hdf5_2048")
# Use stack_collate_fn for pointnet
pointnet_data_loader = DataLoader(
dataset, num_workers=4, collate_fn=stack_collate_fn, batch_size=16,
)
    # Use minkowski_collate_fn for MinkowskiPointNet
minknet_data_loader = DataLoader(
dataset, num_workers=4, collate_fn=minkowski_collate_fn, batch_size=16,
)
# Network
pointnet = PointNet(in_channel=3, out_channel=20, embedding_channel=1024)
minkpointnet = MinkowskiPointNet(
in_channel=3, out_channel=20, embedding_channel=1024, dimension=3
)
for i, (pointnet_batch, minknet_batch) in enumerate(
zip(pointnet_data_loader, minknet_data_loader)
):
# PointNet.
# WARNING: PointNet inputs must have the same number of points.
pointnet_input = pointnet_batch["coordinates"].permute(0, 2, 1)
pred = pointnet(pointnet_input)
# MinkNet
        # Unlike PointNet, the number of points in each point cloud does not
        # need to be the same.
minknet_input = ME.TensorField(
coordinates=minknet_batch["coordinates"], features=minknet_batch["features"]
)
minkpointnet(minknet_input)
print(f"Processed batch {i}")
| MinkowskiEngine-master | examples/pointnet.py |
#!/usr/bin/env python
"""
File Name : MinkowskiEngine-multigpu_ddp
date : 16/12/2019
Author : wenbo
Email : [email protected]
Description :
_ _
( |---/ )
) . . (
________________________,--._(___Y___)_,--._______________________
`--' `--'
"""
import os
import argparse
import numpy as np
from time import time
from urllib.request import urlretrieve
import open3d as o3d
import torch
import torch.nn as nn
from torch.optim import SGD
import torch.multiprocessing as mp
import torch.distributed as dist
import MinkowskiEngine as ME
from examples.minkunet import MinkUNet34C
if not os.path.isfile("weights.pth"):
urlretrieve("http://cvgl.stanford.edu/data2/minkowskiengine/1.ply", "1.ply")
parser = argparse.ArgumentParser()
parser.add_argument("--file_name", type=str, default="1.ply")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--max_ngpu", type=int, default=2)
cache = {}
min_time = np.inf
def load_file(file_name, voxel_size):
if file_name not in cache:
pcd = o3d.io.read_point_cloud(file_name)
cache[file_name] = pcd
pcd = cache[file_name]
quantized_coords, feats = ME.utils.sparse_quantize(
np.array(pcd.points, dtype=np.float32),
np.array(pcd.colors, dtype=np.float32),
quantization_size=voxel_size,
)
random_labels = torch.zeros(len(feats))
return quantized_coords, feats, random_labels
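# Informal note: sparse_quantize maps points into a voxel_size grid and keeps one
# (coordinate, feature) pair per occupied voxel, so len(feats) is at most the
# original point count.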
def main():
# loss and network
config = parser.parse_args()
num_devices = torch.cuda.device_count()
num_devices = min(config.max_ngpu, num_devices)
print(
"Testing ",
num_devices,
" GPUs. Total batch size: ",
num_devices * config.batch_size,
)
config.world_size = num_devices
mp.spawn(main_worker, nprocs=num_devices, args=(num_devices, config))
def main_worker(gpu, ngpus_per_node, args):
global min_time
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
args.rank = 0 * ngpus_per_node + gpu
dist.init_process_group(
backend="nccl",
init_method="tcp://127.0.0.1:23456",
world_size=args.world_size,
rank=args.rank,
)
# create model
model = MinkUNet34C(3, 20, D=3)
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
# Synchronized batch norm
net = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(model)
optimizer = SGD(net.parameters(), lr=1e-1)
for iteration in range(10):
optimizer.zero_grad()
# Get new data
batch = [load_file(args.file_name, 0.05) for _ in range(args.batch_size)]
        coordinates_, features_, random_labels = list(zip(*batch))
        coordinates, features = ME.utils.sparse_collate(coordinates_, features_)
inputs = ME.SparseTensor(features, coordinates, device=args.gpu)
labels = torch.cat(random_labels).long().to(args.gpu)
# The raw version of the parallel_apply
st = time()
outputs = net(inputs)
# Extract features from the sparse tensors to use a pytorch criterion
out_features = outputs.F
loss = criterion(out_features, labels)
# Gradient
loss.backward()
optimizer.step()
t = torch.tensor(time() - st, dtype=torch.float).cuda(args.gpu)
dist.all_reduce(t)
min_time = min(t.detach().cpu().numpy() / ngpus_per_node, min_time)
print(
f"Iteration: {iteration}, Loss: {loss.item()}, Time: {t.detach().item()}, Min time: {min_time}"
)
        # Must clear the cache at regular intervals
if iteration % 10 == 0:
torch.cuda.empty_cache()
if __name__ == "__main__":
main()
| MinkowskiEngine-master | examples/multigpu_ddp.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import argparse
import numpy as np
from time import time
from urllib.request import urlretrieve
try:
import open3d as o3d
except ImportError:
raise ImportError("Please install open3d-python with `pip install open3d`.")
import torch
import torch.nn as nn
from torch.optim import SGD
import MinkowskiEngine as ME
from examples.minkunet import MinkUNet34C
import torch.nn.parallel as parallel
if not os.path.isfile("weights.pth"):
urlretrieve("http://cvgl.stanford.edu/data2/minkowskiengine/1.ply", "1.ply")
parser = argparse.ArgumentParser()
parser.add_argument("--file_name", type=str, default="1.ply")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--max_ngpu", type=int, default=2)
cache = {}
def load_file(file_name, voxel_size):
if file_name not in cache:
pcd = o3d.io.read_point_cloud(file_name)
cache[file_name] = pcd
pcd = cache[file_name]
quantized_coords, feats = ME.utils.sparse_quantize(
np.array(pcd.points, dtype=np.float32),
np.array(pcd.colors, dtype=np.float32),
quantization_size=voxel_size,
)
random_labels = torch.zeros(len(feats))
return quantized_coords, feats, random_labels
def generate_input(file_name, voxel_size):
    # Create a batch; during training this is done in parallel inside a data loader.
batch = [load_file(file_name, voxel_size)]
    coordinates_, features_, labels_ = list(zip(*batch))
    coordinates, features, labels = ME.utils.sparse_collate(
        coordinates_, features_, labels_
    )
# Normalize features and create a sparse tensor
return coordinates, (features - 0.5).float(), labels
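# The (features - 0.5) shift above centers the [0, 1] point colors around zero,
# a simple input normalization.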
if __name__ == "__main__":
# loss and network
config = parser.parse_args()
num_devices = torch.cuda.device_count()
num_devices = min(config.max_ngpu, num_devices)
devices = list(range(num_devices))
print("''''''''''''''''''''''''''''''''''''''''''''''''''''''''''")
print("' WARNING: This example is deprecated. '")
print("' Please use DistributedDataParallel or pytorch-lightning'")
print("''''''''''''''''''''''''''''''''''''''''''''''''''''''''''")
print(
f"Testing {num_devices} GPUs. Total batch size: {num_devices * config.batch_size}"
)
# For copying the final loss back to one GPU
target_device = devices[0]
# Copy the network to GPU
net = MinkUNet34C(3, 20, D=3)
net = net.to(target_device)
# Synchronized batch norm
net = ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(net)
optimizer = SGD(net.parameters(), lr=1e-1)
# Copy the loss layer
criterion = nn.CrossEntropyLoss()
criterions = parallel.replicate(criterion, devices)
min_time = np.inf
for iteration in range(10):
optimizer.zero_grad()
# Get new data
inputs, all_labels = [], []
for i in range(num_devices):
coordinates, features, labels = generate_input(config.file_name, 0.05)
with torch.cuda.device(devices[i]):
inputs.append(ME.SparseTensor(features, coordinates, device=devices[i]))
all_labels.append(labels.long().to(devices[i]))
# The raw version of the parallel_apply
st = time()
replicas = parallel.replicate(net, devices)
outputs = parallel.parallel_apply(replicas, inputs, devices=devices)
# Extract features from the sparse tensors to use a pytorch criterion
out_features = [output.F for output in outputs]
losses = parallel.parallel_apply(
criterions, tuple(zip(out_features, all_labels)), devices=devices
)
loss = parallel.gather(losses, target_device, dim=0).mean()
# Gradient
loss.backward()
optimizer.step()
t = time() - st
min_time = min(t, min_time)
print(
f"Iteration: {iteration}, Loss: {loss.item()}, Time: {t}, Min time: {min_time}"
)
        # Must clear the cache at regular intervals
if iteration % 10 == 0:
torch.cuda.empty_cache()
| MinkowskiEngine-master | examples/multigpu.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import MinkowskiEngine as ME
data_batch_0 = [
[0, 0, 2.1, 0, 0], #
[0, 1, 1.4, 3, 0], #
[0, 0, 4.0, 0, 0]
]
data_batch_1 = [
[1, 0, 0], #
[0, 2, 0], #
[0, 0, 3]
]
def to_sparse_coo(data):
# An intuitive way to extract coordinates and features
coords, feats = [], []
for i, row in enumerate(data):
for j, val in enumerate(row):
if val != 0:
coords.append([i, j])
feats.append([val])
return torch.IntTensor(coords), torch.FloatTensor(feats)
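# For example, data_batch_0 above yields (reading nonzeros row-major):
#   coords = [[0, 2], [1, 1], [1, 2], [1, 3], [2, 2]]
#   feats  = [[2.1], [1.0], [1.4], [3.0], [4.0]]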
def sparse_tensor_initialization():
coords, feats = to_sparse_coo(data_batch_0)
    # collate sparse tensor data and prepend batch indices
    # Note that coords and feats are each wrapped inside a list!
coords, feats = ME.utils.sparse_collate(coords=[coords], feats=[feats])
sparse_tensor = ME.SparseTensor(coordinates=coords, features=feats)
def sparse_tensor_arithmetics():
coords0, feats0 = to_sparse_coo(data_batch_0)
coords0, feats0 = ME.utils.sparse_collate(coords=[coords0], feats=[feats0])
coords1, feats1 = to_sparse_coo(data_batch_1)
coords1, feats1 = ME.utils.sparse_collate(coords=[coords1], feats=[feats1])
# sparse tensors
A = ME.SparseTensor(coordinates=coords0, features=feats0)
B = ME.SparseTensor(coordinates=coords1, features=feats1)
    # The following fails because A and B have different coordinate managers
try:
C = A + B
except AssertionError:
pass
B = ME.SparseTensor(
coordinates=coords1,
features=feats1,
coordinate_manager=A.coordinate_manager # must share the same coordinate manager
)
C = A + B
C = A - B
C = A * B
C = A / B
# in place operations
# Note that it requires the same coords_key (no need to feed coords)
D = ME.SparseTensor(
        # coordinates are not required when coordinate_map_key is given
features=feats0,
coordinate_manager=A.coordinate_manager, # must share the same coordinate manager
coordinate_map_key=A.coordinate_map_key # For inplace, must share the same coords key
)
A += D
A -= D
A *= D
A /= D
# If you have two or more sparse tensors with the same coords_key, you can concatenate features
E = ME.cat(A, D)
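    # Since A and D share the same coordinate_map_key, ME.cat concatenates their
    # features channel-wise: E.F has shape [len(feats0), 2] here (1 + 1 channels).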
def operation_mode():
# Set to share the coordinate_manager by default
ME.set_sparse_tensor_operation_mode(
ME.SparseTensorOperationMode.SHARE_COORDINATE_MANAGER)
print(ME.sparse_tensor_operation_mode())
coords0, feats0 = to_sparse_coo(data_batch_0)
coords0, feats0 = ME.utils.sparse_collate(coords=[coords0], feats=[feats0])
coords1, feats1 = to_sparse_coo(data_batch_1)
coords1, feats1 = ME.utils.sparse_collate(coords=[coords1], feats=[feats1])
for _ in range(2):
# sparse tensors
A = ME.SparseTensor(coordinates=coords0, features=feats0)
B = ME.SparseTensor(
coordinates=coords1,
features=feats1,
            # coordinate_manager not needed here: SHARE_COORDINATE_MANAGER mode supplies it
)
C = A + B
    # When done with the forward and backward passes, you must clean up the
    # global coordinate manager
ME.clear_global_coordinate_manager()
def decomposition():
coords0, feats0 = to_sparse_coo(data_batch_0)
coords1, feats1 = to_sparse_coo(data_batch_1)
coords, feats = ME.utils.sparse_collate(
coords=[coords0, coords1], feats=[feats0, feats1])
# sparse tensors
A = ME.SparseTensor(coordinates=coords, features=feats)
conv = ME.MinkowskiConvolution(
in_channels=1, out_channels=2, kernel_size=3, stride=2, dimension=2)
B = conv(A)
# Extract features and coordinates per batch index
list_of_coords = B.decomposed_coordinates
list_of_feats = B.decomposed_features
list_of_coords, list_of_feats = B.decomposed_coordinates_and_features
# To specify a batch index
batch_index = 1
coords = B.coordinates_at(batch_index)
feats = B.features_at(batch_index)
    # An empty tensor is returned for a batch index with no points
batch_index = 3
print(B.coordinates_at(batch_index))
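    # Informal note: decomposed_coordinates / decomposed_features return one
    # tensor per batch index (two here), while coordinates_at / features_at
    # index a single batch element.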
if __name__ == '__main__':
sparse_tensor_initialization()
sparse_tensor_arithmetics()
operation_mode()
decomposition()
| MinkowskiEngine-master | examples/sparse_tensor_basic.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import sys
import subprocess
import argparse
import logging
import glob
import numpy as np
from time import time
import urllib
# Must be imported before large libs
try:
import open3d as o3d
except ImportError:
raise ImportError("Please install open3d with `pip install open3d`.")
import torch
import torch.nn as nn
import torch.utils.data
import torch.optim as optim
import MinkowskiEngine as ME
from examples.reconstruction import InfSampler, resample_mesh
M = np.array(
[
[0.80656762, -0.5868724, -0.07091862],
[0.3770505, 0.418344, 0.82632997],
[-0.45528188, -0.6932309, 0.55870326],
]
)
if not os.path.exists("ModelNet40"):
logging.info("Downloading the fixed ModelNet40 dataset...")
subprocess.run(["sh", "./examples/download_modelnet40.sh"])
###############################################################################
# Utility functions
###############################################################################
def PointCloud(points, colors=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
if colors is not None:
pcd.colors = o3d.utility.Vector3dVector(colors)
return pcd
def collate_pointcloud_fn(list_data):
coords, feats, labels = list(zip(*list_data))
# Concatenate all lists
return {
"coords": ME.utils.batched_coordinates(coords),
"xyzs": [torch.from_numpy(feat).float() for feat in feats],
"labels": torch.LongTensor(labels),
}
class ModelNet40Dataset(torch.utils.data.Dataset):
def __init__(self, phase, transform=None, config=None):
self.phase = phase
self.files = []
self.cache = {}
self.data_objects = []
self.transform = transform
self.resolution = config.resolution
self.last_cache_percent = 0
self.root = "./ModelNet40"
fnames = glob.glob(os.path.join(self.root, f"chair/{phase}/*.off"))
fnames = sorted([os.path.relpath(fname, self.root) for fname in fnames])
self.files = fnames
assert len(self.files) > 0, "No file loaded"
logging.info(
f"Loading the subset {phase} from {self.root} with {len(self.files)} files"
)
self.density = 30000
# Ignore warnings in obj loader
o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Error)
def __len__(self):
return len(self.files)
def __getitem__(self, idx):
mesh_file = os.path.join(self.root, self.files[idx])
if idx in self.cache:
xyz = self.cache[idx]
else:
            # Load a mesh, oversample, copy, rotate, and voxelize
assert os.path.exists(mesh_file)
pcd = o3d.io.read_triangle_mesh(mesh_file)
# Normalize to fit the mesh inside a unit cube while preserving aspect ratio
vertices = np.asarray(pcd.vertices)
vmax = vertices.max(0, keepdims=True)
vmin = vertices.min(0, keepdims=True)
pcd.vertices = o3d.utility.Vector3dVector(
(vertices - vmin) / (vmax - vmin).max()
)
# Oversample points and copy
xyz = resample_mesh(pcd, density=self.density)
self.cache[idx] = xyz
cache_percent = int((len(self.cache) / len(self)) * 100)
if (
cache_percent > 0
and cache_percent % 10 == 0
and cache_percent != self.last_cache_percent
):
logging.info(
f"Cached {self.phase}: {len(self.cache)} / {len(self)}: {cache_percent}%"
)
self.last_cache_percent = cache_percent
# Use color or other features if available
feats = np.ones((len(xyz), 1))
if len(xyz) < 1000:
logging.info(
f"Skipping {mesh_file}: does not have sufficient CAD sampling density after resampling: {len(xyz)}."
)
return None
if self.transform:
xyz, feats = self.transform(xyz, feats)
# Get coords
xyz = xyz * self.resolution
coords = np.floor(xyz)
inds = ME.utils.sparse_quantize(
coords, return_index=True, return_maps_only=True
)
return (coords[inds], xyz[inds], idx)
def make_data_loader(
phase, augment_data, batch_size, shuffle, num_workers, repeat, config
):
dset = ModelNet40Dataset(phase, config=config)
args = {
"batch_size": batch_size,
"num_workers": num_workers,
"collate_fn": collate_pointcloud_fn,
"pin_memory": False,
"drop_last": False,
}
if repeat:
args["sampler"] = InfSampler(dset, shuffle)
else:
args["shuffle"] = shuffle
loader = torch.utils.data.DataLoader(dset, **args)
return loader
ch = logging.StreamHandler(sys.stdout)
logging.getLogger().setLevel(logging.INFO)
logging.basicConfig(
format=os.uname()[1].split(".")[0] + " %(asctime)s %(message)s",
datefmt="%m/%d %H:%M:%S",
handlers=[ch],
)
parser = argparse.ArgumentParser()
parser.add_argument("--resolution", type=int, default=128)
parser.add_argument("--max_iter", type=int, default=30000)
parser.add_argument("--val_freq", type=int, default=1000)
parser.add_argument("--batch_size", default=16, type=int)
parser.add_argument("--lr", default=1e-2, type=float)
parser.add_argument("--momentum", type=float, default=0.9)
parser.add_argument("--weight_decay", type=float, default=1e-4)
parser.add_argument("--num_workers", type=int, default=1)
parser.add_argument("--stat_freq", type=int, default=50)
parser.add_argument("--weights", type=str, default="modelnet_vae.pth")
parser.add_argument("--resume", type=str, default=None)
parser.add_argument("--load_optimizer", type=str, default="true")
parser.add_argument("--train", action="store_true")
parser.add_argument("--max_visualization", type=int, default=4)
###############################################################################
# End of utility functions
###############################################################################
class Encoder(nn.Module):
CHANNELS = [16, 32, 64, 128, 256, 512, 1024]
def __init__(self):
nn.Module.__init__(self)
# Input sparse tensor must have tensor stride 128.
ch = self.CHANNELS
# Block 1
self.block1 = nn.Sequential(
ME.MinkowskiConvolution(1, ch[0], kernel_size=3, stride=2, dimension=3),
ME.MinkowskiBatchNorm(ch[0]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[0], ch[0], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[0]),
ME.MinkowskiELU(),
)
self.block2 = nn.Sequential(
ME.MinkowskiConvolution(ch[0], ch[1], kernel_size=3, stride=2, dimension=3),
ME.MinkowskiBatchNorm(ch[1]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[1], ch[1], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[1]),
ME.MinkowskiELU(),
)
self.block3 = nn.Sequential(
ME.MinkowskiConvolution(ch[1], ch[2], kernel_size=3, stride=2, dimension=3),
ME.MinkowskiBatchNorm(ch[2]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[2], ch[2], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[2]),
ME.MinkowskiELU(),
)
self.block4 = nn.Sequential(
ME.MinkowskiConvolution(ch[2], ch[3], kernel_size=3, stride=2, dimension=3),
ME.MinkowskiBatchNorm(ch[3]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[3], ch[3], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[3]),
ME.MinkowskiELU(),
)
self.block5 = nn.Sequential(
ME.MinkowskiConvolution(ch[3], ch[4], kernel_size=3, stride=2, dimension=3),
ME.MinkowskiBatchNorm(ch[4]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[4], ch[4], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[4]),
ME.MinkowskiELU(),
)
self.block6 = nn.Sequential(
ME.MinkowskiConvolution(ch[4], ch[5], kernel_size=3, stride=2, dimension=3),
ME.MinkowskiBatchNorm(ch[5]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[5], ch[5], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[5]),
ME.MinkowskiELU(),
)
self.block7 = nn.Sequential(
ME.MinkowskiConvolution(ch[5], ch[6], kernel_size=3, stride=2, dimension=3),
ME.MinkowskiBatchNorm(ch[6]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[6], ch[6], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[6]),
ME.MinkowskiELU(),
)
self.global_pool = ME.MinkowskiGlobalPooling()
self.linear_mean = ME.MinkowskiLinear(ch[6], ch[6], bias=True)
self.linear_log_var = ME.MinkowskiLinear(ch[6], ch[6], bias=True)
self.weight_initialization()
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiConvolution):
ME.utils.kaiming_normal_(m.kernel, mode="fan_out", nonlinearity="relu")
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def forward(self, sinput):
out = self.block1(sinput)
out = self.block2(out)
out = self.block3(out)
out = self.block4(out)
out = self.block5(out)
out = self.block6(out)
out = self.block7(out)
out = self.global_pool(out)
mean = self.linear_mean(out)
log_var = self.linear_log_var(out)
return mean, log_var
class Decoder(nn.Module):
CHANNELS = [1024, 512, 256, 128, 64, 32, 16]
resolution = 128
def __init__(self):
nn.Module.__init__(self)
# Input sparse tensor must have tensor stride 128.
ch = self.CHANNELS
# Block 1
self.block1 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[0], ch[0], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[0]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[0], ch[0], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[0]),
ME.MinkowskiELU(),
ME.MinkowskiGenerativeConvolutionTranspose(
ch[0], ch[1], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[1]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[1], ch[1], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[1]),
ME.MinkowskiELU(),
)
self.block1_cls = ME.MinkowskiConvolution(
ch[1], 1, kernel_size=1, bias=True, dimension=3
)
# Block 2
self.block2 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[1], ch[2], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[2]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[2], ch[2], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[2]),
ME.MinkowskiELU(),
)
self.block2_cls = ME.MinkowskiConvolution(
ch[2], 1, kernel_size=1, bias=True, dimension=3
)
# Block 3
self.block3 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[2], ch[3], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[3]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[3], ch[3], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[3]),
ME.MinkowskiELU(),
)
self.block3_cls = ME.MinkowskiConvolution(
ch[3], 1, kernel_size=1, bias=True, dimension=3
)
# Block 4
self.block4 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[3], ch[4], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[4]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[4], ch[4], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[4]),
ME.MinkowskiELU(),
)
self.block4_cls = ME.MinkowskiConvolution(
ch[4], 1, kernel_size=1, bias=True, dimension=3
)
# Block 5
self.block5 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[4], ch[5], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[5]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[5], ch[5], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[5]),
ME.MinkowskiELU(),
)
self.block5_cls = ME.MinkowskiConvolution(
ch[5], 1, kernel_size=1, bias=True, dimension=3
)
# Block 6
self.block6 = nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
ch[5], ch[6], kernel_size=2, stride=2, dimension=3
),
ME.MinkowskiBatchNorm(ch[6]),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(ch[6], ch[6], kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(ch[6]),
ME.MinkowskiELU(),
)
self.block6_cls = ME.MinkowskiConvolution(
ch[6], 1, kernel_size=1, bias=True, dimension=3
)
# pruning
self.pruning = ME.MinkowskiPruning()
def get_batch_indices(self, out):
return out.coordinate_manager.get_row_indices_per_batch(out.coordinate_map_key)
@torch.no_grad()
def get_target(self, out, target_key, kernel_size=1):
target = torch.zeros(len(out), dtype=torch.bool, device=out.device)
cm = out.coordinate_manager
strided_target_key = cm.stride(target_key, out.tensor_stride[0])
kernel_map = cm.kernel_map(
out.coordinate_map_key,
strided_target_key,
kernel_size=kernel_size,
region_type=1,
)
for k, curr_in in kernel_map.items():
target[curr_in[0].long()] = 1
return target
def valid_batch_map(self, batch_map):
for b in batch_map:
if len(b) == 0:
return False
return True
def forward(self, z_glob, target_key):
out_cls, targets = [], []
z = ME.SparseTensor(
features=z_glob.F,
coordinates=z_glob.C,
tensor_stride=self.resolution,
coordinate_manager=z_glob.coordinate_manager,
)
# Block1
out1 = self.block1(z)
out1_cls = self.block1_cls(out1)
target = self.get_target(out1, target_key)
targets.append(target)
out_cls.append(out1_cls)
keep1 = (out1_cls.F > 0).squeeze()
# If training, force target shape generation, use net.eval() to disable
if self.training:
keep1 += target
# Remove voxels 32
out1 = self.pruning(out1, keep1)
# Block 2
out2 = self.block2(out1)
out2_cls = self.block2_cls(out2)
target = self.get_target(out2, target_key)
targets.append(target)
out_cls.append(out2_cls)
keep2 = (out2_cls.F > 0).squeeze()
if self.training:
keep2 += target
# Remove voxels 16
out2 = self.pruning(out2, keep2)
# Block 3
out3 = self.block3(out2)
out3_cls = self.block3_cls(out3)
target = self.get_target(out3, target_key)
targets.append(target)
out_cls.append(out3_cls)
keep3 = (out3_cls.F > 0).squeeze()
if self.training:
keep3 += target
# Remove voxels 8
out3 = self.pruning(out3, keep3)
# Block 4
out4 = self.block4(out3)
out4_cls = self.block4_cls(out4)
target = self.get_target(out4, target_key)
targets.append(target)
out_cls.append(out4_cls)
keep4 = (out4_cls.F > 0).squeeze()
if self.training:
keep4 += target
# Remove voxels 4
out4 = self.pruning(out4, keep4)
# Block 5
out5 = self.block5(out4)
out5_cls = self.block5_cls(out5)
target = self.get_target(out5, target_key)
targets.append(target)
out_cls.append(out5_cls)
keep5 = (out5_cls.F > 0).squeeze()
if self.training:
keep5 += target
# Remove voxels 2
out5 = self.pruning(out5, keep5)
# Block 6
out6 = self.block6(out5)
out6_cls = self.block6_cls(out6)
target = self.get_target(out6, target_key)
targets.append(target)
out_cls.append(out6_cls)
keep6 = (out6_cls.F > 0).squeeze()
# Last layer does not require keep
# if self.training:
# keep6 += target
# Remove voxels 1
if keep6.sum() > 0:
out6 = self.pruning(out6, keep6)
return out_cls, targets, out6
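# A note on the pruning scheme above (a summary of the code, not new
# behavior): at each decoder level, voxels with a positive occupancy logit
# are kept (keep = logits > 0). During training, keep is OR-ed with the
# ground-truth targets (+= on bool tensors acts as logical OR), so the
# decoder always grows along the true shape even while the per-level
# classifiers are still inaccurate.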
class VAE(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.encoder = Encoder()
self.decoder = Decoder()
def forward(self, sinput, gt_target):
means, log_vars = self.encoder(sinput)
zs = means
if self.training:
zs = zs + torch.exp(0.5 * log_vars.F) * torch.randn_like(log_vars.F)
out_cls, targets, sout = self.decoder(zs, gt_target)
return out_cls, targets, sout, means, log_vars, zs
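# The training loss used below is the standard VAE objective, stated here
# for reference: the per-level occupancy BCE averaged over decoder levels,
# plus the closed-form KL divergence between N(mu, sigma^2) and N(0, 1),
#   KLD = -0.5 * (1 + log(sigma^2) - mu^2 - sigma^2),
# which matches the -0.5 * (1 + log_var - mean^2 - exp(log_var)) expression
# in the code.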
def train(net, dataloader, device, config):
optimizer = optim.SGD(
net.parameters(),
lr=config.lr,
momentum=config.momentum,
weight_decay=config.weight_decay,
)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, 0.95)
crit = nn.BCEWithLogitsLoss()
start_iter = 0
if config.resume is not None:
checkpoint = torch.load(config.resume)
print("Resuming weights")
net.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
start_iter = checkpoint["curr_iter"]
net.train()
train_iter = iter(dataloader)
# val_iter = iter(val_dataloader)
logging.info(f"LR: {scheduler.get_lr()}")
for i in range(start_iter, config.max_iter):
s = time()
data_dict = next(train_iter)
d = time() - s
optimizer.zero_grad()
sin = ME.SparseTensor(
features=torch.ones(len(data_dict["coords"]), 1),
coordinates=data_dict["coords"].int(),
device=device,
)
# Generate target sparse tensor
target_key = sin.coordinate_map_key
out_cls, targets, sout, means, log_vars, zs = net(sin, target_key)
num_layers, BCE = len(out_cls), 0
losses = []
for out_cl, target in zip(out_cls, targets):
curr_loss = crit(out_cl.F.squeeze(), target.type(out_cl.F.dtype).to(device))
losses.append(curr_loss.item())
BCE += curr_loss / num_layers
KLD = -0.5 * torch.mean(
torch.mean(1 + log_vars.F - means.F.pow(2) - log_vars.F.exp(), 1)
)
loss = KLD + BCE
loss.backward()
optimizer.step()
t = time() - s
if i % config.stat_freq == 0:
logging.info(
f"Iter: {i}, Loss: {loss.item():.3e}, Depths: {len(out_cls)} Data Loading Time: {d:.3e}, Tot Time: {t:.3e}"
)
if i % config.val_freq == 0 and i > 0:
torch.save(
{
"state_dict": net.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"curr_iter": i,
},
config.weights,
)
scheduler.step()
logging.info(f"LR: {scheduler.get_lr()}")
net.train()
def visualize(net, dataloader, device, config):
net.eval()
crit = nn.BCEWithLogitsLoss()
n_vis = 0
for data_dict in dataloader:
sin = ME.SparseTensor(
torch.ones(len(data_dict["coords"]), 1),
data_dict["coords"].int(),
device=device,
)
# Generate target sparse tensor
target_key = sin.coordinate_map_key
out_cls, targets, sout, means, log_vars, zs = net(sin, target_key)
num_layers, BCE = len(out_cls), 0
losses = []
for out_cl, target in zip(out_cls, targets):
curr_loss = crit(out_cl.F.squeeze(), target.type(out_cl.F.dtype).to(device))
losses.append(curr_loss.item())
BCE += curr_loss / num_layers
KLD = -0.5 * torch.mean(
torch.sum(1 + log_vars.F - means.F.pow(2) - log_vars.F.exp(), 1)
)
loss = KLD + BCE
print(loss)
batch_coords, batch_feats = sout.decomposed_coordinates_and_features
for b, (coords, feats) in enumerate(zip(batch_coords, batch_feats)):
pcd = PointCloud(coords)
pcd.estimate_normals()
pcd.translate([0.6 * config.resolution, 0, 0])
pcd.rotate(M)
opcd = PointCloud(data_dict["xyzs"][b])
opcd.translate([-0.6 * config.resolution, 0, 0])
opcd.estimate_normals()
opcd.rotate(M)
o3d.visualization.draw_geometries([pcd, opcd])
n_vis += 1
if n_vis > config.max_visualization:
return
if __name__ == "__main__":
config = parser.parse_args()
logging.info(config)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = VAE()
net.to(device)
logging.info(net)
if config.train:
dataloader = make_data_loader(
"train",
augment_data=True,
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers,
repeat=True,
config=config,
)
train(net, dataloader, device, config)
else:
if not os.path.exists(config.weights):
logging.info(f"Downloaing pretrained weights. This might take a while...")
urllib.request.urlretrieve(
"https://bit.ly/39TvWys", filename=config.weights
)
logging.info(f"Loading weights from {config.weights}")
checkpoint = torch.load(config.weights)
net.load_state_dict(checkpoint["state_dict"])
dataloader = make_data_loader(
"test",
augment_data=True,
batch_size=config.batch_size,
shuffle=True,
num_workers=config.num_workers,
repeat=True,
config=config,
)
with torch.no_grad():
visualize(net, dataloader, device, config)
| MinkowskiEngine-master | examples/vae.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
#
# ############################################################################
# Example training to demonstrate usage of MinkowskiEngine with torch dataset
# and dataloader classes.
#
# $ python -m examples.training
# Epoch: 0 iter: 1, Loss: 0.7992178201675415
# Epoch: 0 iter: 10, Loss: 0.5555745628145006
# Epoch: 0 iter: 20, Loss: 0.4025680094957352
# Epoch: 0 iter: 30, Loss: 0.3157463788986206
# Epoch: 0 iter: 40, Loss: 0.27348957359790804
# Epoch: 0 iter: 50, Loss: 0.2690591633319855
# Epoch: 0 iter: 60, Loss: 0.258208692073822
# Epoch: 0 iter: 70, Loss: 0.34842072874307634
# Epoch: 0 iter: 80, Loss: 0.27565130293369294
# Epoch: 0 iter: 90, Loss: 0.2860450878739357
# Epoch: 0 iter: 100, Loss: 0.24737665355205535
# Epoch: 1 iter: 110, Loss: 0.2428090125322342
# Epoch: 1 iter: 120, Loss: 0.25397603064775465
# Epoch: 1 iter: 130, Loss: 0.23624965399503708
# Epoch: 1 iter: 140, Loss: 0.2247777447104454
# Epoch: 1 iter: 150, Loss: 0.22956613600254058
# Epoch: 1 iter: 160, Loss: 0.22803852707147598
# Epoch: 1 iter: 170, Loss: 0.24081039279699326
# Epoch: 1 iter: 180, Loss: 0.22322929948568343
# Epoch: 1 iter: 190, Loss: 0.22531934976577758
# Epoch: 1 iter: 200, Loss: 0.2116936132311821
#
# ############################################################################
import argparse
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import MinkowskiEngine as ME
from examples.unet import UNet
def plot(C, L):
import matplotlib.pyplot as plt
mask = L == 0
cC = C[mask].t().numpy()
plt.scatter(cC[0], cC[1], c='r', s=0.1)
mask = L == 1
cC = C[mask].t().numpy()
plt.scatter(cC[0], cC[1], c='b', s=0.1)
plt.show()
class RandomLineDataset(Dataset):
# Warning: beware of using mutable objects as default arguments in Python.
def __init__(
self,
angle_range_rad=[-np.pi, np.pi],
line_params=[
-1, # Start
1, # end
],
is_linear_noise=True,
dataset_size=100,
num_samples=10000,
quantization_size=0.005):
self.angle_range_rad = angle_range_rad
self.is_linear_noise = is_linear_noise
self.line_params = line_params
self.dataset_size = dataset_size
self.rng = np.random.RandomState(0)
self.num_samples = num_samples
self.num_data = int(0.2 * num_samples)
self.num_noise = num_samples - self.num_data
self.quantization_size = quantization_size
def __len__(self):
return self.dataset_size
def _uniform_to_angle(self, u):
return (self.angle_range_rad[1] -
self.angle_range_rad[0]) * u + self.angle_range_rad[0]
def _sample_noise(self, num, noise_params):
noise = noise_params[0] + self.rng.randn(num, 1) * noise_params[1]
return noise
def _sample_xs(self, num):
"""Return random numbers between line_params[0], line_params[1]"""
return (self.line_params[1] - self.line_params[0]) * self.rng.rand(
num, 1) + self.line_params[0]
def __getitem__(self, i):
# Regardless of the input index, return randomized data
slope, intercept = np.tan(self._uniform_to_angle(
self.rng.rand())), self.rng.rand()
# Line as y = slope * x + intercept, with x sampled uniformly from line_params
# Drop some samples
xs_data = self._sample_xs(self.num_data)
ys_data = slope * xs_data + intercept + self._sample_noise(
self.num_data, [0, 0.1])
noise = 4 * (self.rng.rand(self.num_noise, 2) - 0.5)
# Concatenate data
input = np.vstack([np.hstack([xs_data, ys_data]), noise])
feats = input
labels = np.vstack(
[np.ones((self.num_data, 1)),
np.zeros((self.num_noise, 1))]).astype(np.int32)
# Quantize the input
discrete_coords, unique_feats, unique_labels = ME.utils.sparse_quantize(
coordinates=input,
features=feats,
labels=labels,
quantization_size=self.quantization_size,
ignore_label=-100)
return discrete_coords, unique_feats, unique_labels
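# A hypothetical miniature of the quantization above: with
# quantization_size=0.5, the points (0.1, 0.2) and (0.3, 0.4) land in the
# same voxel; sparse_quantize then returns a single coordinate for that
# voxel, and if the two points carry different labels the voxel is assigned
# ignore_label (-100) so the loss can skip it.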
def collation_fn(data_labels):
coords, feats, labels = list(zip(*data_labels))
coords_batch, feats_batch, labels_batch = [], [], []
# Generate batched coordinates
coords_batch = ME.utils.batched_coordinates(coords)
# Concatenate all lists
feats_batch = torch.from_numpy(np.concatenate(feats, 0)).float()
labels_batch = torch.from_numpy(np.concatenate(labels, 0))
return coords_batch, feats_batch, labels_batch
def main(config):
# Binary classification
net = UNet(
2, # in nchannel
2, # out_nchannel
D=2)
optimizer = optim.SGD(
net.parameters(),
lr=config.lr,
momentum=config.momentum,
weight_decay=config.weight_decay)
criterion = torch.nn.CrossEntropyLoss(ignore_index=-100)
# Dataset, data loader
train_dataset = RandomLineDataset()
train_dataloader = DataLoader(
train_dataset,
batch_size=config.batch_size,
# 1) collate_fn=collation_fn,
# 2) collate_fn=ME.utils.batch_sparse_collate,
# 3) collate_fn=ME.utils.SparseCollation(),
collate_fn=ME.utils.batch_sparse_collate,
num_workers=1)
accum_loss, accum_iter, tot_iter = 0, 0, 0
for epoch in range(config.max_epochs):
train_iter = iter(train_dataloader)
# Training
net.train()
for i, data in enumerate(train_iter):
coords, feats, labels = data
out = net(ME.SparseTensor(feats.float(), coords))
optimizer.zero_grad()
loss = criterion(out.F.squeeze(), labels.long())
loss.backward()
optimizer.step()
accum_loss += loss.item()
accum_iter += 1
tot_iter += 1
if tot_iter % 10 == 0 or tot_iter == 1:
print(
f'Epoch: {epoch} iter: {tot_iter}, Loss: {accum_loss / accum_iter}'
)
accum_loss, accum_iter = 0, 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=12, type=int)
parser.add_argument('--max_epochs', default=10, type=int)
parser.add_argument('--lr', default=0.1, type=float)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=1e-4)
config = parser.parse_args()
main(config)
| MinkowskiEngine-master | examples/training.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import numpy as np
from collections.abc import Sequence
from typing import Union, List, Tuple
import torch
from MinkowskiCommon import convert_to_int_list, StrideType
from MinkowskiEngineBackend._C import (
GPUMemoryAllocatorType,
MinkowskiAlgorithm,
CoordinateMapKey,
CoordinateMapType,
)
from MinkowskiCoordinateManager import CoordinateManager
from MinkowskiTensor import (
SparseTensorOperationMode,
SparseTensorQuantizationMode,
Tensor,
sparse_tensor_operation_mode,
global_coordinate_manager,
set_global_coordinate_manager,
COORDINATE_MANAGER_DIFFERENT_ERROR,
COORDINATE_KEY_DIFFERENT_ERROR,
)
from MinkowskiSparseTensor import SparseTensor
from sparse_matrix_functions import MinkowskiSPMMFunction, MinkowskiSPMMAverageFunction
from MinkowskiPooling import MinkowskiDirectMaxPoolingFunction
def create_splat_coordinates(coordinates: torch.Tensor) -> torch.Tensor:
r"""Create splat coordinates. splat coordinates could have duplicate coordinates."""
dimension = coordinates.shape[1] - 1
region_offset = [
[
0,
]
* (dimension + 1)
]
for d in reversed(range(1, dimension + 1)):
new_offset = []
for offset in region_offset:
offset = offset.copy() # Do not modify the original
offset[d] = 1
new_offset.append(offset)
region_offset.extend(new_offset)
region_offset = torch.IntTensor(region_offset).to(coordinates.device)
coordinates = torch.floor(coordinates).int().unsqueeze(1) + region_offset.unsqueeze(
0
)
return coordinates.reshape(-1, dimension + 1)
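# A worked example of the offset construction above for dimension D = 2: the
# loop enumerates all 2^D corner offsets over the spatial columns while
# leaving the batch index (column 0) untouched,
#   [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1]
# so each continuous coordinate is splatted onto the 4 surrounding lattice
# points (8 in 3D).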
class TensorField(Tensor):
def __init__(
self,
features: torch.Tensor,
coordinates: torch.Tensor = None,
# optional coordinate related arguments
tensor_stride: StrideType = 1,
coordinate_field_map_key: CoordinateMapKey = None,
coordinate_manager: CoordinateManager = None,
quantization_mode: SparseTensorQuantizationMode = SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
# optional manager related arguments
allocator_type: GPUMemoryAllocatorType = None,
minkowski_algorithm: MinkowskiAlgorithm = None,
requires_grad=None,
device=None,
):
r"""
Args:
:attr:`features` (:attr:`torch.FloatTensor`,
:attr:`torch.DoubleTensor`, :attr:`torch.cuda.FloatTensor`, or
:attr:`torch.cuda.DoubleTensor`): The features of a sparse
tensor.
:attr:`coordinates` (:attr:`torch.IntTensor`): The coordinates
associated to the features. If not provided, :attr:`coordinate_map_key`
must be provided.
:attr:`tensor_stride` (:attr:`int`, :attr:`list`,
:attr:`numpy.array`, or :attr:`tensor.Tensor`): The tensor stride
of the current sparse tensor. By default, it is 1.
:attr:`coordinate_field_map_key`
(:attr:`MinkowskiEngine.CoordinateMapKey`): When the coordinates
are already cached in the MinkowskiEngine, we could reuse the same
coordinate map by simply providing the coordinate map key. In most
case, this process is done automatically. When you provide a
`coordinate_field_map_key`, `coordinates` will be ignored.
:attr:`coordinate_manager`
(:attr:`MinkowskiEngine.CoordinateManager`): The MinkowskiEngine
manages all coordinate maps using the `_C.CoordinateMapManager`. If
not provided, the MinkowskiEngine will create a new computation
graph. In most cases, this process is handled automatically and you
do not need to use this.
:attr:`quantization_mode`
(:attr:`MinkowskiEngine.SparseTensorQuantizationMode`): Defines how
continuous coordinates will be quantized to define a sparse tensor.
Please refer to :attr:`SparseTensorQuantizationMode` for details.
:attr:`allocator_type`
(:attr:`MinkowskiEngine.GPUMemoryAllocatorType`): Defines the GPU
memory allocator type. By default, it uses the c10 allocator.
:attr:`minkowski_algorithm`
(:attr:`MinkowskiEngine.MinkowskiAlgorithm`): Controls the mode the
minkowski engine runs, Use
:attr:`MinkowskiAlgorithm.MEMORY_EFFICIENT` if you want to reduce
the memory footprint. Or use
:attr:`MinkowskiAlgorithm.SPEED_OPTIMIZED` if you want to make it
run faster at the cost of more memory.
:attr:`requires_grad` (:attr:`bool`): Set the requires_grad flag.
:attr:`device` (:attr:`torch.device`): Set the device the sparse
tensor is defined.
"""
# Type checks
assert isinstance(features, torch.Tensor), "Features must be a torch.Tensor"
assert (
features.ndim == 2
), f"The feature should be a matrix, The input feature is an order-{features.ndim} tensor."
assert isinstance(quantization_mode, SparseTensorQuantizationMode)
assert quantization_mode in [
SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
SparseTensorQuantizationMode.UNWEIGHTED_SUM,
SparseTensorQuantizationMode.RANDOM_SUBSAMPLE,
SparseTensorQuantizationMode.MAX_POOL,
], "invalid quantization mode"
self.quantization_mode = quantization_mode
if coordinates is not None:
assert isinstance(coordinates, torch.Tensor)
if coordinate_field_map_key is not None:
assert isinstance(coordinate_field_map_key, CoordinateMapKey)
assert coordinate_manager is not None, "Must provide coordinate_manager if coordinate_field_map_key is provided"
assert coordinates is None, "Must not provide coordinates if coordinate_field_map_key is provided"
if coordinate_manager is not None:
assert isinstance(coordinate_manager, CoordinateManager)
if coordinates is None and (
coordinate_field_map_key is None or coordinate_manager is None
):
raise ValueError(
"Either coordinates or (coordinate_field_map_key, coordinate_manager) pair must be provided."
)
Tensor.__init__(self)
# To device
if device is not None:
features = features.to(device)
if coordinates is not None:
# assertion check for the map key done later
coordinates = coordinates.to(device)
self._D = (
coordinates.size(1) - 1 if coordinates is not None else coordinate_manager.D
)
##########################
# Setup CoordsManager
##########################
if coordinate_manager is None:
# If set to share the coords man, use the global coords man
if (
sparse_tensor_operation_mode()
== SparseTensorOperationMode.SHARE_COORDINATE_MANAGER
):
coordinate_manager = global_coordinate_manager()
if coordinate_manager is None:
coordinate_manager = CoordinateManager(
D=self._D,
coordinate_map_type=CoordinateMapType.CUDA
if coordinates.is_cuda
else CoordinateMapType.CPU,
allocator_type=allocator_type,
minkowski_algorithm=minkowski_algorithm,
)
set_global_coordinate_manager(coordinate_manager)
else:
coordinate_manager = CoordinateManager(
D=coordinates.size(1) - 1,
coordinate_map_type=CoordinateMapType.CUDA
if coordinates.is_cuda
else CoordinateMapType.CPU,
allocator_type=allocator_type,
minkowski_algorithm=minkowski_algorithm,
)
self._manager = coordinate_manager
##########################
# Initialize coords
##########################
# Coordinate Management
if coordinates is not None:
assert (
features.shape[0] == coordinates.shape[0]
), "The number of rows in features and coordinates must match."
assert (
features.is_cuda == coordinates.is_cuda
), "Features and coordinates must have the same backend."
coordinate_field_map_key = self._manager.insert_field(
coordinates.float(), convert_to_int_list(tensor_stride, self._D), ""
)
else:
assert (
coordinate_field_map_key.is_key_set()
), "The coordinate field map key must be valid."
if requires_grad is not None:
features.requires_grad_(requires_grad)
self._F = features
self._C = coordinates
self.coordinate_field_map_key = coordinate_field_map_key
self._batch_rows = None
self._inverse_mapping = {}
self._splat = {}
@property
def coordinate_key(self):
return self.coordinate_field_map_key
@property
def C(self):
r"""The alias of :attr:`coords`."""
return self.coordinates
@property
def coordinates(self):
r"""
The coordinates of the current sparse tensor. The coordinates are
represented as a :math:`N \times (D + 1)` dimensional matrix where
:math:`N` is the number of points in the space and :math:`D` is the
dimension of the space (e.g. 3 for 3D, 4 for 3D + Time). Additional
dimension of the column of the matrix C is for batch indices which is
internally treated as an additional spatial dimension to disassociate
different instances in a batch.
"""
if self._C is None:
self._C = self._get_coordinate_field()
return self._C
@property
def _batchwise_row_indices(self):
if self._batch_rows is None:
_, self._batch_rows = self._manager.origin_field_map(
self.coordinate_field_map_key
)
return self._batch_rows
def _get_coordinate_field(self):
return self._manager.get_coordinate_field(self.coordinate_field_map_key)
def sparse(
self,
tensor_stride: Union[int, Sequence, np.ndarray] = 1,
coordinate_map_key: CoordinateMapKey = None,
quantization_mode: SparseTensorQuantizationMode = None,
):
r"""Converts the current sparse tensor field to a sparse tensor."""
if quantization_mode is None:
quantization_mode = self.quantization_mode
assert (
quantization_mode != SparseTensorQuantizationMode.SPLAT_LINEAR_INTERPOLATION
), "Please use .splat() for splat quantization."
if coordinate_map_key is None:
tensor_stride = convert_to_int_list(tensor_stride, self.D)
coordinate_map_key, (
unique_index,
inverse_mapping,
) = self._manager.field_to_sparse_insert_and_map(
self.coordinate_field_map_key,
tensor_stride,
)
N_rows = len(unique_index)
else:
# sparse index, field index
inverse_mapping, unique_index = self._manager.field_to_sparse_map(
self.coordinate_field_map_key,
coordinate_map_key,
)
N_rows = self._manager.size(coordinate_map_key)
assert N_rows > 0, f"Invalid out coordinate map key. Found {N_row} elements."
if len(inverse_mapping) == 0:
# When the input has the same shape as the output
self._inverse_mapping[coordinate_map_key] = torch.arange(
len(self._F),
dtype=inverse_mapping.dtype,
device=inverse_mapping.device,
)
return SparseTensor(
self._F,
coordinate_map_key=coordinate_map_key,
coordinate_manager=self._manager,
)
# Create features
if quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_SUM:
N = len(self._F)
cols = torch.arange(
N,
dtype=inverse_mapping.dtype,
device=inverse_mapping.device,
)
vals = torch.ones(N, dtype=self._F.dtype, device=self._F.device)
size = torch.Size([N_rows, len(inverse_mapping)])
features = MinkowskiSPMMFunction().apply(
inverse_mapping, cols, vals, size, self._F
)
elif quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
N = len(self._F)
cols = torch.arange(
N,
dtype=inverse_mapping.dtype,
device=inverse_mapping.device,
)
size = torch.Size([N_rows, len(inverse_mapping)])
features = MinkowskiSPMMAverageFunction().apply(
inverse_mapping, cols, size, self._F
)
elif quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
features = self._F[unique_index]
elif quantization_mode == SparseTensorQuantizationMode.MAX_POOL:
N = len(self._F)
in_map = torch.arange(
N,
dtype=inverse_mapping.dtype,
device=inverse_mapping.device,
)
features = MinkowskiDirectMaxPoolingFunction().apply(
in_map, inverse_mapping, self._F, N_rows
)
else:
# No quantization
raise ValueError("Invalid quantization mode")
self._inverse_mapping[coordinate_map_key] = inverse_mapping
return SparseTensor(
features,
coordinate_map_key=coordinate_map_key,
coordinate_manager=self._manager,
)
def splat(self):
r"""
For slice, use Y.slice(X) where X is the tensor field and Y is the
resulting sparse tensor.
"""
splat_coordinates = create_splat_coordinates(self.C)
(coordinate_map_key, _) = self._manager.insert_and_map(splat_coordinates)
N_rows = self._manager.size(coordinate_map_key)
tensor_map, field_map, weights = self._manager.interpolation_map_weight(
coordinate_map_key, self._C
)
# features
N = len(self._F)
assert weights.dtype == self._F.dtype
size = torch.Size([N_rows, N])
# Save the results for slice
self._splat[coordinate_map_key] = (tensor_map, field_map, weights, size)
features = MinkowskiSPMMFunction().apply(
tensor_map, field_map, weights, size, self._F
)
return SparseTensor(
features,
coordinate_map_key=coordinate_map_key,
coordinate_manager=self._manager,
)
def inverse_mapping(self, sparse_tensor_map_key: CoordinateMapKey):
if sparse_tensor_map_key not in self._inverse_mapping:
if not self._manager.exists_field_to_sparse(
self.coordinate_field_map_key, sparse_tensor_map_key
):
sparse_keys = self.coordinate_manager.field_to_sparse_keys(
self.coordinate_field_map_key
)
one_key = None
if len(sparse_keys) > 0:
for key in sparse_keys:
if np.prod(key.get_tensor_stride()) == 1:
one_key = key
else:
one_key = CoordinateMapKey(
[
1,
]
* self.D,
"",
)
if one_key not in self._inverse_mapping:
(
_,
self._inverse_mapping[one_key],
) = self._manager.get_field_to_sparse_map(
self.coordinate_field_map_key, one_key
)
_, stride_map = self.coordinate_manager.stride_map(
one_key, sparse_tensor_map_key
)
field_map = self._inverse_mapping[one_key]
self._inverse_mapping[sparse_tensor_map_key] = stride_map[field_map]
else:
# Extract the mapping
(
_,
self._inverse_mapping[sparse_tensor_map_key],
) = self._manager.get_field_to_sparse_map(
self.coordinate_field_map_key, sparse_tensor_map_key
)
return self._inverse_mapping[sparse_tensor_map_key]
def _is_same_key(self, other):
assert isinstance(other, self.__class__)
assert self._manager == other._manager, COORDINATE_MANAGER_DIFFERENT_ERROR
assert (
self.coordinate_field_map_key == other.coordinate_field_map_key
), COORDINATE_KEY_DIFFERENT_ERROR
def _binary_functor(self, other, binary_fn):
assert isinstance(other, (self.__class__, torch.Tensor))
if isinstance(other, self.__class__):
self._is_same_key(other)
return self.__class__(
binary_fn(self._F, other.F),
coordinate_field_map_key=self.coordinate_field_map_key,
coordinate_manager=self._manager,
)
else: # when it is a torch.Tensor
return self.__class__(
binary_fn(self._F, other),
coordinate_field_map_key=self.coordinate_field_map_key,
coordinate_manager=self._manager,
)
def __repr__(self):
return (
self.__class__.__name__
+ "("
+ os.linesep
+ " coordinates="
+ str(self.C)
+ os.linesep
+ " features="
+ str(self.F)
+ os.linesep
+ " coordinate_field_map_key="
+ str(self.coordinate_field_map_key)
+ os.linesep
+ " coordinate_manager="
+ str(self._manager)
+ " spatial dimension="
+ str(self._D)
+ ")"
)
__slots__ = (
"_C",
"_F",
"_D",
"coordinate_field_map_key",
"_manager",
"quantization_mode",
"_inverse_mapping",
"_batch_rows",
"_splat",
)
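# A minimal usage sketch (illustrative only; `network` is a placeholder for
# any Minkowski network):
#
# >>> tfield = TensorField(features=feats, coordinates=continuous_coords)
# >>> stensor = tfield.sparse() # quantize the field to a SparseTensor
# >>> out = network(stensor)
# >>> field_out = out.slice(tfield) # map voxel features back to the field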
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiTensorField.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
from typing import Tuple
import torch
from torch.autograd import Function
import MinkowskiEngineBackend._C as MEB
EPS = 1e-10
def spmm(
rows: torch.Tensor,
cols: torch.Tensor,
vals: torch.Tensor,
size: torch.Size,
mat: torch.Tensor,
is_sorted: bool = False,
cuda_spmm_alg: int = 1,
) -> torch.Tensor:
assert len(rows) == len(cols), "Invalid length"
assert len(rows) == len(vals), "Invalid length"
assert vals.dtype == mat.dtype, "dtype mismatch"
assert vals.device == mat.device, "device mismatch"
if mat.is_cuda:
assert (
rows.is_cuda and cols.is_cuda and vals.is_cuda
), "All inputs must be on cuda"
rows = rows.int()
cols = cols.int()
result = MEB.coo_spmm_int32(
rows, cols, vals, size[0], size[1], mat, cuda_spmm_alg, is_sorted
)
# WARNING: TODO: not sorting the vals. Should not be used for generic SPMM
# coosort only supports int32
# return MEB.coo_spmm_int64(
# rows, cols, vals, size[0], size[1], mat, cuda_spmm_alg
# )
else:
COO = torch.stack(
(rows, cols),
0,
).long()
torchSparseTensor = None
if vals.dtype == torch.float64:
torchSparseTensor = torch.sparse.DoubleTensor
elif vals.dtype == torch.float32:
torchSparseTensor = torch.sparse.FloatTensor
else:
raise ValueError(f"Unsupported data type: {vals.dtype}")
sp = torchSparseTensor(COO, vals, size)
result = sp.matmul(mat)
return result
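# Semantics of spmm above, written out for clarity: for every nonzero k,
#   out[rows[k], :] += vals[k] * mat[cols[k], :]
# i.e. a (size[0] x size[1]) COO sparse matrix is multiplied by a dense
# (size[1] x C) matrix, producing a dense (size[0] x C) result.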
def spmm_average(
rows: torch.Tensor,
cols: torch.Tensor,
size: torch.Size,
mat: torch.Tensor,
cuda_spmm_alg: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
assert len(rows) == len(cols), "Invalid length"
if mat.is_cuda:
assert rows.is_cuda and cols.is_cuda, "All inputs must be on cuda"
rows = rows.int()
cols = cols.int()
result, COO, vals = MEB.coo_spmm_average_int32(
rows, cols, size[0], size[1], mat, cuda_spmm_alg
)
# WARNING: TODO: not sorting the vals. Should not be used for generic SPMM
# coosort only supports int32
# return MEB.coo_spmm_int64(
# rows, cols, vals, size[0], size[1], mat, cuda_spmm_alg
# )
else:
# fmt: off
rows, sort_ind = torch.sort(rows)
cols = cols[sort_ind]
COO = torch.stack((rows, cols), 0,).long()
# Vals
_, inverse_ind, counts = torch.unique(rows, return_counts=True, return_inverse=True)
vals = (1 / counts[inverse_ind]).to(mat.dtype)
# fmt: on
torchSparseTensor = None
if mat.dtype == torch.float64:
torchSparseTensor = torch.sparse.DoubleTensor
elif mat.dtype == torch.float32:
torchSparseTensor = torch.sparse.FloatTensor
else:
raise ValueError(f"Unsupported data type: {mat.dtype}")
sp = torchSparseTensor(COO, vals, size)
result = sp.matmul(mat)
return result, COO, vals
class MinkowskiSPMMFunction(Function):
@staticmethod
def forward(
ctx,
rows: torch.Tensor,
cols: torch.Tensor,
vals: torch.Tensor,
size: torch.Size,
mat: torch.Tensor,
cuda_spmm_alg: int = 1,
):
ctx.misc_args = size, cuda_spmm_alg
ctx.save_for_backward(rows, cols, vals)
result = spmm(
rows,
cols,
vals,
size,
mat,
is_sorted=False,
cuda_spmm_alg=cuda_spmm_alg,
)
return result
@staticmethod
def backward(ctx, grad: torch.Tensor):
size, cuda_spmm_alg = ctx.misc_args
rows, cols, vals = ctx.saved_tensors
new_size = torch.Size([size[1], size[0]])
grad = spmm(
cols,
rows,
vals,
new_size,
grad,
is_sorted=False,
cuda_spmm_alg=cuda_spmm_alg,
)
return (
None,
None,
None,
None,
grad,
None,
)
class MinkowskiSPMMAverageFunction(Function):
@staticmethod
def forward(
ctx,
rows: torch.Tensor,
cols: torch.Tensor,
size: torch.Size,
mat: torch.Tensor,
cuda_spmm_alg: int = 1,
):
ctx.misc_args = size, cuda_spmm_alg
result, COO, vals = spmm_average(
rows,
cols,
size,
mat,
cuda_spmm_alg=cuda_spmm_alg,
)
ctx.save_for_backward(COO, vals)
return result
@staticmethod
def backward(ctx, grad: torch.Tensor):
size, cuda_spmm_alg = ctx.misc_args
COO, vals = ctx.saved_tensors
new_size = torch.Size([size[1], size[0]])
grad = spmm(
COO[1],
COO[0],
vals,
new_size,
grad,
is_sorted=False,
cuda_spmm_alg=cuda_spmm_alg,
)
return (
None,
None,
None,
grad,
None,
)
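if __name__ == "__main__":
    # A minimal CPU sanity check for spmm (an illustrative sketch, not part
    # of the library API). Sparse 2x3 matrix with A[0,0]=1, A[0,2]=2,
    # A[1,1]=3, multiplied by a dense 3x2 matrix.
    rows = torch.LongTensor([0, 0, 1])
    cols = torch.LongTensor([0, 2, 1])
    vals = torch.FloatTensor([1, 2, 3])
    mat = torch.arange(6, dtype=torch.float32).reshape(3, 2)
    out = spmm(rows, cols, vals, torch.Size([2, 3]), mat)
    # Expected: row 0 = mat[0] + 2 * mat[2] = [8, 11]; row 1 = 3 * mat[1] = [6, 9]
    print(out)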
| MinkowskiEngine-master | MinkowskiEngine/sparse_matrix_functions.py |
import sys
import os
import platform
import subprocess
def parse_nvidia_smi():
sp = subprocess.Popen(
["nvidia-smi", "-q"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
out_dict = dict()
for item in sp.communicate()[0].decode("utf-8").split("\n"):
if item.count(":") == 1:
key, val = [i.strip() for i in item.split(":")]
out_dict[key] = val
return out_dict
def print_diagnostics():
print("==========System==========")
print(platform.platform())
os.system("cat /etc/lsb-release")
print(sys.version)
print("==========Pytorch==========")
try:
import torch
print(torch.__version__)
print(f"torch.cuda.is_available(): {torch.cuda.is_available()}")
except ImportError:
print("torch not installed")
print("==========NVIDIA-SMI==========")
os.system("which nvidia-smi")
for k, v in parse_nvidia_smi().items():
if "version" in k.lower():
print(k, v)
print("==========NVCC==========")
os.system("which nvcc")
os.system("nvcc --version")
print("==========CC==========")
CC = "c++"
if "CC" in os.environ or "CXX" in os.environ:
# distutils only checks CC not CXX
if "CXX" in os.environ:
os.environ["CC"] = os.environ["CXX"]
CC = os.environ["CXX"]
else:
CC = os.environ["CC"]
print(f"CC={CC}")
os.system(f"which {CC}")
os.system(f"{CC} --version")
print("==========MinkowskiEngine==========")
try:
import MinkowskiEngine as ME
print(ME.__version__)
print(f"MinkowskiEngine compiled with CUDA Support: {ME.is_cuda_available()}")
print(f"NVCC version MinkowskiEngine is compiled: {ME.cuda_version()}")
print(f"CUDART version MinkowskiEngine is compiled: {ME.cudart_version()}")
except ImportError:
print("MinkowskiEngine not installed")
if __name__ == "__main__":
print_diagnostics()
| MinkowskiEngine-master | MinkowskiEngine/diagnostics.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import torch
import warnings
from MinkowskiCommon import convert_to_int_list, StrideType
from MinkowskiEngineBackend._C import (
CoordinateMapKey,
CoordinateMapType,
GPUMemoryAllocatorType,
MinkowskiAlgorithm,
)
from MinkowskiTensor import (
SparseTensorQuantizationMode,
SparseTensorOperationMode,
Tensor,
sparse_tensor_operation_mode,
global_coordinate_manager,
set_global_coordinate_manager,
)
from MinkowskiCoordinateManager import CoordinateManager
from sparse_matrix_functions import MinkowskiSPMMFunction, MinkowskiSPMMAverageFunction
class SparseTensor(Tensor):
r"""A sparse tensor class. Can be accessed via
:attr:`MinkowskiEngine.SparseTensor`.
The :attr:`SparseTensor` class is the basic tensor in MinkowskiEngine. For
the definition of a sparse tensor, please visit `the terminology page
<https://nvidia.github.io/MinkowskiEngine/terminology.html#sparse-tensor>`_.
We use the COOrdinate (COO) format to save a sparse tensor `[1]
<http://groups.csail.mit.edu/commit/papers/2016/parker-thesis.pdf>`_. This
representation is simply a concatenation of coordinates in a matrix
:math:`C` and associated features :math:`F`.
.. math::
\mathbf{C} = \begin{bmatrix}
b_1 & x_1^1 & x_1^2 & \cdots & x_1^D \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
b_N & x_N^1 & x_N^2 & \cdots & x_N^D
\end{bmatrix}, \; \mathbf{F} = \begin{bmatrix}
\mathbf{f}_1^T\\
\vdots\\
\mathbf{f}_N^T
\end{bmatrix}
where :math:`\mathbf{x}_i \in \mathcal{Z}^D` is a :math:`D`-dimensional
coordinate and :math:`b_i \in \mathcal{Z}_+` denotes the corresponding
batch index. :math:`N` is the number of non-zero elements in the sparse
tensor, each with the coordinate :math:`(b_i, x_i^1, x_i^1, \cdots,
x_i^D)`, and the associated feature :math:`\mathbf{f}_i`. Internally, we
handle the batch index as an additional spatial dimension.
Example::
>>> coords, feats = ME.utils.sparse_collate([coords_batch0, coords_batch1], [feats_batch0, feats_batch1])
>>> A = ME.SparseTensor(features=feats, coordinates=coords)
>>> B = ME.SparseTensor(features=feats, coordinate_map_key=A.coordinate_map_key, coordinate_manager=A.coordinate_manager)
>>> C = ME.SparseTensor(features=feats, coordinates=coords, quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)
>>> D = ME.SparseTensor(features=feats, coordinates=coords, quantization_mode=ME.SparseTensorQuantizationMode.RANDOM_SUBSAMPLE)
>>> E = ME.SparseTensor(features=feats, coordinates=coords, tensor_stride=2)
.. warning::
To use the GPU-backend for coordinate management, the
:attr:`coordinates` must be a torch tensor on GPU. Applying `to(device)`
after :attr:`MinkowskiEngine.SparseTensor` initialization with a CPU
`coordinates` will waste time and computation on creating an unnecessary
CPU CoordinateMap since the GPU CoordinateMap will be created from
scratch as well.
.. warning::
Before MinkowskiEngine version 0.4, we put the batch indices on the last
column. Thus, direct manipulation of coordinates will be incompatible
with the latest versions. Instead, please use
:attr:`MinkowskiEngine.utils.batched_coordinates` or
:attr:`MinkowskiEngine.utils.sparse_collate` to create batched
coordinates.
Also, to access coordinates or features batch-wise, use the functions
:attr:`coordinates_at(batch_index : int)`, :attr:`features_at(batch_index : int)` of
a sparse tensor. Or to access all batch-wise coordinates and features,
`decomposed_coordinates`, `decomposed_features`,
`decomposed_coordinates_and_features` of a sparse tensor.
Example::
>>> coords, feats = ME.utils.sparse_collate([coords_batch0, coords_batch1], [feats_batch0, feats_batch1])
>>> A = ME.SparseTensor(features=feats, coordinates=coords)
>>> coords_batch0 = A.coordinates_at(batch_index=0)
>>> feats_batch1 = A.features_at(batch_index=1)
>>> list_of_coords, list_of_features = A.decomposed_coordinates_and_features
"""
def __init__(
self,
features: torch.Tensor,
coordinates: torch.Tensor = None,
# optional coordinate related arguments
tensor_stride: StrideType = 1,
coordinate_map_key: CoordinateMapKey = None,
coordinate_manager: CoordinateManager = None,
quantization_mode: SparseTensorQuantizationMode = SparseTensorQuantizationMode.RANDOM_SUBSAMPLE,
# optional manager related arguments
allocator_type: GPUMemoryAllocatorType = None,
minkowski_algorithm: MinkowskiAlgorithm = None,
requires_grad=None,
device=None,
):
r"""
Args:
:attr:`features` (:attr:`torch.FloatTensor`,
:attr:`torch.DoubleTensor`, :attr:`torch.cuda.FloatTensor`, or
:attr:`torch.cuda.DoubleTensor`): The features of a sparse
tensor.
:attr:`coordinates` (:attr:`torch.IntTensor`): The coordinates
associated to the features. If not provided, :attr:`coordinate_map_key`
must be provided.
:attr:`tensor_stride` (:attr:`int`, :attr:`list`,
:attr:`numpy.array`, or :attr:`tensor.Tensor`): The tensor stride
of the current sparse tensor. By default, it is 1.
:attr:`coordinate_map_key`
(:attr:`MinkowskiEngine.CoordinateMapKey`): When the coordinates
are already cached in the MinkowskiEngine, we could reuse the same
coordinate map by simply providing the coordinate map key. In most
case, this process is done automatically. When you provide a
`coordinate_map_key`, `coordinates` will be ignored.
:attr:`coordinate_manager`
(:attr:`MinkowskiEngine.CoordinateManager`): The MinkowskiEngine
manages all coordinate maps using the `_C.CoordinateMapManager`. If
not provided, the MinkowskiEngine will create a new computation
graph. In most cases, this process is handled automatically and you
do not need to use this.
:attr:`quantization_mode`
(:attr:`MinkowskiEngine.SparseTensorQuantizationMode`): Defines how
continuous coordinates will be quantized to define a sparse tensor.
Please refer to :attr:`SparseTensorQuantizationMode` for details.
:attr:`allocator_type`
(:attr:`MinkowskiEngine.GPUMemoryAllocatorType`): Defines the GPU
memory allocator type. By default, it uses the c10 allocator.
:attr:`minkowski_algorithm`
(:attr:`MinkowskiEngine.MinkowskiAlgorithm`): Controls the mode the
minkowski engine runs, Use
:attr:`MinkowskiAlgorithm.MEMORY_EFFICIENT` if you want to reduce
the memory footprint. Or use
:attr:`MinkowskiAlgorithm.SPEED_OPTIMIZED` if you want to make it
run faster at the cost of more memory.
:attr:`requires_grad` (:attr:`bool`): Set the requires_grad flag.
:attr:`device` (:attr:`torch.device`): Set the device the sparse
tensor is defined.
"""
# Type checks
assert isinstance(features, torch.Tensor), "Features must be a torch.Tensor"
assert (
features.ndim == 2
), f"The feature should be a matrix, The input feature is an order-{features.ndim} tensor."
assert isinstance(quantization_mode, SparseTensorQuantizationMode)
self.quantization_mode = quantization_mode
if coordinates is not None:
assert isinstance(coordinates, torch.Tensor)
if coordinate_map_key is not None:
assert isinstance(coordinate_map_key, CoordinateMapKey)
assert (
coordinate_manager is not None
), "Must provide coordinate_manager if coordinate_map_key is provided"
assert (
coordinates is None
), "Must not provide coordinates if coordinate_map_key is provided"
if coordinate_manager is not None:
assert isinstance(coordinate_manager, CoordinateManager)
if coordinates is None and (
coordinate_map_key is None or coordinate_manager is None
):
raise ValueError(
"Either coordinates or (coordinate_map_key, coordinate_manager) pair must be provided."
)
Tensor.__init__(self)
# To device
if device is not None:
features = features.to(device)
if coordinates is not None:
# assertion check for the map key done later
coordinates = coordinates.to(device)
self._D = (
coordinates.size(1) - 1 if coordinates is not None else coordinate_manager.D
)
##########################
# Setup CoordsManager
##########################
if coordinate_manager is None:
# If set to share the coords man, use the global coords man
if (
sparse_tensor_operation_mode()
== SparseTensorOperationMode.SHARE_COORDINATE_MANAGER
):
coordinate_manager = global_coordinate_manager()
if coordinate_manager is None:
coordinate_manager = CoordinateManager(
D=self._D,
coordinate_map_type=CoordinateMapType.CUDA
if coordinates.is_cuda
else CoordinateMapType.CPU,
allocator_type=allocator_type,
minkowski_algorithm=minkowski_algorithm,
)
set_global_coordinate_manager(coordinate_manager)
else:
coordinate_manager = CoordinateManager(
D=coordinates.size(1) - 1,
coordinate_map_type=CoordinateMapType.CUDA
if coordinates.is_cuda
else CoordinateMapType.CPU,
allocator_type=allocator_type,
minkowski_algorithm=minkowski_algorithm,
)
self._manager = coordinate_manager
##########################
# Initialize coords
##########################
if coordinates is not None:
assert (
features.shape[0] == coordinates.shape[0]
), "The number of rows in features and coordinates must match."
assert (
features.is_cuda == coordinates.is_cuda
), "Features and coordinates must have the same backend."
coordinate_map_key = CoordinateMapKey(
convert_to_int_list(tensor_stride, self._D), ""
)
coordinates, features, coordinate_map_key = self.initialize_coordinates(
coordinates, features, coordinate_map_key
)
else: # coordinate_map_key is not None:
assert coordinate_map_key.is_key_set(), "The coordinate key must be valid."
if requires_grad is not None:
features.requires_grad_(requires_grad)
self._F = features
self._C = coordinates
self.coordinate_map_key = coordinate_map_key
self._batch_rows = None
@property
def coordinate_key(self):
return self.coordinate_map_key
def initialize_coordinates(self, coordinates, features, coordinate_map_key):
if not isinstance(coordinates, (torch.IntTensor, torch.cuda.IntTensor)):
warnings.warn(
"coordinates implicitly converted to torch.IntTensor. "
+ "To remove this warning, use `.int()` to convert the "
+ "coords into an torch.IntTensor"
)
coordinates = torch.floor(coordinates).int()
(
coordinate_map_key,
(unique_index, inverse_mapping),
) = self._manager.insert_and_map(coordinates, *coordinate_map_key.get_key())
self.unique_index = unique_index.long()
coordinates = coordinates[self.unique_index]
if len(inverse_mapping) == 0:
# When the input has the same shape as the output
self.inverse_mapping = torch.arange(
len(features),
dtype=inverse_mapping.dtype,
device=inverse_mapping.device,
)
return coordinates, features, coordinate_map_key
self.inverse_mapping = inverse_mapping
if self.quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_SUM:
spmm = MinkowskiSPMMFunction()
N = len(features)
cols = torch.arange(
N,
dtype=self.inverse_mapping.dtype,
device=self.inverse_mapping.device,
)
vals = torch.ones(N, dtype=features.dtype, device=features.device)
size = torch.Size([len(self.unique_index), len(self.inverse_mapping)])
features = spmm.apply(self.inverse_mapping, cols, vals, size, features)
elif self.quantization_mode == SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE:
spmm_avg = MinkowskiSPMMAverageFunction()
N = len(features)
cols = torch.arange(
N,
dtype=self.inverse_mapping.dtype,
device=self.inverse_mapping.device,
)
size = torch.Size([len(self.unique_index), len(self.inverse_mapping)])
features = spmm_avg.apply(self.inverse_mapping, cols, size, features)
elif self.quantization_mode == SparseTensorQuantizationMode.RANDOM_SUBSAMPLE:
features = features[self.unique_index]
else:
# No quantization
pass
return coordinates, features, coordinate_map_key
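# A small worked example of the quantization above (illustrative only): with
# coordinates [[0, 1], [0, 1], [0, 2]] and UNWEIGHTED_AVERAGE, rows 0 and 1
# map to the same voxel, so the SPMM averages their two feature rows; with
# RANDOM_SUBSAMPLE one of the duplicate rows is picked via unique_index
# instead.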
# Conversion functions
def sparse(self, min_coords=None, max_coords=None, contract_coords=True):
r"""Convert the :attr:`MinkowskiEngine.SparseTensor` to a torch sparse
tensor.
Args:
:attr:`min_coords` (torch.IntTensor, optional): The min
coordinates of the output sparse tensor. Must be divisible by the
current :attr:`tensor_stride`.
:attr:`max_coords` (torch.IntTensor, optional): The max coordinates
of the output sparse tensor (inclusive). Must be divisible by the
current :attr:`tensor_stride`.
:attr:`contract_coords` (bool, optional): Given True, the output
coordinates will be divided by the tensor stride to make features
contiguous.
Returns:
:attr:`sparse_tensor` (torch.sparse.Tensor): the torch sparse tensor
representation of the self in `[Batch Dim, Spatial Dims..., Feature
Dim]`. The coordinate of each feature can be accessed via
`min_coord + tensor_stride * [the coordinate of the dense tensor]`.
:attr:`min_coords` (torch.IntTensor): the D-dimensional vector
defining the minimum coordinate of the output sparse tensor. If
:attr:`contract_coords` is True, the :attr:`min_coords` will also
be contracted.
:attr:`tensor_stride` (torch.IntTensor): the D-dimensional vector
defining the stride between tensor elements.
"""
if min_coords is not None:
assert isinstance(min_coords, torch.IntTensor)
assert min_coords.numel() == self._D
if max_coords is not None:
assert isinstance(max_coords, torch.IntTensor)
            assert max_coords.numel() == self._D
def torch_sparse_Tensor(coords, feats, size=None):
if size is None:
if feats.dtype == torch.float64:
return torch.sparse.DoubleTensor(coords, feats)
elif feats.dtype == torch.float32:
return torch.sparse.FloatTensor(coords, feats)
else:
raise ValueError("Feature type not supported.")
else:
if feats.dtype == torch.float64:
return torch.sparse.DoubleTensor(coords, feats, size)
elif feats.dtype == torch.float32:
return torch.sparse.FloatTensor(coords, feats, size)
else:
raise ValueError("Feature type not supported.")
# Use int tensor for all operations
tensor_stride = torch.IntTensor(self.tensor_stride)
# New coordinates
coords = self.C
coords, batch_indices = coords[:, 1:], coords[:, 0]
if min_coords is None:
min_coords, _ = coords.min(0, keepdim=True)
elif min_coords.ndim == 1:
min_coords = min_coords.unsqueeze(0)
assert (
min_coords % tensor_stride
).sum() == 0, "The minimum coordinates must be divisible by the tensor stride."
if max_coords is not None:
if max_coords.ndim == 1:
max_coords = max_coords.unsqueeze(0)
assert (
max_coords % tensor_stride
).sum() == 0, (
"The maximum coordinates must be divisible by the tensor stride."
)
coords -= min_coords
if coords.ndim == 1:
coords = coords.unsqueeze(1)
if batch_indices.ndim == 1:
batch_indices = batch_indices.unsqueeze(1)
# return the contracted tensor
if contract_coords:
coords = coords // tensor_stride
if max_coords is not None:
max_coords = max_coords // tensor_stride
min_coords = min_coords // tensor_stride
new_coords = torch.cat((batch_indices, coords), dim=1).long()
size = None
if max_coords is not None:
size = max_coords - min_coords + 1 # inclusive
# Squeeze to make the size one-dimensional
size = size.squeeze()
max_batch = max(self._manager.get_batch_indices())
size = torch.Size([max_batch + 1, *size, self.F.size(1)])
sparse_tensor = torch_sparse_Tensor(
new_coords.t().to(self.F.device), self.F, size
)
tensor_stride = torch.IntTensor(self.tensor_stride)
return sparse_tensor, min_coords, tensor_stride
def dense(self, shape=None, min_coordinate=None, contract_stride=True):
r"""Convert the :attr:`MinkowskiEngine.SparseTensor` to a torch dense
tensor.
Args:
:attr:`shape` (torch.Size, optional): The size of the output tensor.
:attr:`min_coordinate` (torch.IntTensor, optional): The min
coordinates of the output sparse tensor. Must be divisible by the
current :attr:`tensor_stride`. If 0 is given, it will use the origin for the min coordinate.
:attr:`contract_stride` (bool, optional): The output coordinates
will be divided by the tensor stride to make features spatially
contiguous. True by default.
Returns:
:attr:`tensor` (torch.Tensor): the torch tensor with size `[Batch
Dim, Feature Dim, Spatial Dim..., Spatial Dim]`. The coordinate of
each feature can be accessed via `min_coordinate + tensor_stride *
[the coordinate of the dense tensor]`.
:attr:`min_coordinate` (torch.IntTensor): the D-dimensional vector
defining the minimum coordinate of the output tensor.
:attr:`tensor_stride` (torch.IntTensor): the D-dimensional vector
defining the stride between tensor elements.
"""
if min_coordinate is not None:
assert isinstance(min_coordinate, torch.IntTensor)
assert min_coordinate.numel() == self._D
if shape is not None:
assert isinstance(shape, torch.Size)
assert len(shape) == self._D + 2 # batch and channel
if shape[1] != self._F.size(1):
shape = torch.Size([shape[0], self._F.size(1), *[s for s in shape[2:]]])
# Exception handling for empty tensor
if self.__len__() == 0:
assert shape is not None, "shape is required to densify an empty tensor"
return (
torch.zeros(shape, dtype=self.dtype, device=self.device),
torch.zeros(self._D, dtype=torch.int32, device=self.device),
self.tensor_stride,
)
# Use int tensor for all operations
tensor_stride = torch.IntTensor(self.tensor_stride).to(self.device)
# New coordinates
batch_indices = self.C[:, 0]
if min_coordinate is None:
min_coordinate, _ = self.C.min(0, keepdim=True)
min_coordinate = min_coordinate[:, 1:]
if not torch.all(min_coordinate >= 0):
raise ValueError(
f"Coordinate has a negative value: {min_coordinate}. Please provide min_coordinate argument"
)
coords = self.C[:, 1:]
elif isinstance(min_coordinate, int) and min_coordinate == 0:
coords = self.C[:, 1:]
else:
min_coordinate = min_coordinate.to(self.device)
if min_coordinate.ndim == 1:
min_coordinate = min_coordinate.unsqueeze(0)
coords = self.C[:, 1:] - min_coordinate
assert (
min_coordinate % tensor_stride
).sum() == 0, "The minimum coordinates must be divisible by the tensor stride."
if coords.ndim == 1:
coords = coords.unsqueeze(1)
# return the contracted tensor
if contract_stride:
coords = coords // tensor_stride
nchannels = self.F.size(1)
if shape is None:
size = coords.max(0)[0] + 1
shape = torch.Size(
[batch_indices.max() + 1, nchannels, *size.cpu().numpy()]
)
dense_F = torch.zeros(shape, dtype=self.dtype, device=self.device)
tcoords = coords.t().long()
batch_indices = batch_indices.long()
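        # Assign features via an advanced-indexing statement built dynamically
        # so the same code works for any spatial dimension D; for D = 2 this
        # executes `dense_F[batch_indices, :, tcoords[0], tcoords[1]] = self.F`.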
exec(
"dense_F[batch_indices, :, "
+ ", ".join([f"tcoords[{i}]" for i in range(len(tcoords))])
+ "] = self.F"
)
tensor_stride = torch.IntTensor(self.tensor_stride)
return dense_F, min_coordinate, tensor_stride
def interpolate(self, X):
from MinkowskiTensorField import TensorField
assert isinstance(X, TensorField)
if self.coordinate_map_key in X._splat:
tensor_map, field_map, weights, size = X._splat[self.coordinate_map_key]
size = torch.Size([size[1], size[0]]) # transpose
features = MinkowskiSPMMFunction().apply(
field_map, tensor_map, weights, size, self._F
)
else:
features = self.features_at_coordinates(X.C)
return TensorField(
features=features,
coordinate_field_map_key=X.coordinate_field_map_key,
coordinate_manager=X.coordinate_manager,
)
def slice(self, X):
r"""
Args:
:attr:`X` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor
that discretized the original input.
Returns:
:attr:`tensor_field` (:attr:`MinkowskiEngine.TensorField`): the
resulting tensor field contains features on the continuous
coordinates that generated the input X.
Example::
>>> # coords, feats from a data loader
>>> print(len(coords)) # 227742
>>> tfield = ME.TensorField(coordinates=coords, features=feats, quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)
>>> print(len(tfield)) # 227742
>>> sinput = tfield.sparse() # 161890 quantization results in fewer voxels
>>> soutput = MinkUNet(sinput)
>>> print(len(soutput)) # 161890 Output with the same resolution
>>> ofield = soutput.slice(tfield)
>>> assert isinstance(ofield, ME.TensorField)
>>> len(ofield) == len(coords) # recovers the original ordering and length
>>> assert isinstance(ofield.F, torch.Tensor) # .F returns the features
"""
# Currently only supports unweighted slice.
assert X.quantization_mode in [
SparseTensorQuantizationMode.RANDOM_SUBSAMPLE,
SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
], "slice only available for sparse tensors with quantization RANDOM_SUBSAMPLE or UNWEIGHTED_AVERAGE"
from MinkowskiTensorField import TensorField
if isinstance(X, TensorField):
return TensorField(
self.F[X.inverse_mapping(self.coordinate_map_key).long()],
coordinate_field_map_key=X.coordinate_field_map_key,
coordinate_manager=X.coordinate_manager,
quantization_mode=X.quantization_mode,
)
elif isinstance(X, SparseTensor):
inv_map = X.inverse_mapping
assert (
X.coordinate_map_key == self.coordinate_map_key
), "Slice can only be applied on the same coordinates (coordinate_map_key)"
return TensorField(
self.F[inv_map],
coordinates=self.C[inv_map],
coordinate_manager=self.coordinate_manager,
quantization_mode=self.quantization_mode,
)
else:
raise ValueError(
"Invalid input. The input must be an instance of TensorField or SparseTensor."
)
def cat_slice(self, X):
r"""
Args:
:attr:`X` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor
that discretized the original input.
Returns:
:attr:`tensor_field` (:attr:`MinkowskiEngine.TensorField`): the
resulting tensor field contains the concatenation of features on the
original continuous coordinates that generated the input X and the
self.
Example::
>>> # coords, feats from a data loader
>>> print(len(coords)) # 227742
>>> sinput = ME.SparseTensor(coordinates=coords, features=feats, quantization_mode=SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)
>>> print(len(sinput)) # 161890 quantization results in fewer voxels
>>> soutput = network(sinput)
>>> print(len(soutput)) # 161890 Output with the same resolution
>>> ofield = soutput.cat_slice(sinput)
>>> assert soutput.F.size(1) + sinput.F.size(1) == ofield.F.size(1) # concatenation of features
"""
# Currently only supports unweighted slice.
assert X.quantization_mode in [
SparseTensorQuantizationMode.RANDOM_SUBSAMPLE,
SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE,
], "slice only available for sparse tensors with quantization RANDOM_SUBSAMPLE or UNWEIGHTED_AVERAGE"
from MinkowskiTensorField import TensorField
inv_map = X.inverse_mapping(self.coordinate_map_key)
features = torch.cat((self.F[inv_map], X.F), dim=1)
if isinstance(X, TensorField):
return TensorField(
features,
coordinate_field_map_key=X.coordinate_field_map_key,
coordinate_manager=X.coordinate_manager,
quantization_mode=X.quantization_mode,
)
elif isinstance(X, SparseTensor):
assert (
X.coordinate_map_key == self.coordinate_map_key
), "Slice can only be applied on the same coordinates (coordinate_map_key)"
return TensorField(
features,
coordinates=self.C[inv_map],
coordinate_manager=self.coordinate_manager,
quantization_mode=self.quantization_mode,
)
else:
raise ValueError(
"Invalid input. The input must be an instance of TensorField or SparseTensor."
)
def features_at_coordinates(self, query_coordinates: torch.Tensor):
r"""Extract features at the specified continuous coordinate matrix.
Args:
:attr:`query_coordinates` (:attr:`torch.FloatTensor`): a coordinate
matrix of size :math:`N \times (D + 1)` where :math:`D` is the size
of the spatial dimension.
Returns:
:attr:`queried_features` (:attr:`torch.Tensor`): a feature matrix of
size :math:`N \times D_F` where :math:`D_F` is the number of
channels in the feature. For coordinates not present in the current
sparse tensor, corresponding feature rows will be zeros.
"""
from MinkowskiInterpolation import MinkowskiInterpolationFunction
assert (
self.dtype == query_coordinates.dtype
), "Invalid query_coordinates dtype. use {self.dtype}"
assert (
query_coordinates.device == self.device
), "query coordinates device ({query_coordinates.device}) does not match the sparse tensor device ({self.device})."
return MinkowskiInterpolationFunction().apply(
self._F,
query_coordinates,
self.coordinate_map_key,
self.coordinate_manager,
)[0]
def __repr__(self):
return (
self.__class__.__name__
+ "("
+ os.linesep
+ " coordinates="
+ str(self.C)
+ os.linesep
+ " features="
+ str(self.F)
+ os.linesep
+ " coordinate_map_key="
+ str(self.coordinate_map_key)
+ os.linesep
+ " coordinate_manager="
+ str(self._manager)
+ " spatial dimension="
+ str(self._D)
+ ")"
)
__slots__ = (
"_C",
"_F",
"_D",
"coordinate_map_key",
"_manager",
"unique_index",
"inverse_mapping",
"quantization_mode",
"_batch_rows",
)
def _get_coordinate_map_key(
input: SparseTensor,
coordinates: torch.Tensor = None,
tensor_stride: StrideType = 1,
expand_coordinates: bool = False,
):
r"""Returns the coordinates map key."""
if coordinates is not None and not expand_coordinates:
assert isinstance(coordinates, (CoordinateMapKey, torch.Tensor, SparseTensor))
if isinstance(coordinates, torch.Tensor):
assert coordinates.ndim == 2
coordinate_map_key = CoordinateMapKey(
convert_to_int_list(tensor_stride, coordinates.size(1) - 1), ""
)
(
coordinate_map_key,
(unique_index, inverse_mapping),
) = input._manager.insert_and_map(
coordinates, *coordinate_map_key.get_key()
)
elif isinstance(coordinates, SparseTensor):
coordinate_map_key = coordinates.coordinate_map_key
else: # CoordinateMapKey type due to the previous assertion
coordinate_map_key = coordinates
else: # coordinates is None
coordinate_map_key = CoordinateMapKey(
input.coordinate_map_key.get_coordinate_size()
)
return coordinate_map_key
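

# A minimal usage sketch (illustrative, not part of the library API): build a
# SparseTensor and densify it. The coordinates and features below are made up.
if __name__ == "__main__":
    import torch
    import MinkowskiEngine as ME

    # N x (D + 1) integer coordinates; the first column is the batch index.
    coordinates = torch.IntTensor([[0, 0, 0], [0, 1, 1], [0, 2, 1]])
    features = torch.rand(3, 4)
    stensor = ME.SparseTensor(features, coordinates=coordinates)

    # dense() returns the dense tensor, the min coordinate, and the tensor stride.
    dense_tensor, min_coordinate, tensor_stride = stensor.dense()
    print(dense_tensor.shape)  # torch.Size([1, 4, 3, 2])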
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiSparseTensor.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
from abc import ABC, abstractmethod
import torch.nn as nn
from MinkowskiSparseTensor import SparseTensor
class MinkowskiNetwork(nn.Module, ABC):
"""
MinkowskiNetwork: an abstract class for sparse convnets.
    Note: All modules that use the same coordinates must share the same coordinate manager.
"""
def __init__(self, D):
super(MinkowskiNetwork, self).__init__()
self.D = D
@abstractmethod
def forward(self, x):
pass
def init(self, x):
"""
        Initialize coordinates if they do not exist
"""
nrows = self.get_nrows(1)
if nrows < 0:
if isinstance(x, SparseTensor):
self.initialize_coords(x.coords_man)
else:
raise ValueError('Initialize input coordinates')
elif nrows != x.F.size(0):
raise ValueError('Input size does not match the coordinate size')
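

# A minimal sketch of subclassing MinkowskiNetwork (illustrative only; the
# layers come from MinkowskiEngine's public API, the architecture is made up).
if __name__ == "__main__":
    import torch
    import MinkowskiEngine as ME

    class ExampleNet(MinkowskiNetwork):
        def __init__(self, in_channels, out_channels, D=3):
            MinkowskiNetwork.__init__(self, D)
            self.conv = ME.MinkowskiConvolution(
                in_channels, out_channels, kernel_size=3, dimension=D
            )
            self.relu = ME.MinkowskiReLU()

        def forward(self, x):
            return self.relu(self.conv(x))

    coords = torch.IntTensor([[0, 0, 0, 0], [0, 1, 0, 2]])
    feats = torch.rand(2, 3)
    net = ExampleNet(in_channels=3, out_channels=8)
    out = net(ME.SparseTensor(feats, coordinates=coords))
    print(out.F.shape)  # torch.Size([2, 8])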
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiNetwork.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
from typing import Union
import torch
from torch.autograd import Function
from MinkowskiEngineBackend._C import CoordinateMapKey
from MinkowskiSparseTensor import SparseTensor
from MinkowskiCoordinateManager import CoordinateManager
from MinkowskiCommon import (
MinkowskiModuleBase,
get_minkowski_function,
)
class MinkowskiInterpolationFunction(Function):
@staticmethod
def forward(
ctx,
input_features: torch.Tensor,
tfield: torch.Tensor,
in_coordinate_map_key: CoordinateMapKey,
coordinate_manager: CoordinateManager = None,
):
input_features = input_features.contiguous()
# in_map, out_map, weights = coordinate_manager.interpolation_map_weight(
# in_coordinate_map_key, tfield)
fw_fn = get_minkowski_function("InterpolationForward", input_features)
out_feat, in_map, out_map, weights = fw_fn(
input_features,
tfield,
in_coordinate_map_key,
coordinate_manager._manager,
)
ctx.save_for_backward(in_map, out_map, weights)
ctx.inputs = (
in_coordinate_map_key,
coordinate_manager,
)
return out_feat, in_map, out_map, weights
@staticmethod
def backward(
ctx, grad_out_feat=None, grad_in_map=None, grad_out_map=None, grad_weights=None
):
grad_out_feat = grad_out_feat.contiguous()
bw_fn = get_minkowski_function("InterpolationBackward", grad_out_feat)
(
in_coordinate_map_key,
coordinate_manager,
) = ctx.inputs
in_map, out_map, weights = ctx.saved_tensors
grad_in_feat = bw_fn(
grad_out_feat,
in_map,
out_map,
weights,
in_coordinate_map_key,
coordinate_manager._manager,
)
return grad_in_feat, None, None, None
class MinkowskiInterpolation(MinkowskiModuleBase):
r"""Sample linearly interpolated features at the provided points."""
def __init__(self, return_kernel_map=False, return_weights=False):
r"""Sample linearly interpolated features at the specified coordinates.
Args:
:attr:`return_kernel_map` (bool): In addition to the sampled
features, the layer returns the kernel map as a pair of input row
indices and output row indices. False by default.
:attr:`return_weights` (bool): When True, return the linear
interpolation weights. False by default.
"""
MinkowskiModuleBase.__init__(self)
self.return_kernel_map = return_kernel_map
self.return_weights = return_weights
self.interp = MinkowskiInterpolationFunction()
def forward(
self,
input: SparseTensor,
tfield: torch.Tensor,
):
# Get a new coordinate map key or extract one from the coordinates
out_feat, in_map, out_map, weights = self.interp.apply(
input.F,
tfield,
input.coordinate_map_key,
input._manager,
)
return_args = [out_feat]
if self.return_kernel_map:
return_args.append((in_map, out_map))
if self.return_weights:
return_args.append(weights)
if len(return_args) > 1:
return tuple(return_args)
else:
return out_feat
def __repr__(self):
return self.__class__.__name__ + "()"
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiInterpolation.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
from typing import Union
import torch
from torch.nn import Module
from torch.autograd import Function
from MinkowskiEngineBackend._C import CoordinateMapKey, RegionType, BroadcastMode
from MinkowskiSparseTensor import SparseTensor, _get_coordinate_map_key
from MinkowskiCoordinateManager import CoordinateManager
from MinkowskiCommon import (
MinkowskiModuleBase,
get_minkowski_function,
)
class MinkowskiBroadcastFunction(Function):
@staticmethod
def forward(
ctx,
input_features: torch.Tensor,
input_features_global: torch.Tensor,
operation_type: BroadcastMode,
in_coords_key: CoordinateMapKey,
glob_coords_key: CoordinateMapKey,
coords_manager: CoordinateManager,
):
assert isinstance(operation_type, BroadcastMode)
ctx.saved_vars = (
input_features,
input_features_global,
operation_type,
in_coords_key,
glob_coords_key,
coords_manager,
)
fw_fn = get_minkowski_function("BroadcastForward", input_features)
return fw_fn(
input_features,
input_features_global,
operation_type,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
@staticmethod
def backward(ctx, grad_out_feat):
if not grad_out_feat.is_contiguous():
grad_out_feat = grad_out_feat.contiguous()
(
input_features,
input_features_global,
operation_type,
in_coords_key,
glob_coords_key,
coords_manager,
) = ctx.saved_vars
bw_fn = get_minkowski_function("BroadcastBackward", grad_out_feat)
grad_in_feat, grad_in_feat_glob = bw_fn(
input_features,
input_features_global,
grad_out_feat,
operation_type,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
return grad_in_feat, grad_in_feat_glob, None, None, None, None
class MinkowskiBroadcastBase(MinkowskiModuleBase):
def __init__(self, operation_type):
MinkowskiModuleBase.__init__(self)
assert isinstance(operation_type, BroadcastMode)
self.operation_type = operation_type
self.broadcast = MinkowskiBroadcastFunction()
def forward(self, input: SparseTensor, input_glob: SparseTensor):
assert isinstance(input, SparseTensor)
output = self.broadcast.apply(
input.F,
input_glob.F,
self.operation_type,
input.coordinate_map_key,
input_glob.coordinate_map_key,
input.coordinate_manager,
)
return SparseTensor(
output,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
def __repr__(self):
return self.__class__.__name__
class MinkowskiBroadcastAddition(MinkowskiBroadcastBase):
r"""Broadcast the reduced features to all input coordinates.
.. math::
\mathbf{y}_\mathbf{u} = \mathbf{x}_{1, \mathbf{u}} + \mathbf{x}_2
\; \text{for} \; \mathbf{u} \in \mathcal{C}^\text{in}
For all input :math:`\mathbf{x}_\mathbf{u}`, add :math:`\mathbf{x}_2`. The
output coordinates will be the same as the input coordinates
:math:`\mathcal{C}^\text{in} = \mathcal{C}^\text{out}`.
.. note::
The first argument takes a sparse tensor; the second argument takes
features that are reduced to the origin. This can be typically done with
the global reduction such as the :attr:`MinkowskiGlobalPooling`.
"""
def __init__(self):
MinkowskiBroadcastBase.__init__(self, BroadcastMode.ELEMENTWISE_ADDITON)
class MinkowskiBroadcastMultiplication(MinkowskiBroadcastBase):
r"""Broadcast reduced features to all input coordinates.
.. math::
\mathbf{y}_\mathbf{u} = \mathbf{x}_{1, \mathbf{u}} \times \mathbf{x}_2
\; \text{for} \; \mathbf{u} \in \mathcal{C}^\text{in}
For all input :math:`\mathbf{x}_\mathbf{u}`, multiply :math:`\mathbf{x}_2`
element-wise. The output coordinates will be the same as the input
coordinates :math:`\mathcal{C}^\text{in} = \mathcal{C}^\text{out}`.
.. note::
The first argument takes a sparse tensor; the second argument takes
features that are reduced to the origin. This can be typically done with
the global reduction such as the :attr:`MinkowskiGlobalPooling`.
"""
def __init__(self):
MinkowskiBroadcastBase.__init__(self, BroadcastMode.ELEMENTWISE_MULTIPLICATION)
class MinkowskiBroadcast(Module):
r"""Broadcast reduced features to all input coordinates.
.. math::
\mathbf{y}_\mathbf{u} = \mathbf{x}_2 \; \text{for} \; \mathbf{u} \in
\mathcal{C}^\text{in}
For all input :math:`\mathbf{x}_\mathbf{u}`, copy value :math:`\mathbf{x}_2`
element-wise. The output coordinates will be the same as the input
coordinates :math:`\mathcal{C}^\text{in} = \mathcal{C}^\text{out}`. The
first input :math:`\mathbf{x}_1` is only used for defining the output
coordinates.
.. note::
The first argument takes a sparse tensor; the second argument takes
features that are reduced to the origin. This can be typically done with
the global reduction such as the :attr:`MinkowskiGlobalPooling`.
"""
def __repr__(self):
return self.__class__.__name__
def forward(self, input: SparseTensor, input_glob: SparseTensor):
assert isinstance(input, SparseTensor)
assert isinstance(input_glob, SparseTensor)
broadcast_feat = input.F.new(len(input), input_glob.size()[1])
batch_indices, batch_rows = input.coordinate_manager.origin_map(input.coordinate_map_key)
for b, rows in zip(batch_indices, batch_rows):
broadcast_feat[rows] = input_glob.F[b]
return SparseTensor(
broadcast_feat,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
class MinkowskiBroadcastConcatenation(MinkowskiBroadcast):
r"""Broadcast reduced features to all input coordinates and concatenate to the input.
.. math::
\mathbf{y}_\mathbf{u} = [\mathbf{x}_{1,\mathbf{u}}, \mathbf{x}_2] \;
\text{for} \; \mathbf{u} \in \mathcal{C}^\text{in}
For all input :math:`\mathbf{x}_\mathbf{u}`, concatenate vector
:math:`\mathbf{x}_2`. :math:`[\cdot, \cdot]` is a concatenation operator.
The output coordinates will be the same as the input coordinates
:math:`\mathcal{C}^\text{in} = \mathcal{C}^\text{out}`.
.. note::
The first argument takes a sparse tensor; the second argument takes
features that are reduced to the origin. This can be typically done with
the global reduction such as the :attr:`MinkowskiGlobalPooling`.
"""
def forward(self, input: SparseTensor, input_glob: SparseTensor):
assert isinstance(input, SparseTensor)
assert isinstance(input_glob, SparseTensor)
broadcast_feat = input.F.new(len(input), input_glob.size()[1])
batch_indices, batch_rows = input.coordinate_manager.origin_map(input.coordinate_map_key)
for b, row_ind in zip(batch_indices, batch_rows):
broadcast_feat[row_ind] = input_glob.F[b]
broadcast_cat = torch.cat((input.F, broadcast_feat), dim=1)
return SparseTensor(
broadcast_cat,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
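

# A minimal usage sketch (illustrative): reduce features to the origin with a
# global pooling, then broadcast the per-batch reduction back to every input
# coordinate. The coordinates and features below are made up.
if __name__ == "__main__":
    import torch
    import MinkowskiEngine as ME

    coords = torch.IntTensor([[0, 0, 0], [0, 1, 1], [1, 2, 2]])
    feats = torch.rand(3, 4)
    sinput = ME.SparseTensor(feats, coordinates=coords)

    glob = ME.MinkowskiGlobalAvgPooling()(sinput)  # one feature row per batch
    out = ME.MinkowskiBroadcastAddition()(sinput, glob)
    print(out.F.shape)  # torch.Size([3, 4]); coordinates are unchanged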
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiBroadcast.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
__version__ = "0.5.4"
import os
import sys
import warnings
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)
# Force OMP_NUM_THREADS setup
if os.cpu_count() > 16 and "OMP_NUM_THREADS" not in os.environ:
warnings.warn(
" ".join(
[
"The environment variable `OMP_NUM_THREADS` not set. MinkowskiEngine will automatically set `OMP_NUM_THREADS=16`.",
"If you want to set `OMP_NUM_THREADS` manually, please export it on the command line before running a python script.",
"e.g. `export OMP_NUM_THREADS=12; python your_program.py`.",
"It is recommended to set it below 24.",
]
)
)
os.environ["OMP_NUM_THREADS"] = str(16)
# Must be imported first to load all required shared libs
import torch
from diagnostics import print_diagnostics
from MinkowskiEngineBackend._C import (
MinkowskiAlgorithm,
CoordinateMapKey,
GPUMemoryAllocatorType,
CoordinateMapType,
RegionType,
PoolingMode,
BroadcastMode,
is_cuda_available,
cuda_version,
cudart_version,
get_gpu_memory_info,
)
from MinkowskiKernelGenerator import (
KernelRegion,
KernelGenerator,
convert_region_type,
get_kernel_volume,
)
from MinkowskiTensor import (
SparseTensorOperationMode,
SparseTensorQuantizationMode,
set_sparse_tensor_operation_mode,
sparse_tensor_operation_mode,
global_coordinate_manager,
set_global_coordinate_manager,
clear_global_coordinate_manager,
)
from MinkowskiSparseTensor import SparseTensor
from MinkowskiTensorField import TensorField
from MinkowskiCommon import (
convert_to_int_tensor,
MinkowskiModuleBase,
)
from MinkowskiCoordinateManager import (
set_memory_manager_backend,
set_gpu_allocator,
CoordsManager,
CoordinateManager,
)
from MinkowskiConvolution import (
MinkowskiConvolutionFunction,
MinkowskiConvolution,
MinkowskiConvolutionTransposeFunction,
MinkowskiConvolutionTranspose,
MinkowskiGenerativeConvolutionTranspose,
)
from MinkowskiChannelwiseConvolution import MinkowskiChannelwiseConvolution
from MinkowskiPooling import (
MinkowskiLocalPoolingFunction,
MinkowskiSumPooling,
MinkowskiAvgPooling,
MinkowskiMaxPooling,
MinkowskiLocalPoolingTransposeFunction,
MinkowskiPoolingTranspose,
MinkowskiGlobalPoolingFunction,
MinkowskiGlobalPooling,
MinkowskiGlobalSumPooling,
MinkowskiGlobalAvgPooling,
MinkowskiGlobalMaxPooling,
MinkowskiDirectMaxPoolingFunction,
)
from MinkowskiBroadcast import (
MinkowskiBroadcastFunction,
MinkowskiBroadcastAddition,
MinkowskiBroadcastMultiplication,
MinkowskiBroadcast,
MinkowskiBroadcastConcatenation,
)
from MinkowskiNonlinearity import (
MinkowskiELU,
MinkowskiHardshrink,
MinkowskiHardsigmoid,
MinkowskiHardtanh,
MinkowskiHardswish,
MinkowskiLeakyReLU,
MinkowskiLogSigmoid,
MinkowskiPReLU,
MinkowskiReLU,
MinkowskiReLU6,
MinkowskiRReLU,
MinkowskiSELU,
MinkowskiCELU,
MinkowskiGELU,
MinkowskiSigmoid,
MinkowskiSiLU,
MinkowskiSoftplus,
MinkowskiSoftshrink,
MinkowskiSoftsign,
MinkowskiTanh,
MinkowskiTanhshrink,
MinkowskiThreshold,
MinkowskiSoftmin,
MinkowskiSoftmax,
MinkowskiLogSoftmax,
MinkowskiAdaptiveLogSoftmaxWithLoss,
MinkowskiDropout,
MinkowskiAlphaDropout,
MinkowskiSinusoidal,
)
from MinkowskiNormalization import (
MinkowskiBatchNorm,
MinkowskiSyncBatchNorm,
MinkowskiInstanceNorm,
MinkowskiInstanceNormFunction,
MinkowskiStableInstanceNorm,
)
from MinkowskiPruning import MinkowskiPruning, MinkowskiPruningFunction
from MinkowskiUnion import MinkowskiUnion, MinkowskiUnionFunction
from MinkowskiInterpolation import (
MinkowskiInterpolation,
MinkowskiInterpolationFunction,
)
from MinkowskiNetwork import MinkowskiNetwork
import MinkowskiOps
from MinkowskiOps import (
MinkowskiLinear,
MinkowskiToSparseTensor,
MinkowskiToDenseTensor,
MinkowskiToFeature,
MinkowskiStackCat,
MinkowskiStackSum,
MinkowskiStackMean,
MinkowskiStackVar,
cat,
mean,
var,
to_sparse,
to_sparse_all,
dense_coordinates,
)
from MinkowskiOps import _sum as sum
import MinkowskiFunctional
import MinkowskiEngine.utils as utils
import MinkowskiEngine.modules as modules
from sparse_matrix_functions import (
spmm,
MinkowskiSPMMFunction,
MinkowskiSPMMAverageFunction,
)
if not is_cuda_available():
warnings.warn(
" ".join(
[
"The MinkowskiEngine was compiled with CPU_ONLY flag.",
"If you want to compile with CUDA support, make sure `torch.cuda.is_available()` is True when you install MinkowskiEngine.",
]
)
)
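

# Quick-start sketch (illustrative, kept in comments so nothing runs on
# import): the typical entry point is `import MinkowskiEngine as ME`, then
# build sparse tensors and layers from the names re-exported above, e.g.
#
#   import torch
#   import MinkowskiEngine as ME
#
#   coords = torch.IntTensor([[0, 0, 0], [0, 1, 1]])  # batch index first
#   feats = torch.rand(2, 3)
#   x = ME.SparseTensor(feats, coordinates=coords)
#   conv = ME.MinkowskiConvolution(3, 8, kernel_size=3, dimension=2)
#   y = conv(x)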
| MinkowskiEngine-master | MinkowskiEngine/__init__.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
from torch.nn import Module
from torch.autograd import Function
from MinkowskiEngineBackend._C import CoordinateMapKey
from MinkowskiSparseTensor import SparseTensor
from MinkowskiCoordinateManager import CoordinateManager
class MinkowskiUnionFunction(Function):
@staticmethod
def forward(
ctx,
in_coords_keys: list,
out_coords_key: CoordinateMapKey,
coordinate_manager: CoordinateManager,
*in_feats,
):
assert isinstance(
in_feats, (list, tuple)
), "Input must be a collection of Tensors"
        assert len(in_feats) > 1, "The input must contain at least two Tensors"
assert len(in_feats) == len(
in_coords_keys
), "The input features and keys must have the same length"
union_maps = coordinate_manager.union_map(in_coords_keys, out_coords_key)
out_feat = torch.zeros(
(coordinate_manager.size(out_coords_key), in_feats[0].shape[1]),
dtype=in_feats[0].dtype,
device=in_feats[0].device,
)
for in_feat, union_map in zip(in_feats, union_maps):
out_feat[union_map[1]] += in_feat[union_map[0]]
ctx.keys = (in_coords_keys, coordinate_manager)
ctx.save_for_backward(*union_maps)
return out_feat
@staticmethod
def backward(ctx, grad_out_feat):
if not grad_out_feat.is_contiguous():
grad_out_feat = grad_out_feat.contiguous()
union_maps = ctx.saved_tensors
in_coords_keys, coordinate_manager = ctx.keys
num_ch, dtype, device = (
grad_out_feat.shape[1],
grad_out_feat.dtype,
grad_out_feat.device,
)
grad_in_feats = []
for in_coords_key, union_map in zip(in_coords_keys, union_maps):
grad_in_feat = torch.zeros(
(coordinate_manager.size(in_coords_key), num_ch),
dtype=dtype,
device=device,
)
grad_in_feat[union_map[0]] = grad_out_feat[union_map[1]]
grad_in_feats.append(grad_in_feat)
return (None, None, None, *grad_in_feats)
class MinkowskiUnion(Module):
r"""Create a union of all sparse tensors and add overlapping features.
Args:
None
.. warning::
This function is experimental and the usage can be changed in the future updates.
"""
def __init__(self):
super(MinkowskiUnion, self).__init__()
self.union = MinkowskiUnionFunction()
def forward(self, *inputs):
r"""
Args:
A variable number of :attr:`MinkowskiEngine.SparseTensor`'s.
Returns:
A :attr:`MinkowskiEngine.SparseTensor` with coordinates = union of all
input coordinates, and features = sum of all features corresponding to the
coordinate.
Example::
>>> # Define inputs
>>> input1 = SparseTensor(
            >>>     torch.rand(N, in_channels, dtype=torch.double), coordinates=coords)
            >>> # All inputs must share the same coordinate manager
            >>> input2 = SparseTensor(
            >>>     torch.rand(N, in_channels, dtype=torch.double),
            >>>     coordinates=coords + 1,
            >>>     coordinate_manager=input1.coordinate_manager, # Must use the same coordinate manager
            >>> )
            >>> union = MinkowskiUnion()
            >>> output = union(input1, input2)
"""
assert isinstance(inputs, (list, tuple)), "The input must be a list or tuple"
for s in inputs:
assert isinstance(s, SparseTensor), "Inputs must be sparse tensors."
        assert len(inputs) > 1, "The input must contain at least two SparseTensors"
# Assert the same coordinate manager
ref_coordinate_manager = inputs[0].coordinate_manager
for s in inputs:
assert (
ref_coordinate_manager == s.coordinate_manager
), "Invalid coordinate manager. All inputs must have the same coordinate manager."
in_coordinate_map_key = inputs[0].coordinate_map_key
coordinate_manager = inputs[0].coordinate_manager
out_coordinate_map_key = CoordinateMapKey(
in_coordinate_map_key.get_coordinate_size()
)
output = self.union.apply(
[input.coordinate_map_key for input in inputs],
out_coordinate_map_key,
coordinate_manager,
*[input.F for input in inputs],
)
return SparseTensor(
output,
coordinate_map_key=out_coordinate_map_key,
coordinate_manager=coordinate_manager,
)
def __repr__(self):
return self.__class__.__name__ + "()"
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiUnion.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch.nn.functional as F
from MinkowskiSparseTensor import SparseTensor
from MinkowskiTensorField import TensorField
def _wrap_tensor(input, F):
if isinstance(input, TensorField):
return TensorField(
F,
coordinate_field_map_key=input.coordinate_field_map_key,
coordinate_manager=input.coordinate_manager,
quantization_mode=input.quantization_mode,
)
else:
return SparseTensor(
F,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
# Activations
def threshold(input, *args, **kwargs):
return _wrap_tensor(input, F.threshold(input.F, *args, **kwargs))
def relu(input, *args, **kwargs):
return _wrap_tensor(input, F.relu(input.F, *args, **kwargs))
def hardtanh(input, *args, **kwargs):
return _wrap_tensor(input, F.hardtanh(input.F, *args, **kwargs))
def hardswish(input, *args, **kwargs):
return _wrap_tensor(input, F.hardswish(input.F, *args, **kwargs))
def relu6(input, *args, **kwargs):
return _wrap_tensor(input, F.relu6(input.F, *args, **kwargs))
def elu(input, *args, **kwargs):
return _wrap_tensor(input, F.elu(input.F, *args, **kwargs))
def selu(input, *args, **kwargs):
return _wrap_tensor(input, F.selu(input.F, *args, **kwargs))
def celu(input, *args, **kwargs):
return _wrap_tensor(input, F.celu(input.F, *args, **kwargs))
def leaky_relu(input, *args, **kwargs):
return _wrap_tensor(input, F.leaky_relu(input.F, *args, **kwargs))
def prelu(input, *args, **kwargs):
return _wrap_tensor(input, F.prelu(input.F, *args, **kwargs))
def rrelu(input, *args, **kwargs):
return _wrap_tensor(input, F.rrelu(input.F, *args, **kwargs))
def glu(input, *args, **kwargs):
return _wrap_tensor(input, F.glu(input.F, *args, **kwargs))
def gelu(input, *args, **kwargs):
return _wrap_tensor(input, F.gelu(input.F, *args, **kwargs))
def logsigmoid(input, *args, **kwargs):
return _wrap_tensor(input, F.logsigmoid(input.F, *args, **kwargs))
def hardshrink(input, *args, **kwargs):
return _wrap_tensor(input, F.hardshrink(input.F, *args, **kwargs))
def tanhshrink(input, *args, **kwargs):
return _wrap_tensor(input, F.tanhshrink(input.F, *args, **kwargs))
def softsign(input, *args, **kwargs):
return _wrap_tensor(input, F.softsign(input.F, *args, **kwargs))
def softplus(input, *args, **kwargs):
return _wrap_tensor(input, F.softplus(input.F, *args, **kwargs))
def softmin(input, *args, **kwargs):
return _wrap_tensor(input, F.softmin(input.F, *args, **kwargs))
def softmax(input, *args, **kwargs):
return _wrap_tensor(input, F.softmax(input.F, *args, **kwargs))
def softshrink(input, *args, **kwargs):
return _wrap_tensor(input, F.softshrink(input.F, *args, **kwargs))
def gumbel_softmax(input, *args, **kwargs):
return _wrap_tensor(input, F.gumbel_softmax(input.F, *args, **kwargs))
def log_softmax(input, *args, **kwargs):
return _wrap_tensor(input, F.log_softmax(input.F, *args, **kwargs))
def tanh(input, *args, **kwargs):
return _wrap_tensor(input, F.tanh(input.F, *args, **kwargs))
def sigmoid(input, *args, **kwargs):
return _wrap_tensor(input, F.sigmoid(input.F, *args, **kwargs))
def hardsigmoid(input, *args, **kwargs):
return _wrap_tensor(input, F.hardsigmoid(input.F, *args, **kwargs))
def silu(input, *args, **kwargs):
return _wrap_tensor(input, F.silu(input.F, *args, **kwargs))
# Normalization
def batch_norm(input, *args, **kwargs):
return _wrap_tensor(input, F.batch_norm(input.F, *args, **kwargs))
def normalize(input, *args, **kwargs):
return _wrap_tensor(input, F.normalize(input.F, *args, **kwargs))
# Linear
def linear(input, *args, **kwargs):
return _wrap_tensor(input, F.linear(input.F, *args, **kwargs))
# Dropouts
def dropout(input, *args, **kwargs):
return _wrap_tensor(input, F.dropout(input.F, *args, **kwargs))
def alpha_dropout(input, *args, **kwargs):
return _wrap_tensor(input, F.alpha_dropout(input.F, *args, **kwargs))
# Loss functions
def binary_cross_entropy(input, target, *args, **kwargs):
return F.binary_cross_entropy(input.F, target, *args, **kwargs)
def binary_cross_entropy_with_logits(input, target, *args, **kwargs):
return F.binary_cross_entropy_with_logits(input.F, target, *args, **kwargs)
def poisson_nll_loss(input, target, *args, **kwargs):
return F.poisson_nll_loss(input.F, target, *args, **kwargs)
def cross_entropy(input, target, *args, **kwargs):
return F.cross_entropy(input.F, target, *args, **kwargs)
def hinge_embedding_loss(input, target, *args, **kwargs):
return F.hinge_embedding_loss(input.F, target, *args, **kwargs)
def kl_div(input, target, *args, **kwargs):
return F.kl_div(input.F, target, *args, **kwargs)
def l1_loss(input, target, *args, **kwargs):
return F.l1_loss(input.F, target, *args, **kwargs)
def mse_loss(input, target, *args, **kwargs):
return F.mse_loss(input.F, target, *args, **kwargs)
def multilabel_margin_loss(input, target, *args, **kwargs):
return F.multilabel_margin_loss(input.F, target, *args, **kwargs)
def multilabel_soft_margin_loss(input, target, *args, **kwargs):
return F.multilabel_soft_margin_loss(input.F, target, *args, **kwargs)
def multi_margin_loss(input, target, *args, **kwargs):
return F.multi_margin_loss(input.F, target, *args, **kwargs)
def nll_loss(input, target, *args, **kwargs):
return F.nll_loss(input.F, target, *args, **kwargs)
def smooth_l1_loss(input, target, *args, **kwargs):
return F.smooth_l1_loss(input.F, target, *args, **kwargs)
def soft_margin_loss(input, target, *args, **kwargs):
return F.soft_margin_loss(input.F, target, *args, **kwargs)
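

# A minimal usage sketch (illustrative): the functional interface mirrors
# torch.nn.functional, applying each op to the features and re-wrapping the
# result with the input's coordinates. The values below are made up.
if __name__ == "__main__":
    import torch
    import MinkowskiEngine as ME
    from MinkowskiEngine import MinkowskiFunctional as MF

    coords = torch.IntTensor([[0, 0, 0], [0, 1, 1]])
    x = ME.SparseTensor(torch.randn(2, 4), coordinates=coords)
    y = MF.relu(x)  # same coordinates, rectified features
    p = MF.softmax(y, dim=1)  # softmax over the feature channels
    print(p.F.sum(dim=1))  # each row sums to 1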
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiFunctional.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
from typing import Union
import torch
import torch.nn as nn
from MinkowskiCommon import MinkowskiModuleBase
from MinkowskiSparseTensor import SparseTensor
from MinkowskiTensorField import TensorField
class MinkowskiNonlinearityBase(MinkowskiModuleBase):
MODULE = None
def __init__(self, *args, **kwargs):
super(MinkowskiNonlinearityBase, self).__init__()
self.module = self.MODULE(*args, **kwargs)
def forward(self, input):
output = self.module(input.F)
if isinstance(input, TensorField):
return TensorField(
output,
coordinate_field_map_key=input.coordinate_field_map_key,
coordinate_manager=input.coordinate_manager,
quantization_mode=input.quantization_mode,
)
else:
return SparseTensor(
output,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
def __repr__(self):
return self.__class__.__name__ + "()"
class MinkowskiELU(MinkowskiNonlinearityBase):
MODULE = torch.nn.ELU
class MinkowskiHardshrink(MinkowskiNonlinearityBase):
MODULE = torch.nn.Hardshrink
class MinkowskiHardsigmoid(MinkowskiNonlinearityBase):
MODULE = torch.nn.Hardsigmoid
class MinkowskiHardtanh(MinkowskiNonlinearityBase):
MODULE = torch.nn.Hardtanh
class MinkowskiHardswish(MinkowskiNonlinearityBase):
MODULE = torch.nn.Hardswish
class MinkowskiLeakyReLU(MinkowskiNonlinearityBase):
MODULE = torch.nn.LeakyReLU
class MinkowskiLogSigmoid(MinkowskiNonlinearityBase):
MODULE = torch.nn.LogSigmoid
class MinkowskiPReLU(MinkowskiNonlinearityBase):
MODULE = torch.nn.PReLU
class MinkowskiReLU(MinkowskiNonlinearityBase):
MODULE = torch.nn.ReLU
class MinkowskiReLU6(MinkowskiNonlinearityBase):
MODULE = torch.nn.ReLU6
class MinkowskiRReLU(MinkowskiNonlinearityBase):
MODULE = torch.nn.RReLU
class MinkowskiSELU(MinkowskiNonlinearityBase):
MODULE = torch.nn.SELU
class MinkowskiCELU(MinkowskiNonlinearityBase):
MODULE = torch.nn.CELU
class MinkowskiGELU(MinkowskiNonlinearityBase):
MODULE = torch.nn.GELU
class MinkowskiSigmoid(MinkowskiNonlinearityBase):
MODULE = torch.nn.Sigmoid
class MinkowskiSiLU(MinkowskiNonlinearityBase):
MODULE = torch.nn.SiLU
class MinkowskiSoftplus(MinkowskiNonlinearityBase):
MODULE = torch.nn.Softplus
class MinkowskiSoftshrink(MinkowskiNonlinearityBase):
MODULE = torch.nn.Softshrink
class MinkowskiSoftsign(MinkowskiNonlinearityBase):
MODULE = torch.nn.Softsign
class MinkowskiTanh(MinkowskiNonlinearityBase):
MODULE = torch.nn.Tanh
class MinkowskiTanhshrink(MinkowskiNonlinearityBase):
MODULE = torch.nn.Tanhshrink
class MinkowskiThreshold(MinkowskiNonlinearityBase):
MODULE = torch.nn.Threshold
# Non-linear Activations (other)
class MinkowskiSoftmin(MinkowskiNonlinearityBase):
MODULE = torch.nn.Softmin
class MinkowskiSoftmax(MinkowskiNonlinearityBase):
MODULE = torch.nn.Softmax
class MinkowskiLogSoftmax(MinkowskiNonlinearityBase):
MODULE = torch.nn.LogSoftmax
class MinkowskiAdaptiveLogSoftmaxWithLoss(MinkowskiNonlinearityBase):
MODULE = torch.nn.AdaptiveLogSoftmaxWithLoss
# Dropouts
class MinkowskiDropout(MinkowskiNonlinearityBase):
MODULE = torch.nn.Dropout
class MinkowskiAlphaDropout(MinkowskiNonlinearityBase):
MODULE = torch.nn.AlphaDropout
class MinkowskiSinusoidal(MinkowskiModuleBase):
def __init__(self, in_channel, out_channel):
MinkowskiModuleBase.__init__(self)
self.in_channel = in_channel
self.out_channel = out_channel
self.kernel = nn.Parameter(torch.rand(in_channel, out_channel))
self.bias = nn.Parameter(torch.rand(1, out_channel))
self.coef = nn.Parameter(torch.rand(1, out_channel))
def forward(self, input: Union[SparseTensor, TensorField]):
out_F = torch.sin(input.F.mm(self.kernel) + self.bias) * self.coef
if isinstance(input, TensorField):
return TensorField(
out_F,
coordinate_field_map_key=input.coordinate_field_map_key,
coordinate_manager=input.coordinate_manager,
quantization_mode=input.quantization_mode,
)
else:
return SparseTensor(
out_F,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
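

# A minimal usage sketch (illustrative): each nonlinearity wraps its torch.nn
# counterpart, applies it to the features, and re-wraps the coordinates.
# Constructor arguments are forwarded to the wrapped torch.nn module.
if __name__ == "__main__":
    import torch
    import MinkowskiEngine as ME

    coords = torch.IntTensor([[0, 0, 0], [0, 1, 1]])
    x = ME.SparseTensor(torch.randn(2, 3), coordinates=coords)

    relu = ME.MinkowskiReLU()
    leaky = ME.MinkowskiLeakyReLU(negative_slope=0.1)
    print(bool(relu(x).F.min() >= 0))  # True
    print(leaky(x).F.shape)  # torch.Size([2, 3])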
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiNonlinearity.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import math
from typing import Union
import torch
from torch.nn import Parameter
from MinkowskiSparseTensor import SparseTensor
from MinkowskiEngineBackend._C import CoordinateMapKey, RegionType
from MinkowskiCommon import MinkowskiModuleBase
from MinkowskiKernelGenerator import KernelGenerator
class MinkowskiChannelwiseConvolution(MinkowskiModuleBase):
__slots__ = (
"in_channels",
"out_channels",
"kernel_generator",
"dimension",
"kernel",
"bias",
"conv",
)
r"""Channelwise (Depthwise) Convolution layer for a sparse tensor.
.. math::
\mathbf{x}_\mathbf{u} = \sum_{\mathbf{i} \in \mathcal{N}^D(\mathbf{u}, K) \cap
\mathcal{C}^\text{in}} W_\mathbf{i} \odot \mathbf{x}_{\mathbf{i} +
\mathbf{u}} \;\text{for} \; \mathbf{u} \in \mathcal{C}^\text{out}
where :math:`K` is the kernel size and :math:`\mathcal{N}^D(\mathbf{u}, K)
\cap \mathcal{C}^\text{in}` is the set of offsets that are at most :math:`\left
\lceil{\frac{1}{2}(K - 1)} \right \rceil` away from :math:`\mathbf{u}`
defined in :math:`\mathcal{S}^\text{in}`. :math:`\odot` indicates the
elementwise product.
.. note::
For even :math:`K`, the kernel offset :math:`\mathcal{N}^D`
implementation is different from the above definition. The offsets
range from :math:`\mathbf{i} \in [0, K)^D, \; \mathbf{i} \in
\mathbb{Z}_+^D`.
"""
def __init__(
self,
in_channels,
kernel_size=-1,
stride=1,
dilation=1,
bias=False,
kernel_generator=None,
dimension=-1,
):
r"""convolution on a sparse tensor
Args:
:attr:`in_channels` (int): the number of input channels in the
input tensor.
:attr:`kernel_size` (int, optional): the size of the kernel in the
output tensor. If not provided, :attr:`region_offset` should be
:attr:`RegionType.CUSTOM` and :attr:`region_offset` should be a 2D
matrix with size :math:`N\times D` such that it lists all :math:`N`
offsets in D-dimension.
:attr:`stride` (int, or list, optional): stride size of the
convolution layer. If non-identity is used, the output coordinates
will be at least :attr:`stride` :math:`\times` :attr:`tensor_stride`
away. When a list is given, the length must be D; each element will
be used for stride size for the specific axis.
:attr:`dilation` (int, or list, optional): dilation size for the
convolution kernel. When a list is given, the length must be D and
each element is an axis specific dilation. All elements must be > 0.
:attr:`bias` (bool, optional): if True, the convolution layer
has a bias.
:attr:`kernel_generator` (:attr:`MinkowskiEngine.KernelGenerator`,
optional): defines the custom kernel shape.
:attr:`dimension` (int): the spatial dimension of the space where
all the inputs and the network are defined. For example, images are
in a 2D space, meshes and 3D shapes are in a 3D space.
"""
super(MinkowskiChannelwiseConvolution, self).__init__()
assert (
dimension > 0
), f"Invalid dimension. Please provide a valid dimension argument. dimension={dimension}"
if kernel_generator is None:
kernel_generator = KernelGenerator(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
dimension=dimension,
)
self.kernel_generator = kernel_generator
self.in_channels = in_channels
self.dimension = dimension
self.kernel_shape = (kernel_generator.kernel_volume, self.in_channels)
Tensor = torch.FloatTensor
self.kernel = Parameter(Tensor(*self.kernel_shape))
self.bias = Parameter(Tensor(1, in_channels)) if bias else None
self.reset_parameters()
def forward(
self,
input: SparseTensor,
coords: Union[torch.IntTensor, CoordinateMapKey, SparseTensor] = None,
):
r"""
:attr:`input` (`MinkowskiEngine.SparseTensor`): Input sparse tensor to apply a
convolution on.
:attr:`coords` ((`torch.IntTensor`, `MinkowskiEngine.CoordinateMapKey`,
`MinkowskiEngine.SparseTensor`), optional): If provided, generate
results on the provided coordinates. None by default.
"""
assert isinstance(input, SparseTensor)
assert input.D == self.dimension
assert (
self.in_channels == input.shape[1]
), f"Channel size mismatch {self.in_channels} != {input.shape[1]}"
# Create a region_offset
region_type_, region_offset_, _ = self.kernel_generator.get_kernel(
input.tensor_stride, False
)
cm = input.coordinate_manager
in_key = input.coordinate_map_key
out_key = cm.stride(in_key, self.kernel_generator.kernel_stride)
N_out = cm.size(out_key)
out_F = input._F.new(N_out, self.in_channels).zero_()
kernel_map = cm.kernel_map(
in_key,
out_key,
self.kernel_generator.kernel_stride,
self.kernel_generator.kernel_size,
self.kernel_generator.kernel_dilation,
region_type=region_type_,
region_offset=region_offset_,
)
for k, in_out in kernel_map.items():
in_out = in_out.long().to(input.device)
out_F[in_out[1]] += input.F[in_out[0]] * self.kernel[k]
if self.bias is not None:
out_F += self.bias
return SparseTensor(out_F, coordinate_map_key=out_key, coordinate_manager=cm)
def reset_parameters(self, is_transpose=False):
with torch.no_grad():
            # Channelwise convolution keeps the channel count, so the fan size
            # is in_channels times the kernel volume in either direction.
            n = self.in_channels * self.kernel_generator.kernel_volume
stdv = 1.0 / math.sqrt(n)
self.kernel.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def __repr__(self):
s = "(in={}, region_type={}, ".format(
self.in_channels, self.kernel_generator.region_type
)
if self.kernel_generator.region_type in [RegionType.CUSTOM]:
s += "kernel_volume={}, ".format(self.kernel_generator.kernel_volume)
else:
s += "kernel_size={}, ".format(self.kernel_generator.kernel_size)
s += "stride={}, dilation={})".format(
self.kernel_generator.kernel_stride,
self.kernel_generator.kernel_dilation,
)
return self.__class__.__name__ + s
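

# A minimal usage sketch (illustrative): a channelwise (depthwise) convolution
# applies one kernel per input channel, so the channel count is preserved.
if __name__ == "__main__":
    import torch
    import MinkowskiEngine as ME

    coords = torch.IntTensor([[0, 0, 0, 0], [0, 0, 1, 1], [0, 1, 1, 0]])
    feats = torch.rand(3, 8)
    x = ME.SparseTensor(feats, coordinates=coords)

    conv = ME.MinkowskiChannelwiseConvolution(in_channels=8, kernel_size=3, dimension=3)
    y = conv(x)
    print(y.F.shape)  # torch.Size([3, 8]); channel count is unchanged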
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiChannelwiseConvolution.py |
# Copyright (c) 2021 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
from typing import Union
import numpy as np
import torch
from torch.nn.modules import Module
from MinkowskiSparseTensor import SparseTensor
from MinkowskiTensor import (
COORDINATE_MANAGER_DIFFERENT_ERROR,
COORDINATE_KEY_DIFFERENT_ERROR,
)
from MinkowskiTensorField import TensorField
from MinkowskiCommon import MinkowskiModuleBase
from MinkowskiEngineBackend._C import CoordinateMapKey
class MinkowskiLinear(Module):
def __init__(self, in_features, out_features, bias=True):
super(MinkowskiLinear, self).__init__()
self.linear = torch.nn.Linear(in_features, out_features, bias=bias)
def forward(self, input: Union[SparseTensor, TensorField]):
output = self.linear(input.F)
if isinstance(input, TensorField):
return TensorField(
output,
coordinate_field_map_key=input.coordinate_field_map_key,
coordinate_manager=input.coordinate_manager,
quantization_mode=input.quantization_mode,
)
else:
return SparseTensor(
output,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
def __repr__(self):
s = "(in_features={}, out_features={}, bias={})".format(
self.linear.in_features,
self.linear.out_features,
self.linear.bias is not None,
)
return self.__class__.__name__ + s
def _tuple_operator(*sparse_tensors, operator):
if len(sparse_tensors) == 1:
assert isinstance(sparse_tensors[0], (tuple, list))
sparse_tensors = sparse_tensors[0]
    assert (
        len(sparse_tensors) > 1
    ), f"Invalid number of inputs. Expected at least two sparse tensors, got {len(sparse_tensors)}."
if isinstance(sparse_tensors[0], SparseTensor):
device = sparse_tensors[0].device
coordinate_manager = sparse_tensors[0].coordinate_manager
coordinate_map_key = sparse_tensors[0].coordinate_map_key
for s in sparse_tensors:
assert isinstance(
s, SparseTensor
), "Inputs must be either SparseTensors or TensorFields."
assert (
device == s.device
), f"Device must be the same. {device} != {s.device}"
assert (
coordinate_manager == s.coordinate_manager
), COORDINATE_MANAGER_DIFFERENT_ERROR
assert coordinate_map_key == s.coordinate_map_key, (
COORDINATE_KEY_DIFFERENT_ERROR
+ str(coordinate_map_key)
+ " != "
+ str(s.coordinate_map_key)
)
tens = []
for s in sparse_tensors:
tens.append(s.F)
return SparseTensor(
operator(tens),
coordinate_map_key=coordinate_map_key,
coordinate_manager=coordinate_manager,
)
elif isinstance(sparse_tensors[0], TensorField):
device = sparse_tensors[0].device
coordinate_manager = sparse_tensors[0].coordinate_manager
coordinate_field_map_key = sparse_tensors[0].coordinate_field_map_key
for s in sparse_tensors:
assert isinstance(
s, TensorField
), "Inputs must be either SparseTensors or TensorFields."
assert (
device == s.device
), f"Device must be the same. {device} != {s.device}"
assert (
coordinate_manager == s.coordinate_manager
), COORDINATE_MANAGER_DIFFERENT_ERROR
assert coordinate_field_map_key == s.coordinate_field_map_key, (
COORDINATE_KEY_DIFFERENT_ERROR
+ str(coordinate_field_map_key)
+ " != "
+ str(s.coordinate_field_map_key)
)
tens = []
for s in sparse_tensors:
tens.append(s.F)
return TensorField(
operator(tens),
coordinate_field_map_key=coordinate_field_map_key,
coordinate_manager=coordinate_manager,
)
else:
raise ValueError(
"Invalid data type. The input must be either a list of sparse tensors or a list of tensor fields."
)
def cat(*sparse_tensors):
r"""Concatenate sparse tensors
Concatenate sparse tensor features. All sparse tensors must have the same
`coordinate_map_key` (the same coordinates). To concatenate sparse tensors
with different sparsity patterns, use SparseTensor binary operations, or
:attr:`MinkowskiEngine.MinkowskiUnion`.
Example::
>>> import MinkowskiEngine as ME
>>> sin = ME.SparseTensor(feats, coords)
    >>> sin2 = ME.SparseTensor(feats2, coordinate_map_key=sin.coordinate_map_key, coordinate_manager=sin.coordinate_manager)
>>> sout = UNet(sin) # Returns an output sparse tensor on the same coordinates
>>> sout2 = ME.cat(sin, sin2, sout) # Can concatenate multiple sparse tensors
"""
return _tuple_operator(*sparse_tensors, operator=lambda xs: torch.cat(xs, dim=-1))
def _sum(*sparse_tensors):
r"""Compute the sum of sparse tensor features
Sum all sparse tensor features. All sparse tensors must have the same
`coordinate_map_key` (the same coordinates). To sum sparse tensors with
different sparsity patterns, use SparseTensor binary operations, or
:attr:`MinkowskiEngine.MinkowskiUnion`.
Example::
>>> import MinkowskiEngine as ME
>>> sin = ME.SparseTensor(feats, coords)
>>> sin2 = ME.SparseTensor(feats2, coordinate_map_key=sin.coordinate_map_key, coordinate_manager=sin.coordinate_manager)
>>> sout = UNet(sin) # Returns an output sparse tensor on the same coordinates
    >>> sout2 = ME.sum(sin, sin2, sout) # Can sum multiple sparse tensors
"""
def return_sum(xs):
tmp = xs[0] + xs[1]
for x in xs[2:]:
tmp += x
return tmp
return _tuple_operator(*sparse_tensors, operator=lambda xs: return_sum(xs))
def mean(*sparse_tensors):
r"""Compute the average of sparse tensor features
    Average all sparse tensor features. All sparse tensors must have the same
    `coordinate_map_key` (the same coordinates). To combine sparse tensors with
different sparsity patterns, use SparseTensor binary operations, or
:attr:`MinkowskiEngine.MinkowskiUnion`.
Example::
>>> import MinkowskiEngine as ME
>>> sin = ME.SparseTensor(feats, coords)
>>> sin2 = ME.SparseTensor(feats2, coordinate_map_key=sin.coordinate_map_key, coordinate_manager=sin.coordinate_manager)
>>> sout = UNet(sin) # Returns an output sparse tensor on the same coordinates
    >>> sout2 = ME.mean(sin, sin2, sout) # Can average multiple sparse tensors
"""
def return_mean(xs):
tmp = xs[0] + xs[1]
for x in xs[2:]:
tmp += x
return tmp / len(xs)
return _tuple_operator(*sparse_tensors, operator=lambda xs: return_mean(xs))
def var(*sparse_tensors):
r"""Compute the variance of sparse tensor features
    Compute the elementwise variance across all sparse tensor features. All
    sparse tensors must have the same `coordinate_map_key` (the same
    coordinates). To combine sparse tensors with
different sparsity patterns, use SparseTensor binary operations, or
:attr:`MinkowskiEngine.MinkowskiUnion`.
Example::
>>> import MinkowskiEngine as ME
>>> sin = ME.SparseTensor(feats, coords)
>>> sin2 = ME.SparseTensor(feats2, coordinate_map_key=sin.coordinate_map_key, coordinate_manager=sin.coordinate_manager)
>>> sout = UNet(sin) # Returns an output sparse tensor on the same coordinates
    >>> sout2 = ME.var(sin, sin2, sout) # Can compute the variance of multiple sparse tensors
"""
def return_var(xs):
tmp = xs[0] + xs[1]
for x in xs[2:]:
tmp += x
mean = tmp / len(xs)
var = (xs[0] - mean) ** 2
for x in xs[1:]:
var += (x - mean) ** 2
return var / len(xs)
return _tuple_operator(*sparse_tensors, operator=lambda xs: return_var(xs))
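# Worked example for the reductions above (values illustrative): for feature
# rows x1 = [1, 3] and x2 = [3, 5] on identical coordinates,
#   cat  -> [1, 3, 3, 5]
#   sum  -> [4, 8]
#   mean -> [2, 4]
#   var  -> [1, 1]    (population variance: ((1 - 2)**2 + (3 - 2)**2) / 2 = 1)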
def dense_coordinates(shape: Union[list, torch.Size]):
"""
coordinates = dense_coordinates(tensor.shape)
"""
r"""
Assume the input to have BxCxD1xD2x....xDN format.
If the shape of the tensor do not change, use
"""
spatial_dim = len(shape) - 2
assert (
spatial_dim > 0
), "Invalid shape. Shape must be batch x channel x spatial dimensions."
# Generate coordinates
size = [i for i in shape]
B = size[0]
coordinates = torch.from_numpy(
np.stack(
[
s.reshape(-1)
for s in np.meshgrid(
np.linspace(0, B - 1, B),
*(np.linspace(0, s - 1, s) for s in size[2:]),
indexing="ij",
)
],
1,
)
).int()
return coordinates
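# Example (shape B x C x D1 with B=2, D1=2): dense_coordinates((2, 8, 2)) yields
# [[0, 0], [0, 1], [1, 0], [1, 1]] -- one (batch, spatial) row per location.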
def to_sparse(x: torch.Tensor, format: str = None, coordinates=None, device=None):
r"""Convert a batched tensor (dimension 0 is the batch dimension) to a SparseTensor
:attr:`x` (:attr:`torch.Tensor`): a batched tensor. The first dimension is the batch dimension.
    :attr:`format` (:attr:`str`): Format of the tensor. It must include 'B' and 'C' indicating the batch and channel dimensions respectively; the rest of the dimensions must be 'X'. E.g., use format="BCXX" for image data in the BCHW format. For 3D data with the channel at the last dimension, use format="BXXXC" indicating Batch x Height x Width x Depth x Channel. If not provided, the format is assumed to be "BCX...X".
:attr:`device`: Device the sparse tensor will be generated on. If not provided, the device of the input tensor will be used.
"""
assert x.ndim > 2, "Input has 0 spatial dimension."
assert isinstance(x, torch.Tensor)
if format is None:
format = [
"X",
] * x.ndim
format[0] = "B"
format[1] = "C"
format = "".join(format)
assert x.ndim == len(format), f"Invalid format: {format}. len(format) != x.ndim"
assert (
"B" in format and "B" == format[0] and format.count("B") == 1
), "The input must have the batch axis and the format must include 'B' indicating the batch axis."
assert (
"C" in format and format.count("C") == 1
), "The format must indicate the channel axis"
if device is None:
device = x.device
ch_dim = format.find("C")
reduced_x = torch.abs(x).sum(ch_dim)
bcoords = torch.where(reduced_x != 0)
stacked_bcoords = torch.stack(bcoords, dim=1).int()
    # Gather the features at the nonzero locations. Inserting a full slice at
    # the channel axis makes advanced indexing return an (N, C) feature matrix.
    index = list(bcoords)
    index.insert(ch_dim, slice(None))
    features = x[tuple(index)]
return SparseTensor(features=features, coordinates=stacked_bcoords, device=device)
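# Hedged sketch for `to_sparse` (values illustrative): a mostly-zero BCHW image
# becomes a sparse tensor that keeps only the pixels with nonzero features.
#
#   import torch
#   x = torch.zeros(1, 3, 4, 4)
#   x[0, :, 1, 2] = torch.tensor([1.0, 2.0, 3.0])
#   st = to_sparse(x)  # format defaults to "BCXX"
#   st.C  # [[0, 1, 2]] -- (batch, h, w); st.F is [[1., 2., 3.]]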
def to_sparse_all(dense_tensor: torch.Tensor, coordinates: torch.Tensor = None):
r"""Converts a (differentiable) dense tensor to a sparse tensor with all coordinates.
Assume the input to have BxCxD1xD2x....xDN format.
If the shape of the tensor do not change, use `dense_coordinates` to cache the coordinates.
Please refer to tests/python/dense.py for usage
Example::
>>> dense_tensor = torch.rand(3, 4, 5, 6, 7, 8) # BxCxD1xD2xD3xD4
>>> dense_tensor.requires_grad = True
>>> stensor = to_sparse(dense_tensor)
"""
spatial_dim = dense_tensor.ndim - 2
assert (
spatial_dim > 0
), "Invalid shape. Shape must be batch x channel x spatial dimensions."
if coordinates is None:
coordinates = dense_coordinates(dense_tensor.shape)
feat_tensor = dense_tensor.permute(0, *(2 + i for i in range(spatial_dim)), 1)
return SparseTensor(
feat_tensor.reshape(-1, dense_tensor.size(1)),
coordinates,
device=dense_tensor.device,
)
class MinkowskiToSparseTensor(MinkowskiModuleBase):
r"""Converts a (differentiable) dense tensor or a :attr:`MinkowskiEngine.TensorField` to a :attr:`MinkowskiEngine.SparseTensor`.
For dense tensor, the input must have the BxCxD1xD2x....xDN format.
    :attr:`remove_zeros` (bool): if True, removes coordinates whose feature
    vectors are all zeros. If False, use all coordinates to populate a sparse
    tensor. True by default.
:attr:`coordinates` (torch.Tensor): if set, use the provided coordinates
only for sparse tensor generation. Will ignore `remove_zeros`.
If the shape of the tensor do not change, use `dense_coordinates` to cache the coordinates.
Please refer to tests/python/dense.py for usage.
Example::
>>> # Differentiable dense torch.Tensor to sparse tensor.
>>> dense_tensor = torch.rand(3, 4, 11, 11, 11, 11) # BxCxD1xD2x....xDN
>>> dense_tensor.requires_grad = True
>>> # Since the shape is fixed, cache the coordinates for faster inference
>>> coordinates = dense_coordinates(dense_tensor.shape)
>>> network = nn.Sequential(
>>> # Add layers that can be applied on a regular pytorch tensor
>>> nn.ReLU(),
>>> MinkowskiToSparseTensor(coordinates=coordinates),
>>> MinkowskiConvolution(4, 5, kernel_size=3, dimension=4),
>>> MinkowskiBatchNorm(5),
>>> MinkowskiReLU(),
>>> )
>>> for i in range(5):
>>> print(f"Iteration: {i}")
>>> soutput = network(dense_tensor)
>>> soutput.F.sum().backward()
>>> soutput.dense(shape=dense_tensor.shape)
"""
def __init__(self, remove_zeros=True, coordinates: torch.Tensor = None):
MinkowskiModuleBase.__init__(self)
self.remove_zeros = remove_zeros
self.coordinates = coordinates
def forward(self, input: Union[TensorField, torch.Tensor]):
if isinstance(input, TensorField):
return input.sparse()
elif isinstance(input, torch.Tensor):
            # dense tensor to sparse tensor conversion; explicitly provided
            # coordinates take precedence over zero removal (see the docstring)
            if self.remove_zeros and self.coordinates is None:
                return to_sparse(input)
            else:
                return to_sparse_all(input, self.coordinates)
else:
raise ValueError(
"Unsupported type. Only TensorField and torch.Tensor are supported"
)
def __repr__(self):
return self.__class__.__name__ + "()"
class MinkowskiToDenseTensor(MinkowskiModuleBase):
r"""Converts a (differentiable) sparse tensor to a torch tensor.
The return type has the BxCxD1xD2x....xDN format.
Example::
>>> dense_tensor = torch.rand(3, 4, 11, 11, 11, 11) # BxCxD1xD2x....xDN
>>> dense_tensor.requires_grad = True
>>> # Since the shape is fixed, cache the coordinates for faster inference
>>> coordinates = dense_coordinates(dense_tensor.shape)
>>> network = nn.Sequential(
>>> # Add layers that can be applied on a regular pytorch tensor
>>> nn.ReLU(),
>>> MinkowskiToSparseTensor(coordinates=coordinates),
>>> MinkowskiConvolution(4, 5, stride=2, kernel_size=3, dimension=4),
>>> MinkowskiBatchNorm(5),
>>> MinkowskiReLU(),
>>> MinkowskiConvolutionTranspose(5, 6, stride=2, kernel_size=3, dimension=4),
>>> MinkowskiToDenseTensor(
>>> dense_tensor.shape
>>> ), # must have the same tensor stride.
>>> )
>>> for i in range(5):
>>> print(f"Iteration: {i}")
>>> output = network(dense_tensor) # returns a regular pytorch tensor
>>> output.sum().backward()
"""
def __init__(self, shape: torch.Size = None):
MinkowskiModuleBase.__init__(self)
self.shape = shape
def forward(self, input: SparseTensor):
# dense tensor to sparse tensor conversion
dense_tensor, _, _ = input.dense(shape=self.shape)
return dense_tensor
def __repr__(self):
return self.__class__.__name__ + "()"
class MinkowskiToFeature(MinkowskiModuleBase):
r"""
    Extract features from a sparse tensor and return a pytorch tensor.
    Can be used to make network construction simpler.
Example::
>>> net = nn.Sequential(MinkowskiConvolution(...), MinkowskiGlobalMaxPooling(...), MinkowskiToFeature(), nn.Linear(...))
>>> torch_tensor = net(sparse_tensor)
"""
def forward(self, x: SparseTensor):
assert isinstance(
x, (SparseTensor, TensorField)
), "Invalid input type for MinkowskiToFeature"
return x.F
class MinkowskiStackCat(torch.nn.Sequential):
def forward(self, x):
return cat([module(x) for module in self])
class MinkowskiStackSum(torch.nn.Sequential):
def forward(self, x):
return _sum([module(x) for module in self])
class MinkowskiStackMean(torch.nn.Sequential):
def forward(self, x):
return mean([module(x) for module in self])
class MinkowskiStackVar(torch.nn.Sequential):
def forward(self, x):
return var([module(x) for module in self])
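# Hedged sketch for the MinkowskiStack* containers (layer arguments are
# illustrative): the children run in parallel on the same input and their
# outputs, which must share one coordinate map, are reduced.
#
#   import MinkowskiEngine as ME
#   branches = ME.MinkowskiStackCat(
#       ME.MinkowskiConvolution(8, 16, kernel_size=3, dimension=3),
#       ME.MinkowskiConvolution(8, 16, kernel_size=1, dimension=3),
#   )
#   # branches(x).F has 32 channels: the two 16-channel outputs concatenated.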
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiOps.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
from typing import Union
import torch
from torch.autograd import Function
import MinkowskiEngineBackend._C as _C
from MinkowskiEngineBackend._C import CoordinateMapKey, PoolingMode
from MinkowskiSparseTensor import SparseTensor, _get_coordinate_map_key
from MinkowskiCoordinateManager import CoordinateManager
from MinkowskiKernelGenerator import KernelGenerator, save_ctx
from MinkowskiCommon import (
MinkowskiModuleBase,
get_minkowski_function,
)
import MinkowskiEngine as ME
class MinkowskiLocalPoolingFunction(Function):
@staticmethod
def forward(
ctx,
input_features: torch.Tensor,
pooling_mode: PoolingMode,
kernel_generator: KernelGenerator,
in_coordinate_map_key: CoordinateMapKey,
out_coordinate_map_key: CoordinateMapKey = None,
coordinate_manager: CoordinateManager = None,
):
if out_coordinate_map_key is None:
out_coordinate_map_key = CoordinateMapKey(
in_coordinate_map_key.get_coordinate_size()
)
input_features = input_features.contiguous()
ctx.input_features = input_features
ctx = save_ctx(
ctx,
kernel_generator,
in_coordinate_map_key,
out_coordinate_map_key,
coordinate_manager,
)
ctx.pooling_mode = pooling_mode
fw_fn = get_minkowski_function("LocalPoolingForward", input_features)
out_feat, num_nonzero = fw_fn(
ctx.input_features,
kernel_generator.kernel_size,
kernel_generator.kernel_stride,
kernel_generator.kernel_dilation,
kernel_generator.region_type,
kernel_generator.region_offsets,
pooling_mode,
ctx.in_coordinate_map_key,
ctx.out_coordinate_map_key,
ctx.coordinate_manager._manager,
)
ctx.num_nonzero = num_nonzero
return out_feat
@staticmethod
def backward(ctx, grad_out_feat):
grad_out_feat = grad_out_feat.contiguous()
bw_fn = get_minkowski_function("LocalPoolingBackward", grad_out_feat)
grad_in_feat = bw_fn(
ctx.input_features,
grad_out_feat,
ctx.num_nonzero,
ctx.kernel_generator.kernel_size,
ctx.kernel_generator.kernel_stride,
ctx.kernel_generator.kernel_dilation,
ctx.kernel_generator.region_type,
ctx.kernel_generator.region_offsets,
ctx.pooling_mode,
ctx.in_coordinate_map_key,
ctx.out_coordinate_map_key,
ctx.coordinate_manager._manager,
)
return (
grad_in_feat,
None,
None,
None,
None,
None,
)
class MinkowskiPoolingBase(MinkowskiModuleBase):
__slots__ = (
"is_transpose",
"kernel_generator",
"pooling_mode",
"dimension",
"pooling",
)
def __init__(
self,
kernel_size,
stride=1,
dilation=1,
kernel_generator=None,
is_transpose=False,
pooling_mode=PoolingMode.LOCAL_AVG_POOLING,
dimension=-1,
):
super(MinkowskiPoolingBase, self).__init__()
assert (
dimension > 0
), f"Invalid dimension. Please provide a valid dimension argument. dimension={dimension}"
if kernel_generator is None:
kernel_generator = KernelGenerator(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
dimension=dimension,
)
self.is_transpose = is_transpose
self.kernel_generator = kernel_generator
self.pooling_mode = pooling_mode
self.dimension = dimension
self.pooling = MinkowskiLocalPoolingFunction()
def forward(
self,
input: SparseTensor,
coordinates: Union[torch.IntTensor, CoordinateMapKey, SparseTensor] = None,
):
r"""
:attr:`input` (`MinkowskiEngine.SparseTensor`): Input sparse tensor to apply a
convolution on.
        :attr:`coordinates` ((`torch.IntTensor`, `MinkowskiEngine.CoordinateMapKey`,
`MinkowskiEngine.SparseTensor`), optional): If provided, generate
results on the provided coordinates. None by default.
"""
assert isinstance(input, SparseTensor)
assert input.D == self.dimension
# Get a new coordinate map key or extract one from the coordinates
out_coordinate_map_key = _get_coordinate_map_key(input, coordinates)
outfeat = self.pooling.apply(
input.F,
self.pooling_mode,
self.kernel_generator,
input.coordinate_map_key,
out_coordinate_map_key,
input._manager,
)
return SparseTensor(
outfeat,
coordinate_map_key=out_coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
def __repr__(self):
s = "(kernel_size={}, stride={}, dilation={})".format(
self.kernel_generator.kernel_size,
self.kernel_generator.kernel_stride,
self.kernel_generator.kernel_dilation,
)
return self.__class__.__name__ + s
class MinkowskiAvgPooling(MinkowskiPoolingBase):
r"""Average input features within a kernel.
.. math::
\mathbf{y}_\mathbf{u} = \frac{1}{|\mathcal{N}^D(\mathbf{u},
\mathcal{C}^\text{in})|} \sum_{\mathbf{i} \in \mathcal{N}^D(\mathbf{u},
\mathcal{C}^\text{in})} \mathbf{x}_{\mathbf{u} + \mathbf{i}}
\; \text{for} \; \mathbf{u} \in \mathcal{C}^\text{out}
For each output :math:`\mathbf{u}` in :math:`\mathcal{C}^\text{out}`,
average input features.
.. note::
An average layer first computes the cardinality of the input features,
    the number of input features for each output, and divides the sum of the
input features by the cardinality. For a dense tensor, the cardinality
is a constant, the volume of a kernel. However, for a sparse tensor, the
cardinality varies depending on the number of input features per output.
Thus, the average pooling for a sparse tensor is not equivalent to the
conventional average pooling layer for a dense tensor. Please refer to
the :attr:`MinkowskiSumPooling` for the equivalent layer.
.. note::
The engine will generate the in-out mapping corresponding to a
    pooling function faster if the kernel size is equal to the stride
    size, e.g. `kernel_size = [2, 1], stride = [2, 1]`.
If you use a U-network architecture, use the transposed version of
the same function for up-sampling. e.g. `pool =
MinkowskiSumPooling(kernel_size=2, stride=2, D=D)`, then use the
`unpool = MinkowskiPoolingTranspose(kernel_size=2, stride=2, D=D)`.
"""
def __init__(
self,
kernel_size=-1,
stride=1,
dilation=1,
kernel_generator=None,
dimension=None,
):
r"""a high-dimensional sparse average pooling layer.
Args:
:attr:`kernel_size` (int, optional): the size of the kernel in the
        output tensor. If not provided, :attr:`region_type` should be
:attr:`RegionType.CUSTOM` and :attr:`region_offset` should be a 2D
matrix with size :math:`N\times D` such that it lists all :math:`N`
offsets in D-dimension.
:attr:`stride` (int, or list, optional): stride size of the
convolution layer. If non-identity is used, the output coordinates
will be at least :attr:`stride` :math:`\times` :attr:`tensor_stride`
away. When a list is given, the length must be D; each element will
be used for stride size for the specific axis.
:attr:`dilation` (int, or list, optional): dilation size for the
convolution kernel. When a list is given, the length must be D and
each element is an axis specific dilation. All elements must be > 0.
:attr:`kernel_generator` (:attr:`MinkowskiEngine.KernelGenerator`,
optional): define custom kernel shape.
:attr:`dimension` (int): the spatial dimension of the space where
all the inputs and the network are defined. For example, images are
in a 2D space, meshes and 3D shapes are in a 3D space.
.. warning::
Custom kernel shapes are not supported when kernel_size == stride.
"""
is_transpose = False
MinkowskiPoolingBase.__init__(
self,
kernel_size,
stride,
dilation,
kernel_generator,
is_transpose,
pooling_mode=PoolingMode.LOCAL_AVG_POOLING,
dimension=dimension,
)
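# Hedged numeric sketch of the cardinality note above (1D, kernel_size=2,
# stride=2): an output covering two occupied inputs [2.0, 4.0] averages to 3.0,
# while one covering a single occupied input [5.0] stays 5.0 -- the divisor is
# the per-output count of occupied inputs, not the kernel volume.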
class MinkowskiSumPooling(MinkowskiPoolingBase):
r"""Sum all input features within a kernel.
.. math::
\mathbf{y}_\mathbf{u} = \sum_{\mathbf{i} \in \mathcal{N}^D(\mathbf{u},
\mathcal{C}^\text{in})} \mathbf{x}_{\mathbf{u} + \mathbf{i}}
\; \text{for} \; \mathbf{u} \in \mathcal{C}^\text{out}
For each output :math:`\mathbf{u}` in :math:`\mathcal{C}^\text{out}`,
    sum the input features.
.. note::
An average layer first computes the cardinality of the input features,
    the number of input features for each output, and divides the sum of the
input features by the cardinality. For a dense tensor, the cardinality
is a constant, the volume of a kernel. However, for a sparse tensor, the
cardinality varies depending on the number of input features per output.
Thus, averaging the input features with the cardinality may not be
equivalent to the conventional average pooling for a dense tensor.
This layer provides an alternative that does not divide the sum by the
cardinality.
.. note::
The engine will generate the in-out mapping corresponding to a
    pooling function faster if the kernel size is equal to the stride
    size, e.g. `kernel_size = [2, 1], stride = [2, 1]`.
If you use a U-network architecture, use the transposed version of
the same function for up-sampling. e.g. `pool =
MinkowskiSumPooling(kernel_size=2, stride=2, D=D)`, then use the
`unpool = MinkowskiPoolingTranspose(kernel_size=2, stride=2, D=D)`.
"""
def __init__(
self, kernel_size, stride=1, dilation=1, kernel_generator=None, dimension=None
):
r"""a high-dimensional sum pooling layer
Args:
:attr:`kernel_size` (int, optional): the size of the kernel in the
        output tensor. If not provided, :attr:`region_type` should be
:attr:`RegionType.CUSTOM` and :attr:`region_offset` should be a 2D
matrix with size :math:`N\times D` such that it lists all :math:`N`
offsets in D-dimension.
:attr:`stride` (int, or list, optional): stride size of the
convolution layer. If non-identity is used, the output coordinates
will be at least :attr:`stride` :math:`\times` :attr:`tensor_stride`
away. When a list is given, the length must be D; each element will
be used for stride size for the specific axis.
:attr:`dilation` (int, or list, optional): dilation size for the
convolution kernel. When a list is given, the length must be D and
each element is an axis specific dilation. All elements must be > 0.
:attr:`kernel_generator` (:attr:`MinkowskiEngine.KernelGenerator`,
optional): define custom kernel shape.
:attr:`dimension` (int): the spatial dimension of the space where
all the inputs and the network are defined. For example, images are
in a 2D space, meshes and 3D shapes are in a 3D space.
.. warning::
Custom kernel shapes are not supported when kernel_size == stride.
"""
is_transpose = False
MinkowskiPoolingBase.__init__(
self,
kernel_size,
stride,
dilation,
kernel_generator,
is_transpose,
pooling_mode=PoolingMode.LOCAL_SUM_POOLING,
dimension=dimension,
)
class MinkowskiMaxPooling(MinkowskiPoolingBase):
r"""A max pooling layer for a sparse tensor.
.. math::
y^c_\mathbf{u} = \max_{\mathbf{i} \in \mathcal{N}^D(\mathbf{u},
\mathcal{C}^\text{in})} x^c_{\mathbf{u} + \mathbf{i}} \; \text{for} \;
\mathbf{u} \in \mathcal{C}^\text{out}
where :math:`y^c_\mathbf{u}` is a feature at channel :math:`c` and a
coordinate :math:`\mathbf{u}`.
.. note::
The engine will generate the in-out mapping corresponding to a
    pooling function faster if the kernel size is equal to the stride
    size, e.g. `kernel_size = [2, 1], stride = [2, 1]`.
If you use a U-network architecture, use the transposed version of
the same function for up-sampling. e.g. `pool =
MinkowskiSumPooling(kernel_size=2, stride=2, D=D)`, then use the
`unpool = MinkowskiPoolingTranspose(kernel_size=2, stride=2, D=D)`.
"""
def __init__(
self, kernel_size, stride=1, dilation=1, kernel_generator=None, dimension=None
):
r"""a high-dimensional max pooling layer for sparse tensors.
Args:
:attr:`kernel_size` (int, optional): the size of the kernel in the
        output tensor. If not provided, :attr:`region_type` should be
:attr:`RegionType.CUSTOM` and :attr:`region_offset` should be a 2D
matrix with size :math:`N\times D` such that it lists all :math:`N`
offsets in D-dimension.
:attr:`stride` (int, or list, optional): stride size of the
convolution layer. If non-identity is used, the output coordinates
will be at least :attr:`stride` :math:`\times` :attr:`tensor_stride`
away. When a list is given, the length must be D; each element will
be used for stride size for the specific axis.
:attr:`dilation` (int, or list, optional): dilation size for the
convolution kernel. When a list is given, the length must be D and
each element is an axis specific dilation. All elements must be > 0.
:attr:`kernel_generator` (:attr:`MinkowskiEngine.KernelGenerator`,
optional): define custom kernel shape.
:attr:`dimension` (int): the spatial dimension of the space where
all the inputs and the network are defined. For example, images are
in a 2D space, meshes and 3D shapes are in a 3D space.
.. warning::
Custom kernel shapes are not supported when kernel_size == stride.
"""
MinkowskiPoolingBase.__init__(
self,
kernel_size,
stride,
dilation,
kernel_generator,
is_transpose=False,
pooling_mode=PoolingMode.LOCAL_MAX_POOLING,
dimension=dimension,
)
class MinkowskiLocalPoolingTransposeFunction(Function):
@staticmethod
def forward(
ctx,
input_features: torch.Tensor,
pooling_mode: PoolingMode,
kernel_generator: KernelGenerator,
in_coordinate_map_key: CoordinateMapKey,
out_coordinate_map_key: CoordinateMapKey = None,
coordinate_manager: CoordinateManager = None,
):
if out_coordinate_map_key is None:
out_coordinate_map_key = CoordinateMapKey(
in_coordinate_map_key.get_coordinate_size()
)
input_features = input_features.contiguous()
ctx.input_features = input_features
ctx = save_ctx(
ctx,
kernel_generator,
in_coordinate_map_key,
out_coordinate_map_key,
coordinate_manager,
)
ctx.pooling_mode = pooling_mode
fw_fn = get_minkowski_function("LocalPoolingTransposeForward", input_features)
out_feat, num_nonzero = fw_fn(
ctx.input_features,
kernel_generator.kernel_size,
kernel_generator.kernel_stride,
kernel_generator.kernel_dilation,
kernel_generator.region_type,
kernel_generator.region_offsets,
kernel_generator.expand_coordinates,
pooling_mode,
ctx.in_coordinate_map_key,
ctx.out_coordinate_map_key,
ctx.coordinate_manager._manager,
)
ctx.num_nonzero = num_nonzero
return out_feat
@staticmethod
def backward(ctx, grad_out_feat):
grad_out_feat = grad_out_feat.contiguous()
bw_fn = get_minkowski_function("LocalPoolingTransposeBackward", grad_out_feat)
grad_in_feat = bw_fn(
ctx.input_features,
grad_out_feat,
ctx.num_nonzero,
ctx.kernel_generator.kernel_size,
ctx.kernel_generator.kernel_stride,
ctx.kernel_generator.kernel_dilation,
ctx.kernel_generator.region_type,
ctx.kernel_generator.region_offsets,
ctx.pooling_mode,
ctx.in_coordinate_map_key,
ctx.out_coordinate_map_key,
ctx.coordinate_manager._manager,
)
return (
grad_in_feat,
None,
None,
None,
None,
None,
)
class MinkowskiPoolingTranspose(MinkowskiPoolingBase):
r"""A pooling transpose layer for a sparse tensor.
    Unpool the features and divide them by the number of nonzero elements that
    contributed.
"""
def __init__(
self,
kernel_size,
stride,
dilation=1,
kernel_generator=None,
expand_coordinates=False,
dimension=None,
):
r"""a high-dimensional unpooling layer for sparse tensors.
Args:
:attr:`kernel_size` (int, optional): the size of the kernel in the
        output tensor. If not provided, :attr:`region_type` should be
:attr:`RegionType.CUSTOM` and :attr:`region_offset` should be a 2D
matrix with size :math:`N\times D` such that it lists all :math:`N`
offsets in D-dimension.
:attr:`stride` (int, or list, optional): stride size of the
convolution layer. If non-identity is used, the output coordinates
will be at least :attr:`stride` :math:`\times` :attr:`tensor_stride`
away. When a list is given, the length must be D; each element will
be used for stride size for the specific axis.
:attr:`dilation` (int, or list, optional): dilation size for the
convolution kernel. When a list is given, the length must be D and
each element is an axis specific dilation. All elements must be > 0.
:attr:`kernel_generator` (:attr:`MinkowskiEngine.KernelGenerator`,
optional): define custom kernel shape.
:attr:`expand_coordinates` (bool, optional): Force generation of
new coordinates. When True, the output coordinates will be the
outer product of the kernel shape and the input coordinates.
`False` by default.
:attr:`dimension` (int): the spatial dimension of the space where
all the inputs and the network are defined. For example, images are
in a 2D space, meshes and 3D shapes are in a 3D space.
"""
is_transpose = True
if kernel_generator is None:
kernel_generator = KernelGenerator(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
expand_coordinates=expand_coordinates,
dimension=dimension,
)
MinkowskiPoolingBase.__init__(
self,
kernel_size,
stride,
dilation,
kernel_generator,
is_transpose,
dimension=dimension,
)
self.pooling = MinkowskiLocalPoolingTransposeFunction()
class MinkowskiGlobalPoolingFunction(Function):
@staticmethod
def forward(
ctx,
input_features: torch.Tensor,
pooling_mode: PoolingMode,
in_coordinate_map_key: CoordinateMapKey,
out_coordinate_map_key: CoordinateMapKey = None,
coordinate_manager: CoordinateManager = None,
):
if out_coordinate_map_key is None:
out_coordinate_map_key = CoordinateMapKey(
in_coordinate_map_key.get_coordinate_size()
)
input_features = input_features.contiguous()
ctx.input_features = input_features
ctx.in_coords_key = in_coordinate_map_key
ctx.out_coords_key = out_coordinate_map_key
ctx.coordinate_manager = coordinate_manager
ctx.pooling_mode = pooling_mode
fw_fn = get_minkowski_function("GlobalPoolingForward", input_features)
out_feat, num_nonzero = fw_fn(
input_features,
pooling_mode,
ctx.in_coords_key,
ctx.out_coords_key,
ctx.coordinate_manager._manager,
)
ctx.num_nonzero = num_nonzero
return out_feat
@staticmethod
def backward(ctx, grad_out_feat):
grad_out_feat = grad_out_feat.contiguous()
bw_fn = get_minkowski_function("GlobalPoolingBackward", grad_out_feat)
grad_in_feat = bw_fn(
ctx.input_features,
grad_out_feat,
ctx.num_nonzero,
ctx.pooling_mode,
ctx.in_coords_key,
ctx.out_coords_key,
ctx.coordinate_manager._manager,
)
        # one gradient per forward input: features, mode, in key, out key, manager
        return grad_in_feat, None, None, None, None
class MinkowskiGlobalPooling(MinkowskiModuleBase):
r"""Pool all input features to one output."""
def __init__(
self, mode: PoolingMode = PoolingMode.GLOBAL_AVG_POOLING_PYTORCH_INDEX
):
r"""Reduces sparse coords into points at origin, i.e. reduce each point
cloud into a point at the origin, returning batch_size number of points
        [[0, 0, ..., 0], [1, 0, ..., 0], [2, 0, ..., 0]] where the first element
        of each coordinate is the batch index. The reduction function should be
provided as the mode.
Args:
:attr:`mode` (PoolingMode): Reduction function mode. E.g.
`PoolingMode.GLOBAL_SUM_POOLING_DEFAULT`
"""
super(MinkowskiGlobalPooling, self).__init__()
assert isinstance(
mode, PoolingMode
), f"Mode must be an instance of PoolingMode. mode={mode}"
self.pooling_mode = mode
self.pooling = MinkowskiGlobalPoolingFunction()
def forward(
self,
input: SparseTensor,
coordinates: Union[torch.IntTensor, CoordinateMapKey, SparseTensor] = None,
):
# Get a new coordinate map key or extract one from the coordinates
out_coordinate_map_key = _get_coordinate_map_key(input, coordinates)
output = self.pooling.apply(
input.F,
self.pooling_mode,
input.coordinate_map_key,
out_coordinate_map_key,
input._manager,
)
return SparseTensor(
output,
coordinate_map_key=out_coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
def __repr__(self):
return self.__class__.__name__ + f"(mode={str(self.pooling_mode)})"
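# Hedged sketch (shapes illustrative): global pooling returns one feature row
# per batch item, regardless of how many points each item contains.
#
#   import torch
#   import MinkowskiEngine as ME
#   coords = torch.IntTensor([[0, 0, 0], [0, 1, 2], [1, 3, 3]])  # batches 0, 1
#   x = ME.SparseTensor(torch.rand(3, 4), coords)
#   y = ME.MinkowskiGlobalPooling()(x)
#   y.F.shape  # torch.Size([2, 4])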
class MinkowskiGlobalSumPooling(MinkowskiGlobalPooling):
def __init__(self, mode=PoolingMode.GLOBAL_SUM_POOLING_PYTORCH_INDEX):
r"""Reduces sparse coords into points at origin, i.e. reduce each point
cloud into a point at the origin, returning batch_size number of points
        [[0, 0, ..., 0], [1, 0, ..., 0], [2, 0, ..., 0]] where the first element
        of each coordinate is the batch index.
"""
MinkowskiGlobalPooling.__init__(self, mode=mode)
class MinkowskiGlobalAvgPooling(MinkowskiGlobalPooling):
def __init__(self, mode=PoolingMode.GLOBAL_AVG_POOLING_PYTORCH_INDEX):
r"""Reduces sparse coords into points at origin, i.e. reduce each point
cloud into a point at the origin, returning batch_size number of points
        [[0, 0, ..., 0], [1, 0, ..., 0], [2, 0, ..., 0]] where the first element
        of each coordinate is the batch index.
"""
MinkowskiGlobalPooling.__init__(self, mode=mode)
class MinkowskiGlobalMaxPooling(MinkowskiGlobalPooling):
r"""Max pool all input features to one output feature at the origin.
.. math::
\mathbf{y} = \max_{\mathbf{i} \in
\mathcal{C}^\text{in}} \mathbf{x}_{\mathbf{i}}
"""
def __init__(self, mode=PoolingMode.GLOBAL_MAX_POOLING_PYTORCH_INDEX):
r"""Reduces sparse coords into points at origin, i.e. reduce each point
cloud into a point at the origin, returning batch_size number of points
        [[0, 0, ..., 0], [1, 0, ..., 0], [2, 0, ..., 0]] where the first element
        of each coordinate is the batch index.
"""
MinkowskiGlobalPooling.__init__(self, mode=mode)
def forward(
self,
input,
coordinates: Union[torch.IntTensor, CoordinateMapKey, SparseTensor] = None,
):
# Get a new coordinate map key or extract one from the coordinates
if isinstance(input, ME.TensorField):
in_coordinate_map_key = input.coordinate_field_map_key
out_coordinate_map_key = CoordinateMapKey(
input.coordinate_field_map_key.get_coordinate_size()
)
else:
in_coordinate_map_key = input.coordinate_map_key
out_coordinate_map_key = _get_coordinate_map_key(input, coordinates)
output = self.pooling.apply(
input.F,
self.pooling_mode,
in_coordinate_map_key,
out_coordinate_map_key,
input._manager,
)
return SparseTensor(
output,
coordinate_map_key=out_coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
class MinkowskiDirectMaxPoolingFunction(Function):
@staticmethod
def forward(
ctx,
in_map: torch.Tensor,
out_map: torch.Tensor,
in_feat: torch.Tensor,
out_nrows: int,
is_sorted: bool = False,
):
out_feat, max_mask = _C.direct_max_pool_fw(
in_map, out_map, in_feat, out_nrows, is_sorted
)
ctx.in_nrows = in_feat.size(0)
ctx.save_for_backward(max_mask)
return out_feat
@staticmethod
def backward(ctx, grad_out_feat):
grad_out_feat = grad_out_feat.contiguous()
max_mask = ctx.saved_tensors[0]
grad = _C.direct_max_pool_bw(grad_out_feat, max_mask, ctx.in_nrows)
return (
None,
None,
grad,
None,
None,
)
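# Hedged sketch for MinkowskiDirectMaxPoolingFunction (map values illustrative):
# in_map[i] feeds output row out_map[i]; the forward keeps the channelwise max
# per output row and the backward routes gradients only to the argmax inputs.
#
#   import torch
#   in_feat = torch.rand(5, 4, requires_grad=True)
#   in_map = torch.arange(5)
#   out_map = torch.tensor([0, 0, 1, 1, 1])
#   out = MinkowskiDirectMaxPoolingFunction.apply(in_map, out_map, in_feat, 2)
#   out.sum().backward()  # in_feat.grad is nonzero only at the max rows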
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiPooling.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
from torch.nn import Module
from torch.autograd import Function
from MinkowskiEngineBackend._C import CoordinateMapKey
from MinkowskiSparseTensor import SparseTensor
from MinkowskiCommon import (
MinkowskiModuleBase,
get_minkowski_function,
)
from MinkowskiCoordinateManager import CoordinateManager
class MinkowskiPruningFunction(Function):
@staticmethod
def forward(
ctx,
in_feat: torch.Tensor,
mask: torch.Tensor,
in_coords_key: CoordinateMapKey,
out_coords_key: CoordinateMapKey = None,
coords_manager: CoordinateManager = None,
):
ctx.in_coords_key = in_coords_key
ctx.out_coords_key = out_coords_key
ctx.coords_manager = coords_manager
in_feat = in_feat.contiguous()
fw_fn = get_minkowski_function("PruningForward", in_feat)
return fw_fn(
in_feat,
mask,
ctx.in_coords_key,
ctx.out_coords_key,
ctx.coords_manager._manager,
)
@staticmethod
def backward(ctx, grad_out_feat: torch.Tensor):
grad_out_feat = grad_out_feat.contiguous()
bw_fn = get_minkowski_function("PruningBackward", grad_out_feat)
grad_in_feat = bw_fn(
grad_out_feat,
ctx.in_coords_key,
ctx.out_coords_key,
ctx.coords_manager._manager,
)
return grad_in_feat, None, None, None, None
class MinkowskiPruning(MinkowskiModuleBase):
r"""Remove specified coordinates from a :attr:`MinkowskiEngine.SparseTensor`.
"""
def __init__(self):
super(MinkowskiPruning, self).__init__()
self.pruning = MinkowskiPruningFunction()
def forward(self, input: SparseTensor, mask: torch.Tensor):
r"""
Args:
        :attr:`input` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor
to remove coordinates from.
:attr:`mask` (:attr:`torch.BoolTensor`): mask vector that specifies
which one to keep. Coordinates with False will be removed.
        Returns:
            A :attr:`MinkowskiEngine.SparseTensor` whose coordinates and
            features are the rows of the input selected by `mask == True`.
Example::
>>> # Define inputs
        >>> input = SparseTensor(feats, coordinates=coords)
>>> # Any boolean tensor can be used as the filter
>>> mask = torch.rand(feats.size(0)) < 0.5
>>> pruning = MinkowskiPruning()
>>> output = pruning(input, mask)
"""
assert isinstance(input, SparseTensor)
out_coords_key = CoordinateMapKey(
input.coordinate_map_key.get_coordinate_size()
)
output = self.pruning.apply(
input.F, mask, input.coordinate_map_key, out_coords_key, input._manager
)
return SparseTensor(
output, coordinate_map_key=out_coords_key, coordinate_manager=input._manager
)
def __repr__(self):
return self.__class__.__name__ + "()"
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiPruning.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import torch.nn as nn
from torch.nn import Module
from torch.autograd import Function
from MinkowskiSparseTensor import SparseTensor
from MinkowskiTensorField import TensorField
from MinkowskiPooling import MinkowskiGlobalAvgPooling
from MinkowskiBroadcast import (
MinkowskiBroadcastAddition,
MinkowskiBroadcastMultiplication,
)
from MinkowskiEngineBackend._C import (
CoordinateMapKey,
BroadcastMode,
PoolingMode,
)
from MinkowskiCoordinateManager import CoordinateManager
from MinkowskiCommon import (
MinkowskiModuleBase,
get_minkowski_function,
)
class MinkowskiBatchNorm(Module):
r"""A batch normalization layer for a sparse tensor.
See the pytorch :attr:`torch.nn.BatchNorm1d` for more details.
"""
def __init__(
self,
num_features,
eps=1e-5,
momentum=0.1,
affine=True,
track_running_stats=True,
):
super(MinkowskiBatchNorm, self).__init__()
self.bn = torch.nn.BatchNorm1d(
num_features,
eps=eps,
momentum=momentum,
affine=affine,
track_running_stats=track_running_stats,
)
def forward(self, input):
output = self.bn(input.F)
if isinstance(input, TensorField):
return TensorField(
output,
coordinate_field_map_key=input.coordinate_field_map_key,
coordinate_manager=input.coordinate_manager,
quantization_mode=input.quantization_mode,
)
else:
return SparseTensor(
output,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
def __repr__(self):
s = "({}, eps={}, momentum={}, affine={}, track_running_stats={})".format(
self.bn.num_features,
self.bn.eps,
self.bn.momentum,
self.bn.affine,
self.bn.track_running_stats,
)
return self.__class__.__name__ + s
class MinkowskiSyncBatchNorm(MinkowskiBatchNorm):
r"""A batch normalization layer with multi GPU synchronization."""
def __init__(
self,
num_features,
eps=1e-5,
momentum=0.1,
affine=True,
track_running_stats=True,
process_group=None,
):
Module.__init__(self)
self.bn = torch.nn.SyncBatchNorm(
num_features,
eps=eps,
momentum=momentum,
affine=affine,
track_running_stats=track_running_stats,
process_group=process_group,
)
def forward(self, input):
output = self.bn(input.F)
if isinstance(input, TensorField):
return TensorField(
output,
coordinate_field_map_key=input.coordinate_field_map_key,
coordinate_manager=input.coordinate_manager,
quantization_mode=input.quantization_mode,
)
else:
return SparseTensor(
output,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
@classmethod
def convert_sync_batchnorm(cls, module, process_group=None):
r"""Helper function to convert
:attr:`MinkowskiEngine.MinkowskiBatchNorm` layer in the model to
:attr:`MinkowskiEngine.MinkowskiSyncBatchNorm` layer.
Args:
module (nn.Module): containing module
process_group (optional): process group to scope synchronization,
default is the whole world
Returns:
The original module with the converted
:attr:`MinkowskiEngine.MinkowskiSyncBatchNorm` layer
Example::
>>> # Network with MinkowskiBatchNorm layer
>>> module = torch.nn.Sequential(
>>> MinkowskiLinear(20, 100),
>>> MinkowskiBatchNorm1d(100)
>>> ).cuda()
>>> # creating process group (optional)
>>> # process_ids is a list of int identifying rank ids.
>>> process_group = torch.distributed.new_group(process_ids)
>>> sync_bn_module = convert_sync_batchnorm(module, process_group)
"""
module_output = module
if isinstance(module, MinkowskiBatchNorm):
module_output = MinkowskiSyncBatchNorm(
module.bn.num_features,
module.bn.eps,
module.bn.momentum,
module.bn.affine,
module.bn.track_running_stats,
process_group,
)
if module.bn.affine:
with torch.no_grad():
module_output.bn.weight = module.bn.weight
module_output.bn.bias = module.bn.bias
module_output.bn.running_mean = module.bn.running_mean
module_output.bn.running_var = module.bn.running_var
module_output.bn.num_batches_tracked = module.bn.num_batches_tracked
if hasattr(module, "qconfig"):
module_output.bn.qconfig = module.bn.qconfig
for name, child in module.named_children():
module_output.add_module(
name, cls.convert_sync_batchnorm(child, process_group)
)
del module
return module_output
class MinkowskiInstanceNormFunction(Function):
@staticmethod
def forward(
ctx,
in_feat: torch.Tensor,
in_coords_key: CoordinateMapKey,
glob_coords_key: CoordinateMapKey = None,
coords_manager: CoordinateManager = None,
gpooling_mode=PoolingMode.GLOBAL_AVG_POOLING_KERNEL,
):
if glob_coords_key is None:
glob_coords_key = CoordinateMapKey(in_coords_key.get_coordinate_size())
gpool_avg_forward = get_minkowski_function("GlobalPoolingForward", in_feat)
broadcast_forward = get_minkowski_function("BroadcastForward", in_feat)
mean, num_nonzero = gpool_avg_forward(
in_feat,
gpooling_mode,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
# X - \mu
centered_feat = broadcast_forward(
in_feat,
-mean,
BroadcastMode.ELEMENTWISE_ADDITON,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
# Variance = 1/N \sum (X - \mu) ** 2
variance, num_nonzero = gpool_avg_forward(
centered_feat ** 2,
gpooling_mode,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
# norm_feat = (X - \mu) / \sigma
inv_std = 1 / (variance + 1e-8).sqrt()
norm_feat = broadcast_forward(
centered_feat,
inv_std,
BroadcastMode.ELEMENTWISE_MULTIPLICATION,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
ctx.saved_vars = (in_coords_key, glob_coords_key, coords_manager, gpooling_mode)
# For GPU tensors, must use save_for_backward.
ctx.save_for_backward(inv_std, norm_feat)
return norm_feat
@staticmethod
def backward(ctx, out_grad):
# https://kevinzakka.github.io/2016/09/14/batch_normalization/
in_coords_key, glob_coords_key, coords_manager, gpooling_mode = ctx.saved_vars
        # Restore the tensors saved in the forward pass
inv_std, norm_feat = ctx.saved_tensors
gpool_avg_forward = get_minkowski_function("GlobalPoolingForward", out_grad)
broadcast_forward = get_minkowski_function("BroadcastForward", out_grad)
# 1/N \sum dout
mean_dout, num_nonzero = gpool_avg_forward(
out_grad,
gpooling_mode,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
# 1/N \sum (dout * out)
mean_dout_feat, num_nonzero = gpool_avg_forward(
out_grad * norm_feat,
gpooling_mode,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
# out * 1/N \sum (dout * out)
feat_mean_dout_feat = broadcast_forward(
norm_feat,
mean_dout_feat,
BroadcastMode.ELEMENTWISE_MULTIPLICATION,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
unnorm_din = broadcast_forward(
out_grad - feat_mean_dout_feat,
-mean_dout,
BroadcastMode.ELEMENTWISE_ADDITON,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
norm_din = broadcast_forward(
unnorm_din,
inv_std,
BroadcastMode.ELEMENTWISE_MULTIPLICATION,
in_coords_key,
glob_coords_key,
coords_manager._manager,
)
return norm_din, None, None, None, None
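# Backward sketch (standard normalization gradient; cf. the link above): with
# y = (x - mu) * inv_std, the input gradient assembled by the calls above is
#     dx = (dy - mean(dy) - y * mean(dy * y)) * inv_std
# where the means are per-instance global averages, matching the pooling and
# broadcast stages in `backward`.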
class MinkowskiStableInstanceNorm(MinkowskiModuleBase):
def __init__(self, num_features):
Module.__init__(self)
self.num_features = num_features
self.eps = 1e-6
self.weight = nn.Parameter(torch.ones(1, num_features))
self.bias = nn.Parameter(torch.zeros(1, num_features))
self.mean_in = MinkowskiGlobalAvgPooling()
self.glob_sum = MinkowskiBroadcastAddition()
self.glob_sum2 = MinkowskiBroadcastAddition()
self.glob_mean = MinkowskiGlobalAvgPooling()
self.glob_times = MinkowskiBroadcastMultiplication()
self.reset_parameters()
def __repr__(self):
s = f"(nchannels={self.num_features})"
return self.__class__.__name__ + s
def reset_parameters(self):
self.weight.data.fill_(1)
self.bias.data.zero_()
def forward(self, x):
        neg_mean_in = self.mean_in(
            SparseTensor(
                -x.F,
                coordinate_map_key=x.coordinate_map_key,
                coordinate_manager=x.coordinate_manager,
            )
        )
centered_in = self.glob_sum(x, neg_mean_in)
temp = SparseTensor(
centered_in.F ** 2,
coordinate_map_key=centered_in.coordinate_map_key,
coordinate_manager=centered_in.coordinate_manager,
)
var_in = self.glob_mean(temp)
instd_in = SparseTensor(
1 / (var_in.F + self.eps).sqrt(),
coordinate_map_key=var_in.coordinate_map_key,
coordinate_manager=var_in.coordinate_manager,
)
x = self.glob_times(self.glob_sum2(x, neg_mean_in), instd_in)
return SparseTensor(
x.F * self.weight + self.bias,
coordinate_map_key=x.coordinate_map_key,
coordinate_manager=x.coordinate_manager,
)
class MinkowskiInstanceNorm(MinkowskiModuleBase):
r"""A instance normalization layer for a sparse tensor."""
def __init__(self, num_features):
r"""
Args:
num_features (int): the dimension of the input feautres.
mode (GlobalPoolingModel, optional): The internal global pooling computation mode.
"""
Module.__init__(self)
self.num_features = num_features
self.weight = nn.Parameter(torch.ones(1, num_features))
self.bias = nn.Parameter(torch.zeros(1, num_features))
self.reset_parameters()
self.inst_norm = MinkowskiInstanceNormFunction()
def __repr__(self):
s = f"(nchannels={self.num_features})"
return self.__class__.__name__ + s
def reset_parameters(self):
self.weight.data.fill_(1)
self.bias.data.zero_()
def forward(self, input: SparseTensor):
assert isinstance(input, SparseTensor)
output = self.inst_norm.apply(
input.F, input.coordinate_map_key, None, input.coordinate_manager
)
output = output * self.weight + self.bias
return SparseTensor(
output,
coordinate_map_key=input.coordinate_map_key,
coordinate_manager=input.coordinate_manager,
)
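# Hedged usage sketch (values illustrative): instance norm normalizes features
# per batch item across its points, then applies the learned affine transform.
#
#   import torch
#   import MinkowskiEngine as ME
#   coords = torch.IntTensor([[0, 0, 0], [0, 1, 1], [0, 2, 0]])
#   x = ME.SparseTensor(torch.rand(3, 16), coords)
#   norm = ME.MinkowskiInstanceNorm(16)
#   y = norm(x)  # y.F is roughly zero-mean, unit-variance per channel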
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiNormalization.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import numpy as np
from collections.abc import Sequence
from typing import Union, List, Tuple
import warnings
import torch
from MinkowskiCommon import convert_to_int_list, convert_to_int_tensor
import MinkowskiEngineBackend._C as _C
from MinkowskiEngineBackend._C import (
CoordinateMapKey,
GPUMemoryAllocatorType,
CoordinateMapType,
MinkowskiAlgorithm,
RegionType,
)
CPU_COUNT = os.cpu_count()
if "OMP_NUM_THREADS" in os.environ:
CPU_COUNT = int(os.environ["OMP_NUM_THREADS"])
_allocator_type = GPUMemoryAllocatorType.PYTORCH
_coordinate_map_type = (
CoordinateMapType.CUDA if _C.is_cuda_available() else CoordinateMapType.CPU
)
_minkowski_algorithm = MinkowskiAlgorithm.DEFAULT
def set_coordinate_map_type(coordinate_map_type: CoordinateMapType):
r"""Set the default coordinate map type.
    The MinkowskiEngine automatically sets the coordinate_map_type to CUDA if
    an NVIDIA GPU is available. Use this function to override the default.
    """
global _coordinate_map_type
_coordinate_map_type = coordinate_map_type
def set_gpu_allocator(backend: GPUMemoryAllocatorType):
r"""Set the GPU memory allocator
By default, the Minkowski Engine will use the pytorch memory pool to
allocate temporary GPU memory slots. This allows the pytorch backend to
effectively reuse the memory pool shared between the pytorch backend and
the Minkowski Engine. It tends to allow training with larger batch sizes
    given a fixed GPU memory budget. However, the pytorch memory manager tends
    to be slower than allocating GPU memory directly with raw CUDA calls.
By default, the Minkowski Engine uses
:attr:`ME.GPUMemoryAllocatorType.PYTORCH` for memory management.
Example::
>>> import MinkowskiEngine as ME
>>> # Set the GPU memory manager backend to raw CUDA calls
>>> ME.set_gpu_allocator(ME.GPUMemoryAllocatorType.CUDA)
>>> # Set the GPU memory manager backend to the pytorch c10 allocator
>>> ME.set_gpu_allocator(ME.GPUMemoryAllocatorType.PYTORCH)
"""
assert isinstance(
backend, GPUMemoryAllocatorType
), f"Input must be an instance of GPUMemoryAllocatorType not {backend}"
global _allocator_type
_allocator_type = backend
def set_memory_manager_backend(backend: GPUMemoryAllocatorType):
r"""Alias for set_gpu_allocator. Deprecated and will be removed."""
warnings.warn(
"`set_memory_manager_backend` has been deprecated. Use `set_gpu_allocator` instead."
)
set_gpu_allocator(backend)
class CoordsManager:
def __init__(*args, **kwargs):
raise DeprecationWarning(
"`CoordsManager` has been deprecated. Use `CoordinateManager` instead."
)
class CoordinateManager:
def __init__(
self,
D: int = 0,
num_threads: int = -1,
coordinate_map_type: CoordinateMapType = None,
allocator_type: GPUMemoryAllocatorType = None,
minkowski_algorithm: MinkowskiAlgorithm = None,
):
r"""
:attr:`D`: The order, or dimension of the coordinates.
"""
global _coordinate_map_type, _allocator_type, _minkowski_algorithm
if D < 1:
            raise ValueError(f"Invalid dimension D. D must be > 0, got D = {D}.")
if num_threads < 0:
num_threads = min(CPU_COUNT, 20)
if coordinate_map_type is None:
coordinate_map_type = _coordinate_map_type
if allocator_type is None:
allocator_type = _allocator_type
if minkowski_algorithm is None:
minkowski_algorithm = _minkowski_algorithm
postfix = ""
if coordinate_map_type == CoordinateMapType.CPU:
postfix = "CPU"
else:
assert (
_C.is_cuda_available()
), "The MinkowskiEngine was compiled with CPU_ONLY flag. If you want to compile with CUDA support, make sure `torch.cuda.is_available()` is True when you install MinkowskiEngine."
postfix = "GPU" + (
"_default" if allocator_type == GPUMemoryAllocatorType.CUDA else "_c10"
)
self.D = D
self.minkowski_algorithm = minkowski_algorithm
self._CoordinateManagerClass = getattr(_C, "CoordinateMapManager" + postfix)
self._manager = self._CoordinateManagerClass(minkowski_algorithm, num_threads)
    # TODO: insert without remap, unique_map, inverse_map
#
# def insert() -> CoordinateMapKey
def insert_and_map(
self,
coordinates: torch.Tensor,
tensor_stride: Union[int, Sequence, np.ndarray] = 1,
string_id: str = "",
) -> Tuple[CoordinateMapKey, Tuple[torch.IntTensor, torch.IntTensor]]:
r"""create a new coordinate map and returns (key, (map, inverse_map)).
:attr:`coordinates`: `torch.Tensor` (Int tensor. `CUDA` if
coordinate_map_type == `CoordinateMapType.GPU`) that defines the
coordinates.
:attr:`tensor_stride` (`list`): a list of `D` elements that defines the
tensor stride for the new order-`D + 1` sparse tensor.
Example::
>>> manager = CoordinateManager(D=1)
>>> coordinates = torch.IntTensor([[0, 0], [0, 0], [0, 1], [0, 2]])
>>> key, (unique_map, inverse_map) = manager.insert(coordinates, [1])
>>> print(key) # key is tensor_stride, string_id [1]:""
>>> torch.all(coordinates[unique_map] == manager.get_coordinates(key)) # True
>>> torch.all(coordinates == coordinates[unique_map][inverse_map]) # True
"""
tensor_stride = convert_to_int_list(tensor_stride, self.D)
return self._manager.insert_and_map(coordinates, tensor_stride, string_id)
def insert_field(
self,
coordinates: torch.Tensor,
tensor_stride: Sequence,
string_id: str = "",
) -> Tuple[CoordinateMapKey, Tuple[torch.IntTensor, torch.IntTensor]]:
r"""create a new coordinate map and returns
:attr:`coordinates`: `torch.FloatTensor` (`CUDA` if coordinate_map_type
== `CoordinateMapType.GPU`) that defines the coordinates.
:attr:`tensor_stride` (`list`): a list of `D` elements that defines the
tensor stride for the new order-`D + 1` sparse tensor.
Example::
>>> manager = CoordinateManager(D=1)
>>> coordinates = torch.FloatTensor([[0, 0.1], [0, 2.3], [0, 1.2], [0, 2.4]])
>>> key, (unique_map, inverse_map) = manager.insert(coordinates, [1])
>>> print(key) # key is tensor_stride, string_id [1]:""
>>> torch.all(coordinates[unique_map] == manager.get_coordinates(key)) # True
>>> torch.all(coordinates == coordinates[unique_map][inverse_map]) # True
"""
return self._manager.insert_field(coordinates, tensor_stride, string_id)
def field_to_sparse_insert_and_map(
self,
field_map_key: CoordinateMapKey,
sparse_tensor_stride: Union[int, Sequence, np.ndarray],
sparse_tensor_string_id: str = "",
) -> Tuple[CoordinateMapKey, Tuple[torch.IntTensor, torch.IntTensor]]:
r"""Create a sparse tensor coordinate map with the tensor stride.
:attr:`field_map_key` (`CoordinateMapKey`): field map that a new sparse
tensor will be created from.
        :attr:`sparse_tensor_stride` (`list`): a list of `D` elements that defines the
        tensor stride for the new order-`D + 1` sparse tensor.
        :attr:`sparse_tensor_string_id` (`str`): string id of the new sparse tensor coordinate map key.
Example::
>>> manager = CoordinateManager(D=1)
>>> coordinates = torch.FloatTensor([[0, 0.1], [0, 2.3], [0, 1.2], [0, 2.4]])
>>> key, (unique_map, inverse_map) = manager.insert(coordinates, [1])
"""
return self._manager.field_to_sparse_insert_and_map(
field_map_key, sparse_tensor_stride, sparse_tensor_string_id
)
def exists_field_to_sparse(
self, field_map_key: CoordinateMapKey, sparse_map_key: CoordinateMapKey
):
return self._manager.exists_field_to_sparse(field_map_key, sparse_map_key)
def field_to_sparse_keys(self, field_map_key: CoordinateMapKey):
return self._manager.field_to_sparse_keys(field_map_key.get_key())
def get_field_to_sparse_map(
self, field_map_key: CoordinateMapKey, sparse_map_key: CoordinateMapKey
):
return self._manager.get_field_to_sparse_map(field_map_key, sparse_map_key)
def field_to_sparse_map(
self, field_map_key: CoordinateMapKey, sparse_map_key: CoordinateMapKey
):
return self._manager.field_to_sparse_map(field_map_key, sparse_map_key)
def stride(
self,
coordinate_map_key: CoordinateMapKey,
stride: Union[int, Sequence, np.ndarray, torch.Tensor],
string_id: str = "",
) -> CoordinateMapKey:
r"""Generate a new coordinate map and returns the key.
:attr:`coordinate_map_key` (:attr:`MinkowskiEngine.CoordinateMapKey`):
input map to generate the strided map from.
:attr:`stride`: stride size.
"""
stride = convert_to_int_list(stride, self.D)
return self._manager.stride(coordinate_map_key, stride, string_id)
def origin(self) -> CoordinateMapKey:
return self._manager.origin()
def origin_field(self) -> CoordinateMapKey:
return self._manager.origin_field()
def size(self, coordinate_map_key: CoordinateMapKey) -> int:
return self._manager.size(coordinate_map_key)
# def transposed_stride(
# self,
# coords_key: CoordsKey,
# stride: Union[int, Sequence, np.ndarray, torch.Tensor],
# kernel_size: Union[int, Sequence, np.ndarray, torch.Tensor],
# dilation: Union[int, Sequence, np.ndarray, torch.Tensor],
# force_creation: bool = False,
# ):
# assert isinstance(coords_key, CoordsKey)
# stride = convert_to_int_list(stride, self.D)
# kernel_size = convert_to_int_list(kernel_size, self.D)
# dilation = convert_to_int_list(dilation, self.D)
# region_type = 0
# region_offset = torch.IntTensor()
# strided_key = CoordsKey(self.D)
# tensor_stride = coords_key.getTensorStride()
# strided_key.setTensorStride([int(t / s) for t, s in zip(tensor_stride, stride)])
# strided_key.setKey(
# self.CPPCoordsManager.createTransposedStridedRegionCoords(
# coords_key.getKey(),
# coords_key.getTensorStride(),
# stride,
# kernel_size,
# dilation,
# region_type,
# region_offset,
# force_creation,
# )
# )
# return strided_key
def _get_coordinate_map_key(self, key_or_tensor_strides) -> CoordinateMapKey:
r"""Helper function that retrieves the first coordinate map key for the given tensor stride."""
assert isinstance(key_or_tensor_strides, CoordinateMapKey) or isinstance(
key_or_tensor_strides, (Sequence, np.ndarray, torch.IntTensor, int)
), f"The input must be either a CoordinateMapKey or tensor_stride of type (int, list, tuple, array, Tensor). Invalid: {key_or_tensor_strides}"
if isinstance(key_or_tensor_strides, CoordinateMapKey):
# Do nothing and return the input
return key_or_tensor_strides
else:
tensor_strides = convert_to_int_list(key_or_tensor_strides, self.D)
keys = self._manager.get_coordinate_map_keys(tensor_strides)
assert len(keys) > 0
return keys[0]
def get_coordinates(self, coords_key_or_tensor_strides) -> torch.Tensor:
key = self._get_coordinate_map_key(coords_key_or_tensor_strides)
return self._manager.get_coordinates(key)
def get_coordinate_field(self, coords_key_or_tensor_strides) -> torch.Tensor:
key = self._get_coordinate_map_key(coords_key_or_tensor_strides)
return self._manager.get_coordinate_field(key)
def number_of_unique_batch_indices(self) -> int:
return self._manager.origin_map_size()
def get_unique_coordinate_map_key(
self, tensor_stride: Union[int, list]
) -> CoordinateMapKey:
"""
Returns a unique coordinate_map_key for a given tensor stride.
:attr:`tensor_stride` (`list`): a list of `D` elements that defines the
tensor stride for the new order-`D + 1` sparse tensor.
"""
return self._manager.get_random_string_id(tensor_stride, "")
def get_kernel_map(
self,
in_key: CoordinateMapKey,
out_key: CoordinateMapKey,
stride=1,
kernel_size=3,
dilation=1,
region_type=RegionType.HYPER_CUBE,
region_offset=None,
is_transpose=False,
is_pool=False,
) -> dict:
r"""Alias of :attr:`CoordinateManager.kernel_map`. Will be deprecated in the next version."""
warnings.warn(
"`get_kernel_map` will be deprecated. Please use `kernel_map` instead."
)
return self.kernel_map(
in_key,
out_key,
stride,
kernel_size,
dilation,
region_type,
region_offset,
is_transpose,
is_pool,
)
def kernel_map(
self,
in_key: CoordinateMapKey,
out_key: CoordinateMapKey,
stride=1,
kernel_size=3,
dilation=1,
region_type=RegionType.HYPER_CUBE,
region_offset=None,
is_transpose=False,
is_pool=False,
) -> dict:
r"""Get kernel in-out maps for the specified coords keys or tensor strides.
        Returns dict{kernel_index: in_out_tensor}, where in_out_tensor[0] holds the input row indices and in_out_tensor[1] holds the corresponding output row indices.
"""
# region type 1 iteration with kernel_size 1 is invalid
if isinstance(kernel_size, torch.Tensor):
assert (kernel_size > 0).all(), f"Invalid kernel size: {kernel_size}"
            if (kernel_size == 1).all():
region_type = RegionType.HYPER_CUBE
elif isinstance(kernel_size, int):
assert kernel_size > 0, f"Invalid kernel size: {kernel_size}"
if kernel_size == 1:
region_type = RegionType.HYPER_CUBE
in_key = self._get_coordinate_map_key(in_key)
out_key = self._get_coordinate_map_key(out_key)
if region_offset is None:
region_offset = torch.IntTensor()
kernel_map = self._manager.kernel_map(
in_key,
out_key,
convert_to_int_list(kernel_size, self.D), #
convert_to_int_list(stride, self.D), #
convert_to_int_list(dilation, self.D), #
region_type,
region_offset,
is_transpose,
is_pool,
)
return kernel_map
def origin_map(self, key: CoordinateMapKey):
return self._manager.origin_map(key)
def origin_field_map(self, key: CoordinateMapKey):
return self._manager.origin_field_map(key)
def stride_map(self, in_key: CoordinateMapKey, stride_key: CoordinateMapKey):
return self._manager.stride_map(in_key, stride_key)
def union_map(self, in_keys: list, out_key):
return self._manager.union_map(in_keys, out_key)
def interpolation_map_weight(
self,
key: CoordinateMapKey,
samples: torch.Tensor,
):
return self._manager.interpolation_map_weight(samples, key)
# def get_union_map(self, in_keys: List[CoordsKey], out_key: CoordsKey):
# r"""Generates a union of coordinate sets and returns the mapping from input sets to the new output coordinates.
# Args:
# :attr:`in_keys` (List[CoordsKey]): A list of coordinate keys to
# create a union on.
# :attr:`out_key` (CoordsKey): the placeholder for the coords key of
# the generated union coords hash map.
# Returns:
# :attr:`in_maps` (List[Tensor[int]]): A list of long tensors that contain mapping from inputs to the union output. Please see the example for more details.
# :attr:`out_maps` (List[Tensor[int]]): A list of long tensors that contain a mapping from input to the union output. Please see the example for more details.
# Example::
# >>> # Adding two sparse tensors: A, B
# >>> out_key = CoordsKey(coords_man.D)
# >>> ins, outs = coords_man.get_union_map((A.coords_key, B.coords_key), out_key)
# >>> N = coords_man.get_coords_size_by_coords_key(out_key)
# >>> out_F = torch.zeros((N, A.F.size(1)), dtype=A.dtype)
# >>> out_F[outs[0]] = A.F[ins[0]]
# >>> out_F[outs[1]] += B.F[ins[1]]
# """
# return self.CPPCoordsManager.getUnionMap(
# [key.CPPCoordsKey for key in in_keys], out_key.CPPCoordsKey
# )
# def permute_label(
# self, label, max_label, target_tensor_stride, label_tensor_stride=1
# ):
# if target_tensor_stride == label_tensor_stride:
# return label
# label_coords_key = self._get_coordinate_map_key(label_tensor_stride)
# target_coords_key = self._get_coordinate_map_key(target_tensor_stride)
# permutation = self.get_mapping_by_coords_key(
# label_coords_key, target_coords_key
# )
# nrows = self.get_coords_size_by_coords_key(target_coords_key)
# label = label.contiguous().numpy()
# permutation = permutation.numpy()
# counter = np.zeros((nrows, max_label), dtype="int32")
# np.add.at(counter, (permutation, label), 1)
# return torch.from_numpy(np.argmax(counter, 1))
def __repr__(self):
return (
self._CoordinateManagerClass.__name__
+ "(\n"
+ str(self._manager)
+ f"\talgorithm={self.minkowski_algorithm}\n )\n"
)
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiCoordinateManager.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import math
from typing import Union
import torch
from torch.autograd import Function
from torch.nn import Parameter
from MinkowskiEngineBackend._C import CoordinateMapKey, RegionType, ConvolutionMode
from MinkowskiSparseTensor import SparseTensor, _get_coordinate_map_key
from MinkowskiCommon import (
MinkowskiModuleBase,
get_minkowski_function,
)
from MinkowskiCoordinateManager import CoordinateManager
from MinkowskiKernelGenerator import KernelGenerator
class MinkowskiConvolutionFunction(Function):
@staticmethod
def forward(
ctx,
input_features: torch.Tensor,
kernel_weights: torch.Tensor,
kernel_generator: KernelGenerator,
convolution_mode: ConvolutionMode,
in_coordinate_map_key: CoordinateMapKey,
out_coordinate_map_key: CoordinateMapKey = None,
coordinate_manager: CoordinateManager = None,
):
if out_coordinate_map_key is None:
out_coordinate_map_key = CoordinateMapKey(
in_coordinate_map_key.get_coordinate_size()
)
input_features = input_features.contiguous()
ctx.input_features = input_features
ctx.kernel_weights = kernel_weights
ctx.misc = [
kernel_generator,
convolution_mode,
in_coordinate_map_key,
out_coordinate_map_key,
coordinate_manager,
]
fw_fn = get_minkowski_function("ConvolutionForward", input_features)
return fw_fn(
ctx.input_features,
kernel_weights,
kernel_generator.kernel_size,
kernel_generator.kernel_stride,
kernel_generator.kernel_dilation,
kernel_generator.region_type,
kernel_generator.region_offsets,
kernel_generator.expand_coordinates,
convolution_mode,
in_coordinate_map_key,
out_coordinate_map_key,
coordinate_manager._manager,
)
@staticmethod
def backward(ctx, grad_out_feat: torch.Tensor):
grad_out_feat = grad_out_feat.contiguous()
(
kernel_generator,
convolution_mode,
in_coordinate_map_key,
out_coordinate_map_key,
coordinate_manager,
) = ctx.misc
bw_fn = get_minkowski_function("ConvolutionBackward", grad_out_feat)
grad_in_feat, grad_kernel = bw_fn(
ctx.input_features,
grad_out_feat,
ctx.kernel_weights,
kernel_generator.kernel_size,
kernel_generator.kernel_stride,
kernel_generator.kernel_dilation,
kernel_generator.region_type,
kernel_generator.region_offsets,
convolution_mode,
in_coordinate_map_key,
out_coordinate_map_key,
coordinate_manager._manager,
)
return (
grad_in_feat,
grad_kernel,
None,
None,
None,
None,
None,
)
class MinkowskiConvolutionTransposeFunction(Function):
@staticmethod
def forward(
ctx,
input_features: torch.Tensor,
kernel_weights: torch.Tensor,
kernel_generator: KernelGenerator,
convolution_mode: ConvolutionMode,
in_coordinate_map_key: CoordinateMapKey,
out_coordinate_map_key: CoordinateMapKey = None,
coordinate_manager: CoordinateManager = None,
):
if out_coordinate_map_key is None:
out_coordinate_map_key = CoordinateMapKey(
in_coordinate_map_key.get_coordinate_size()
)
input_features = input_features.contiguous()
ctx.input_features = input_features
ctx.kernel_weights = kernel_weights
ctx.misc = (
kernel_generator,
convolution_mode,
in_coordinate_map_key,
out_coordinate_map_key,
coordinate_manager,
)
fw_fn = get_minkowski_function("ConvolutionTransposeForward", input_features)
return fw_fn(
ctx.input_features,
kernel_weights,
kernel_generator.kernel_size,
kernel_generator.kernel_stride,
kernel_generator.kernel_dilation,
kernel_generator.region_type,
kernel_generator.region_offsets,
kernel_generator.expand_coordinates,
convolution_mode,
in_coordinate_map_key,
out_coordinate_map_key,
coordinate_manager._manager,
)
@staticmethod
def backward(ctx, grad_out_feat: torch.Tensor):
grad_out_feat = grad_out_feat.contiguous()
(
kernel_generator,
convolution_mode,
in_coordinate_map_key,
out_coordinate_map_key,
coordinate_manager,
) = ctx.misc
bw_fn = get_minkowski_function("ConvolutionTransposeBackward", grad_out_feat)
grad_in_feat, grad_kernel = bw_fn(
ctx.input_features,
grad_out_feat,
ctx.kernel_weights,
kernel_generator.kernel_size,
kernel_generator.kernel_stride,
kernel_generator.kernel_dilation,
kernel_generator.region_type,
kernel_generator.region_offsets,
convolution_mode,
in_coordinate_map_key,
out_coordinate_map_key,
coordinate_manager._manager,
)
return (
grad_in_feat,
grad_kernel,
None,
None,
None,
None,
None,
)
class MinkowskiConvolutionBase(MinkowskiModuleBase):
__slots__ = (
"in_channels",
"out_channels",
"is_transpose",
"kernel_generator",
"dimension",
"use_mm",
"kernel",
"bias",
"conv",
)
def __init__(
self,
in_channels,
out_channels,
kernel_size=-1,
stride=1,
dilation=1,
bias=False,
kernel_generator=None,
is_transpose=False, # only the base class has this argument
expand_coordinates=False,
convolution_mode=ConvolutionMode.DEFAULT,
dimension=-1,
):
r"""
.. note::
When the kernel generator is provided, all kernel related arguments
(kernel_size, stride, dilation) will be ignored.
"""
super(MinkowskiConvolutionBase, self).__init__()
assert (
dimension > 0
), f"Invalid dimension. Please provide a valid dimension argument. dimension={dimension}"
if kernel_generator is None:
kernel_generator = KernelGenerator(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
expand_coordinates=expand_coordinates,
dimension=dimension,
)
else:
kernel_generator.expand_coordinates = expand_coordinates
self.is_transpose = is_transpose
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_generator = kernel_generator
self.dimension = dimension
self.use_mm = False # use matrix multiplication when kernel_volume is 1
Tensor = torch.FloatTensor
if (
self.kernel_generator.kernel_volume == 1
and self.kernel_generator.requires_strided_coordinates
):
kernel_shape = (self.in_channels, self.out_channels)
self.use_mm = True
else:
kernel_shape = (
self.kernel_generator.kernel_volume,
self.in_channels,
self.out_channels,
)
self.kernel = Parameter(Tensor(*kernel_shape))
self.bias = Parameter(Tensor(1, out_channels)) if bias else None
self.convolution_mode = convolution_mode
self.conv = (
MinkowskiConvolutionTransposeFunction()
if is_transpose
else MinkowskiConvolutionFunction()
)
def forward(
self,
input: SparseTensor,
coordinates: Union[torch.Tensor, CoordinateMapKey, SparseTensor] = None,
):
r"""
:attr:`input` (`MinkowskiEngine.SparseTensor`): Input sparse tensor to apply a
convolution on.
:attr:`coordinates` ((`torch.IntTensor`, `MinkowskiEngine.CoordinateMapKey`,
`MinkowskiEngine.SparseTensor`), optional): If provided, generate
results on the provided coordinates. None by default.
"""
assert isinstance(input, SparseTensor)
assert input.D == self.dimension
if self.use_mm:
# If the kernel_size == 1, the convolution is simply a matrix
# multiplication
out_coordinate_map_key = input.coordinate_map_key
outfeat = input.F.mm(self.kernel)
else:
# Get a new coordinate_map_key or extract one from the coords
out_coordinate_map_key = _get_coordinate_map_key(
input, coordinates, self.kernel_generator.expand_coordinates
)
outfeat = self.conv.apply(
input.F,
self.kernel,
self.kernel_generator,
self.convolution_mode,
input.coordinate_map_key,
out_coordinate_map_key,
input._manager,
)
if self.bias is not None:
outfeat += self.bias
return SparseTensor(
outfeat,
coordinate_map_key=out_coordinate_map_key,
coordinate_manager=input._manager,
)
def reset_parameters(self, is_transpose=False):
with torch.no_grad():
n = (
self.out_channels if is_transpose else self.in_channels
) * self.kernel_generator.kernel_volume
stdv = 1.0 / math.sqrt(n)
self.kernel.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def __repr__(self):
s = "(in={}, out={}, ".format(
self.in_channels,
self.out_channels,
)
        if self.kernel_generator.region_type == RegionType.CUSTOM:
s += "region_type={}, kernel_volume={}, ".format(
self.kernel_generator.region_type, self.kernel_generator.kernel_volume
)
else:
s += "kernel_size={}, ".format(self.kernel_generator.kernel_size)
s += "stride={}, dilation={})".format(
self.kernel_generator.kernel_stride,
self.kernel_generator.kernel_dilation,
)
return self.__class__.__name__ + s
class MinkowskiConvolution(MinkowskiConvolutionBase):
r"""Convolution layer for a sparse tensor.
.. math::
\mathbf{x}_\mathbf{u} = \sum_{\mathbf{i} \in \mathcal{N}^D(\mathbf{u}, K,
\mathcal{C}^\text{in})} W_\mathbf{i} \mathbf{x}_{\mathbf{i} +
\mathbf{u}} \;\text{for} \; \mathbf{u} \in \mathcal{C}^\text{out}
where :math:`K` is the kernel size and :math:`\mathcal{N}^D(\mathbf{u}, K,
\mathcal{C}^\text{in})` is the set of offsets that are at most :math:`\left
\lceil{\frac{1}{2}(K - 1)} \right \rceil` away from :math:`\mathbf{u}`
    defined in :math:`\mathcal{C}^\text{in}`.
.. note::
For even :math:`K`, the kernel offset :math:`\mathcal{N}^D`
implementation is different from the above definition. The offsets
range from :math:`\mathbf{i} \in [0, K)^D, \; \mathbf{i} \in
\mathbb{Z}_+^D`.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size=-1,
stride=1,
dilation=1,
bias=False,
kernel_generator=None,
expand_coordinates=False,
convolution_mode=ConvolutionMode.DEFAULT,
dimension=None,
):
r"""convolution on a sparse tensor
Args:
:attr:`in_channels` (int): the number of input channels in the
input tensor.
:attr:`out_channels` (int): the number of output channels in the
output tensor.
:attr:`kernel_size` (int, optional): the size of the kernel in the
output tensor. If not provided, :attr:`region_offset` should be
:attr:`RegionType.CUSTOM` and :attr:`region_offset` should be a 2D
matrix with size :math:`N\times D` such that it lists all :math:`N`
offsets in D-dimension.
:attr:`stride` (int, or list, optional): stride size of the
convolution layer. If non-identity is used, the output coordinates
will be at least :attr:`stride` :math:`\times` :attr:`tensor_stride`
away. When a list is given, the length must be D; each element will
be used for stride size for the specific axis.
:attr:`dilation` (int, or list, optional): dilation size for the
convolution kernel. When a list is given, the length must be D and
each element is an axis specific dilation. All elements must be > 0.
:attr:`bias` (bool, optional): if True, the convolution layer
has a bias.
:attr:`kernel_generator` (:attr:`MinkowskiEngine.KernelGenerator`,
optional): defines custom kernel shape.
:attr:`expand_coordinates` (bool, optional): Force generation of
new coordinates. When True, the output coordinates will be the
outer product of the kernel shape and the input coordinates.
`False` by default.
:attr:`dimension` (int): the spatial dimension of the space where
all the inputs and the network are defined. For example, images are
in a 2D space, meshes and 3D shapes are in a 3D space.
"""
MinkowskiConvolutionBase.__init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
dilation,
bias,
kernel_generator,
is_transpose=False,
expand_coordinates=expand_coordinates,
convolution_mode=convolution_mode,
dimension=dimension,
)
self.reset_parameters()
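# Hedged usage sketch (the helper name, coordinates, and features are made up
# for illustration): apply a 3x3 sparse convolution to a 2D sparse tensor
# whose first coordinate column is the batch index.
def _example_minkowski_convolution():
    coordinates = torch.IntTensor([[0, 0, 0], [0, 0, 1], [0, 1, 1]])
    features = torch.rand(3, 2)
    stensor = SparseTensor(features, coordinates)
    conv = MinkowskiConvolution(
        in_channels=2, out_channels=4, kernel_size=3, stride=1, dimension=2
    )
    return conv(stensor)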
class MinkowskiConvolutionTranspose(MinkowskiConvolutionBase):
r"""A generalized sparse transposed convolution or deconvolution layer."""
def __init__(
self,
in_channels,
out_channels,
kernel_size=-1,
stride=1,
dilation=1,
bias=False,
kernel_generator=None,
expand_coordinates=False,
convolution_mode=ConvolutionMode.DEFAULT,
dimension=None,
):
r"""a generalized sparse transposed convolution layer.
Args:
:attr:`in_channels` (int): the number of input channels in the
input tensor.
:attr:`out_channels` (int): the number of output channels in the
output tensor.
:attr:`kernel_size` (int, optional): the size of the kernel in the
        output tensor. If not provided, :attr:`region_type` should be
:attr:`RegionType.CUSTOM` and :attr:`region_offset` should be a 2D
matrix with size :math:`N\times D` such that it lists all :math:`N`
offsets in D-dimension.
:attr:`stride` (int, or list, optional): stride size that defines
upsampling rate. If non-identity is used, the output coordinates
will be :attr:`tensor_stride` / :attr:`stride` apart. When a list is
given, the length must be D; each element will be used for stride
size for the specific axis.
:attr:`dilation` (int, or list, optional): dilation size for the
convolution kernel. When a list is given, the length must be D and
each element is an axis specific dilation. All elements must be > 0.
:attr:`bias` (bool, optional): if True, the convolution layer
has a bias.
:attr:`kernel_generator` (:attr:`MinkowskiEngine.KernelGenerator`,
optional): defines custom kernel shape.
:attr:`expand_coordinates` (bool, optional): Force generation of
new coordinates. When True, the output coordinates will be the
outer product of the kernel shape and the input coordinates.
`False` by default.
:attr:`dimension` (int): the spatial dimension of the space where
all the inputs and the network are defined. For example, images are
in a 2D space, meshes and 3D shapes are in a 3D space.
        .. note::
            TODO: support `kernel_size` > `stride`.
"""
if kernel_generator is None:
kernel_generator = KernelGenerator(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
dimension=dimension,
)
MinkowskiConvolutionBase.__init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
dilation,
bias,
kernel_generator,
is_transpose=True,
expand_coordinates=expand_coordinates,
convolution_mode=convolution_mode,
dimension=dimension,
)
self.reset_parameters(True)
class MinkowskiGenerativeConvolutionTranspose(MinkowskiConvolutionBase):
r"""A generalized sparse transposed convolution or deconvolution layer that
generates new coordinates.
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size=-1,
stride=1,
dilation=1,
bias=False,
kernel_generator=None,
convolution_mode=ConvolutionMode.DEFAULT,
dimension=None,
):
r"""a generalized sparse transposed convolution layer that creates new coordinates.
Please refer to `Generative Sparse Detection Networks for 3D Single-shot Object Detection <https://arxiv.org/abs/2006.12356>`_ for more detail. Also, please cite the following paper if you use this function.
>> @inproceedings{gwak2020gsdn,
>> title={Generative Sparse Detection Networks for 3D Single-shot Object Detection},
>> author={Gwak, JunYoung and Choy, Christopher B and Savarese, Silvio},
>> booktitle={European conference on computer vision},
>> year={2020}
>> }
Args:
:attr:`in_channels` (int): the number of input channels in the
input tensor.
:attr:`out_channels` (int): the number of output channels in the
output tensor.
:attr:`kernel_size` (int, optional): the size of the kernel in the
        output tensor. If not provided, :attr:`region_type` should be
:attr:`RegionType.CUSTOM` and :attr:`region_offset` should be a 2D
matrix with size :math:`N\times D` such that it lists all :math:`N`
offsets in D-dimension.
:attr:`stride` (int, or list, optional): stride size that defines
upsampling rate. If non-identity is used, the output coordinates
will be :attr:`tensor_stride` / :attr:`stride` apart. When a list is
given, the length must be D; each element will be used for stride
size for the specific axis.
:attr:`dilation` (int, or list, optional): dilation size for the
convolution kernel. When a list is given, the length must be D and
each element is an axis specific dilation. All elements must be > 0.
:attr:`bias` (bool, optional): if True, the convolution layer
has a bias.
:attr:`kernel_generator` (:attr:`MinkowskiEngine.KernelGenerator`,
optional): defines custom kernel shape.
        :attr:`expand_coordinates`: always `True` for this layer; the output
        coordinates will be the outer product of the kernel shape and the
        input coordinates.
:attr:`dimension` (int): the spatial dimension of the space where
all the inputs and the network are defined. For example, images are
in a 2D space, meshes and 3D shapes are in a 3D space.
        .. note::
            TODO: support `kernel_size` > `stride`.
"""
if kernel_generator is None:
kernel_generator = KernelGenerator(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
expand_coordinates=True,
dimension=dimension,
)
else:
kernel_generator.expand_coordinates = True
MinkowskiConvolutionBase.__init__(
self,
in_channels,
out_channels,
kernel_size,
stride,
dilation,
bias,
kernel_generator,
is_transpose=True,
expand_coordinates=True,
convolution_mode=convolution_mode,
dimension=dimension,
)
self.reset_parameters(True)
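# Hedged sketch (the helper name and inputs are made up for illustration): a
# generative transposed convolution with stride 2 upsamples an input at tensor
# stride 2, creating new output coordinates at tensor stride 1.
def _example_generative_transpose():
    coordinates = torch.IntTensor([[0, 0, 0], [0, 2, 2]])
    features = torch.rand(2, 2)
    stensor = SparseTensor(features, coordinates, tensor_stride=2)
    convtr = MinkowskiGenerativeConvolutionTranspose(
        in_channels=2, out_channels=2, kernel_size=2, stride=2, dimension=2
    )
    return convtr(stensor)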
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiConvolution.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
from collections.abc import Sequence
import numpy as np
from typing import Union
import torch
from torch.nn import Module
import MinkowskiEngineBackend._C as MEB
StrideType = Union[int, Sequence, np.ndarray, torch.IntTensor]
def convert_to_int_list(
arg: Union[int, Sequence, np.ndarray, torch.Tensor], dimension: int
):
if isinstance(arg, list):
assert len(arg) == dimension
return arg
if isinstance(arg, (Sequence, np.ndarray, torch.Tensor)):
tmp = [i for i in arg]
assert len(tmp) == dimension
elif np.isscalar(arg): # Assume that it is a scalar
tmp = [int(arg) for i in range(dimension)]
else:
raise ValueError("Input must be a scalar or a sequence")
return tmp
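# Illustrative only (the helper name is hypothetical): a scalar argument is
# broadcast to a `dimension`-element list, while a matching-length sequence is
# returned as a list unchanged.
def _example_convert_to_int_list():
    assert convert_to_int_list(3, 2) == [3, 3]
    assert convert_to_int_list([1, 2], 2) == [1, 2]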
def convert_to_int_tensor(
arg: Union[int, Sequence, np.ndarray, torch.IntTensor], dimension: int
):
if isinstance(arg, torch.IntTensor):
assert arg.numel() == dimension
return arg
if isinstance(arg, (Sequence, np.ndarray)):
tmp = torch.IntTensor([i for i in arg])
assert tmp.numel() == dimension
elif np.isscalar(arg): # Assume that it is a scalar
tmp = torch.IntTensor([int(arg) for i in range(dimension)])
else:
raise ValueError("Input must be a scalar or a sequence")
return tmp
def prep_args(
tensor_stride: Union[int, Sequence, np.ndarray, torch.IntTensor],
stride: Union[int, Sequence, np.ndarray, torch.IntTensor],
kernel_size: Union[int, Sequence, np.ndarray, torch.IntTensor],
dilation: Union[int, Sequence, np.ndarray, torch.IntTensor],
region_type: Union[int, MEB.RegionType],
D=-1,
):
    assert D > 0, f"dimension must be a positive integer, provided {D}"
    tensor_stride = convert_to_int_tensor(tensor_stride, D)
    stride = convert_to_int_tensor(stride, D)
    kernel_size = convert_to_int_tensor(kernel_size, D)
    dilation = convert_to_int_tensor(dilation, D)
    # Validate after conversion so scalar and sequence inputs are checked uniformly.
    assert torch.prod(
        kernel_size > 0
    ), f"kernel_size must be a positive integer, provided {kernel_size}"
    region_type = int(region_type)
return (
tensor_stride,
stride,
kernel_size,
dilation,
region_type,
)
def get_postfix(tensor: torch.Tensor):
postfix = "GPU" if tensor.is_cuda else "CPU"
return postfix
class MinkowskiModuleBase(Module):
pass
def get_minkowski_function(name, variable):
fn_name = name + get_postfix(variable)
if hasattr(MEB, fn_name):
return getattr(MEB, fn_name)
else:
if variable.is_cuda:
            raise ValueError(
                f"Function {fn_name} not available. Please compile MinkowskiEngine with CUDA support, i.e., with `torch.cuda.is_available()` returning `True` at install time."
            )
else:
raise ValueError(f"Function {fn_name} not available.")
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiCommon.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import math
from collections import namedtuple
from collections.abc import Sequence
from functools import reduce
import numpy as np
from typing import Union
import torch
from MinkowskiCommon import convert_to_int_list
from MinkowskiEngineBackend._C import CoordinateMapKey, RegionType
from MinkowskiCoordinateManager import CoordinateManager
def get_kernel_volume(region_type, kernel_size, region_offset, axis_types, dimension):
"""
when center is True, the custom region_offset will be centered at the
origin. Currently, for HYPER_CUBE, HYPER_CROSS with odd kernel sizes cannot
use center=False.
"""
if region_type == RegionType.HYPER_CUBE:
        assert all(
            k > 0 for k in kernel_size
        ), "kernel_size must be positive"
assert (
region_offset is None
), "Region offset must be None when region_type is given"
assert axis_types is None, "Axis types must be None when region_type is given"
# Typical convolution kernel
# Convolution kernel with even numbered kernel size not defined.
kernel_volume = torch.prod(torch.IntTensor(kernel_size)).item()
elif region_type == RegionType.HYPER_CROSS:
        assert all(
            k > 0 for k in kernel_size
        ), "kernel_size must be positive"
assert (
torch.IntTensor(kernel_size) % 2
).prod().item() == 1, "kernel_size must be odd for region_type HYPER_CROSS"
# 0th: itself, (1, 2) for 0th dim neighbors, (3, 4) for 1th dim ...
kernel_volume = (torch.sum(torch.IntTensor(kernel_size) - 1) + 1).item()
# elif region_type == RegionType.HYBRID:
# assert reduce(
# lambda k1, k2: k1 > 0 and k2 > 0, kernel_size
# ), "kernel_size must be positive"
# assert (
# region_offset is None
# ), "region_offset must be None when region_type is HYBRID"
# kernel_size_list = kernel_size.tolist()
# kernel_volume = 1
# # First HYPER_CUBE
# for axis_type, curr_kernel_size, d in zip(
# axis_types, kernel_size_list, range(dimension)
# ):
# if axis_type == RegionType.HYPER_CUBE:
# kernel_volume *= curr_kernel_size
# # Second, HYPER_CROSS
# for axis_type, curr_kernel_size, d in zip(
# axis_types, kernel_size_list, range(dimension)
# ):
# if axis_type == RegionType.HYPER_CROSS:
# kernel_volume += curr_kernel_size - 1
elif region_type == RegionType.CUSTOM:
assert (
region_offset.numel() > 0
), "region_offset must be non empty when region_type is CUSTOM"
assert (
region_offset.size(1) == dimension
), "region_offset must have the same dimension as the network"
kernel_volume = int(region_offset.size(0))
else:
raise NotImplementedError()
return kernel_volume
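# Illustrative check of the closed forms above for a 3D kernel of size 3 (the
# helper name is hypothetical): HYPER_CUBE yields prod(kernel_size) = 27
# offsets, while HYPER_CROSS yields sum(kernel_size - 1) + 1 = 7.
def _example_kernel_volumes():
    cube = get_kernel_volume(RegionType.HYPER_CUBE, [3, 3, 3], None, None, 3)
    cross = get_kernel_volume(RegionType.HYPER_CROSS, [3, 3, 3], None, None, 3)
    assert (cube, cross) == (27, 7)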
def convert_region_type(
region_type: RegionType,
tensor_stride: Union[Sequence, np.ndarray, torch.IntTensor],
kernel_size: Union[Sequence, np.ndarray, torch.IntTensor],
up_stride: Union[Sequence, np.ndarray, torch.IntTensor],
dilation: Union[Sequence, np.ndarray, torch.IntTensor],
region_offset: Union[Sequence, np.ndarray, torch.IntTensor],
axis_types: Union[Sequence, np.ndarray, torch.IntTensor],
dimension: int,
center: bool = True,
):
"""
when center is True, the custom region_offset will be centered at the
    origin. Currently, HYPER_CUBE and HYPER_CROSS with odd kernel sizes cannot
use center=False.
up_stride: stride for conv_transpose, otherwise set it as 1
"""
if region_type == RegionType.HYPER_CUBE:
if isinstance(region_offset, torch.Tensor):
assert (
region_offset.numel() == 0
), "Region offset must be empty when region_type is given"
else:
assert (
region_offset is None
), "Region offset must be None when region_type is given"
assert axis_types is None, "Axis types must be None when region_type is given"
# Typical convolution kernel
        assert all(
            k > 0 for k in kernel_size
        ), "kernel_size must be positive"
# assert torch.unique(dilation).numel() == 1
kernel_volume = reduce(lambda k1, k2: k1 * k2, kernel_size)
elif region_type == RegionType.HYPER_CROSS:
        assert all(
            k > 0 for k in kernel_size
        ), "kernel_size must be positive"
        assert all(
            k % 2 == 1 for k in kernel_size
        ), "kernel_size must be odd for region_type HYPER_CROSS"
# 0th: itself, (1, 2) for 0th dim neighbors, (3, 4) for 1th dim ...
kernel_volume = (
reduce(lambda k1, k2: k1 + k2, map(lambda k: k - 1, kernel_size)) + 1
)
elif region_type == RegionType.HYBRID:
        assert all(
            k > 0 for k in kernel_size
        ), "kernel_size must be positive"
if isinstance(region_offset, torch.Tensor):
assert (
region_offset.numel() == 0
), "Region offset must be empty when region_type is given"
else:
assert (
region_offset is None
), "Region offset must be None when region_type is given"
        region_offset = [[0] * dimension]
kernel_size_list = kernel_size.tolist()
# First HYPER_CUBE
for axis_type, curr_kernel_size, d in zip(
axis_types, kernel_size_list, range(dimension)
):
new_offset = []
if axis_type == RegionType.HYPER_CUBE:
for offset in region_offset:
for curr_offset in range(curr_kernel_size):
off_center = (
int(math.floor((curr_kernel_size - 1) / 2)) if center else 0
)
offset = offset.copy() # Do not modify the original
# Exclude the coord (0, 0, ..., 0)
if curr_offset == off_center:
continue
offset[d] = (
(curr_offset - off_center)
* dilation[d]
* (tensor_stride[d] / up_stride[d])
)
new_offset.append(offset)
region_offset.extend(new_offset)
# Second, HYPER_CROSS
for axis_type, curr_kernel_size, d in zip(
axis_types, kernel_size_list, range(dimension)
):
new_offset = []
if axis_type == RegionType.HYPER_CROSS:
for curr_offset in range(curr_kernel_size):
off_center = (
int(math.floor((curr_kernel_size - 1) / 2)) if center else 0
)
                    offset = [0] * dimension
# Exclude the coord (0, 0, ..., 0)
if curr_offset == off_center:
continue
offset[d] = (
(curr_offset - off_center)
* dilation[d]
* (tensor_stride[d] / up_stride[d])
)
new_offset.append(offset)
region_offset.extend(new_offset)
# Convert to CUSTOM type
region_type = RegionType.CUSTOM
region_offset = torch.IntTensor(region_offset)
kernel_volume = int(region_offset.size(0))
elif region_type == RegionType.CUSTOM:
assert (
region_offset.numel() > 0
), "region_offset must be non empty when region_type is CUSTOM"
assert (
region_offset.size(1) == dimension
), "region_offset must have the same dimension as the network"
kernel_volume = int(region_offset.size(0))
        assert (
            region_offset.dtype == torch.int32
        ), "region_offset must be a torch.IntTensor."
else:
raise NotImplementedError()
if region_offset is None:
region_offset = torch.IntTensor()
return region_type, region_offset, kernel_volume
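# Illustrative only (the helper name is hypothetical): for a plain hyper-cubic
# kernel the offsets stay implicit (an empty tensor) and only the kernel
# volume is computed.
def _example_convert_region_type():
    region_type, offsets, volume = convert_region_type(
        RegionType.HYPER_CUBE, [1, 1], [3, 3], [1, 1], [1, 1], None, None, 2
    )
    assert volume == 9 and offsets.numel() == 0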
class KernelGenerator:
__slots__ = (
"cache",
"kernel_size",
"kernel_stride",
"kernel_dilation",
"region_type",
"region_offsets",
"axis_types",
"dimension",
"kernel_volume",
"requires_strided_coordinates",
"expand_coordinates",
)
def __init__(
self,
kernel_size=-1,
stride=1,
dilation=1,
is_transpose: bool = False,
region_type: RegionType = RegionType.HYPER_CUBE,
region_offsets: torch.Tensor = None,
expand_coordinates: bool = False,
axis_types=None,
dimension=-1,
):
r"""
:attr:`region_type` (RegionType, optional): defines the kernel
        shape. Please refer to MinkowskiEngine.Common for details.
        :attr:`region_offsets` (torch.IntTensor, optional): when the
        :attr:`region_type` is :attr:`RegionType.CUSTOM`, the convolution
        kernel uses the provided :attr:`region_offsets` to define offsets. It
should be a matrix of size :math:`N \times D` where :math:`N` is
the number of offsets and :math:`D` is the dimension of the
space.
:attr:`axis_types` (list of RegionType, optional): If given, it
uses different methods to create a kernel for each axis. e.g., when
it is `[RegionType.HYPER_CUBE, RegionType.HYPER_CUBE,
RegionType.HYPER_CROSS]`, the kernel would be rectangular for the
        first two dimensions and cross-shaped for the third dimension.
"""
assert dimension > 0
assert isinstance(region_type, RegionType)
kernel_size = convert_to_int_list(kernel_size, dimension)
kernel_stride = convert_to_int_list(stride, dimension)
kernel_dilation = convert_to_int_list(dilation, dimension)
self.cache = {}
self.kernel_size = kernel_size
self.kernel_stride = kernel_stride
self.kernel_dilation = kernel_dilation
self.region_type = region_type
        # Explicit None check: truth-testing a multi-element tensor raises a RuntimeError.
        self.region_offsets = (
            region_offsets if region_offsets is not None else torch.IntTensor()
        )
self.axis_types = axis_types
self.dimension = dimension
self.kernel_volume = get_kernel_volume(
region_type, kernel_size, region_offsets, axis_types, dimension
)
        self.requires_strided_coordinates = all(s == 1 for s in kernel_stride)
self.expand_coordinates = expand_coordinates
def get_kernel(self, tensor_stride, is_transpose):
assert len(tensor_stride) == self.dimension
if tuple(tensor_stride) not in self.cache:
            # `__slots__` defines `kernel_stride`, not `stride`; use the correct
            # attribute to avoid an AttributeError for transposed kernels.
            up_stride = (
                self.kernel_stride
                if is_transpose
                else torch.Tensor([1] * self.dimension)
            )
self.cache[tuple(tensor_stride)] = convert_region_type(
self.region_type,
tensor_stride,
self.kernel_size,
up_stride,
self.kernel_dilation,
self.region_offsets,
self.axis_types,
self.dimension,
)
return self.cache[tuple(tensor_stride)]
def __repr__(self):
return (
self.__class__.__name__
+ f"(kernel_size={self.kernel_size}, kernel_stride={self.kernel_stride}, kernel_dilation={self.kernel_dilation}, "
+ f"region_type={self.region_type}, expand_coordinates={self.expand_coordinates}, dimension={self.dimension})"
)
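# Hedged sketch (the helper name is hypothetical): compare kernel volumes for
# hyper-cubic vs. cross-shaped kernels of size 3 in 3D (27 vs. 7 offsets,
# matching `get_kernel_volume`).
def _example_kernel_generator():
    cube = KernelGenerator(kernel_size=3, dimension=3)
    cross = KernelGenerator(
        kernel_size=3, region_type=RegionType.HYPER_CROSS, dimension=3
    )
    assert (cube.kernel_volume, cross.kernel_volume) == (27, 7)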
class KernelRegion(
namedtuple(
"KernelRegion",
(
"kernel_size",
"kernel_stride",
"kernel_dilation",
"region_type",
"offset",
"D",
),
)
):
"""adding functionality to a named tuple"""
__slots__ = ()
def __init__(
self,
kernel_size,
kernel_stride,
kernel_dilation,
region_type,
offset,
dimension,
):
kernel_size = convert_to_int_list(kernel_size, dimension)
kernel_stride = convert_to_int_list(kernel_stride, dimension)
kernel_dilation = convert_to_int_list(kernel_dilation, dimension)
super(KernelRegion, self).__init__(
kernel_size, kernel_stride, kernel_dilation, region_type, offset, dimension
)
    def __str__(self):
        return (
            f"kernel_size:{self.kernel_size}, kernel_stride:{self.kernel_stride}, "
            f"region_type:{self.region_type}"
        )
def save_ctx(
ctx, # function object context
kernel_generator: KernelGenerator,
in_coords_key: CoordinateMapKey,
out_coords_key: CoordinateMapKey,
coordinate_manager: CoordinateManager,
):
ctx.kernel_generator = kernel_generator
ctx.in_coordinate_map_key = in_coords_key
ctx.out_coordinate_map_key = out_coords_key
ctx.coordinate_manager = coordinate_manager
return ctx
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiKernelGenerator.py |
# Copyright (c) 2020 NVIDIA CORPORATION.
# Copyright (c) 2018-2020 Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import os
import torch
import copy
from enum import Enum
from MinkowskiEngineBackend._C import CoordinateMapKey
class SparseTensorOperationMode(Enum):
r"""Enum class for SparseTensor internal instantiation modes.
:attr:`SEPARATE_COORDINATE_MANAGER`: always create a new coordinate manager.
:attr:`SHARE_COORDINATE_MANAGER`: always use the globally defined coordinate
manager. Must clear the coordinate manager manually by
:attr:`MinkowskiEngine.SparseTensor.clear_global_coordinate_manager`.
"""
SEPARATE_COORDINATE_MANAGER = 0
SHARE_COORDINATE_MANAGER = 1
class SparseTensorQuantizationMode(Enum):
r"""
`RANDOM_SUBSAMPLE`: Subsample one coordinate per each quantization block randomly.
`UNWEIGHTED_AVERAGE`: average all features within a quantization block equally.
`UNWEIGHTED_SUM`: sum all features within a quantization block equally.
`NO_QUANTIZATION`: No quantization is applied. Should not be used for normal operation.
`MAX_POOL`: Voxel-wise max pooling is applied.
`SPLAT_LINEAR_INTERPOLATION`: Splat features using N-dimensional linear interpolation to 2^N neighbors.
"""
RANDOM_SUBSAMPLE = 0
UNWEIGHTED_AVERAGE = 1
UNWEIGHTED_SUM = 2
NO_QUANTIZATION = 3
MAX_POOL = 4
SPLAT_LINEAR_INTERPOLATION = 5
_sparse_tensor_operation_mode = SparseTensorOperationMode.SEPARATE_COORDINATE_MANAGER
_global_coordinate_manager = None
COORDINATE_MANAGER_DIFFERENT_ERROR = "SparseTensors must share the same coordinate manager for this operation. Please refer to the SparseTensor creation API (https://nvidia.github.io/MinkowskiEngine/sparse_tensor.html) to share the coordinate manager, or set the sparse tensor operation mode with `set_sparse_tensor_operation_mode` to share it by default."
COORDINATE_KEY_DIFFERENT_ERROR = "SparseTensors must have the same coordinate_map_key."
def set_sparse_tensor_operation_mode(operation_mode: SparseTensorOperationMode):
r"""Define the sparse tensor coordinate manager operation mode.
By default, a :attr:`MinkowskiEngine.SparseTensor.SparseTensor`
instantiation creates a new coordinate manager that is not shared with
other sparse tensors. By setting this function with
:attr:`MinkowskiEngine.SparseTensorOperationMode.SHARE_COORDINATE_MANAGER`, you
can share the coordinate manager globally with other sparse tensors.
    However, you must explicitly clear the coordinate manager after use. Please
refer to :attr:`MinkowskiEngine.clear_global_coordinate_manager`.
Args:
:attr:`operation_mode`
(:attr:`MinkowskiEngine.SparseTensorOperationMode`): The operation mode
for the sparse tensor coordinate manager. By default
:attr:`MinkowskiEngine.SparseTensorOperationMode.SEPARATE_COORDINATE_MANAGER`.
Example:
>>> import MinkowskiEngine as ME
>>> ME.set_sparse_tensor_operation_mode(ME.SparseTensorOperationMode.SHARE_COORDINATE_MANAGER)
>>> ...
>>> a = ME.SparseTensor(...)
>>> b = ME.SparseTensor(...) # coords_man shared
>>> ... # one feed forward and backward
>>> ME.clear_global_coordinate_manager() # Must use to clear the coordinates after one forward/backward
"""
assert isinstance(
operation_mode, SparseTensorOperationMode
), f"Input must be an instance of SparseTensorOperationMode not {operation_mode}"
global _sparse_tensor_operation_mode
_sparse_tensor_operation_mode = operation_mode
def sparse_tensor_operation_mode() -> SparseTensorOperationMode:
r"""Return the current sparse tensor operation mode."""
global _sparse_tensor_operation_mode
return copy.deepcopy(_sparse_tensor_operation_mode)
def global_coordinate_manager():
r"""Return the current global coordinate manager"""
global _global_coordinate_manager
return _global_coordinate_manager
def set_global_coordinate_manager(coordinate_manager):
r"""Set the global coordinate manager.
:attr:`MinkowskiEngine.CoordinateManager` The coordinate manager which will
be set to the global coordinate manager.
"""
global _global_coordinate_manager
_global_coordinate_manager = coordinate_manager
def clear_global_coordinate_manager():
r"""Clear the global coordinate manager cache.
When you use the operation mode:
:attr:`MinkowskiEngine.SparseTensor.SparseTensorOperationMode.SHARE_COORDINATE_MANAGER`,
you must explicitly clear the coordinate manager after each feed forward/backward.
"""
global _global_coordinate_manager
_global_coordinate_manager = None
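# Hedged lifecycle sketch for the SHARE_COORDINATE_MANAGER mode described
# above (the helper name and callback are hypothetical): enable sharing, run
# one iteration, then clear the global manager to release cached maps.
def _example_shared_coordinate_manager_lifecycle(run_one_iteration):
    set_sparse_tensor_operation_mode(
        SparseTensorOperationMode.SHARE_COORDINATE_MANAGER
    )
    run_one_iteration()  # build sparse tensors, run forward/backward
    clear_global_coordinate_manager()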
class Tensor:
r"""A sparse tensor class. Can be accessed via
:attr:`MinkowskiEngine.SparseTensor`.
The :attr:`SparseTensor` class is the basic tensor in MinkowskiEngine. For
the definition of a sparse tensor, please visit `the terminology page
<https://nvidia.github.io/MinkowskiEngine/terminology.html#sparse-tensor>`_.
We use the COOrdinate (COO) format to save a sparse tensor `[1]
<http://groups.csail.mit.edu/commit/papers/2016/parker-thesis.pdf>`_. This
representation is simply a concatenation of coordinates in a matrix
:math:`C` and associated features :math:`F`.
.. math::
\mathbf{C} = \begin{bmatrix}
b_1 & x_1^1 & x_1^2 & \cdots & x_1^D \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
b_N & x_N^1 & x_N^2 & \cdots & x_N^D
\end{bmatrix}, \; \mathbf{F} = \begin{bmatrix}
\mathbf{f}_1^T\\
\vdots\\
\mathbf{f}_N^T
\end{bmatrix}
    where :math:`\mathbf{x}_i \in \mathbb{Z}^D` is a :math:`D`-dimensional
    coordinate and :math:`b_i \in \mathbb{Z}_+` denotes the corresponding
batch index. :math:`N` is the number of non-zero elements in the sparse
    tensor, each with the coordinate :math:`(b_i, x_i^1, x_i^2, \cdots,
    x_i^D)`, and the associated feature :math:`\mathbf{f}_i`. Internally, we
handle the batch index as an additional spatial dimension.
Example::
>>> coords, feats = ME.utils.sparse_collate([coords_batch0, coords_batch1], [feats_batch0, feats_batch1])
>>> A = ME.SparseTensor(features=feats, coordinates=coords)
>>> B = ME.SparseTensor(features=feats, coordinate_map_key=A.coordiante_map_key, coordinate_manager=A.coordinate_manager)
>>> C = ME.SparseTensor(features=feats, coordinates=coords, quantization_mode=ME.SparseTensorQuantizationMode.UNWEIGHTED_AVERAGE)
>>> D = ME.SparseTensor(features=feats, coordinates=coords, tensor_stride=2)
.. warning::
To use the GPU-backend for coordinate management, the
:attr:`coordinates` must be a torch tensor on GPU. Applying `to(device)`
after a :attr:`MinkowskiEngine.SparseTensor` initialization with a CPU
`coordinates` will waste time and computation for creating a CPU
CoordinateMap since GPU CoordinateMap will be created from scratch.
.. warning::
Before MinkowskiEngine version 0.4, we put the batch indices on the last
column. Thus, direct manipulation of coordinates will be incompatible
with the latest versions. Instead, please use
:attr:`MinkowskiEngine.utils.batched_coordinates` or
:attr:`MinkowskiEngine.utils.sparse_collate` to create batched
coordinates.
Also, to access coordinates or features batch-wise, use the functions
:attr:`coordinates_at(batch_index : int)`, :attr:`features_at(batch_index : int)` of
a sparse tensor. Or to access all batch-wise coordinates and features,
`decomposed_coordinates`, `decomposed_features`,
`decomposed_coordinates_and_features` of a sparse tensor.
Example::
>>> coords, feats = ME.utils.sparse_collate([coords_batch0, coords_batch1], [feats_batch0, feats_batch1])
>>> A = ME.SparseTensor(features=feats, coordinates=coords)
>>> coords_batch0 = A.coordinates_at(batch_index=0)
>>> feats_batch1 = A.features_at(batch_index=1)
>>> list_of_coords, list_of_featurs = A.decomposed_coordinates_and_features
"""
@property
def coordinate_manager(self):
return self._manager
@property
def tensor_stride(self):
return self.coordinate_map_key.get_tensor_stride()
@tensor_stride.setter
def tensor_stride(self, p):
r"""
This function is not recommended to be used directly.
"""
raise SyntaxError("Direct modification of tensor_stride is not permitted")
def _get_coordinates(self):
return self._manager.get_coordinates(self.coordinate_map_key)
@property
def C(self):
r"""The alias of :attr:`coords`."""
return self.coordinates
@property
def coordinates(self):
r"""
The coordinates of the current sparse tensor. The coordinates are
represented as a :math:`N \times (D + 1)` dimensional matrix where
:math:`N` is the number of points in the space and :math:`D` is the
        dimension of the space (e.g. 3 for 3D, 4 for 3D + Time). The extra
        column of the matrix C holds the batch index, which is internally
        treated as an additional spatial dimension to disassociate different
        instances in a batch.
"""
if self._C is None:
self._C = self._get_coordinates()
return self._C
@property
def coordinate_key(self):
raise NotImplementedError("Tensor interface does not have coordinate_key")
@C.setter
def C(self):
raise SyntaxError("Direct modification of coordinates is not permitted")
@coordinates.setter
def coordinates(self):
raise SyntaxError("Direct modification of coordinates is not permitted")
@property
def F(self):
r"""The alias of :attr:`feats`."""
return self._F
@property
def features(self):
r"""
The features of the current sparse tensor. The features are :math:`N
\times D_F` where :math:`N` is the number of points in the space and
:math:`D_F` is the dimension of each feature vector. Please refer to
:attr:`coords` to access the associated coordinates.
"""
return self._F
@property
def _batchwise_row_indices(self):
if self._batch_rows is None:
_, self._batch_rows = self._manager.origin_map(self.coordinate_map_key)
return self._batch_rows
@property
def _sorted_batchwise_row_indices(self):
if self._sorted_batch_rows is None:
batch_rows = self._batchwise_row_indices
with torch.no_grad():
self._sorted_batch_rows = [t.sort()[0] for t in batch_rows]
return self._sorted_batch_rows
@property
def decomposition_permutations(self):
r"""Returns a list of indices per batch that where indices defines the permutation of the batch-wise decomposition.
Example::
>>> # coords, feats, labels are given. All follow the same order
>>> stensor = ME.SparseTensor(feats, coords)
>>> conv = ME.MinkowskiConvolution(in_channels=3, out_nchannel=3, kernel_size=3, dimension=3)
>>> list_of_featurs = stensor.decomposed_features
>>> list_of_permutations = stensor.decomposition_permutations
>>> # list_of_features == [feats[inds] for inds in list_of_permutations]
>>> list_of_decomposed_labels = [labels[inds] for inds in list_of_permutations]
>>> for curr_feats, curr_labels in zip(list_of_features, list_of_decomposed_labels):
>>> loss += torch.functional.mse_loss(curr_feats, curr_labels)
"""
return self._batchwise_row_indices
@property
def decomposed_coordinates(self):
r"""Returns a list of coordinates per batch.
Returns a list of torch.IntTensor :math:`C \in \mathcal{R}^{N_i
\times D}` coordinates per batch where :math:`N_i` is the number of non
zero elements in the :math:`i`th batch index in :math:`D` dimensional
space.
.. note::
            The order of coordinates is non-deterministic within each batch. Use
            :attr:`decomposed_coordinates_and_features` to retrieve both
            coordinates and features in the same order. To retrieve the order in
            which the decomposed coordinates were generated, use
            :attr:`decomposition_permutations`.
"""
return [self.C[row_inds, 1:] for row_inds in self._batchwise_row_indices]
def coordinates_at(self, batch_index):
r"""Return coordinates at the specified batch index.
Returns a torch.IntTensor :math:`C \in \mathcal{R}^{N_i
\times D}` coordinates at the specified batch index where :math:`N_i`
is the number of non zero elements in the :math:`i`th batch index in
:math:`D` dimensional space.
.. note::
           The order of coordinates is non-deterministic within each batch. Use
           :attr:`decomposed_coordinates_and_features` to retrieve both
           coordinates and features in the same order. To retrieve the order in
           which the decomposed coordinates were generated, use
           :attr:`decomposition_permutations`.
"""
return self.C[self._batchwise_row_indices[batch_index], 1:]
@property
def decomposed_features(self):
r"""Returns a list of features per batch.
        Returns a list of torch.Tensor :math:`F \in \mathcal{R}^{N_i
        \times N_F}` features per batch, where :math:`N_i` is the number of non
zero elements in the :math:`i`th batch index in :math:`D` dimensional
space.
.. note::
           The order of features is non-deterministic within each batch. Use
           :attr:`decomposed_coordinates_and_features` to retrieve both
           coordinates and features in the same order. To retrieve the order in
           which the decomposed features were generated, use
           :attr:`decomposition_permutations`.
"""
return [self._F[row_inds] for row_inds in self._batchwise_row_indices]
def features_at(self, batch_index):
r"""Returns a feature matrix at the specified batch index.
        Returns a torch.Tensor :math:`F \in \mathcal{R}^{N
        \times N_F}` feature matrix, where :math:`N` is the number of non
        zero elements in the specified batch index and :math:`N_F` is the
        number of channels.
.. note::
           The order of features is non-deterministic within each batch. Use
           :attr:`decomposed_coordinates_and_features` to retrieve both
           coordinates and features in the same order. To retrieve the order in
           which the decomposed features were generated, use
           :attr:`decomposition_permutations`.
"""
return self._F[self._batchwise_row_indices[batch_index]]
def coordinates_and_features_at(self, batch_index):
r"""Returns a coordinate and feature matrix at the specified batch index.
Returns a coordinate and feature matrix at the specified `batch_index`.
The coordinate matrix is a torch.IntTensor :math:`C \in \mathcal{R}^{N
\times D}` where :math:`N` is the number of non zero elements in the
specified batch index in :math:`D` dimensional space. The feature
        matrix is a torch.Tensor :math:`F \in \mathcal{R}^{N \times N_F}`,
        where :math:`N` is the number of non zero elements in the specified
        batch index and :math:`N_F` is the number of channels.
.. note::
           The order of features is non-deterministic within each batch. To
           retrieve the order in which the decomposed features were generated,
           use :attr:`decomposition_permutations`.
"""
row_inds = self._batchwise_row_indices[batch_index]
return self.C[row_inds, 1:], self._F[row_inds]
@property
def decomposed_coordinates_and_features(self):
r"""Returns a list of coordinates and a list of features per batch.abs
.. note::
The order of decomposed coordinates and features is
           non-deterministic within each batch. To retrieve the order in which
           the decomposed features were generated, use
           :attr:`decomposition_permutations`.
"""
row_inds_list = self._batchwise_row_indices
return (
[self.C[row_inds, 1:] for row_inds in row_inds_list],
[self._F[row_inds] for row_inds in row_inds_list],
)
@property
def dimension(self):
r"""Alias of attr:`D`"""
return self._D
    @dimension.setter
    def dimension(self, value):
        raise SyntaxError("Direct modification not permitted")
@property
def D(self):
r"""Alias of attr:`D`"""
return self._D
    @D.setter
    def D(self, value):
        raise SyntaxError("Direct modification not permitted")
@property
def requires_grad(self):
return self._F.requires_grad
def requires_grad_(self, requires_grad: bool = True):
self._F.requires_grad_(requires_grad)
def float(self):
self._F = self._F.float()
return self
def double(self):
self._F = self._F.double()
return self
def __len__(self):
return len(self._F)
def size(self):
return self._F.size()
@property
def shape(self):
return self._F.shape
@property
def device(self):
return self._F.device
@property
def dtype(self):
return self._F.dtype
def detach(self):
self._F = self._F.detach()
return self
def get_device(self):
return self._F.get_device()
def _is_same_key(self, other):
assert isinstance(other, self.__class__)
assert self._manager == other._manager, COORDINATE_MANAGER_DIFFERENT_ERROR
assert (
self.coordinate_map_key == other.coordinate_map_key
), COORDINATE_KEY_DIFFERENT_ERROR
# Operation overloading
def __iadd__(self, other):
self._is_same_key(other)
self._F += other.F
return self
def __isub__(self, other):
self._is_same_key(other)
self._F -= other.F
return self
def __imul__(self, other):
self._is_same_key(other)
self._F *= other.F
return self
def __idiv__(self, other):
self._is_same_key(other)
self._F /= other.F
return self
def _binary_functor(self, other, binary_fn):
assert isinstance(other, (self.__class__, torch.Tensor))
if isinstance(other, self.__class__):
assert self._manager == other._manager, COORDINATE_MANAGER_DIFFERENT_ERROR
if self.coordinate_map_key == other.coordinate_map_key:
return self.__class__(
binary_fn(self._F, other.F),
coordinate_map_key=self.coordinate_map_key,
coordinate_manager=self._manager,
)
else:
# Generate union maps
out_key = CoordinateMapKey(
self.coordinate_map_key.get_coordinate_size()
)
union_maps = self.coordinate_manager.union_map(
[self.coordinate_map_key, other.coordinate_map_key], out_key
)
N_out = self.coordinate_manager.size(out_key)
out_F = torch.zeros(
(N_out, self._F.size(1)), dtype=self.dtype, device=self.device
)
out_F[union_maps[0][1]] = self._F[union_maps[0][0]]
out_F[union_maps[1][1]] = binary_fn(
out_F[union_maps[1][1]], other._F[union_maps[1][0]]
)
return self.__class__(
out_F, coordinate_map_key=out_key, coordinate_manager=self._manager
)
else: # when it is a torch.Tensor
return self.__class__(
binary_fn(self._F, other),
coordinate_map_key=self.coordinate_map_key,
coordinate_manager=self._manager,
)
def __add__(self, other):
r"""
Add its feature with the corresponding feature of the other
:attr:`MinkowskiEngine.SparseTensor` or a :attr:`torch.Tensor`
element-wise. For coordinates that exist on one sparse tensor but not
on the other, features of the counterpart that do not exist will be set
to 0.
"""
return self._binary_functor(other, lambda x, y: x + y)
def __sub__(self, other):
r"""
Subtract the feature of the other :attr:`MinkowskiEngine.SparseTensor`
or a :attr:`torch.Tensor` from its corresponding feature element-wise.
For coordinates that exist on one sparse tensor but not on the other,
features of the counterpart that do not exist will be set to 0.
"""
return self._binary_functor(other, lambda x, y: x - y)
def __mul__(self, other):
r"""
Multiply its feature of with the corresponding feature of the other
:attr:`MinkowskiEngine.SparseTensor` or a :attr:`torch.Tensor`
element-wise. For coordinates that exist on one sparse tensor but not
on the other, features of the counterpart that do not exist will be set
to 0.
"""
return self._binary_functor(other, lambda x, y: x * y)
def __truediv__(self, other):
r"""
Divide its feature by the corresponding feature of the other
:attr:`MinkowskiEngine.SparseTensor` or a :attr:`torch.Tensor`
element-wise. For coordinates that exist on one sparse tensor but not
on the other, features of the counterpart that do not exist will be set
to 0.
"""
return self._binary_functor(other, lambda x, y: x / y)
    def __pow__(self, power):
return self.__class__(
self._F ** power,
coordinate_map_key=self.coordinate_map_key,
coordinate_manager=self._manager,
)
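    # Illustrative sketch (added note, not original API documentation):
    # element-wise arithmetic between two sparse tensors sharing a coordinate
    # manager operates on the union of their coordinate maps, with missing
    # features treated as 0. The tensor names below are hypothetical.
    #
    #   >>> a = ME.SparseTensor(feats_a, coords_a)
    #   >>> b = ME.SparseTensor(
    #   ...     feats_b, coords_b, coordinate_manager=a.coordinate_manager)
    #   >>> c = a + b  # defined on the union of a's and b's coordinates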
__slots__ = (
"_C",
"_F",
"_D",
"coordinate_map_key",
"_manager",
"unique_index",
"inverse_mapping",
"quantization_mode",
"_batch_rows",
)
| MinkowskiEngine-master | MinkowskiEngine/MinkowskiTensor.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
from MinkowskiSparseTensor import SparseTensor
def get_coords_map(x, y):
r"""Get mapping between sparse tensor 1 and sparse tensor 2.
Args:
:attr:`x` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor with
`x.tensor_stride` <= `y.tensor_stride`.
:attr:`y` (:attr:`MinkowskiEngine.SparseTensor`): a sparse tensor with
`x.tensor_stride` <= `y.tensor_stride`.
Returns:
        :attr:`x_indices` (:attr:`torch.LongTensor`): the indices of x that
        correspond to the returned indices of y.
        :attr:`y_indices` (:attr:`torch.LongTensor`): the indices of y that
        correspond to the returned indices of x.
Example::
.. code-block:: python
sp_tensor = ME.SparseTensor(features, coordinates=coordinates)
out_sp_tensor = stride_2_conv(sp_tensor)
ins, outs = get_coords_map(sp_tensor, out_sp_tensor)
for i, o in zip(ins, outs):
print(f"{i} -> {o}")
"""
assert isinstance(x, SparseTensor)
assert isinstance(y, SparseTensor)
assert (
x.coords_man == y.coords_man
), "X and Y are using different CoordinateManagers. Y must be derived from X through strided conv/pool/etc."
return x.coords_man.get_coords_map(x.coords_key, y.coords_key)
| MinkowskiEngine-master | MinkowskiEngine/utils/coords.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import numpy as np
from collections.abc import Sequence
import MinkowskiEngineBackend._C as MEB
from typing import Union, Tuple
from MinkowskiCommon import convert_to_int_list
def fnv_hash_vec(arr):
"""
FNV64-1A
"""
assert arr.ndim == 2
# Floor first for negative coordinates
arr = arr.copy()
arr = arr.astype(np.uint64, copy=False)
hashed_arr = np.uint64(14695981039346656037) * np.ones(
arr.shape[0], dtype=np.uint64
)
for j in range(arr.shape[1]):
hashed_arr *= np.uint64(1099511628211)
hashed_arr = np.bitwise_xor(hashed_arr, arr[:, j])
return hashed_arr
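# Hedged usage sketch (the input array below is illustrative): one uint64
# hash is produced per row, usable as a dictionary key for a coordinate.
#
#   >>> coords = np.array([[0, 0, 1], [2, 3, 4]], dtype=np.int64)
#   >>> fnv_hash_vec(coords)  # shape (2,), dtype uint64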
def ravel_hash_vec(arr):
"""
Ravel the coordinates after subtracting the min coordinates.
"""
assert arr.ndim == 2
arr = arr.copy()
arr -= arr.min(0)
arr = arr.astype(np.uint64, copy=False)
arr_max = arr.max(0).astype(np.uint64) + 1
keys = np.zeros(arr.shape[0], dtype=np.uint64)
# Fortran style indexing
for j in range(arr.shape[1] - 1):
keys += arr[:, j]
keys *= arr_max[j + 1]
keys += arr[:, -1]
return keys
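# Hedged usage sketch (illustrative input): rows are raveled into flat
# indices of the bounding grid, so identical rows map to identical keys as
# long as the grid extents do not overflow uint64.
#
#   >>> coords = np.array([[0, 0], [1, 2], [1, 2]], dtype=np.int64)
#   >>> ravel_hash_vec(coords)  # the last two keys are equal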
def quantize(coords):
r"""Returns a unique index map and an inverse index map.
Args:
:attr:`coords` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): a
matrix of size :math:`N \times D` where :math:`N` is the number of
points in the :math:`D` dimensional space.
Returns:
:attr:`unique_map` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): a
list of indices that defines unique coordinates.
:attr:`coords[unique_map]` is the unique coordinates.
:attr:`inverse_map` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): a
list of indices that defines the inverse map that recovers the original
coordinates. :attr:`coords[unique_map[inverse_map]] == coords`
Example::
>>> unique_map, inverse_map = quantize(coords)
>>> unique_coords = coords[unique_map]
>>> print(unique_coords[inverse_map] == coords) # True, ..., True
>>> print(coords[unique_map[inverse_map]] == coords) # True, ..., True
"""
assert isinstance(coords, np.ndarray) or isinstance(
coords, torch.Tensor
), "Invalid coords type"
if isinstance(coords, np.ndarray):
assert (
coords.dtype == np.int32
), f"Invalid coords type {coords.dtype} != np.int32"
return MEB.quantize_np(coords.astype(np.int32))
else:
# Type check done inside
return MEB.quantize_th(coords.int())
def quantize_label(coords, labels, ignore_label):
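    r"""Quantize coordinates and collapse the associated labels.

    Behaves like :attr:`quantize`, additionally merging :attr:`labels`: when
    several points fall into the same quantized coordinate with conflicting
    labels, the merged label is set to :attr:`ignore_label`.
    """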
assert isinstance(coords, np.ndarray) or isinstance(
coords, torch.Tensor
), "Invalid coords type"
if isinstance(coords, np.ndarray):
assert isinstance(labels, np.ndarray)
assert (
coords.dtype == np.int32
), f"Invalid coords type {coords.dtype} != np.int32"
assert (
labels.dtype == np.int32
), f"Invalid label type {labels.dtype} != np.int32"
return MEB.quantize_label_np(coords, labels, ignore_label)
else:
assert isinstance(labels, torch.Tensor)
# Type check done inside
return MEB.quantize_label_th(coords, labels.int(), ignore_label)
def _auto_floor(array):
assert isinstance(
array, (np.ndarray, torch.Tensor)
), "array must be either np.array or torch.Tensor."
if isinstance(array, np.ndarray):
return np.floor(array)
else:
return torch.floor(array)
def sparse_quantize(
coordinates,
features=None,
labels=None,
ignore_label=-100,
return_index=False,
return_inverse=False,
return_maps_only=False,
quantization_size=None,
device="cpu",
):
r"""Given coordinates, and features (optionally labels), the function
generates quantized (voxelized) coordinates.
Args:
:attr:`coordinates` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`): a
matrix of size :math:`N \times D` where :math:`N` is the number of
points in the :math:`D` dimensional space.
:attr:`features` (:attr:`numpy.ndarray` or :attr:`torch.Tensor`, optional): a
matrix of size :math:`N \times D_F` where :math:`N` is the number of
points and :math:`D_F` is the dimension of the features. Must have the
same container as `coords` (i.e. if `coords` is a torch.Tensor, `feats`
must also be a torch.Tensor).
:attr:`labels` (:attr:`numpy.ndarray` or :attr:`torch.IntTensor`,
    optional): integer labels associated with each coordinate. Must have the
same container as `coords` (i.e. if `coords` is a torch.Tensor,
`labels` must also be a torch.Tensor). For classification where a set
of points are mapped to one label, do not feed the labels.
    :attr:`ignore_label` (:attr:`int`, optional): the integer value of the
    ignore label, e.g. for use with
    :attr:`torch.nn.CrossEntropyLoss(ignore_index=ignore_label)`.
:attr:`return_index` (:attr:`bool`, optional): set True if you want the
indices of the quantized coordinates. False by default.
:attr:`return_inverse` (:attr:`bool`, optional): set True if you want
the indices that can recover the discretized original coordinates.
    False by default. `return_index` must be True when `return_inverse` is True.
:attr:`return_maps_only` (:attr:`bool`, optional): if set, return the
unique_map or optionally inverse map, but not the coordinates. Can be
used if you don't care about final coordinates or if you use
device==cuda and you don't need coordinates on GPU. This returns either
unique_map alone or (unique_map, inverse_map) if return_inverse is set.
    :attr:`quantization_size` (:attr:`float`, :attr:`list`, or
    :attr:`numpy.ndarray`, optional): if set, defines the length of each
    side of the hyperrectangular grid cell, i.e. the smallest distance
    between quantized coordinates.
    :attr:`device` (:attr:`str`, optional): Either 'cpu' or 'cuda'.
Example::
        >>> unique_coords, unique_map, inverse_map = sparse_quantize(
        ...     discrete_coords, return_index=True, return_inverse=True)
        >>> print(unique_coords[inverse_map] == discrete_coords)  # True
Example::
>>> # Segmentation
>>> criterion = torch.nn.CrossEntropyLoss(ignore_index=-100)
>>> coords, feats, labels = MinkowskiEngine.utils.sparse_quantize(
>>> coords, feats, labels, ignore_label=-100, quantization_size=0.1)
>>> output = net(MinkowskiEngine.SparseTensor(feats, coords))
>>> loss = criterion(output.F, labels.long())
>>>
>>> # Classification
>>> criterion = torch.nn.CrossEntropyLoss(ignore_index=-100)
>>> coords, feats = MinkowskiEngine.utils.sparse_quantize(coords, feats)
>>> output = net(MinkowskiEngine.SparseTensor(feats, coords))
>>> loss = criterion(output.F, labels.long())
"""
assert isinstance(
coordinates, (np.ndarray, torch.Tensor)
), "Coords must be either np.array or torch.Tensor."
use_label = labels is not None
use_feat = features is not None
assert (
coordinates.ndim == 2
), "The coordinates must be a 2D matrix. The shape of the input is " + str(
coordinates.shape
)
if return_inverse:
assert return_index, "return_reverse must be set with return_index"
if use_feat:
assert features.ndim == 2
assert coordinates.shape[0] == features.shape[0]
if use_label:
assert coordinates.shape[0] == len(labels)
dimension = coordinates.shape[1]
# Quantize the coordinates
if quantization_size is not None:
if isinstance(quantization_size, (Sequence, np.ndarray, torch.Tensor)):
assert (
len(quantization_size) == dimension
), "Quantization size and coordinates size mismatch."
            if isinstance(coordinates, np.ndarray):
                quantization_size = np.array(quantization_size)
            else:
                quantization_size = torch.Tensor(list(quantization_size))
discrete_coordinates = _auto_floor(coordinates / quantization_size)
elif np.isscalar(quantization_size): # Assume that it is a scalar
if quantization_size == 1:
discrete_coordinates = _auto_floor(coordinates)
else:
discrete_coordinates = _auto_floor(coordinates / quantization_size)
else:
raise ValueError("Not supported type for quantization_size.")
else:
discrete_coordinates = _auto_floor(coordinates)
if isinstance(coordinates, np.ndarray):
discrete_coordinates = discrete_coordinates.astype(np.int32)
else:
discrete_coordinates = discrete_coordinates.int()
if (type(device) == str and device == "cpu") or (type(device) == torch.device and device.type == "cpu"):
manager = MEB.CoordinateMapManagerCPU()
elif (type(device) == str and "cuda" in device) or (type(device) == torch.device and device.type == "cuda"):
manager = MEB.CoordinateMapManagerGPU_c10()
else:
raise ValueError("Invalid device. Only `cpu`, `cuda` or torch.device supported.")
# Return values accordingly
if use_label:
if isinstance(coordinates, np.ndarray):
unique_map, inverse_map, colabels = MEB.quantize_label_np(
discrete_coordinates, labels, ignore_label
)
else:
assert (
not discrete_coordinates.is_cuda
), "Quantization with label requires cpu tensors."
assert not labels.is_cuda, "Quantization with label requires cpu tensors."
unique_map, inverse_map, colabels = MEB.quantize_label_th(
discrete_coordinates, labels, ignore_label
)
return_args = [discrete_coordinates[unique_map]]
if use_feat:
return_args.append(features[unique_map])
# Labels
return_args.append(colabels)
# Additional return args
if return_index:
return_args.append(unique_map)
if return_inverse:
return_args.append(inverse_map)
if len(return_args) == 1:
return return_args[0]
else:
return tuple(return_args)
else:
        tensor_stride = [1] * (discrete_coordinates.shape[1] - 1)
discrete_coordinates = (
discrete_coordinates.to(device)
if isinstance(discrete_coordinates, torch.Tensor)
else torch.from_numpy(discrete_coordinates).to(device)
)
_, (unique_map, inverse_map) = manager.insert_and_map(
discrete_coordinates, tensor_stride, ""
)
if return_maps_only:
if return_inverse:
return unique_map, inverse_map
else:
return unique_map
return_args = [discrete_coordinates[unique_map]]
if use_feat:
return_args.append(features[unique_map])
if return_index:
return_args.append(unique_map)
if return_inverse:
return_args.append(inverse_map)
if len(return_args) == 1:
return return_args[0]
else:
return tuple(return_args)
def unique_coordinate_map(
coordinates: torch.Tensor,
tensor_stride: Union[int, Sequence, np.ndarray] = 1,
) -> Tuple[torch.IntTensor, torch.IntTensor]:
r"""Returns the unique indices and the inverse indices of the coordinates.
    :attr:`coordinates`: an integer `torch.Tensor` (on `CUDA` if
    coordinate_map_type == `CoordinateMapType.GPU`) that defines the
    coordinates.
Example::
>>> coordinates = torch.IntTensor([[0, 0], [0, 0], [0, 1], [0, 2]])
        >>> unique_map, inverse_map = unique_coordinate_map(coordinates)
>>> coordinates[unique_map] # unique coordinates
>>> torch.all(coordinates == coordinates[unique_map][inverse_map]) # True
"""
assert coordinates.ndim == 2, "Coordinates must be a matrix"
assert isinstance(coordinates, torch.Tensor)
if not coordinates.is_cuda:
manager = MEB.CoordinateMapManagerCPU()
else:
manager = MEB.CoordinateMapManagerGPU_c10()
tensor_stride = convert_to_int_list(tensor_stride, coordinates.shape[-1] - 1)
key, (unique_map, inverse_map) = manager.insert_and_map(
coordinates, tensor_stride, ""
)
return unique_map, inverse_map
| MinkowskiEngine-master | MinkowskiEngine/utils/quantization.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
from .quantization import sparse_quantize, ravel_hash_vec, fnv_hash_vec, unique_coordinate_map
from .collation import SparseCollation, batched_coordinates, sparse_collate, batch_sparse_collate
# from .coords import get_coords_map
from .init import kaiming_normal_
from .summary import summary | MinkowskiEngine-master | MinkowskiEngine/utils/__init__.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import numpy as np
import torch
import logging
import collections.abc
def batched_coordinates(coords, dtype=torch.int32, device=None):
r"""Create a `ME.SparseTensor` coordinates from a sequence of coordinates
Given a list of either numpy or pytorch tensor coordinates, return the
batched coordinates suitable for `ME.SparseTensor`.
Args:
:attr:`coords` (a sequence of `torch.Tensor` or `numpy.ndarray`): a
list of coordinates.
:attr:`dtype`: torch data type of the return tensor. torch.int32 by default.
Returns:
        :attr:`batched_coordinates` (`torch.Tensor`): the batched coordinates.
.. warning::
From v0.4, the batch index will be prepended before all coordinates.
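    Example::

        >>> # Sketch; coords0 and coords1 stand for arbitrary N_i x D arrays.
        >>> bcoords = ME.utils.batched_coordinates([coords0, coords1])
        >>> bcoords[:, 0]  # batch indices: 0 for rows of coords0, 1 for coords1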
"""
assert isinstance(
coords, collections.abc.Sequence
), "The coordinates must be a sequence."
assert np.array(
[cs.ndim == 2 for cs in coords]
).all(), "All coordinates must be in a 2D array."
D = np.unique(np.array([cs.shape[1] for cs in coords]))
assert len(D) == 1, f"Dimension of the array mismatch. All dimensions: {D}"
D = D[0]
if device is None:
        if isinstance(coords[0], torch.Tensor):
device = coords[0].device
else:
device = "cpu"
assert dtype in [
torch.int32,
torch.float32,
], "Only torch.int32, torch.float32 supported for coordinates."
# Create a batched coordinates
N = np.array([len(cs) for cs in coords]).sum()
bcoords = torch.zeros((N, D + 1), dtype=dtype, device=device) # uninitialized
s = 0
for b, cs in enumerate(coords):
if dtype == torch.int32:
if isinstance(cs, np.ndarray):
cs = torch.from_numpy(np.floor(cs))
elif not (
isinstance(cs, torch.IntTensor) or isinstance(cs, torch.LongTensor)
):
cs = cs.floor()
cs = cs.int()
else:
if isinstance(cs, np.ndarray):
cs = torch.from_numpy(cs)
cn = len(cs)
# BATCH_FIRST:
bcoords[s : s + cn, 1:] = cs
bcoords[s : s + cn, 0] = b
s += cn
return bcoords
def sparse_collate(coords, feats, labels=None, dtype=torch.int32, device=None):
r"""Create input arguments for a sparse tensor `the documentation
<https://nvidia.github.io/MinkowskiEngine/sparse_tensor.html>`_.
Convert a set of coordinates and features into the batch coordinates and
batch features.
Args:
:attr:`coords` (set of `torch.Tensor` or `numpy.ndarray`): a set of coordinates.
:attr:`feats` (set of `torch.Tensor` or `numpy.ndarray`): a set of features.
    :attr:`labels` (set of `torch.Tensor` or `numpy.ndarray`): a set of labels
    associated with the inputs.
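    Example::

        >>> # Sketch; coords_i, feats_i, labels_i stand for per-sample arrays.
        >>> bcoords, bfeats, blabels = sparse_collate(
        ...     [coords0, coords1], [feats0, feats1], [labels0, labels1])
        >>> sinput = ME.SparseTensor(bfeats, bcoords)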
"""
    use_label = labels is not None
feats_batch, labels_batch = [], []
assert isinstance(
coords, collections.abc.Sequence
), "The coordinates must be a sequence of arrays or tensors."
assert isinstance(
feats, collections.abc.Sequence
), "The features must be a sequence of arrays or tensors."
D = np.unique(np.array([cs.shape[1] for cs in coords]))
assert len(D) == 1, f"Dimension of the array mismatch. All dimensions: {D}"
D = D[0]
if device is None:
if isinstance(coords[0], torch.Tensor):
device = coords[0].device
else:
device = "cpu"
assert dtype in [
torch.int32,
torch.float32,
], "Only torch.int32, torch.float32 supported for coordinates."
if use_label:
assert isinstance(
labels, collections.abc.Sequence
), "The labels must be a sequence of arrays or tensors."
N = np.array([len(cs) for cs in coords]).sum()
Nf = np.array([len(fs) for fs in feats]).sum()
assert N == Nf, f"Coordinate length {N} != Feature length {Nf}"
batch_id = 0
s = 0 # start index
bcoords = torch.zeros((N, D + 1), dtype=dtype, device=device) # uninitialized
for coord, feat in zip(coords, feats):
if isinstance(coord, np.ndarray):
coord = torch.from_numpy(coord)
else:
assert isinstance(
coord, torch.Tensor
), "Coords must be of type numpy.ndarray or torch.Tensor"
if dtype == torch.int32 and coord.dtype in [torch.float32, torch.float64]:
coord = coord.floor()
if isinstance(feat, np.ndarray):
feat = torch.from_numpy(feat)
else:
assert isinstance(
feat, torch.Tensor
), "Features must be of type numpy.ndarray or torch.Tensor"
# Labels
if use_label:
label = labels[batch_id]
if isinstance(label, np.ndarray):
label = torch.from_numpy(label)
labels_batch.append(label)
cn = coord.shape[0]
# Batched coords
bcoords[s : s + cn, 1:] = coord
bcoords[s : s + cn, 0] = batch_id
# Features
feats_batch.append(feat)
# Post processing steps
batch_id += 1
s += cn
# Concatenate all lists
feats_batch = torch.cat(feats_batch, 0)
if use_label:
if isinstance(labels_batch[0], torch.Tensor):
labels_batch = torch.cat(labels_batch, 0)
return bcoords, feats_batch, labels_batch
else:
return bcoords, feats_batch
def batch_sparse_collate(data, dtype=torch.int32, device=None):
r"""The wrapper function that can be used in in conjunction with
`torch.utils.data.DataLoader` to generate inputs for a sparse tensor.
Please refer to `the training example
<https://nvidia.github.io/MinkowskiEngine/demo/training.html>`_ for the
usage.
Args:
:attr:`data`: list of (coordinates, features, labels) tuples.
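    Example::

        >>> # Sketch; `dataset` stands for a Dataset yielding
        >>> # (coordinates, features, labels) tuples per sample.
        >>> loader = torch.utils.data.DataLoader(
        ...     dataset, batch_size=4, collate_fn=batch_sparse_collate)
        >>> for bcoords, bfeats, blabels in loader:
        ...     sinput = ME.SparseTensor(bfeats, bcoords)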
"""
return sparse_collate(*list(zip(*data)), dtype=dtype, device=device)
class SparseCollation:
r"""Generates collate function for coords, feats, labels.
Please refer to `the training example
<https://nvidia.github.io/MinkowskiEngine/demo/training.html>`_ for the
usage.
Args:
        :attr:`limit_numpoints` (int): If a positive integer, limits the batch
        size so that the total number of input coordinates stays below
        limit_numpoints. If 0 or negative, concatenate all points. -1 by default.
Example::
>>> data_loader = torch.utils.data.DataLoader(
>>> dataset,
>>> ...,
>>> collate_fn=SparseCollation())
>>> for d in iter(data_loader):
>>> print(d)
"""
def __init__(self, limit_numpoints=-1, dtype=torch.int32, device=None):
self.limit_numpoints = limit_numpoints
self.dtype = dtype
self.device = device
def __call__(self, list_data):
coords, feats, labels = list(zip(*list_data))
coords_batch, feats_batch, labels_batch = [], [], []
batch_num_points = 0
for batch_id, _ in enumerate(coords):
num_points = coords[batch_id].shape[0]
batch_num_points += num_points
if self.limit_numpoints > 0 and batch_num_points > self.limit_numpoints:
num_full_points = sum(len(c) for c in coords)
num_full_batch_size = len(coords)
                logging.warning(
                    f"\tCannot fit {num_full_points} points into "
                    f"{self.limit_numpoints} points limit. Truncating batch "
                    f"size at {batch_id} out of {num_full_batch_size} with "
                    f"{batch_num_points - num_points} points."
                )
break
coords_batch.append(coords[batch_id])
feats_batch.append(feats[batch_id])
labels_batch.append(labels[batch_id])
# Concatenate all lists
return sparse_collate(
coords_batch,
feats_batch,
labels_batch,
dtype=self.dtype,
device=self.device,
)
| MinkowskiEngine-master | MinkowskiEngine/utils/collation.py |
import torch
import torch.nn as nn
from torch.autograd import Variable
from collections import OrderedDict
import numpy as np
import MinkowskiEngine as ME
from MinkowskiSparseTensor import SparseTensor
def summary(model, summary_input):
result, params_info = minkowski_summary_string(model, summary_input)
print(result)
return params_info
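# Hedged usage sketch (network and input names are illustrative):
#
#   >>> net = SomeMinkowskiNetwork().to("cuda")
#   >>> sinput = ME.SparseTensor(feats, coords, device="cuda")
#   >>> total_params, trainable_params = summary(net, sinput)  # prints a layer table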
def pruned_weight_sparsity_string(module) -> float:
    r"""
    Returns the fraction of zero-valued entries in the first pruned weight
    found on the module, or 0.0 if the module carries no pruning mask.
    """
    for k in dir(module):
        if '_mask' in k:
            # torch.nn.utils.prune stores a `<name>_mask` buffer next to the
            # pruned tensor; measure the zeros in the pruned tensor itself.
            return (getattr(module, k.replace('_mask', '')) == 0).float().mean().item()
    return 0.0
def size2list(size: torch.Size) -> list:
return [i for i in size]
def get_hash_occupancy_ratio(minkowski_tensor):
    # Assumed hash-table occupancy (in percent), used to scale the estimated
    # memory footprint of the coordinate map.
    alg = minkowski_tensor.coordinate_manager.minkowski_algorithm
    if alg == ME.MinkowskiAlgorithm.SPEED_OPTIMIZED:
        return 25
    else:
        return 50
def minkowski_summary_string(model, summary_input):
summary_str = ''
def register_hook(module):
def hook(module, input, output):
class_name = str(module.__class__).split(".")[-1].split("'")[0]
module_idx = len(summary)
m_key = "%s-%i" % (class_name, module_idx + 1)
summary[m_key] = OrderedDict()
# for the weight pruned model, print the sparsity information
summary[m_key]['sparsity_ratio'] = pruned_weight_sparsity_string(module)
# save only the size of NNZ
summary[m_key]["input_shape"] = input[0].shape
if isinstance(output, (list, tuple)):
summary[m_key]["output_shape"] = [size2list(o.shape) for o in output]
else:
summary[m_key]["output_shape"] = size2list(output.shape)
params = 0
if hasattr(module, "weight") and hasattr(module.weight, "size"):
params += module.weight.numel()
summary[m_key]["trainable"] = module.weight.requires_grad
if hasattr(module, "kernel") and hasattr(module.kernel, "size"):
params += module.kernel.numel()
summary[m_key]["trainable"] = module.kernel.requires_grad
if hasattr(module, "bias") and hasattr(module.bias, "size"):
params += module.bias.numel()
summary[m_key]["nb_params"] = params
if (
not isinstance(module, nn.Sequential)
and not isinstance(module, nn.ModuleList)
):
hooks.append(module.register_forward_hook(hook))
# create properties
summary = OrderedDict()
hooks = []
# register hook
model.apply(register_hook)
# make a forward pass
# print(x.shape)
model(summary_input)
# remove these hooks
for h in hooks:
h.remove()
summary_str += "----------------------------------------------------------------" + "\n"
line_new = "{:>20} {:>25} {:>15}".format(
"Layer (type)", "Output Shape", "Param #")
summary_str += line_new + "\n"
summary_str += "================================================================" + "\n"
total_params = 0
total_output = 0
trainable_params = 0
for layer in summary:
# input_shape, output_shape, trainable, nb_params
line_new = "{:>20} {:>25} {:>15}".format(
layer,
str(summary[layer]["output_shape"]),
"{0:,}".format(summary[layer]["nb_params"]),
)
total_params += summary[layer]["nb_params"]
total_output += np.prod(summary[layer]["output_shape"])
if "trainable" in summary[layer]:
if summary[layer]["trainable"] == True:
trainable_params += summary[layer]["nb_params"]
summary_str += line_new + "\n"
    # Assume 4 bytes per number (float32 on CUDA). Input memory is the feature
    # matrix (N x C) plus the coordinates (N x (1 + D)), scaled by the inverse
    # hash-table occupancy ratio.
    total_input_size = (len(summary_input) * summary_input.shape[1]  # feature size
                        + len(summary_input) * (1 + summary_input.D) * (100 / get_hash_occupancy_ratio(summary_input))  # coordinate size
                        ) * 4. / (1024 ** 2.)
total_output_size = abs(2. * total_output * 4. /
(1024 ** 2.)) # x2 for gradients
total_params_size = abs(total_params * 4. / (1024 ** 2.))
total_size = total_params_size + total_output_size + total_input_size
summary_str += "================================================================" + "\n"
summary_str += "Total params: {0:,}".format(total_params) + "\n"
summary_str += "Trainable params: {0:,}".format(trainable_params) + "\n"
summary_str += "Non-trainable params: {0:,}".format(total_params -
trainable_params) + "\n"
summary_str += "----------------------------------------------------------------" + "\n"
summary_str += "Input size (MB): %0.2f" % total_input_size + "\n"
summary_str += "Forward/backward pass size (MB): %0.2f" % total_output_size + "\n"
summary_str += "Params size (MB): %0.2f" % total_params_size + "\n"
summary_str += "Estimated Total Size (MB): %0.2f" % total_size + "\n"
summary_str += "----------------------------------------------------------------" + "\n"
# return summary
return summary_str, (total_params, trainable_params) | MinkowskiEngine-master | MinkowskiEngine/utils/summary.py |
import math
import torch
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.dim()
if dimensions < 2:
raise ValueError(
"Fan in and fan out can not be computed for tensor with fewer than 2 dimensions"
)
if dimensions == 2: # Linear
fan_in = tensor.size(1)
fan_out = tensor.size(0)
    else:
        # MinkowskiEngine convolution kernels are laid out as
        # (kernel_volume, in_channels, out_channels), unlike torch's
        # (out_channels, in_channels, *kernel_size) layout.
        num_input_fmaps = tensor.size(1)
        num_output_fmaps = tensor.size(2)
        receptive_field_size = tensor.size(0)
        fan_in = num_input_fmaps * receptive_field_size
        fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def _calculate_correct_fan(tensor, mode):
mode = mode.lower()
valid_modes = ['fan_in', 'fan_out']
if mode not in valid_modes:
raise ValueError("Mode {} not supported, please use one of {}".format(
mode, valid_modes))
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
return fan_in if mode == 'fan_in' else fan_out
def kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
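    r"""Kaiming-normal initialization adapted to MinkowskiEngine kernels.

    Follows :attr:`torch.nn.init.kaiming_normal_`, but the fan computation
    above assumes the (kernel_volume, in_channels, out_channels) layout used
    by MinkowskiEngine convolution kernels rather than torch's
    (out_channels, in_channels, *kernel_size) layout.
    """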
fan = _calculate_correct_fan(tensor, mode)
gain = torch.nn.init.calculate_gain(nonlinearity, a)
std = gain / math.sqrt(fan)
with torch.no_grad():
return tensor.normal_(0, std)
| MinkowskiEngine-master | MinkowskiEngine/utils/init.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
assert torch.__version__ >= "1.7.0", "Gradcheck requires pytorch 1.7 or higher"
from torch.types import _TensorOrTensors
from typing import Callable, Union, Optional
from torch.autograd.gradcheck import gradcheck as _gradcheck
def gradcheck(
func: Callable[..., Union[_TensorOrTensors]], # See Note [VarArg of Tensors]
inputs: _TensorOrTensors,
eps: float = 1e-6,
atol: float = 1e-5,
rtol: float = 1e-3,
raise_exception: bool = True,
check_sparse_nnz: bool = False,
nondet_tol: float = 0.0,
check_undefined_grad: bool = True,
check_grad_dtypes: bool = False,
) -> bool:
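    r"""Thin wrapper around :attr:`torch.autograd.gradcheck` that accepts an
    autograd Function class and calls it through ``func.apply``, forwarding
    the tolerance and checking options unchanged.
    """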
return _gradcheck(
lambda *x: func.apply(*x),
inputs,
eps=eps,
atol=atol,
rtol=rtol,
raise_exception=raise_exception,
check_sparse_nnz=check_sparse_nnz,
nondet_tol=nondet_tol,
check_undefined_grad=check_undefined_grad,
check_grad_dtypes=check_grad_dtypes,
)
| MinkowskiEngine-master | MinkowskiEngine/utils/gradcheck.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch.nn as nn
import MinkowskiEngine as ME
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
dimension=-1):
super(BasicBlock, self).__init__()
assert dimension > 0
self.conv1 = ME.MinkowskiConvolution(
inplanes, planes, kernel_size=3, stride=stride, dilation=dilation, dimension=dimension)
self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.conv2 = ME.MinkowskiConvolution(
planes, planes, kernel_size=3, stride=1, dilation=dilation, dimension=dimension)
self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
bn_momentum=0.1,
dimension=-1):
super(Bottleneck, self).__init__()
assert dimension > 0
self.conv1 = ME.MinkowskiConvolution(
inplanes, planes, kernel_size=1, dimension=dimension)
self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.conv2 = ME.MinkowskiConvolution(
planes, planes, kernel_size=3, stride=stride, dilation=dilation, dimension=dimension)
self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
self.conv3 = ME.MinkowskiConvolution(
planes, planes * self.expansion, kernel_size=1, dimension=dimension)
self.norm3 = ME.MinkowskiBatchNorm(
planes * self.expansion, momentum=bn_momentum)
self.relu = ME.MinkowskiReLU(inplace=True)
self.downsample = downsample
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
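# Hedged usage sketch (tensor names are illustrative): both blocks consume and
# produce ME.SparseTensor, and Bottleneck widens its output by `expansion`.
#
#   >>> block = BasicBlock(inplanes=32, planes=32, dimension=3)
#   >>> y = block(x)  # x: ME.SparseTensor with 32-channel features
#   >>> bottleneck = Bottleneck(inplanes=64, planes=16, dimension=3)  # outputs 16 * 4 channels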
| MinkowskiEngine-master | MinkowskiEngine/modules/resnet_block.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
class SELayer(nn.Module):
def __init__(self, channel, reduction=16, D=-1):
# Global coords does not require coords_key
super(SELayer, self).__init__()
self.fc = nn.Sequential(
ME.MinkowskiLinear(channel, channel // reduction),
ME.MinkowskiReLU(inplace=True),
ME.MinkowskiLinear(channel // reduction, channel),
ME.MinkowskiSigmoid())
self.pooling = ME.MinkowskiGlobalPooling()
self.broadcast_mul = ME.MinkowskiBroadcastMultiplication()
def forward(self, x):
y = self.pooling(x)
y = self.fc(y)
return self.broadcast_mul(x, y)
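# Note on the flow above: MinkowskiGlobalPooling squeezes each batch item to a
# single feature vector, the linear bottleneck with the sigmoid produces
# per-channel gates in (0, 1), and MinkowskiBroadcastMultiplication rescales
# every point's features by its batch's gates (squeeze-and-excitation).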
class SEBasicBlock(BasicBlock):
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
reduction=16,
D=-1):
super(SEBasicBlock, self).__init__(
inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
D=D)
self.se = SELayer(planes, reduction=reduction, D=D)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
D=3,
reduction=16):
super(SEBottleneck, self).__init__(
inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
D=D)
self.se = SELayer(planes * self.expansion, reduction=reduction, D=D)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.norm2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.norm3(out)
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
| MinkowskiEngine-master | MinkowskiEngine/modules/senet_block.py |
# Copyright (c) Chris Choy ([email protected]).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
| MinkowskiEngine-master | MinkowskiEngine/modules/__init__.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup, find_packages
with open('VERSION', 'r') as f:
version = f.read().strip()
if version.endswith("dev"):
version = version[:-3]
def req_file(filename, folder="requirements"):
with open(os.path.join(folder, filename)) as f:
content = f.readlines()
        # Strip whitespace characters (e.g. the trailing `\n` on each line)
        return [x.strip() for x in content]
install_requires = req_file("requirements.txt")
extras_require = {
# User packages
'nsys': req_file("requirements_nsys.txt"),
}
setup(
name='nvidia-pyprof',
version=version,
packages=find_packages(),
author="Aditya Agrawal,Marek Kolodziej",
author_email="[email protected],[email protected]",
maintainer="Elias Bermudez",
maintainer_email="[email protected]",
url="https://github.com/NVIDIA/PyProf",
download_url="https://github.com/NVIDIA/PyProf",
license="BSD 3-Clause License",
description='NVIDIA Pytorch Profiler',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Utilities',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Environment :: Console',
'Natural Language :: English',
'Operating System :: OS Independent',
],
    keywords='nvidia, profiling, deep learning, ' \
             'machine learning, supervised learning, ' \
             'unsupervised learning, reinforcement learning',
platforms=["Linux"],
include_package_data=True,
install_requires=install_requires,
extras_require=extras_require,
)
| PyProf-master | setup.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This test runs lenet through the 3 steps on pyprof.
It ensures:
- A database is created from nsys
- A dict is created from pyprof.parse
- A csv with valid data is created from pyprof.prof
'''
import subprocess
from pathlib import Path
import unittest
import csv
unittest.TestLoader.sortTestMethodsUsing = None
class TestPyprofWithLenet(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.pyprof_path = Path("/opt/pytorch/pyprof/pyprof/examples")
def test_run_nsys(self):
# Print a blank line to make the test output more readable
print()
command = "nsys profile -f true -o lenet --export sqlite python " + self.pyprof_path.as_posix() + "/lenet.py"
command_tokens = command.split()
ret_val = subprocess.run(command_tokens)
self.assertEqual(ret_val.returncode, 0)
db_path = Path('./lenet.sqlite')
self.assertTrue(db_path.exists())
def test_run_parse(self):
command = "python -m pyprof.parse lenet.sqlite"
command_tokens = command.split()
with open("lenet.dict", "w") as f:
ret_val = subprocess.run(command_tokens, stdout=f)
self.assertEqual(ret_val.returncode, 0)
dict_path = Path('./lenet.dict')
self.assertTrue(dict_path.exists())
def test_run_profile(self):
lenet_csv = "./lenet.csv"
command = "python -m pyprof.prof --csv lenet.dict"
command_tokens = command.split()
with open(lenet_csv, "w") as f:
ret_val = subprocess.run(command_tokens, stdout=f)
self.assertEqual(ret_val.returncode, 0)
csv_path = Path(lenet_csv)
self.assertTrue(csv_path.exists())
directions = ["bprop", "fprop"]
ops = [
"", # covers the "reduce_kernel" kernel, op will be an empty string in the report
"add_",
"backward",
"bias",
"conv2d",
"linear",
"max_pool2d",
"mse_loss",
"relu",
"sum",
]
with open("lenet.csv", "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
# verify direction
self.assertTrue(row['Direction'] in directions, f"Row direction: {row['Direction']}")
# verify op
self.assertTrue(row['Op'] in ops, f"Row op: {row['Op']}")
# verify final id is in the range
                # Which kernels cuDNN selects is nondeterministic, so the exact
                # kernel count varies; for this network it should fall in [65, 75).
                self.assertTrue(int(row['Idx']) in range(65, 75), f"Final Idx: {row['Idx']}")
if __name__ == '__main__':
unittest.main(verbosity=2)
| PyProf-master | qa/L0_lenet/test_lenet.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test_pyprof_function_stack import TestPyProfFuncStack
| PyProf-master | qa/L0_function_stack/__init__.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This test exercises the tracemarker get_func_stack() functionality
'''
import inspect
import unittest
import pyprof
from pyprof.nvtx.config import Config
from pyprof.nvtx.dlprof import DLProf
config = Config(enable_function_stack=True)
dlprof = DLProf()
class TestPyProfFuncStack(unittest.TestCase):
def __init__(self, testName):
super().__init__(testName)
def setUp(self):
pass
def tearDown(self):
pass
def compare_funcstack(self, actual_tracemarker, expected_str):
        # Given a funcstack string, remove TestPyProfFuncStack::_callTestMethod
        # and everything above it.
        #
def remove_test_class_hierarchy(x):
separator = "/"
fn_split = x.split(separator)
split = 0
# Find the LAST instance of run in the split
#
for i, n in enumerate(fn_split):
if (n == "TestPyProfFuncStack::_callTestMethod"):
split = i + 1
fn_split = fn_split[split:]
joined = separator.join(fn_split)
return joined
tracemarker_dict = eval(actual_tracemarker)
actual_func_stack = remove_test_class_hierarchy(tracemarker_dict["funcStack"])
self.assertEqual(expected_str, actual_func_stack, f"Expected: {expected_str}\nActual: {actual_func_stack}")
# Basic function hierarchy test
# Function stack is func1->func2->func3->verify
# Local function 'verify' gets recognized as a member of TestPyProfFuncStack because it uses 'self'
#
def test_basic(self):
def verify():
tracemarker = pyprof.nvtx.nvmarker.traceMarker("opname")
self.compare_funcstack(
tracemarker, "TestPyProfFuncStack::test_basic/func1/func2/func3/TestPyProfFuncStack::verify/opname"
)
def func3():
verify()
def func2():
func3()
def func1():
func2()
func1()
# Test that 'always_benchmark_wrapper' is ignored in hierarchy
# Test that 'wrapper_func' is ignored in hierarchy
# Function stack is func1->func2->always_benchmark_wrapper->func3->wrapper_func->verify
# Local function 'verify' gets recognized as a member of TestPyProfFuncStack because it uses 'self'
#
def test_ignore_wrapper_func(self):
def verify():
tracemarker = pyprof.nvtx.nvmarker.traceMarker("opname")
self.compare_funcstack(
tracemarker,
"TestPyProfFuncStack::test_ignore_wrapper_func/func1/func2/func3/TestPyProfFuncStack::verify/opname"
)
def wrapper_func():
verify()
def func3():
wrapper_func()
def always_benchmark_wrapper():
func3()
def func2():
always_benchmark_wrapper()
def func1():
func2()
func1()
# Test that lambdas are NOT ignored in hierarchy
# Function stack is func1->func2->lambda->func3->verify
# Local function 'verify' gets recognized as a member of TestPyProfFuncStack because it uses 'self'
#
def test_ignore_lambda(self):
def verify():
tracemarker = pyprof.nvtx.nvmarker.traceMarker("opname")
self.compare_funcstack(
tracemarker,
"TestPyProfFuncStack::test_ignore_lambda/func1/func2/<lambda>/func3/TestPyProfFuncStack::verify/opname"
)
def func3():
verify()
def func2():
x = lambda: func3()
x()
def func1():
func2()
func1()
# Test that duplicates are ignored in hierarchy
#
# Function stack is func1->func1->func1->func1->func2->verify
# Local function 'verify' gets recognized as a member of TestPyProfFuncStack because it uses 'self'
#
def test_ignore_duplicates(self):
def verify():
tracemarker = pyprof.nvtx.nvmarker.traceMarker("opname")
self.compare_funcstack(
tracemarker,
"TestPyProfFuncStack::test_ignore_duplicates/func1/func2/TestPyProfFuncStack::verify/opname"
)
def func2():
verify()
def func1(count):
if (count > 0):
func1(count - 1)
else:
func2()
func1(3)
# Function stack is func1->func2->wrapper_func. It is called 4 times.
#
# Only the 4th time is any checking done
#
# On that 4th call, it will be the 2nd time executing func2, from func1, and
# it will be the 2nd time executing wrapper_func from that 2nd call of func2.
#
# Even though wrapper_func is omitted from the func stack, its call count should
# be passed on to the opname.
#
def test_uniquified_nodes(self):
def verify(check):
tracemarker = pyprof.nvtx.nvmarker.traceMarker("opname")
if (check):
self.compare_funcstack(
tracemarker,
"TestPyProfFuncStack::test_uniquified_nodes/func1/func2(2)/TestPyProfFuncStack::verify/opname(2)"
)
def wrapper_func(check):
verify(check)
def func2(check):
wrapper_func(False)
wrapper_func(check)
def func1():
func2(False)
func2(True)
func1()
def run_tests(test_name):
dummy = TestPyProfFuncStack(test_name)
test_cases = list(
filter(lambda x: 'test_' in x, map(lambda x: x[0], inspect.getmembers(dummy, predicate=inspect.ismethod)))
)
print(f'Running tests for {test_name}')
suite = unittest.TestSuite()
for test_case in test_cases:
suite.addTest(TestPyProfFuncStack(test_case))
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.wasSuccessful():
exit(0)
else:
exit(1)
if __name__ == '__main__':
run_tests("test_basic")
| PyProf-master | qa/L0_function_stack/test_pyprof_func_stack.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This test exercises different PyTorch methods, ensuring that the NVTX monkey
patching worked as expected.
'''
import inspect
import os
import torch
import torch.nn.functional as F
import unittest
#from apex import pyprof
import pyprof
pyprof.nvtx.init()
# TODO: add tests for:
# F.bilinear, F.l1_loss, F.multilabel_soft_margin_loss, F.multi_margin_loss
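# (illustrative sketches for the first two are included inside the class below)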
class TestPyProfNvtx(unittest.TestCase):
def __init__(self, testName, dtype=torch.float16):
super().__init__(testName)
self.dtype = dtype
def setUp(self):
pass
def tearDown(self):
pass
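    # The two sketches below cover the first two entries from the TODO list at
    # the top of this file. They are illustrative only: the tensor shapes are
    # assumptions chosen to satisfy the functional signatures, not values taken
    # from the original suite.
    def test_bilinear(self):
        inp1 = torch.randn(32, 64, device='cuda', dtype=self.dtype)
        inp2 = torch.randn(32, 128, device='cuda', dtype=self.dtype)
        # weight shape is (out_features, in1_features, in2_features)
        weight = torch.randn(256, 64, 128, device='cuda', dtype=self.dtype)
        output = F.bilinear(inp1, inp2, weight, bias=None)
    def test_l1_loss(self):
        inp = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
        target = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=False)
        output = F.l1_loss(inp, target, reduction='mean')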
def test_conv1d(self):
# Data and weight tensors
tensor1d_in_conv = torch.randn(32, 3, 224, device='cuda', dtype=self.dtype)
tensor1d_in_conv_grouped = torch.randn(32, 6, 224, device='cuda', dtype=self.dtype)
conv1d_filter = torch.randn(16, 3, 3, device='cuda', dtype=self.dtype)
conv1d_bias = torch.ones(16, device='cuda', dtype=self.dtype)
# Vanilla conv1d
conv1d_out_vanilla = F.conv1d(tensor1d_in_conv, conv1d_filter)
# conv1d with bias
conv1d_out_with_bias = F.conv1d(tensor1d_in_conv, conv1d_filter, bias=conv1d_bias)
# conv1d - stride > 1
conv1d_out_strided = F.conv1d(tensor1d_in_conv, conv1d_filter, stride=2)
# conv1d - dilation > 1
conv1d_out_dilated = F.conv1d(tensor1d_in_conv, conv1d_filter, dilation=2)
# conv1d - groups > 1
conv1d_out_grouped = F.conv1d(tensor1d_in_conv_grouped, conv1d_filter, groups=2)
# conv1d - padding with zeros
conv1d_out_padding_zeros = F.conv1d(tensor1d_in_conv, conv1d_filter, padding=6)
def test_conv2d(self):
# Data and weight tensors
tensor2d_in_conv = torch.randn(32, 3, 224, 224, device='cuda', dtype=self.dtype)
tensor2d_in_conv_grouped = torch.randn(32, 6, 224, 224, device='cuda', dtype=self.dtype)
conv2d_filter = torch.randn(16, 3, 3, 3, device='cuda', dtype=self.dtype)
conv2d_bias = torch.ones(16, device='cuda', dtype=self.dtype)
# Vanilla conv2d
conv2d_out_vanilla = F.conv2d(tensor2d_in_conv, conv2d_filter)
# conv2d with bias
conv2d_with_bias = F.conv2d(tensor2d_in_conv, conv2d_filter, bias=conv2d_bias)
# conv2d - stride > 1
conv2d_out_strided = F.conv2d(tensor2d_in_conv, conv2d_filter, stride=2)
# conv2d - dilation > 1
conv2d_out_dilated = F.conv2d(tensor2d_in_conv, conv2d_filter, dilation=2)
# conv2d - groups > 1
conv2d_out_grouped = F.conv2d(tensor2d_in_conv_grouped, conv2d_filter, groups=2)
# conv2d - padding with zeros
conv2d_out_padding_zeros = F.conv2d(tensor2d_in_conv, conv2d_filter, padding=6)
def test_conv3d(self):
# Data and weight tensors
tensor3d_in_conv = torch.randn(32, 3, 16, 224, 224, device='cuda', dtype=self.dtype)
tensor3d_in_conv_grouped = torch.randn(32, 6, 16, 224, 224, device='cuda', dtype=self.dtype)
conv3d_filter = torch.randn(16, 3, 3, 3, 3, device='cuda', dtype=self.dtype)
conv3d_bias = torch.ones(16, device='cuda', dtype=self.dtype)
# Vanilla conv3d
conv3d_out_vanilla = F.conv3d(tensor3d_in_conv, conv3d_filter)
# conv3d - stride > 1
conv3d_out_strided = F.conv3d(tensor3d_in_conv, conv3d_filter, stride=2)
# conv3d - dilation > 1
conv3d_out_dilated = F.conv3d(tensor3d_in_conv, conv3d_filter, dilation=2)
# conv3d - groups > 1
conv3d_out_grouped = F.conv3d(tensor3d_in_conv_grouped, conv3d_filter, groups=2)
# conv3d - padding with zeros
conv3d_out_padding_zeros = F.conv3d(tensor3d_in_conv, conv3d_filter, padding=6)
def test_conv_transpose1d(self):
# Data and weight tensors
conv_transpose1d_tensor = torch.randn(64, 16, 64, device='cuda', dtype=self.dtype)
conv_transpose1d_filter = torch.randn(16, 32, 3, device='cuda', dtype=self.dtype)
conv_transpose1d_bias = torch.randn(32, device='cuda', dtype=self.dtype)
# Conv transpose runs
conv_transpose1d_out = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter)
conv_transpose1d_out_biased = F.conv_transpose1d(
conv_transpose1d_tensor, conv_transpose1d_filter, bias=conv_transpose1d_bias
)
conv_transpose1d_out_strided = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter, stride=2)
conv_transpose1d_out_padded = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter, padding=3)
conv_transpose1d_out2_padded = F.conv_transpose1d(
conv_transpose1d_tensor, conv_transpose1d_filter, output_padding=2, dilation=3
)
conv_transpose1d_out_grouped = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter, groups=2)
conv_transpose1d_out_dilated = F.conv_transpose1d(conv_transpose1d_tensor, conv_transpose1d_filter, dilation=2)
def test_conv_transpose2d(self):
# Data and weight tensors
conv_transpose2d_tensor = torch.randn(64, 8, 5, 5, device='cuda', dtype=self.dtype)
conv_transpose2d_filter = torch.randn(8, 16, 3, 3, device='cuda', dtype=self.dtype)
conv_transpose2d_bias = torch.randn(16, device='cuda', dtype=self.dtype)
# Conv transpose runs
conv_transpose2d_out = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter)
conv_transpose2d_out_biased = F.conv_transpose2d(
conv_transpose2d_tensor, conv_transpose2d_filter, bias=conv_transpose2d_bias
)
conv_transpose2d_out_strided = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter, stride=2)
conv_transpose2d_out_padded = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter, padding=3)
conv_transpose2d_out2_padded = F.conv_transpose2d(
conv_transpose2d_tensor, conv_transpose2d_filter, output_padding=2, dilation=3
)
conv_transpose2d_out_grouped = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter, groups=2)
conv_transpose2d_out_dilated = F.conv_transpose2d(conv_transpose2d_tensor, conv_transpose2d_filter, dilation=2)
def test_conv_transpose3d(self):
# Data and weight tensors
conv_transpose3d_tensor = torch.randn(20, 16, 50, 10, 20, device='cuda', dtype=self.dtype)
conv_transpose3d_filter = torch.randn(16, 33, 3, 3, 3, device='cuda', dtype=self.dtype)
conv_transpose3d_bias = torch.randn(33, device='cuda', dtype=self.dtype)
# Conv transpose runs
conv_transpose3d_out = F.conv_transpose3d(conv_transpose3d_tensor, conv_transpose3d_filter)
conv_transpose3d_out_biased = F.conv_transpose3d(
conv_transpose3d_tensor, conv_transpose3d_filter, bias=conv_transpose3d_bias
)
conv_transpose3d_out_strided = F.conv_transpose3d(conv_transpose3d_tensor, conv_transpose3d_filter, stride=2)
conv_transpose3d_out_padded = F.conv_transpose3d(conv_transpose3d_tensor, conv_transpose3d_filter, padding=3)
conv_transpose3d_out2_padded = F.conv_transpose3d(
conv_transpose3d_tensor, conv_transpose3d_filter, output_padding=2, dilation=3
)
conv_transpose3d_out_grouped = F.conv_transpose3d(conv_transpose3d_tensor, conv_transpose3d_filter, groups=2)
conv_transpose3d_out_dilated = F.conv_transpose3d(conv_transpose3d_tensor, conv_transpose3d_filter, dilation=2)
def test_unfold(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
kernel_size = (4, 5)
inp_unf_dilated = F.unfold(inp, kernel_size, dilation=2)
inp_unf_padded = F.unfold(inp, kernel_size, padding=2)
inp_unf_strided = F.unfold(inp, kernel_size, stride=2)
def test_fold(self):
inp = torch.randn(3, 20, 20, device='cuda', dtype=self.dtype)
inp_folded = F.fold(inp, (4, 5), (1, 1))
def test_avg_pool1d(self):
inp = torch.randn(1, 1, 28, device='cuda', dtype=self.dtype)
out = F.avg_pool1d(inp, kernel_size=5, stride=2, padding=2, ceil_mode=True, count_include_pad=False)
def test_avg_pool2d(self):
inp = torch.randn(1, 3, 224, 224, device='cuda', dtype=self.dtype)
out = F.avg_pool2d(inp, kernel_size=5, stride=2, padding=2, ceil_mode=True, count_include_pad=False)
def test_avg_pool3d(self):
inp = torch.randn(1, 3, 16, 224, 224, device='cuda', dtype=self.dtype)
out = F.avg_pool3d(inp, kernel_size=5, stride=2, padding=2, ceil_mode=True, count_include_pad=False)
def test_adaptive_avg_pool1d(self):
inp = torch.randn(1, 1, 28, device='cuda', dtype=self.dtype)
out = F.adaptive_avg_pool1d(inp, output_size=5)
def test_adaptive_avg_pool2d(self):
inp = torch.randn(1, 16, 32, 32, device='cuda', dtype=self.dtype)
out = F.adaptive_avg_pool2d(inp, output_size=5)
def test_adaptive_avg_pool3d(self):
inp = torch.randn(1, 16, 16, 32, 32, device='cuda', dtype=self.dtype)
out = F.adaptive_avg_pool3d(inp, output_size=5)
def test_max_pool1d(self):
inp = torch.randn(1, 16, 32, device='cuda', dtype=self.dtype)
out = F.max_pool1d(inp, kernel_size=5, stride=2, padding=2, return_indices=True, ceil_mode=True)
def test_max_pool2d(self):
inp = torch.randn(1, 16, 32, 32, device='cuda', dtype=self.dtype)
out = F.max_pool2d(inp, kernel_size=5, stride=2, padding=2, return_indices=True, ceil_mode=True)
def test_max_pool3d(self):
inp = torch.randn(1, 16, 16, 32, 32, device='cuda', dtype=self.dtype)
out = F.max_pool3d(inp, kernel_size=5, stride=2, padding=2, return_indices=True, ceil_mode=True)
def test_adaptive_max_pool1d(self):
inp = torch.randn(1, 16, 28, device='cuda', dtype=self.dtype)
out = F.adaptive_max_pool1d(inp, output_size=5, return_indices=True)
def test_adaptive_max_pool2d(self):
inp = torch.randn(1, 16, 32, 32, device='cuda', dtype=self.dtype)
out = F.adaptive_max_pool2d(inp, output_size=5, return_indices=True)
def test_adaptive_max_pool3d(self):
inp = torch.randn(1, 16, 16, 32, 32, device='cuda', dtype=self.dtype)
out = F.adaptive_max_pool3d(inp, output_size=5, return_indices=True)
def test_max_unpool1d(self):
inp = torch.randn(1, 16, 32, device='cuda', dtype=self.dtype)
output, indices = F.max_pool1d(inp, kernel_size=5, stride=2, padding=2, return_indices=True, ceil_mode=True)
output = F.max_unpool1d(output, indices, kernel_size=2, stride=2, padding=2)
def test_max_unpool2d(self):
inp = torch.randn(1, 16, 32, 32, device='cuda', dtype=self.dtype)
output, indices = F.max_pool2d(inp, kernel_size=5, stride=2, padding=2, return_indices=True, ceil_mode=True)
output = F.max_unpool2d(output, indices, kernel_size=2, stride=2, padding=2)
def test_max_unpool3d(self):
inp = torch.randn(1, 16, 8, 32, 32, device='cuda', dtype=self.dtype)
output, indices = F.max_pool3d(inp, kernel_size=5, stride=2, padding=2, return_indices=True, ceil_mode=True)
output = F.max_unpool3d(output, indices, kernel_size=2, stride=2, padding=2)
def test_lp_pool1d(self):
inp = torch.randn(1, 32, 64, device='cuda', dtype=self.dtype)
output = F.lp_pool1d(inp, 2, 3, stride=2, ceil_mode=True)
def test_lp_pool2d(self):
#torch.nn.LPPool2d(norm_type, kernel_size, stride=None, ceil_mode=False)
inp = torch.randn(1, 32, 64, 64, device='cuda', dtype=self.dtype)
output = F.lp_pool2d(inp, 2, 3, stride=2, ceil_mode=True)
def test_threshold(self):
inp = torch.randn(1, 8, 32, 32, device='cuda', dtype=self.dtype)
output = F.threshold(inp, 6, 6, inplace=False)
def test_threshold_(self):
inp = torch.randn(1, 8, 32, 32, device='cuda', dtype=self.dtype)
output = F.threshold_(inp, 6, 6)
def test_relu(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.relu(inp, inplace=False)
def test_relu_(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.relu_(inp)
def test_hardtanh(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.hardtanh(inp, min_val=-1., max_val=1., inplace=False)
def test_hardtanh_(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.hardtanh_(inp, min_val=-1., max_val=1.)
def test_relu6(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.relu6(inp, inplace=False)
def test_elu(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.elu(inp, alpha=1.0, inplace=False)
def test_elu_(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.elu_(inp, alpha=1.0)
def test_selu(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.selu(inp)
def test_celu(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.celu(inp, alpha=1.0, inplace=False)
def test_leaky_relu(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.leaky_relu(inp, negative_slope=0.01, inplace=False)
def test_leaky_relu_(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.leaky_relu_(inp, negative_slope=0.01)
def test_prelu(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
weight = torch.randn(1, device='cuda', dtype=self.dtype)
output = F.prelu(inp, weight)
def test_rrelu(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.rrelu(inp, lower=1. / 8, upper=1. / 3, training=False, inplace=False)
def test_rrelu_(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
        output = F.rrelu_(inp, lower=1. / 8, upper=1. / 3, training=False)
def test_glu(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.glu(inp, dim=-1)
def test_logsigmoid(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.logsigmoid(inp)
def test_hardshrink(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.hardshrink(inp, lambd=0.5)
def test_tanhshrink(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.tanhshrink(inp)
def test_softsign(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.softsign(inp)
def test_softplus(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.softplus(inp, beta=1, threshold=20)
def test_softmin(self):
inp = torch.randn(16, 1024, device='cuda', dtype=self.dtype)
output = F.softmin(inp, dim=1, _stacklevel=3, dtype=self.dtype)
def test_softmax(self):
inp = torch.randn(16, 1024, device='cuda', dtype=self.dtype)
output = F.softmax(inp, dim=1, _stacklevel=3, dtype=self.dtype)
def test_softshrink(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.softshrink(inp, lambd=0.5)
def test_gumbel_softmax(self):
inp = torch.randn(16, 1024, device='cuda', dtype=self.dtype)
output = F.gumbel_softmax(inp, tau=1, hard=False, eps=1e-10, dim=-1)
def test_log_softmax(self):
inp = torch.randn(16, 1024, device='cuda', dtype=self.dtype)
output = F.log_softmax(inp, dim=-1, _stacklevel=3)
def test_tanh(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = torch.tanh(inp)
def test_sigmoid(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = torch.sigmoid(inp)
def test_batch_norm(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
# running_mean, running_var
running_mean = torch.randn(3, device='cuda', dtype=self.dtype)
running_var = torch.randn(3, device='cuda', dtype=self.dtype)
output = F.batch_norm(
inp, running_mean, running_var, weight=None, bias=None, training=False, momentum=0.1, eps=1e-05
)
def test_instance_norm(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
running_mean = torch.randn(3, device='cuda', dtype=self.dtype)
running_var = torch.randn(3, device='cuda', dtype=self.dtype)
output = F.instance_norm(
inp, running_mean=running_mean, running_var=running_var, weight=None, bias=None, use_input_stats=True,
momentum=0.1, eps=1e-05
)
def test_layer_norm(self):
inp = torch.randn(1, 3, 32, 32, device='cuda', dtype=self.dtype)
output = F.layer_norm(inp, inp.size()[1:], weight=None, bias=None, eps=1e-05)
def test_local_response_norm(self):
inp = torch.randn(16, 8, 64, 64, device='cuda', dtype=self.dtype)
output = F.local_response_norm(inp, 2, alpha=0.0001, beta=0.75, k=1.0)
def test_normalize(self):
inp = torch.randn(16, 8, 64, 64, device='cuda', dtype=self.dtype)
output = F.normalize(inp, p=2, dim=1, eps=1e-12, out=None)
def test_linear(self):
inp = torch.randn(32, 64, 128, device='cuda', dtype=self.dtype)
weight = torch.randn(256, 128, device='cuda', dtype=self.dtype)
output = F.linear(inp, weight, bias=None)
def test_dropout(self):
inp = torch.randn(16, 8, 64, 64, device='cuda', dtype=self.dtype)
output = F.dropout(inp, p=0.5, training=True, inplace=False)
def test_alpha_dropout(self):
inp = torch.randn(16, 8, 64, 64, device='cuda', dtype=self.dtype)
output = F.alpha_dropout(inp, p=0.5, training=True, inplace=False)
def test_dropout2d(self):
inp = torch.randn(16, 8, 64, 64, device='cuda', dtype=self.dtype)
output = F.dropout2d(inp, p=0.5, training=True, inplace=False)
def test_dropout3d(self):
inp = torch.randn(16, 8, 32, 64, 64, device='cuda', dtype=self.dtype)
output = F.dropout3d(inp, p=0.5, training=True, inplace=False)
def test_embedding(self):
pre_embed_dim = 1024
post_embed_dim = 32
inp = torch.randint(0, pre_embed_dim, (128, 16), device='cuda')
weight = torch.randn(pre_embed_dim, post_embed_dim, device='cuda', dtype=self.dtype)
output = F.embedding(
inp, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False
)
def test_embedding_bag(self):
pre_embed_dim = 1024
post_embed_dim = 32
inp = torch.randint(0, pre_embed_dim, (128, 16), device='cuda')
weight = torch.randn(pre_embed_dim, post_embed_dim, device='cuda', dtype=self.dtype)
output = F.embedding_bag(
inp, weight, offsets=None, max_norm=None, norm_type=2, scale_grad_by_freq=False, mode='mean', sparse=False
)
def test_one_hot(self):
num_classes = 10
inp = torch.randint(0, num_classes, (128, 16), device='cuda')
output = F.one_hot(inp, num_classes=10)
def test_pairwise_distance(self):
inp1 = torch.randn(1024, 128, device='cuda', dtype=self.dtype)
inp2 = torch.randn(1024, 128, device='cuda', dtype=self.dtype)
output = F.pairwise_distance(inp1, inp2, p=2.0, eps=1e-06, keepdim=False)
def test_cosine_similarity(self):
inp1 = torch.randn(1024, 128, device='cuda', dtype=self.dtype)
inp2 = torch.randn(1024, 128, device='cuda', dtype=self.dtype)
output = F.cosine_similarity(inp1, inp2, dim=1, eps=1e-8)
def test_pdist(self):
# pdist is not implemented for fp16
inp = torch.randn(128, 128, device='cuda', dtype=torch.float32)
output = F.pdist(inp, p=2)
def test_binary_cross_entropy(self):
# binary_cross_entropy is not implemented for fp16
inp = torch.randn(32, 128, device='cuda', dtype=torch.float32, requires_grad=True)
target = torch.randn(32, 128, device='cuda', dtype=torch.float32, requires_grad=False)
output = F.binary_cross_entropy(torch.sigmoid(inp), target)
def test_binary_cross_entropy_with_logits(self):
inp = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
target = torch.empty_like(inp).random_(2)
output = F.binary_cross_entropy_with_logits(inp, target)
def test_poisson_nll_loss(self):
inp = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
target = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=False)
output = F.poisson_nll_loss(
inp, target, log_input=True, full=False, size_average=None, eps=1e-08, reduce=None, reduction='mean'
)
def test_cosine_embedding_loss(self):
inp1 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
inp2 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
target = torch.randn(32, device='cuda', dtype=self.dtype, requires_grad=False)
output = F.cosine_embedding_loss(inp1, inp2, target, margin=0, size_average=None, reduce=None, reduction='mean')
def test_cross_entropy(self):
inp = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
target = torch.randint(0, 100, (32, ), device='cuda', dtype=torch.long, requires_grad=False)
output = F.cross_entropy(
inp, target, weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='mean'
)
def test_ctc_loss(self):
        # force fp32 because _th_normal_ (used by the next line) is not supported for fp16
log_probs = torch.randn(50, 16, 20, device='cuda', dtype=torch.float32).log_softmax(2).detach().requires_grad_()
targets = torch.randint(1, 20, (16, 30), device='cuda', dtype=torch.long)
input_lengths = torch.full((16, ), 50, dtype=torch.long)
target_lengths = torch.randint(10, 30, (16, ), dtype=torch.long)
loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths)
def test_hinge_embedding_loss(self):
inp = torch.randn(128, 32, device='cuda', dtype=self.dtype)
target = torch.randint(0, 1, (32, ), device='cuda') - 1
output = F.hinge_embedding_loss(inp, target, margin=1.0, size_average=None, reduce=None, reduction='mean')
def test_kl_div(self):
inp = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
target = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
output = F.kl_div(inp, target, size_average=None, reduce=None, reduction='batchmean')
def test_mse_loss(self):
inp = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
target = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
output = F.mse_loss(inp, target, size_average=None, reduce=None, reduction='mean')
def test_margin_ranking_loss(self):
inp1 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
inp2 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
target = (torch.randint(0, 1, (128, ), device='cuda') - 1).type_as(inp1)
output = F.margin_ranking_loss(inp1, inp2, target, margin=0, size_average=None, reduce=None, reduction='mean')
def test_multilabel_margin_loss(self):
inp = torch.randn(1024, device='cuda', dtype=self.dtype, requires_grad=True)
target = torch.randint(0, 10, (1024, ), dtype=torch.long, device='cuda')
output = F.multilabel_margin_loss(inp, target, size_average=None, reduce=None, reduction='mean')
def test_nll_loss(self):
inp = torch.randn(64, 128, device='cuda', dtype=self.dtype, requires_grad=True)
target = torch.randint(0, 10, (64, ), device='cuda', dtype=torch.long)
output = F.nll_loss(
inp, target, weight=None, size_average=None, ignore_index=-100, reduce=None, reduction='mean'
)
def test_smooth_l1_loss(self):
inp = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
target = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=False)
output = F.smooth_l1_loss(inp, target, size_average=None, reduce=None, reduction='mean')
def test_soft_margin_loss(self):
inp = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
target = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=False)
output = F.soft_margin_loss(inp, target, size_average=None, reduce=None, reduction='mean')
def test_triplet_margin_loss(self):
inp1 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
inp2 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
inp3 = torch.randn(32, 128, device='cuda', dtype=self.dtype, requires_grad=True)
output = F.triplet_margin_loss(
inp1, inp2, inp3, margin=1.0, p=2, eps=1e-06, swap=False, size_average=None, reduce=None, reduction='mean'
)
def test_pixel_shuffle(self):
inp = torch.randn(16, 8, 64, 64, device='cuda', dtype=self.dtype)
output = torch.nn.functional.pixel_shuffle(inp, 2)
def test_pad(self):
inp = torch.randn(16, 8, 64, 64, device='cuda', dtype=self.dtype)
pad = (3, 3)
output = F.pad(inp, pad, mode='constant', value=0)
def test_interpolate(self):
inp = torch.randn(16, 8, 64, 64, device='cuda', dtype=self.dtype)
output = F.interpolate(inp, size=None, scale_factor=2, mode='nearest', align_corners=None)
def test_grid_sample(self):
inp = torch.randn(16, 8, 64, 64, device='cuda', dtype=self.dtype)
grid = torch.randn(16, 32, 32, 2, device='cuda', dtype=self.dtype)
output = F.grid_sample(inp, grid, mode='bilinear', padding_mode='zeros')
def test_affine_grid(self):
theta = torch.randn(32, 2, 3, device='cuda', dtype=self.dtype)
size = (32, 8, 32, 32)
output = F.affine_grid(theta, size)
def run_tests(precision):
    dummy = TestPyProfNvtx('test_affine_grid', precision)
test_cases = list(
filter(lambda x: 'test_' in x, map(lambda x: x[0], inspect.getmembers(dummy, predicate=inspect.ismethod)))
)
print("Running tests for {}".format(precision))
suite = unittest.TestSuite()
for test_case in test_cases:
suite.addTest(TestPyProfNvtx(test_case, precision))
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.wasSuccessful():
exit(0)
else:
exit(1)
if __name__ == '__main__':
run_tests(torch.float32)
run_tests(torch.float16)
| PyProf-master | qa/L0_nvtx/test_pyprof_nvtx.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .test_pyprof_nvtx import TestPyProfNvtx
| PyProf-master | qa/L0_nvtx/__init__.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This test replays two recorded kernel descriptions and exercises the pyprof code for generating their representation.
'''
import inspect
import unittest
from pyprof.prof.data import Data
from pyprof.prof.prof import foo
class TestPyProfData(unittest.TestCase):
def __init__(self, testName):
super().__init__(testName)
def setUp(self):
pass
def tearDown(self):
pass
def test_data(self):
kernels = [
{
'kShortName':
'elementwise_kernel',
'kDuration':
2848,
'layer': [],
'trace': [],
'reprMarkers': [],
'marker':
[
"{'mod': 'Tensor', 'op': 'float', 'args': [{'name': '', 'type': 'tensor', 'shape': (18, 104, 160), 'dtype': 'bool'}]}"
],
'seqMarker': ['to, seq = 60471'],
'seqId': [60471],
'subSeqId':
0,
'altSeqId': [],
'dir':
'fprop',
'mod': ['Tensor'],
'op': ['float'],
'tid':
1431533376,
'device':
0,
'stream':
7,
'grid': (585, 1, 1),
'block': (512, 1, 1),
'kLongName':
'void at::native::elementwise_kernel<512, 1, void at::native::gpu_kernel_impl<void at::native::copy_kernel_impl<float, bool>(at::TensorIterator&)::{lambda(bool)#1}>(at::TensorIterator&, void at::native::copy_kernel_impl<float, bool>(at::TensorIterator&)::{lambda(bool)#1} const&)::{lambda(int)#1}>(int, void at::native::gpu_kernel_impl<void at::native::copy_kernel_impl<float, bool>(at::TensorIterator&)::{lambda(bool)#1}>(at::TensorIterator&, void at::native::copy_kernel_impl<float, bool>(at::TensorIterator&)::{lambda(bool)#1} const&)::{lambda(int)#1})'
},
{
'kShortName':
'elementwise_kernel',
'kDuration':
201182,
'layer': [],
'trace': [],
'reprMarkers': [],
'marker':
[
"{'mod': 'Tensor', 'op': 'clone', 'args': [{'name': '', 'type': 'tensor', 'shape': (18, 4, 416, 640), 'dtype': 'float32'}]}"
],
'seqMarker': ['clone, seq = 60161'],
'seqId': [60161],
'subSeqId':
0,
'altSeqId': [],
'dir':
'fprop',
'mod': ['Tensor'],
'op': ['clone'],
'tid':
1431533376,
'device':
0,
'stream':
7,
'grid': (37440, 1, 1),
'block': (128, 1, 1),
'kLongName':
'void at::native::elementwise_kernel<128, 4, void at::native::gpu_kernel_impl<void at::native::copy_kernel_impl<float, float>(at::TensorIterator&)::{lambda(float)#1}>(at::TensorIterator&, void at::native::copy_kernel_impl<float, float>(at::TensorIterator&)::{lambda(float)#1} const&)::{lambda(int)#2}>(int, void at::native::gpu_kernel_impl<void at::native::copy_kernel_impl<float, float>(at::TensorIterator&)::{lambda(float)#1}>(at::TensorIterator&, void at::native::copy_kernel_impl<float, float>(at::TensorIterator&)::{lambda(float)#1} const&)::{lambda(int)#2})'
},
]
for k in kernels:
d = Data(k)
mod = k['mod']
op = k['op']
xx = foo(mod, op, d)
d.setParams(xx.params())
def run_tests(test_name):
dummy = TestPyProfData(test_name)
test_cases = list(
filter(lambda x: 'test_' in x, map(lambda x: x[0], inspect.getmembers(dummy, predicate=inspect.ismethod)))
)
print(f'Running tests for {test_name}')
suite = unittest.TestSuite()
for test_case in test_cases:
suite.addTest(TestPyProfData(test_case))
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.wasSuccessful():
exit(0)
else:
exit(1)
if __name__ == '__main__':
run_tests('test_data')
| PyProf-master | qa/L0_pyprof_data/test_pyprof_data.py |
| PyProf-master | qa/L0_pyprof_data/__init__.py
#!/usr/bin/python
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
FLAGS = None
SKIP_EXTS = ('jpeg', 'jpg', 'pgm', 'png',
'log', 'serverlog',
'preprocessed', 'jmx', 'gz',
'caffemodel', 'json')
SKIP_PATHS = ('requirements.txt',
'requirements/requirements_nsys.txt',
'requirements/requirements.txt',
'qa/L0_docs/VERSION',
'LICENSE',
'VERSION',
'MANIFEST.in',
'build/',
'dist/',
'nvidia_pyprof.egg-info/')
COPYRIGHT_YEAR_RE0 = 'Copyright \\(c\\) (20[0-9][0-9]),'
COPYRIGHT_YEAR_RE1 = 'Copyright \\(c\\) (20[0-9][0-9])-(20[0-9][0-9]),'
COPYRIGHT = '''
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
single_re = re.compile(COPYRIGHT_YEAR_RE0)
range_re = re.compile(COPYRIGHT_YEAR_RE1)
def visit(path):
if FLAGS.verbose:
print("visiting " + path)
for skip in SKIP_EXTS:
if path.endswith('.' + skip):
if FLAGS.verbose:
print("skipping due to extension: " + path)
return True
for skip in SKIP_PATHS:
if path.startswith(skip):
if FLAGS.verbose:
print("skipping due to path prefix: " + path)
return True
with open(path, 'r') as f:
first_line = True
second_line = True
line = None
try:
for fline in f:
line = fline
# Skip any '#!', '..', '<!--', or '{{/*' lines at the
# start of the file
if first_line:
first_line = False
if (fline.startswith("#!") or fline.startswith("..") or
fline.startswith("<!--") or fline.startswith("{{/*")):
continue
                # Skip any '# -*-' lines as the second line
if second_line:
second_line = False
if (fline.startswith("# -*-")):
continue
# Skip empty lines...
if len(fline.strip()) != 0:
break
except UnicodeDecodeError as ex:
# If we get this exception on the first line then assume a
# non-text file.
if not first_line:
raise ex
if FLAGS.verbose:
print("skipping binary file: " + path)
return True
if line is None:
if FLAGS.verbose:
print("skipping empty file: " + path)
return True
line = line.strip()
# The next line must be the copyright line with a single year
# or a year range. It is optionally allowed to have '# ' or
# '// ' prefix.
prefix = ""
if line.startswith('# '):
prefix = '# '
elif line.startswith('// '):
prefix = '// '
    elif not line.startswith(COPYRIGHT_YEAR_RE0[0]):  # 'C', the first letter of "Copyright"
print("incorrect prefix for copyright line, allowed prefixes '# ' or '// ', for " +
path + ": " + line)
return False
start_year = 0
end_year = 0
m = single_re.match(line[len(prefix):])
if m and len(m.groups()) == 1:
start_year = end_year = int(m.group(1))
else:
m = range_re.match(line[len(prefix):])
if m and len(m.groups()) == 2:
start_year = int(m.group(1))
end_year = int(m.group(2))
else:
print("copyright year is not recognized for " + path + ": " + line)
return False
if start_year > FLAGS.year:
print("copyright start year greater than current year for " + path + ": " + line)
return False
if end_year > FLAGS.year:
print("copyright end year greater than current year for " + path + ": " + line)
return False
if end_year < start_year:
print("copyright start year greater than end year for " + path + ": " + line)
return False
# Subsequent lines must match the copyright body.
copyright_body = [l.rstrip() for i, l in enumerate(COPYRIGHT.splitlines()) if i > 0]
copyright_idx = 0
for line in f:
if copyright_idx >= len(copyright_body):
break
if len(prefix) == 0:
line = line.rstrip()
else:
line = line.strip()
if len(copyright_body[copyright_idx]) == 0:
expected = prefix.strip()
else:
expected = (prefix + copyright_body[copyright_idx])
if line != expected:
print("incorrect copyright body for " + path)
print(" expected: '" + expected + "'")
print(" got: '" + line + "'")
return False
copyright_idx += 1
if copyright_idx != len(copyright_body):
print("missing " + str(len(copyright_body) - copyright_idx) +
" lines of the copyright body")
return False
if FLAGS.verbose:
print("copyright correct for " + path)
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_argument('-y', '--year', type=int, required=True,
help='Copyright year')
parser.add_argument('paths', type=str, nargs='*', default=None,
help='Directories or files to check')
FLAGS = parser.parse_args()
if FLAGS.paths is None or len(FLAGS.paths) == 0:
parser.print_help()
exit(1)
ret = True
for path in FLAGS.paths:
if not os.path.isdir(path):
if not visit(path):
ret = False
else:
for root, dirs, files in os.walk(path):
for name in files:
if not visit(os.path.join(root, name)):
ret = False
exit(0 if ret else 1)
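# Example invocation (illustrative; the year and paths are placeholders):
#   python qa/common/check_copyright.py -y 2020 pyprof qa docs
# The script exits 0 when every visited file carries a correct copyright
# header and 1 otherwise, so it can be used directly as a CI gate.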
| PyProf-master | qa/common/check_copyright.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This is a test script to run the L0 tests.
'''
import unittest
import sys
test_dirs = ["run_pyprof_nvtx", "run_pyprof_data"]
runner = unittest.TextTestRunner(verbosity=2)
errcode = 0
for test_dir in test_dirs:
suite = unittest.TestLoader().discover(test_dir)
print("\nExecuting tests from " + test_dir)
result = runner.run(suite)
if not result.wasSuccessful():
errcode = 1
sys.exit(errcode)
| PyProf-master | qa/common/run_test.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('..'))
from builtins import str
import os
import re
import sphinx_rtd_theme
import subprocess
import textwrap
# -- Project information -----------------------------------------------------
project = u'NVIDIA PyProf'
copyright = u'2020, NVIDIA Corporation'
author = u'NVIDIA Corporation'
version_long = u'0.0.0'
with open("../VERSION") as f:
version_long = f.readline()
version_short = re.match(r'^[\d]+\.[\d]+', version_long).group(0)
git_sha = os.getenv("GIT_SHA")
if not git_sha:
try:
git_sha = subprocess.check_output(["git", "log", "--pretty=format:'%h'", "-n1"]).decode('ascii').replace("'","").strip()
except:
git_sha = u'0000000'
git_sha = git_sha[:7] if len(git_sha) > 7 else git_sha
version = str(version_long + u"-" + git_sha)
# The full version, including alpha/beta/rc tags
release = str(version_long)
# hack: version is used for html creation, so put the version picker
# link here as well:
version = version + """<br/>
Version select: <select onChange="window.location.href = this.value" onFocus="this.selectedIndex = -1">
<option value="https://docs.nvidia.com/deeplearning/frameworks/pyprof-user-guide/docs/index.html">Current release</option>
<option value="https://docs.nvidia.com/deeplearning/frameworks/pyprof-archived/index.html">Older releases</option>
</select>"""
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.ifconfig',
'sphinx.ext.extlinks',
'nbsphinx',
'breathe'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Setup the breathe extension
breathe_projects = {
"BreathePyProf": "./doxyoutput/xml"
}
breathe_default_project = "BreathePyProf"
# Tell sphinx what the pygments highlight language should be.
highlight_language = 'text'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'canonical_url': 'https://docs.nvidia.com/deeplearning/frameworks/pyprof-user-guide/docs/index.html',
'collapse_navigation': False,
'display_version': True,
'logo_only': False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NVIDIAPyProfdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NVIDIAPyProf.tex', u'NVIDIA PyProf Documentation',
u'NVIDIA Corporation', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nvidiapyprof', u'NVIDIA PyProf Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NVIDIAPyProf', u'NVIDIA PyProf Documentation',
author, 'NVIDIAPyProf', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
extlinks = {'issue': ('https://github.com/NVIDIA/PyProf/issues/%s',
'issue '),
'fileref': ('https://github.com/NVIDIA/PyProf/tree/' +
(git_sha if git_sha != u'0000000' else "master") + '/%s', ''),}
def setup(app):
# If envvar is set then the file is expected to contain a script
# that is added to every documentation page
visitor_script = os.getenv("VISITS_COUNTING_SCRIPT")
if visitor_script:
app.add_js_file(visitor_script)
| PyProf-master | docs/conf.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from .nvtx.nvmarker import init
from .nvtx.nvmarker import add_wrapper as wrap
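# Minimal usage sketch (illustrative, based on the two exports above): call
# pyprof.init() before the workload so the NVTX monkey patching is installed,
# then run under a profiler that records NVTX ranges.
#
#   import torch
#   import pyprof
#   pyprof.init()
#   with torch.autograd.profiler.emit_nvtx():
#       y = model(x)  # model and x are placeholders for the user's workload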
| PyProf-master | pyprof/__init__.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, sqlite3
class DB(object):
"""
This class provides functions for DB operations
with exception handling.
"""
def __init__(self, dbFile):
try:
conn = sqlite3.connect(dbFile)
conn.row_factory = sqlite3.Row
c = conn.cursor()
except:
print("Error opening {}".format(dbFile))
sys.exit(1)
self.conn = conn
self.c = c
def select(self, cmd):
try:
self.c.execute(cmd)
#rows = self.c.fetchall()
rows = [dict(row) for row in self.c.fetchall()]
except sqlite3.Error as e:
print(e)
sys.exit(1)
except:
print("Uncaught error in SQLite access while executing {}".format(cmd))
sys.exit(1)
#print(rows)
return rows
def insert(self, cmd, data):
try:
self.c.execute(cmd, data)
except sqlite3.Error as e:
print(e)
sys.exit(1)
except:
print("Uncaught error in SQLite access while executing {}".format(cmd))
sys.exit(1)
def execute(self, cmd):
try:
self.c.execute(cmd)
except sqlite3.Error as e:
print(e)
sys.exit(1)
except:
print("Uncaught error in SQLite access while executing {}".format(cmd))
sys.exit(1)
def commit(self):
self.conn.commit()
def close(self):
self.c.close()
self.conn.close()
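# Minimal usage sketch (illustrative; "net.sql" is a placeholder for an SQLite
# database exported by the profiler):
#
#   db = DB("net.sql")
#   tables = db.select("SELECT name FROM sqlite_master WHERE type='table'")
#   for row in tables:
#       print(row['name'])
#   db.close()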
| PyProf-master | pyprof/parse/db.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cxxfilt, struct, binascii
#Helper functions
def demangle(name):
"""
Demangle a C++ string
"""
result = name
try:
result = cxxfilt.demangle(name)
except:
pass
return result
def getShortName(name):
"""
Returns a shorter kernel name
"""
sname = name.split("<")[0] \
.replace("void ", "") \
.replace("at::","") \
.replace("cuda::", "") \
.replace("native::","") \
.replace("(anonymous namespace)::", "")
sname = sname.split("(")[0]
return sname
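# Example (illustrative): for a demangled kernel name such as
#   "void at::native::vectorized_elementwise_kernel<4, Functor>(int, Functor)"
# getShortName() drops the template arguments, the parameter list and the
# namespace qualifiers, yielding "vectorized_elementwise_kernel".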
class Kernel(object):
"""
This class stores information about a kernel.
"""
kernels = []
profStart = 0
def __init__(self):
self.kNameId = None
self.kShortName = None
self.kLongName = None
self.kStartTime = None #GPU start time
self.kEndTime = None #GPU end time
self.kDuration = None
self.device = None
self.stream = None
self.grid = ()
self.block = ()
self.corrId = None
self.rStartTime = None #CPU start time
self.rEndTime = None #CPU end time
self.rDuration = None
self.tid = None
self.pid = None
self.objId = None
self.timeOffset = None
self.layerMarkers = []
self.traceMarkers = []
self.reprMarkers = []
self.pyprofMarkers = []
self.seqMarkers = []
self.otherMarkers = []
self.altMarkers = []
self.seqId = []
self.altSeqId = []
self.layer = []
self.subSeqId = None
self.dir = None
self.mod = []
self.op = []
def setKernelInfo(self, info):
self.kNameId = info['kNameId']
self.corrId = int(info['correlationId'])
start = int(info['start'])
end = int(info['end'])
assert end > start, "This assertion can fail for very large profiles. It usually fails when start = end = 0."
self.kStartTime = start
self.kEndTime = end
self.kDuration = end - start
assert (start > Kernel.profStart)
self.device = int(info['deviceId'])
self.stream = int(info['streamId'])
self.grid = (info['gridX'], info['gridY'], info['gridZ'])
self.block = (info['blockX'], info['blockY'], info['blockZ'])
self.timeOffset = Kernel.profStart
self.setKernelName(info['name'])
self.setRunTimeInfo(info)
def setKernelName(self, name):
cadena = demangle(name)
self.kLongName = cadena
self.kShortName = getShortName(cadena)
def setRunTimeInfo(self, info):
self.rStartTime = info['rStart']
self.rEndTime = info['rEnd']
self.rDuration = info['rEnd'] - info['rStart']
self.pid = info['pid']
self.tid = info['tid']
self.objId = info['objId']
assert (self.rStartTime < self.rEndTime)
assert (self.rStartTime < self.kStartTime)
def setMarkerInfo(self, info):
        (self.layerMarkers, self.traceMarkers, self.reprMarkers,
         self.pyprofMarkers, self.seqMarkers, self.otherMarkers,
         self.altMarkers, self.seqId, self.altSeqId, self.layer) = info
self.subSeqId = 0
def setDirection(self):
"""
Set direction (fprop, bprop) based on PyTorch sequence markers.
It is a heuristic and not a foolproof method.
"""
if any("Backward, seq = " in x for x in self.seqMarkers) or \
any("backward, seq = " in x for x in self.seqMarkers) or \
any("Backward0, seq = " in x for x in self.seqMarkers):
self.dir = "bprop"
else:
self.dir = "fprop"
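    # Example (illustrative): a marker such as "MmBackward, seq = 4211"
    # matches the "Backward, seq = " substring test above, so the kernel is
    # classified as bprop; a marker like "mm, seq = 4211" does not, so the
    # kernel stays fprop.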
def setOp(self):
"""
Detect and set the class/module (mod) and operation (op)
of the kernel e.g. torch.nn.functional / linear, torch / sigmoid.
The lookup sequence we use is
NVTX markers inserted by pyprof
NVTX markers inserted by PyTorch in bprop
NVTX markers inserted by PyTorch in fprop
It is a heuristic and not a foolproof method.
"""
def sanitize(name):
name = name.replace("torch","") \
.replace("autograd","") \
.replace("_backward","") \
.replace("::","") \
.replace("jit","") \
.replace("(anonymous namespace)","")
head, sep, tail = name.partition("Backward")
return head
#Check pyprof markers
for m in self.pyprofMarkers:
assert ("mod" in m) and ("op" in m) and ("args" in m)
t = eval(m)
self.op.append(t['op'])
self.mod.append(t['mod'])
if len(self.op):
return
#Check bprop kernel markers
for m in self.seqMarkers:
if ("backward, seq = " in m) or ("Backward, seq = " in m):
op = m.split(",")[0]
op = sanitize(op)
self.op.append(op)
self.mod.append('na')
if len(self.op):
return
#Check markers with "seq = "
for m in self.seqMarkers:
if ", seq = " in m:
op = m.split(",")[0]
self.op.append(op)
self.mod.append('na')
if len(self.op):
return
#If nothing else
if len(self.otherMarkers):
self.op.append(self.otherMarkers[0])
self.mod.append('na')
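    # Example (illustrative, mirroring the markers recorded in the QA data): a
    # pyprof marker like "{'mod': 'Tensor', 'op': 'float', 'args': [...]}"
    # yields self.mod == ['Tensor'] and self.op == ['float'].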
def print(self):
"""
Print kernel information. This is used by prof.py.
"""
a = lambda: None
a.kShortName = self.kShortName
a.kDuration = self.kDuration
#a.layerMarkers = self.layerMarkers
a.layer = self.layer
a.trace = self.traceMarkers
a.reprMarkers = self.reprMarkers
a.marker = self.pyprofMarkers
a.seqMarker = self.seqMarkers
a.seqId = self.seqId
a.subSeqId = self.subSeqId
a.altSeqId = self.altSeqId
a.dir = self.dir
a.mod = self.mod
a.op = self.op
a.tid = self.tid
a.device = self.device
a.stream = self.stream
a.grid = self.grid
a.block = self.block
a.kLongName = self.kLongName
print(a.__dict__)
| PyProf-master | pyprof/parse/kernel.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import struct, binascii
class NVVP(object):
"""
This class gets kernel information from the SQL (nvvp) database.
"""
driverT = "CUPTI_ACTIVITY_KIND_DRIVER"
runtimeT = "CUPTI_ACTIVITY_KIND_RUNTIME"
kernelT = "CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL"
markerT = "CUPTI_ACTIVITY_KIND_MARKER"
stringT = "StringTable"
def __init__(self, db):
self.db = db
self.markerId = 0
def getProfileStart(self):
"""
Get the profile start time
"""
profStart = sys.maxsize
for table in [self.driverT, self.runtimeT, self.kernelT, self.markerT]:
colname = "timestamp" if table is self.markerT else "start"
cmd = "select {} from {} ORDER BY {} ASC LIMIT 1".format(colname, table, colname)
result = self.db.select(cmd)
assert (len(result) <= 1)
if (len(result) == 1):
assert (colname in result[0])
t = result[0][colname]
if (t < profStart):
profStart = t
assert (profStart < sys.maxsize)
return profStart
def getString(self, id_):
"""
Get the string associated with an id.
"""
cmd = "select value from {} where _id_ = {}".format(self.stringT, id_)
result = self.db.select(cmd)
assert (len(result) == 1)
return result[0]['value']
def createMarkerTable(self):
"""
        Create a temporary table and index it to speed up repeated SQL queries.
The table is an INNER JOIN of CUPTI_ACTIVITY_KIND_MARKER with itself.
"""
cmd = 'CREATE TEMPORARY TABLE marker AS SELECT \
a._id_ as id, \
a.timestamp AS startTime, \
b.timestamp AS endTime, \
HEX(a.objectId) AS objectId, \
a.name AS name \
FROM {} AS a INNER JOIN {} AS b ON \
a.id = b.id and \
a.flags = 2 and b.flags = 4'.format(self.markerT, self.markerT)
self.db.execute(cmd)
self.db.execute('CREATE INDEX start_index ON marker (startTime)')
self.db.execute('CREATE INDEX end_index ON marker (endTime)')
self.db.execute('CREATE INDEX id_index ON marker (id)')
def encode_object_id(self, info):
"""
Encode the object ID from the pid and tid values, and put into dict
"""
objId = struct.pack('<i', info['pid']) + struct.pack('<q', info['tid'])
objId = binascii.hexlify(objId).decode('ascii').upper()
info['objId'] = objId
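        # Worked example (illustrative): pid=1234, tid=5678 packs to the
        # little-endian bytes d2 04 00 00 (int32 pid) followed by
        # 2e 16 00 00 00 00 00 00 (int64 tid), so
        # info['objId'] == "D20400002E16000000000000".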
def getKernelInfo(self):
"""
Get GPU kernel info
"""
cmd = (
"SELECT "
"name AS kNameId, "
"strings.value as name, "
"coalesce(runtime.start, driver.start) as rStart, "
"coalesce(runtime.end, driver.end) as rEnd, "
"coalesce(runtime.processId, driver.processId) as pid, "
"coalesce(runtime.threadId, driver.threadId) & 0xFFFFFFFF as tid, "
"kernels.correlationId,kernels.start,kernels.end,deviceId,streamId,"
"gridX,gridY,gridZ,blockX,blockY,blockZ "
"FROM {} AS kernels "
"JOIN {} AS strings ON (KNameId = strings._id_) "
"LEFT JOIN {} AS runtime ON (kernels.correlationId = runtime.correlationId) "
"LEFT JOIN {} AS driver ON (kernels.correlationId = driver.correlationId) "
).format(self.kernelT, self.stringT, self.runtimeT, self.driverT)
result = self.db.select(cmd)
return result
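    #Each row is a dict keyed by the column aliases selected above; a purely
    #illustrative example (all values hypothetical):
    #  {'kNameId': 7, 'name': 'volta_sgemm_128x64_nn', 'pid': 1234,
    #   'tid': 5678, 'correlationId': 1001, 'deviceId': 0, 'streamId': 7,
    #   'gridX': 1, 'gridY': 1, 'gridZ': 1, 'blockX': 128, ...}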
def getMarkerInfo(self, objId, startTime, endTime):
"""
This function first finds all NVTX markers encapsulating
a runtime / driver kernel launch.
It then splits the markers into many lists.
layerMarkers : User added NVTX markers
traceMarkers : Call trace markers (inserted by pyprof)
reprMarkers : Markers containing the extra_repr() of a module (inserted by pyprof)
pyprofMarkers: Markers containing args and kwargs (tensor shape, datatype etc.)
seqMarkers : Markers containing PyTorch internal sequence markers (inserted by PyTorch)
        altSeqMarkers: Markers inserted by PyTorch between two kernel launches, used as a fallback source of sequence ids.
        otherMarkers : Markers not in any of the above categories.
We extract seqId from the seq and altSeq markers. The seqId is used in bprop.
We also extract information from the layerMarkers.
"""
layerMarkers = []
traceMarkers = []
reprMarkers = []
pyprofMarkers = []
seqMarkers = []
otherMarkers = []
altSeqMarkers = []
bprop = False
#Helper functions
def delete(objId, sTime):
"""
Delete rows from the temporary SQL table which are no longer required.
This speeds up future queries.
"""
margin = 0
cmd = 'DELETE FROM marker WHERE objectId = "{}" AND endTime < {}'.format(objId, sTime - margin)
#cmd = 'DELETE FROM marker WHERE endTime < {}'.format(sTime - margin)
self.db.execute(cmd)
def getLayerName(mlist):
"""
Get layer names from layer marker list.
"""
layers = []
assert (type(mlist) == list)
for m in mlist:
assert ("layer:" in m)
l = m.split(":")[1]
layers.append(l)
return layers
def getSeqId(mlist):
"""
Get sequence ids from seq / alt seq marker list.
"""
ids = []
assert (type(mlist) == list)
for m in mlist:
assert (", seq = " in m)
seq = int(m.split("=")[1])
ids.append(seq)
#Remove duplicates
ids = list(set(ids))
ids.sort()
return ids
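        #Example: getSeqId(["addmm, seq = 42", "addmm, seq = 42"]) -> [42]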
def seqcompare(elem):
"""
Sorting function for sequence markers
"""
assert (", seq = " in elem)
#sort by sequence id and then the string
l = elem.split(" = ")
return l[1] + l[0]
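        #Example: seqcompare("addmm, seq = 7") -> "7addmm, seq", so markers
        #sort by the sequence id string first, then by name.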
def prune(mlist):
"""
Remove markers with the same seqId and if the strings are similar.
This function works on a sorted sequence.
"""
assert (type(mlist) == list)
assert (len(mlist))
a = mlist[0:1]
for i in range(1, len(mlist)):
m = mlist[i]
pm = mlist[i - 1]
name, seq = m.split(",")
pname, pseq = pm.split(",")
similar = (name in pname) or (pname in name)
if (seq == pseq) and similar:
continue
else:
a.append(m)
return a
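        #Example with hypothetical markers:
        #  prune(["addmm, seq = 5", "addmm_, seq = 5", "mul, seq = 6"])
        #  -> ["addmm, seq = 5", "mul, seq = 6"]
        #The second marker is dropped: same seq id, and "addmm" is a
        #substring of "addmm_".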
def filterTrace(mlist):
"""
Filter trace markers to remove certain file names.
"""
assert (type(mlist) == list)
if len(mlist) == 0:
return mlist
            mlist = mlist[-1] #The last stack trace will be a superset.
mlist = eval(mlist)
mlist = mlist['traceMarker']
assert (type(mlist) == list)
mlist = list(filter(lambda x: "/torch/nn/modules/" not in x, mlist))
mlist = list(filter(lambda x: "/torch/nn/functional.py" not in x, mlist))
mlist = list(filter(lambda x: "/torch/tensor.py" not in x, mlist))
mlist = list(filter(lambda x: "/torch/autograd/__init__.py" not in x, mlist))
mlist = list(filter(lambda x: "/torch/_jit_internal.py" not in x, mlist))
mlist = list(filter(lambda x: "/pyprof/nvtx/nvmarker.py" not in x, mlist))
mlist = list(filter(lambda x: "/apex/optimizers/" not in x, mlist))
mlist = list(filter(lambda x: "/torch/_utils.py" not in x, mlist))
mlist = list(filter(lambda x: "/torch/optim/" not in x, mlist))
return mlist
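        #Example (hypothetical trace): for
        #  "{'traceMarker': ['/work/train.py:42', '/torch/nn/modules/conv.py:345']}"
        #only '/work/train.py:42' survives the filters above.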
#Find all encapsulating markers
cmd = 'SELECT id,name from marker where \
objectId = "{}" and \
startTime < {} and \
endTime > {} \
ORDER BY startTime ASC'.format(objId, startTime, endTime)
result = self.db.select(cmd)
#Bin markers into different lists
for r in result:
m = self.getString(r['name'])
            #Hack: If it's a known gradient checkpointing marker, ignore it.
if m.find("CheckpointFunctionBackward") >= 0:
continue
if ("_backward, seq =" in m) or ("Backward, seq =" in m) or ("Backward0, seq =" in m):
bprop = True
if ("mod" in m) and ("op" in m) and ("args" in m) and ("type" in m):
pyprofMarkers.append(m)
elif ("layer:" in m):
layerMarkers.append(m)
elif ("traceMarker" in m):
traceMarkers.append(m)
elif ("strRepr" in m):
reprMarkers.append(m)
elif (", seq = " in m):
seqMarkers.append(m)
else:
otherMarkers.append(m)
#Remove duplicates, sort and prune seqMarkers
if (len(seqMarkers)):
seqMarkers = list(set(seqMarkers))
seqMarkers.sort(key=seqcompare)
seqMarkers = prune(seqMarkers)
#Remove duplicates from otherMarkers
otherMarkers = list(set(otherMarkers))
#Get markers with seq id (inserted by PyTorch) from the previous kernel to the present kernel
#Only for fprop kernels
if (len(result) and not bprop):
loId = self.markerId
hiId = result[-1]['id']
self.markerId = hiId
#Get markers between loId and hiId
cmd = 'SELECT id,name from marker where objectId = "{}" and id > {} and id < {} ORDER BY startTime ASC'.format(
objId, loId, hiId
)
result1 = self.db.select(cmd)
for r in result1:
m = self.getString(r['name'])
#Get only markers with seq id
if (", seq=" in m):
altSeqMarkers.append(m)
#Remove duplicates, sort and prune altSeqMarkers
if (len(altSeqMarkers)):
altSeqMarkers = list(set(altSeqMarkers))
altSeqMarkers.sort(key=seqcompare)
altSeqMarkers = prune(altSeqMarkers)
delete(objId, startTime)
return layerMarkers, filterTrace(
traceMarkers
), reprMarkers, pyprofMarkers, seqMarkers, otherMarkers, altSeqMarkers, getSeqId(seqMarkers), getSeqId(
altSeqMarkers
), getLayerName(layerMarkers)
| PyProf-master | pyprof/parse/nvvp.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| PyProf-master | pyprof/parse/__init__.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Aditya Agrawal.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
class Nsight(object):
"""
    This class gets kernel information from the SQL (Nsight) database.
"""
#driverT = "CUPTI_ACTIVITY_KIND_DRIVER"
runtimeT = "CUPTI_ACTIVITY_KIND_RUNTIME"
kernelT = "CUPTI_ACTIVITY_KIND_KERNEL"
markerT = "NVTX_EVENTS"
stringT = "StringIds"
def __init__(self, db):
self.db = db
self.markerId = 0
def getProfileStart(self):
"""
Get the profile start time
"""
profStart = sys.maxsize
#for table in [self.driverT, self.runtimeT, self.kernelT, self.markerT]:
for table in [self.runtimeT, self.kernelT, self.markerT]:
colname = "start"
cmd = "select {} from {} ORDER BY {} ASC LIMIT 1".format(colname, table, colname)
result = self.db.select(cmd)
assert (len(result) <= 1)
if (len(result) == 1):
assert (colname in result[0])
t = result[0][colname]
if (t < profStart):
profStart = t
assert (profStart < sys.maxsize)
return profStart
def createMarkerTable(self):
"""
        Create a temporary table and index it to speed up repeated SQL queries.
"""
cmd = 'CREATE TEMPORARY TABLE marker AS SELECT * FROM {}'.format(self.markerT)
self.db.execute(cmd)
self.db.execute('CREATE INDEX start_index ON marker (start)')
self.db.execute('CREATE INDEX end_index ON marker (end)')
#self.db.execute('CREATE INDEX id_index ON marker (id)')
def encode_object_id(self, info):
# Nothing to do for nsight. objId comes out of database
assert 'objId' in info
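    #The globalTid column already encodes pid and tid; getKernelInfo below
    #unpacks it in SQL. Worked example with a hypothetical value:
    #  globalTid = 1234 * 0x1000000 + 5678
    #  pid = globalTid // 0x1000000 % 0x1000000  -> 1234
    #  tid = globalTid % 0x1000000               -> 5678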
def getKernelInfo(self):
"""
Get GPU kernel info
"""
cmd = (
"SELECT "
"demangledName as kNameId, "
"strings.value as name, "
"runtime.start as rStart, "
"runtime.end as rEnd, "
"runtime.globalTid as objId, "
"runtime.globalTid / 0x1000000 % 0x1000000 AS pid, "
"runtime.globalTid % 0x1000000 AS tid, "
"kernels.globalPid / 0x1000000 % 0x1000000 AS kpid, "
"kernels.correlationId,kernels.start,kernels.end,deviceId,streamId,"
"gridX,gridY,gridZ,blockX,blockY,blockZ "
"FROM {} AS kernels "
"JOIN {} AS strings ON (kNameId = strings.Id) "
"JOIN {} AS runtime ON (kernels.correlationId = runtime.correlationId AND kpid = pid) "
).format(self.kernelT, self.stringT, self.runtimeT)
result = self.db.select(cmd)
return result
def getMarkerInfo(self, objId, startTime, endTime):
"""
This function first finds all NVTX markers encapsulating
a runtime / driver kernel launch.
It then splits the markers into many lists.
layerMarkers : User added NVTX markers
traceMarkers : Call trace markers (inserted by pyprof)
reprMarkers : Markers containing the extra_repr() of a module (inserted by pyprof)
pyprofMarkers: Markers containing args and kwargs (tensor shape, datatype etc.)
seqMarkers : Markers containing PyTorch internal sequence markers (inserted by PyTorch)
        altSeqMarkers: Markers inserted by PyTorch between two kernel launches, used as a fallback source of sequence ids.
        otherMarkers : Markers not in any of the above categories.
We extract seqId from the seq and altSeq markers. The seqId is used in bprop.
We also extract information from the layerMarkers.
"""
layerMarkers = []
traceMarkers = []
reprMarkers = []
pyprofMarkers = []
seqMarkers = []
otherMarkers = []
altSeqMarkers = []
bprop = False
#Helper functions
def delete(objId, sTime):
"""
Delete rows from the temporary SQL table which are no longer required.
This speeds up future queries.
"""
margin = 0
cmd = 'DELETE FROM marker WHERE globalTid = {} AND end < {}'.format(objId, sTime - margin)
#cmd = 'DELETE FROM marker WHERE end < {}'.format(sTime - margin)
self.db.execute(cmd)
def getLayerName(mlist):
"""
Get layer names from layer marker list.
"""
layers = []
assert (type(mlist) == list)
for m in mlist:
assert ("layer:" in m)
l = m.split(":")[1]
layers.append(l)
return layers
def getSeqId(mlist):
"""
Get sequence ids from seq / alt seq marker list.
"""
ids = []
assert (type(mlist) == list)
for m in mlist:
assert (", seq = " in m)
seq = int(m.split("=")[1])
ids.append(seq)
#Remove duplicates
ids = list(set(ids))
ids.sort()
return ids
def seqcompare(elem):
"""
Sorting function for sequence markers
"""
assert (", seq = " in elem)
#sort by sequence id and then the string
l = elem.split(" = ")
return l[1] + l[0]
def prune(mlist):
"""
            Remove markers that have the same seqId and similar strings.
This function works on a sorted sequence.
"""
assert (type(mlist) == list)
assert (len(mlist))
a = mlist[0:1]
for i in range(1, len(mlist)):
m = mlist[i]
pm = mlist[i - 1]
name, seq = m.split(",")
pname, pseq = pm.split(",")
similar = (name in pname) or (pname in name)
if (seq == pseq) and similar:
continue
else:
a.append(m)
return a
def filterTrace(mlist):
"""
Filter trace markers to remove certain file names.
"""
assert (type(mlist) == list)
if len(mlist) == 0:
return mlist
            mlist = mlist[-1] #The last stack trace will be a superset.
mlist = eval(mlist)
mlist = mlist['traceMarker']
assert (type(mlist) == list)
mlist = list(filter(lambda x: "/torch/nn/modules/" not in x, mlist))
mlist = list(filter(lambda x: "/torch/nn/functional.py" not in x, mlist))
mlist = list(filter(lambda x: "/torch/tensor.py" not in x, mlist))
mlist = list(filter(lambda x: "/torch/autograd/__init__.py" not in x, mlist))
mlist = list(filter(lambda x: "/torch/_jit_internal.py" not in x, mlist))
mlist = list(filter(lambda x: "/pyprof/nvtx/nvmarker.py" not in x, mlist))
mlist = list(filter(lambda x: "/apex/optimizers/" not in x, mlist))
mlist = list(filter(lambda x: "/torch/_utils.py" not in x, mlist))
mlist = list(filter(lambda x: "/torch/optim/" not in x, mlist))
return mlist
#Find all encapsulating markers
cmd = 'SELECT text from marker where \
globalTid = {} and \
start < {} and \
end > {} \
ORDER BY start ASC'.format(objId, startTime, endTime)
result = self.db.select(cmd)
#Bin markers into different lists
for r in result:
#m = self.getString(r['name'])
m = r['text']
            #Hack: If it's a known gradient checkpointing marker, ignore it.
if m.find("CheckpointFunctionBackward") >= 0:
continue
if ("_backward, seq =" in m) or ("Backward, seq =" in m) or ("Backward0, seq =" in m):
bprop = True
if ("mod" in m) and ("op" in m) and ("args" in m) and ("type" in m):
pyprofMarkers.append(m)
elif ("layer:" in m):
layerMarkers.append(m)
elif ("traceMarker" in m):
traceMarkers.append(m)
elif ("strRepr" in m):
reprMarkers.append(m)
elif (", seq = " in m):
seqMarkers.append(m)
else:
otherMarkers.append(m)
#Remove duplicates, sort and prune seqMarkers
if (len(seqMarkers)):
seqMarkers = list(set(seqMarkers))
seqMarkers.sort(key=seqcompare)
seqMarkers = prune(seqMarkers)
#Remove duplicates from otherMarkers
otherMarkers = list(set(otherMarkers))
#Get markers with seq id (inserted by PyTorch) from the previous kernel to the present kernel
#Only for fprop kernels
if (len(result) and not bprop):
'''
loId = self.markerId
hiId = result[-1]['id']
self.markerId = hiId
#Get markers between loId and hiId
cmd = 'SELECT id,name from marker where objectId = "{}" and id > {} and id < {} ORDER BY startTime ASC'.format(objId, loId, hiId)
result1 = self.db.select(cmd)
for r in result1:
m = self.getString(r['name'])
#Get only markers with seq id
if (", seq=" in m):
altSeqMarkers.append(m)
#Remove duplicates, sort and prune altSeqMarkers
if (len(altSeqMarkers)):
altSeqMarkers = list(set(altSeqMarkers))
altSeqMarkers.sort(key=seqcompare)
altSeqMarkers = prune(altSeqMarkers)
'''
pass
delete(objId, startTime)
#delete("", startTime)
return layerMarkers, filterTrace(
traceMarkers
), reprMarkers, pyprofMarkers, seqMarkers, otherMarkers, altSeqMarkers, getSeqId(seqMarkers), getSeqId(
altSeqMarkers
), getLayerName(layerMarkers)
| PyProf-master | pyprof/parse/nsight.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parse the SQLite3 database from NVprof or Nsight and print a dictionary for every kernel.
"""
import sys
import os
import argparse
from tqdm import tqdm
from .db import DB
from .kernel import Kernel
from .nvvp import NVVP
from .nsight import Nsight
def parseArgs():
parser = argparse.ArgumentParser(prog=sys.argv[0], description="Parse SQLite3 DB from NVprof or Nsight.")
parser.add_argument("file", type=str, default=None, help="SQLite3 database.")
args = parser.parse_args()
if not os.path.isfile(args.file):
raise parser.error("No such file '{}'.".format(args.file))
return args
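#Typical invocation (hypothetical file name); one dictionary per kernel is
#printed to stdout:
#  python -m pyprof.parse net.sql > net.dict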
def dbIsNvvp(db):
cmd = "SELECT * FROM sqlite_master where type='table' AND name='StringTable'"
result = db.select(cmd)
return True if len(result) == 1 else False
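#nvprof databases contain a 'StringTable' table while Nsight Systems exports
#use 'StringIds' (NVVP.stringT vs Nsight.stringT above), so the presence of
#'StringTable' identifies an nvprof database.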
def main():
args = parseArgs()
db = DB(args.file)
nvvp = None
if dbIsNvvp(db):
nvvp = NVVP(db)
else:
nvvp = Nsight(db)
kInfo = nvvp.getKernelInfo()
if len(kInfo) == 0:
print("Found 0 kernels. Exiting.", file=sys.stderr)
db.close()
sys.exit(0)
else:
print("Found {} kernels. Getting info for each kernel.".format(len(kInfo)), file=sys.stderr)
nvvp.createMarkerTable()
prevSeqId = -1
prevSubSeqId = -1
prevOp = "na"
Kernel.profStart = nvvp.getProfileStart()
for i in tqdm(range(len(kInfo)), ascii=True):
info = kInfo[i]
k = Kernel()
#Calculate/encode object ID
nvvp.encode_object_id(info)
#Set kernel info
k.setKernelInfo(info)
#Get and set marker and seqid info
info = nvvp.getMarkerInfo(k.objId, k.rStartTime, k.rEndTime)
k.setMarkerInfo(info)
        #If the seqId contains both 0 and non-zero integers, remove 0.
if any(seq != 0 for seq in k.seqId) and (0 in k.seqId):
k.seqId.remove(0)
#Set direction (it uses seq id)
k.setDirection()
#Set op
k.setOp()
#The following code is based on heuristics.
#TODO: Refactor.
#Assign subSeqId, adjust seqId and altSeqId
#seqId can be 0.
#A kernel can have multiple seqIds both in fprop and bprop.
#In bprop, seqIds might not decrease monotonically. I have observed a few blips.
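        #Illustrative example (hypothetical values): in fprop with
        #prevSeqId = 3 and k.seqId = [3, 4], currSeqId becomes 4; 4 stays in
        #k.seqId and 3 is moved to k.altSeqId by the loops below.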
if len(k.seqId):
assert (k.dir in ["fprop", "bprop"])
if (k.dir == "fprop"):
#Check if there is a sequence id larger than the previous
inc = (k.seqId[-1] > prevSeqId)
if inc:
currSeqId = [x for x in k.seqId if x > prevSeqId][0]
else:
currSeqId = prevSeqId
else:
currSeqId = k.seqId[0]
#if ((currSeqId == prevSeqId) and (k.op == prevOp)):
if ((currSeqId == prevSeqId) and (k.op == prevOp)) or ((k.op[0] == "forward") and (k.op == prevOp) and
(k.mod[0] in ["LSTMCell", "GRUCell", "RNNCell"])):
                #The second condition traps cases where PyTorch does not use cuDNN for an LSTMCell.
k.subSeqId = prevSubSeqId + 1
prevSeqId = currSeqId
prevSubSeqId = k.subSeqId
prevOp = k.op
#Keep currSeqId in k.seqId, move everything else to k.altSeqId
for s in k.seqId:
if s != currSeqId:
k.seqId.remove(s)
k.altSeqId.append(s)
for s in k.altSeqId:
if s == currSeqId:
k.altSeqId.remove(s)
k.altSeqId = list(set(k.altSeqId))
if (len(k.altSeqId)):
(k.altSeqId).sort()
k.print()
db.close()
if __name__ == '__main__':
main()
| PyProf-master | pyprof/parse/parse.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .parse import main
if __name__ == '__main__':
main()
| PyProf-master | pyprof/parse/__main__.py |