diff --git a/README.md b/README.md
index fe22efc91d..cb9f8bfdcc 100644
--- a/README.md
+++ b/README.md
@@ -131,6 +131,19 @@ e.g.,
 CC=clang CXX=clang++ CFLAGS='-stdlib=libc++' MMCV_WITH_OPS=1 pip install -e .
 ```
 
+If you are on Windows 10, set the following environment variable before running the install command.
+
+```bash
+set MMCV_WITH_OPS=1
+```
+
+e.g.,
+
+```bash
+set MMCV_WITH_OPS=1
+pip install -e .
+```
+
 Note: If you would like to use `opencv-python-headless` instead of `opencv-python`,
 e.g., in a minimum container environment or servers without GUI, you can first
 install it before installing MMCV to skip the installation of `opencv-python`.
diff --git a/mmcv/ops/csrc/pytorch/focal_loss_cuda.cu b/mmcv/ops/csrc/pytorch/focal_loss_cuda.cu
index 508f449ba3..c7cd215f5d 100644
--- a/mmcv/ops/csrc/pytorch/focal_loss_cuda.cu
+++ b/mmcv/ops/csrc/pytorch/focal_loss_cuda.cu
@@ -8,7 +8,7 @@ void SigmoidFocalLossForwardCUDAKernelLauncher(Tensor input, Tensor target,
                                                const float alpha) {
   int output_size = output.numel();
   int num_classes = input.size(1);
-  AT_ASSERTM(target.max().item<long>() <= (long)num_classes,
+  AT_ASSERTM(target.max().item<int64_t>() <= (int64_t)num_classes,
              "target label should smaller or equal than num classes");
   at::cuda::CUDAGuard device_guard(input.device());
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
@@ -53,7 +53,7 @@ void SoftmaxFocalLossForwardCUDAKernelLauncher(Tensor softmax, Tensor target,
   int output_size = output.numel();
   int num_classes = softmax.size(1);
 
-  AT_ASSERTM(target.max().item<long>() <= (long)num_classes,
+  AT_ASSERTM(target.max().item<int64_t>() <= (int64_t)num_classes,
              "target label should smaller or equal than num classes");
   at::cuda::CUDAGuard device_guard(softmax.device());
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
@@ -80,7 +80,9 @@ void SoftmaxFocalLossBackwardCUDAKernelLauncher(Tensor softmax, Tensor target,
   at::cuda::CUDAGuard device_guard(grad_input.device());
   cudaStream_t stream = at::cuda::getCurrentCUDAStream();
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      grad_input.scalar_type(), "softmax_focal_loss_backward_cuda1_kernel",
+      grad_input.scalar_type(),
+      "softmax_focal_loss_backward_cuda1_"
+      "kernel",
       [&] {
         softmax_focal_loss_backward_cuda1_kernel<scalar_t>
             <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
@@ -93,7 +95,9 @@
 
   output_size = grad_input.numel();
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      grad_input.scalar_type(), "softmax_focal_loss_backward_cuda2_kernel",
+      grad_input.scalar_type(),
+      "softmax_focal_loss_backward_cuda2_"
+      "kernel",
       [&] {
         softmax_focal_loss_backward_cuda2_kernel<scalar_t>
             <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
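
For background on the `long` to `int64_t` change in `focal_loss_cuda.cu`: on 64-bit Windows (the LLP64 data model used by MSVC) `long` is only 32 bits wide, while on 64-bit Linux and macOS (LP64) it is 64 bits; `int64_t` is 64 bits on all of these platforms. The standalone C++ sketch below is illustrative only and not part of the patch; it simply makes that width difference visible.

```cpp
// type_width_sketch.cpp -- illustrative only, not part of the patch above.
// Demonstrates why int64_t is preferred over long in cross-platform code:
// long is 32 bits on 64-bit Windows (LLP64) but 64 bits on 64-bit
// Linux/macOS (LP64), whereas int64_t is 64 bits everywhere.
#include <cstdint>
#include <cstdio>

int main() {
  std::printf("sizeof(long)    = %zu bytes\n", sizeof(long));          // 4 on LLP64, 8 on LP64
  std::printf("sizeof(int64_t) = %zu bytes\n", sizeof(std::int64_t));  // 8 on both

  // A value that fits in 64 bits but not in 32 bits.
  std::int64_t big = 5000000000LL;
  long narrowed = static_cast<long>(big);  // silently loses data when long is 32-bit
  std::printf("int64_t value      = %lld\n", static_cast<long long>(big));
  std::printf("after cast to long = %ld\n", narrowed);
  return 0;
}
```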