virtualSensor.py
import torch
from torchvision import transforms
from dataset import Dataset
from PIL import Image
import numpy as np
import os
import math
from camera_sensors import CamDetails
import copy
from tqdm import tqdm


class VirtualSensor:
    """Replays an RGB-D sequence frame by frame, mimicking a live sensor."""

    def __init__(self, dataset, increment):
        self.rgb_timestamps = dataset.rgb_timestamps
        self.rgb_images_path = dataset.rgb_images_path
        self.depth_timestamps = dataset.depth_timestamps
        self.depth_images_path = dataset.depth_images_path
        self.trajectory_timestamps = dataset.trajectory_timestamps
        self.trajectory = dataset.trajectory
        self.kinect_dataset_path = dataset.kinect_dataset_path
        self.currentIdx = -1
        self.increment = increment  # Change increment to skip frames in the middle
        # Image dimensions and intrinsics come from the static CamDetails description.
        self.m_colorImageWidth = CamDetails.colorWidth
        self.m_colorImageHeight = CamDetails.colorHeight
        self.m_depthImageWidth = CamDetails.depthWidth
        self.m_depthImageHeight = CamDetails.depthHeight
        self.m_colorIntrinsics = CamDetails.colorIntrinsics
        self.m_depthIntrinsics = CamDetails.depthIntrinsics
        # Identity extrinsics: color and depth frames are treated as already registered.
        self.m_colorExtrinsics = np.identity(4)
        self.m_depthExtrinsics = np.identity(4)
        # Default frame buffers filled with constant values (0.5 m depth, white color).
        self.m_depthFrame = torch.full((self.m_depthImageWidth, self.m_depthImageHeight), 0.5)
        self.m_colorFrame = torch.full((self.m_colorImageWidth, self.m_colorImageHeight), 255)
    def processNextFrame(self, pbar=None):
        # Advance the frame index; an increment > 1 skips frames in between.
        if self.currentIdx == -1:
            self.currentIdx = 0
        else:
            self.currentIdx += self.increment

        # Stop once the end of the sequence is reached.
        if self.currentIdx >= len(self.rgb_images_path):
            return False

        if pbar is not None:
            pbar.update(1)
        # print("ProcessNextFrame " + str(self.currentIdx) + " | " + str(len(self.rgb_images_path)))

        transform_toTensor = transforms.ToTensor()
        # Load the color and depth images for the current frame.
        self.rgbImage = Image.open(os.path.join(self.kinect_dataset_path, self.rgb_images_path[self.currentIdx]))
        self.dImageRaw = Image.open(os.path.join(self.kinect_dataset_path, self.depth_images_path[self.currentIdx]))
        # (C, H, W) -> (W, H, C); a raw depth of 0 marks a missing measurement,
        # all other values are divided by 5000 to convert to meters (TUM RGB-D depth scale).
        self.dImage = transform_toTensor(self.dImageRaw).permute(2, 1, 0)
        self.dImage = torch.where(self.dImage == 0, -math.inf, self.dImage / 5000)

        # Find the ground-truth trajectory entry whose timestamp is closest
        # to the current depth timestamp (nearest-neighbour matching).
        timestamp = self.depth_timestamps[self.currentIdx]
        min_val = math.inf
        idx = 0
        for i in range(len(self.trajectory)):
            d = abs(float(self.trajectory_timestamps[i]) - float(timestamp))
            if min_val > d:
                min_val = d
                idx = i
        self.currentTrajectory = self.trajectory[idx]
        return True
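

# A minimal usage sketch (not part of the original class): it assumes that
# `dataset.Dataset` can be constructed from a path to a TUM-style RGB-D sequence
# and exposes the attributes read in __init__ above; the constructor argument
# shown here is an assumption, not the documented Dataset API.
if __name__ == "__main__":
    dataset = Dataset("path/to/rgbd_dataset")  # hypothetical constructor signature
    sensor = VirtualSensor(dataset, increment=10)  # process every 10th frame
    with tqdm(total=len(dataset.rgb_images_path)) as pbar:
        while sensor.processNextFrame(pbar):
            # sensor.dImage holds depth in meters (invalid pixels set to -inf),
            # sensor.rgbImage the PIL color image, and sensor.currentTrajectory
            # the nearest ground-truth pose for the current frame.
            pass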