
Commit c646eee

Revert fullframe DM model (commaai#24812)
* Revert "fullframe DM: flip RHD yaw to use matching thresholds" This reverts commit 2ac6931. * Revert "fullframe DM model (commaai#24762)" This reverts commit d6c07a6. * revert cereal
1 parent 66bc246 commit c646eee

25 files changed: +308 -227 lines

cereal (submodule update)

common/modeldata.h

+6
@@ -24,6 +24,12 @@ constexpr auto T_IDXS_FLOAT = build_idxs<float, TRAJECTORY_SIZE>(10.0);
 constexpr auto X_IDXS = build_idxs<double, TRAJECTORY_SIZE>(192.0);
 constexpr auto X_IDXS_FLOAT = build_idxs<float, TRAJECTORY_SIZE>(192.0);

+namespace tici_dm_crop {
+  const int x_offset = -72;
+  const int y_offset = -144;
+  const int width = 954;
+};
+
 const mat3 fcam_intrinsic_matrix = (mat3){{2648.0, 0.0, 1928.0 / 2,
                                            0.0, 2648.0, 1208.0 / 2,
                                            0.0, 0.0, 1.0}};
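
Note on the restored constants: dmonitoring_eval_frame (see the selfdrive/modeld/models/dmonitoring.cc diff below) uses tici_dm_crop to place the driver-monitoring crop inside the driver-camera frame. A minimal standalone sketch of that arithmetic, assuming a 1928x1208 driver frame (the frame size is an assumption here, matching the dimensions in the fcam intrinsics above, not something this diff states):

// sketch only: crop-rect derivation from tici_dm_crop (expected values in comments)
#include <cstdio>

namespace tici_dm_crop {
  const int x_offset = -72;
  const int y_offset = -144;
  const int width = 954;
};

int main() {
  const int frame_w = 1928, frame_h = 1208;               // assumed driver-cam size
  const int cropped_height = tici_dm_crop::width / 1.33;  // 717
  int x = frame_w / 2 - tici_dm_crop::width / 2 + tici_dm_crop::x_offset;  // 415
  int y = frame_h / 2 - cropped_height / 2 + tici_dm_crop::y_offset;       // 102
  int w = cropped_height / 2;                             // 358
  int h = cropped_height;                                 // 717
  x += tici_dm_crop::width - w;  // LHD shift applied when !is_rhd -> 1011
  printf("crop rect: x=%d y=%d w=%d h=%d\n", x, y, w, h);
  return 0;
}

The resulting 358x717 crop has the same 1:2 aspect ratio as the 320x640 model input restored in dmonitoring.cc.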

selfdrive/camerad/cameras/camera_common.cc

+1 -1
@@ -239,7 +239,7 @@ static kj::Array<capnp::byte> yuv420_to_jpeg(const CameraBuf *b, int thumbnail_w
   int in_stride = b->cur_yuv_buf->stride;

   // make the buffer big enough. jpeg_write_raw_data requires 16-pixels aligned height to be used.
-  std::unique_ptr<uint8_t[]> buf(new uint8_t[(thumbnail_width * ((thumbnail_height + 15) & ~15) * 3) / 2]);
+  std::unique_ptr<uint8[]> buf(new uint8_t[(thumbnail_width * ((thumbnail_height + 15) & ~15) * 3) / 2]);
   uint8_t *y_plane = buf.get();
   uint8_t *u_plane = y_plane + thumbnail_width * thumbnail_height;
   uint8_t *v_plane = u_plane + (thumbnail_width * thumbnail_height) / 4;
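
The only functional content here is the element type swapped back to uint8, which presumably comes from libyuv's basic_types.h and aliases uint8_t, so the allocation is unchanged. The sizing expression this line keeps is worth spelling out: jpeg_write_raw_data needs the height padded up to a multiple of 16, and YUV420 costs 3/2 bytes per pixel. A small self-contained check of that arithmetic (example numbers are invented):

// arithmetic check only; not part of the commit
#include <cassert>

constexpr int align16(int h) { return (h + 15) & ~15; }  // round up to a multiple of 16

int main() {
  assert(align16(96) == 96);   // already aligned
  assert(align16(97) == 112);  // rounded up
  const int thumbnail_width = 160, thumbnail_height = 97;
  const int buf_len = (thumbnail_width * align16(thumbnail_height) * 3) / 2;
  assert(buf_len == 26880);    // full-res Y plane plus quarter-res U and V planes
  return 0;
}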

selfdrive/camerad/cameras/camera_qcom2.cc

+1 -1
@@ -837,7 +837,7 @@ void cameras_init(VisionIpcServer *v, MultiCameraState *s, cl_device_id device_i
   s->road_cam.camera_init(s, v, CAMERA_ID_AR0231, 1, 20, device_id, ctx, VISION_STREAM_RGB_ROAD, VISION_STREAM_ROAD, !env_disable_road);
   s->wide_road_cam.camera_init(s, v, CAMERA_ID_AR0231, 0, 20, device_id, ctx, VISION_STREAM_RGB_WIDE_ROAD, VISION_STREAM_WIDE_ROAD, !env_disable_wide_road);

-  s->sm = new SubMaster({"driverStateV2"});
+  s->sm = new SubMaster({"driverState"});
   s->pm = new PubMaster({"roadCameraState", "driverCameraState", "wideRoadCameraState", "thumbnail"});
 }

selfdrive/hardware/tici/test_power_draw.py

+1 -1
@@ -21,7 +21,7 @@ class Proc:
 PROCS = [
   Proc('camerad', 2.15),
   Proc('modeld', 1.0),
-  Proc('dmonitoringmodeld', 0.35),
+  Proc('dmonitoringmodeld', 0.25),
   Proc('encoderd', 0.23),
 ]

selfdrive/modeld/dmonitoringmodeld.cc

+3 -3
@@ -12,7 +12,7 @@
 ExitHandler do_exit;

 void run_model(DMonitoringModelState &model, VisionIpcClient &vipc_client) {
-  PubMaster pm({"driverStateV2"});
+  PubMaster pm({"driverState"});
   SubMaster sm({"liveCalibration"});
   float calib[CALIB_LEN] = {0};
   double last = 0;
@@ -31,11 +31,11 @@ void run_model(DMonitoringModelState &model, VisionIpcClient &vipc_client) {
     }

     double t1 = millis_since_boot();
-    DMonitoringModelResult model_res = dmonitoring_eval_frame(&model, buf->addr, buf->width, buf->height, buf->stride, buf->uv_offset, calib);
+    DMonitoringResult res = dmonitoring_eval_frame(&model, buf->addr, buf->width, buf->height, buf->stride, buf->uv_offset, calib);
     double t2 = millis_since_boot();

     // send dm packet
-    dmonitoring_publish(pm, extra.frame_id, model_res, (t2 - t1) / 1000.0, model.output);
+    dmonitoring_publish(pm, extra.frame_id, res, (t2 - t1) / 1000.0, model.output);

     //printf("dmonitoring process: %.2fms, from last %.2fms\n", t2 - t1, t1 - last);
     last = t1;
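
One thing to keep straight while reading this hunk: run_model publishes (t2 - t1) / 1000.0, the whole frame evaluation measured in milliseconds and converted to seconds, while dspExecutionTime is timed separately around execute() inside dmonitoring_eval_frame (next diff). A toy sketch of that conversion, with a stand-in for openpilot's millis_since_boot helper:

// illustrative only; millis_since_boot here is a simplified stand-in
#include <chrono>
#include <cstdio>

static double millis_since_boot() {
  using namespace std::chrono;
  return duration<double, std::milli>(steady_clock::now().time_since_epoch()).count();
}

int main() {
  double t1 = millis_since_boot();
  // ... dmonitoring_eval_frame(...) would run here ...
  double t2 = millis_since_boot();
  float model_execution_time = (t2 - t1) / 1000.0;  // ms -> s, as published above
  printf("modelExecutionTime: %.6f s\n", model_execution_time);
  return 0;
}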

selfdrive/modeld/models/dmonitoring.cc

+159 -75
@@ -10,124 +10,208 @@

 #include "selfdrive/modeld/models/dmonitoring.h"

-constexpr int MODEL_WIDTH = 1440;
-constexpr int MODEL_HEIGHT = 960;
+constexpr int MODEL_WIDTH = 320;
+constexpr int MODEL_HEIGHT = 640;

 template <class T>
 static inline T *get_buffer(std::vector<T> &buf, const size_t size) {
   if (buf.size() < size) buf.resize(size);
   return buf.data();
 }

+static inline void init_yuv_buf(std::vector<uint8_t> &buf, const int width, int height) {
+  uint8_t *y = get_buffer(buf, width * height * 3 / 2);
+  uint8_t *u = y + width * height;
+  uint8_t *v = u + (width / 2) * (height / 2);
+
+  // needed on comma two to make the padded border black
+  // equivalent to RGB(0,0,0) in YUV space
+  memset(y, 16, width * height);
+  memset(u, 128, (width / 2) * (height / 2));
+  memset(v, 128, (width / 2) * (height / 2));
+}
+
 void dmonitoring_init(DMonitoringModelState* s) {
   s->is_rhd = Params().getBool("IsRHD");
+  for (int x = 0; x < std::size(s->tensor); ++x) {
+    s->tensor[x] = (x - 128.f) * 0.0078125f;
+  }
+  init_yuv_buf(s->resized_buf, MODEL_WIDTH, MODEL_HEIGHT);

 #ifdef USE_ONNX_MODEL
-  s->m = new ONNXModel("models/dmonitoring_model.onnx", &s->output[0], OUTPUT_SIZE, USE_DSP_RUNTIME, false, true);
+  s->m = new ONNXModel("models/dmonitoring_model.onnx", &s->output[0], OUTPUT_SIZE, USE_DSP_RUNTIME);
 #else
-  s->m = new SNPEModel("models/dmonitoring_model_q.dlc", &s->output[0], OUTPUT_SIZE, USE_DSP_RUNTIME, false, true);
+  s->m = new SNPEModel("models/dmonitoring_model_q.dlc", &s->output[0], OUTPUT_SIZE, USE_DSP_RUNTIME);
 #endif

   s->m->addCalib(s->calib, CALIB_LEN);
 }

-void parse_driver_data(DriverStateResult &ds_res, const DMonitoringModelState* s, int out_idx_offset) {
-  for (int i = 0; i < 3; ++i) {
-    ds_res.face_orientation[i] = s->output[out_idx_offset+i] * REG_SCALE;
-    ds_res.face_orientation_std[i] = exp(s->output[out_idx_offset+6+i]);
-  }
-  for (int i = 0; i < 2; ++i) {
-    ds_res.face_position[i] = s->output[out_idx_offset+3+i] * REG_SCALE;
-    ds_res.face_position_std[i] = exp(s->output[out_idx_offset+9+i]);
-  }
-  for (int i = 0; i < 4; ++i) {
-    ds_res.ready_prob[i] = sigmoid(s->output[out_idx_offset+35+i]);
-  }
-  for (int i = 0; i < 2; ++i) {
-    ds_res.not_ready_prob[i] = sigmoid(s->output[out_idx_offset+39+i]);
-  }
-  ds_res.face_prob = sigmoid(s->output[out_idx_offset+12]);
-  ds_res.left_eye_prob = sigmoid(s->output[out_idx_offset+21]);
-  ds_res.right_eye_prob = sigmoid(s->output[out_idx_offset+30]);
-  ds_res.left_blink_prob = sigmoid(s->output[out_idx_offset+31]);
-  ds_res.right_blink_prob = sigmoid(s->output[out_idx_offset+32]);
-  ds_res.sunglasses_prob = sigmoid(s->output[out_idx_offset+33]);
-  ds_res.occluded_prob = sigmoid(s->output[out_idx_offset+34]);
+static inline auto get_yuv_buf(std::vector<uint8_t> &buf, const int width, int height) {
+  uint8_t *y = get_buffer(buf, width * height * 3 / 2);
+  uint8_t *u = y + width * height;
+  uint8_t *v = u + (width /2) * (height / 2);
+  return std::make_tuple(y, u, v);
 }

-void fill_driver_data(cereal::DriverStateV2::DriverData::Builder ddata, const DriverStateResult &ds_res) {
-  ddata.setFaceOrientation(ds_res.face_orientation);
-  ddata.setFaceOrientationStd(ds_res.face_orientation_std);
-  ddata.setFacePosition(ds_res.face_position);
-  ddata.setFacePositionStd(ds_res.face_position_std);
-  ddata.setFaceProb(ds_res.face_prob);
-  ddata.setLeftEyeProb(ds_res.left_eye_prob);
-  ddata.setRightEyeProb(ds_res.right_eye_prob);
-  ddata.setLeftBlinkProb(ds_res.left_blink_prob);
-  ddata.setRightBlinkProb(ds_res.right_blink_prob);
-  ddata.setSunglassesProb(ds_res.sunglasses_prob);
-  ddata.setOccludedProb(ds_res.occluded_prob);
-  ddata.setReadyProb(ds_res.ready_prob);
-  ddata.setNotReadyProb(ds_res.not_ready_prob);
+struct Rect {int x, y, w, h;};
+void crop_nv12_to_yuv(uint8_t *raw, int stride, int uv_offset, uint8_t *y, uint8_t *u, uint8_t *v, const Rect &rect) {
+  uint8_t *raw_y = raw;
+  uint8_t *raw_uv = raw_y + uv_offset;
+  for (int r = 0; r < rect.h / 2; r++) {
+    memcpy(y + 2 * r * rect.w, raw_y + (2 * r + rect.y) * stride + rect.x, rect.w);
+    memcpy(y + (2 * r + 1) * rect.w, raw_y + (2 * r + rect.y + 1) * stride + rect.x, rect.w);
+    for (int h = 0; h < rect.w / 2; h++) {
+      u[r * rect.w/2 + h] = raw_uv[(r + (rect.y/2)) * stride + (rect.x/2 + h)*2];
+      v[r * rect.w/2 + h] = raw_uv[(r + (rect.y/2)) * stride + (rect.x/2 + h)*2 + 1];
+    }
+  }
 }

-DMonitoringModelResult dmonitoring_eval_frame(DMonitoringModelState* s, void* stream_buf, int width, int height, int stride, int uv_offset, float *calib) {
-  int v_off = height - MODEL_HEIGHT;
-  int h_off = (width - MODEL_WIDTH) / 2;
-  int yuv_buf_len = MODEL_WIDTH * MODEL_HEIGHT;
-
-  uint8_t *raw_buf = (uint8_t *) stream_buf;
-  // vertical crop free
-  uint8_t *raw_y_start = raw_buf + stride * v_off;
+DMonitoringResult dmonitoring_eval_frame(DMonitoringModelState* s, void* stream_buf, int width, int height, int stride, int uv_offset, float *calib) {
+  const int cropped_height = tici_dm_crop::width / 1.33;
+  Rect crop_rect = {width / 2 - tici_dm_crop::width / 2 + tici_dm_crop::x_offset,
+                    height / 2 - cropped_height / 2 + tici_dm_crop::y_offset,
+                    cropped_height / 2,
+                    cropped_height};
+  if (!s->is_rhd) {
+    crop_rect.x += tici_dm_crop::width - crop_rect.w;
+  }

-  uint8_t *net_input_buf = get_buffer(s->net_input_buf, yuv_buf_len);
+  int resized_width = MODEL_WIDTH;
+  int resized_height = MODEL_HEIGHT;
+
+  auto [cropped_y, cropped_u, cropped_v] = get_yuv_buf(s->cropped_buf, crop_rect.w, crop_rect.h);
+  if (!s->is_rhd) {
+    crop_nv12_to_yuv((uint8_t *)stream_buf, stride, uv_offset, cropped_y, cropped_u, cropped_v, crop_rect);
+  } else {
+    auto [mirror_y, mirror_u, mirror_v] = get_yuv_buf(s->premirror_cropped_buf, crop_rect.w, crop_rect.h);
+    crop_nv12_to_yuv((uint8_t *)stream_buf, stride, uv_offset, mirror_y, mirror_u, mirror_v, crop_rect);
+    libyuv::I420Mirror(mirror_y, crop_rect.w,
+                       mirror_u, crop_rect.w / 2,
+                       mirror_v, crop_rect.w / 2,
+                       cropped_y, crop_rect.w,
+                       cropped_u, crop_rect.w / 2,
+                       cropped_v, crop_rect.w / 2,
+                       crop_rect.w, crop_rect.h);
+  }

-  // here makes a uint8 copy
-  for (int r = 0; r < MODEL_HEIGHT; ++r) {
-    memcpy(net_input_buf + r * MODEL_WIDTH, raw_y_start + r * stride + h_off, MODEL_WIDTH);
+  auto [resized_buf, resized_u, resized_v] = get_yuv_buf(s->resized_buf, resized_width, resized_height);
+  uint8_t *resized_y = resized_buf;
+  libyuv::FilterMode mode = libyuv::FilterModeEnum::kFilterBilinear;
+  libyuv::I420Scale(cropped_y, crop_rect.w,
+                    cropped_u, crop_rect.w / 2,
+                    cropped_v, crop_rect.w / 2,
+                    crop_rect.w, crop_rect.h,
+                    resized_y, resized_width,
+                    resized_u, resized_width / 2,
+                    resized_v, resized_width / 2,
+                    resized_width, resized_height,
+                    mode);
+
+
+  int yuv_buf_len = (MODEL_WIDTH/2) * (MODEL_HEIGHT/2) * 6; // Y|u|v -> y|y|y|y|u|v
+  float *net_input_buf = get_buffer(s->net_input_buf, yuv_buf_len);
+  // one shot conversion, O(n) anyway
+  // yuvframe2tensor, normalize
+  for (int r = 0; r < MODEL_HEIGHT/2; r++) {
+    for (int c = 0; c < MODEL_WIDTH/2; c++) {
+      // Y_ul
+      net_input_buf[(r*MODEL_WIDTH/2) + c + (0*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_y[(2*r)*resized_width + 2*c]];
+      // Y_dl
+      net_input_buf[(r*MODEL_WIDTH/2) + c + (1*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_y[(2*r+1)*resized_width + 2*c]];
+      // Y_ur
+      net_input_buf[(r*MODEL_WIDTH/2) + c + (2*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_y[(2*r)*resized_width + 2*c+1]];
+      // Y_dr
+      net_input_buf[(r*MODEL_WIDTH/2) + c + (3*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_y[(2*r+1)*resized_width + 2*c+1]];
+      // U
+      net_input_buf[(r*MODEL_WIDTH/2) + c + (4*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_u[r*resized_width/2 + c]];
+      // V
+      net_input_buf[(r*MODEL_WIDTH/2) + c + (5*(MODEL_WIDTH/2)*(MODEL_HEIGHT/2))] = s->tensor[resized_v[r*resized_width/2 + c]];
+    }
   }

-  // printf("preprocess completed. %d \n", yuv_buf_len);
-  // FILE *dump_yuv_file = fopen("/tmp/rawdump.yuv", "wb");
-  // fwrite(net_input_buf, yuv_buf_len, sizeof(uint8_t), dump_yuv_file);
-  // fclose(dump_yuv_file);
+  //printf("preprocess completed. %d \n", yuv_buf_len);
+  //FILE *dump_yuv_file = fopen("/tmp/rawdump.yuv", "wb");
+  //fwrite(resized_buf, yuv_buf_len, sizeof(uint8_t), dump_yuv_file);
+  //fclose(dump_yuv_file);
+
+  // *** testing ***
+  // idat = np.frombuffer(open("/tmp/inputdump.yuv", "rb").read(), np.float32).reshape(6, 160, 320)
+  // imshow(cv2.cvtColor(tensor_to_frames(idat[None]/0.0078125+128)[0], cv2.COLOR_YUV2RGB_I420))
+
+  //FILE *dump_yuv_file2 = fopen("/tmp/inputdump.yuv", "wb");
+  //fwrite(net_input_buf, MODEL_HEIGHT*MODEL_WIDTH*3/2, sizeof(float), dump_yuv_file2);
+  //fclose(dump_yuv_file2);

   double t1 = millis_since_boot();
-  s->m->addImage((float*)net_input_buf, yuv_buf_len / 4);
+  s->m->addImage(net_input_buf, yuv_buf_len);
   for (int i = 0; i < CALIB_LEN; i++) {
     s->calib[i] = calib[i];
   }
   s->m->execute();
   double t2 = millis_since_boot();

-  DMonitoringModelResult model_res = {0};
-  parse_driver_data(model_res.driver_state_lhd, s, 0);
-  parse_driver_data(model_res.driver_state_rhd, s, 41);
-  model_res.poor_vision_prob = sigmoid(s->output[82]);
-  model_res.wheel_on_right_prob = sigmoid(s->output[83]);
-  model_res.dsp_execution_time = (t2 - t1) / 1000.;
-
-  return model_res;
+  DMonitoringResult ret = {0};
+  for (int i = 0; i < 3; ++i) {
+    ret.face_orientation[i] = s->output[i] * REG_SCALE;
+    ret.face_orientation_meta[i] = exp(s->output[6 + i]);
+  }
+  for (int i = 0; i < 2; ++i) {
+    ret.face_position[i] = s->output[3 + i] * REG_SCALE;
+    ret.face_position_meta[i] = exp(s->output[9 + i]);
+  }
+  for (int i = 0; i < 4; ++i) {
+    ret.ready_prob[i] = sigmoid(s->output[39 + i]);
+  }
+  for (int i = 0; i < 2; ++i) {
+    ret.not_ready_prob[i] = sigmoid(s->output[43 + i]);
+  }
+  ret.face_prob = sigmoid(s->output[12]);
+  ret.left_eye_prob = sigmoid(s->output[21]);
+  ret.right_eye_prob = sigmoid(s->output[30]);
+  ret.left_blink_prob = sigmoid(s->output[31]);
+  ret.right_blink_prob = sigmoid(s->output[32]);
+  ret.sg_prob = sigmoid(s->output[33]);
+  ret.poor_vision = sigmoid(s->output[34]);
+  ret.partial_face = sigmoid(s->output[35]);
+  ret.distracted_pose = sigmoid(s->output[36]);
+  ret.distracted_eyes = sigmoid(s->output[37]);
+  ret.occluded_prob = sigmoid(s->output[38]);
+  ret.dsp_execution_time = (t2 - t1) / 1000.;
+  return ret;
 }

-void dmonitoring_publish(PubMaster &pm, uint32_t frame_id, const DMonitoringModelResult &model_res, float execution_time, kj::ArrayPtr<const float> raw_pred) {
+void dmonitoring_publish(PubMaster &pm, uint32_t frame_id, const DMonitoringResult &res, float execution_time, kj::ArrayPtr<const float> raw_pred) {
   // make msg
   MessageBuilder msg;
-  auto framed = msg.initEvent().initDriverStateV2();
+  auto framed = msg.initEvent().initDriverState();
   framed.setFrameId(frame_id);
   framed.setModelExecutionTime(execution_time);
-  framed.setDspExecutionTime(model_res.dsp_execution_time);
-
-  framed.setPoorVisionProb(model_res.poor_vision_prob);
-  framed.setWheelOnRightProb(model_res.wheel_on_right_prob);
-  fill_driver_data(framed.initLeftDriverData(), model_res.driver_state_lhd);
-  fill_driver_data(framed.initRightDriverData(), model_res.driver_state_rhd);
-
+  framed.setDspExecutionTime(res.dsp_execution_time);
+
+  framed.setFaceOrientation(res.face_orientation);
+  framed.setFaceOrientationStd(res.face_orientation_meta);
+  framed.setFacePosition(res.face_position);
+  framed.setFacePositionStd(res.face_position_meta);
+  framed.setFaceProb(res.face_prob);
+  framed.setLeftEyeProb(res.left_eye_prob);
+  framed.setRightEyeProb(res.right_eye_prob);
+  framed.setLeftBlinkProb(res.left_blink_prob);
+  framed.setRightBlinkProb(res.right_blink_prob);
+  framed.setSunglassesProb(res.sg_prob);
+  framed.setPoorVision(res.poor_vision);
+  framed.setPartialFace(res.partial_face);
+  framed.setDistractedPose(res.distracted_pose);
+  framed.setDistractedEyes(res.distracted_eyes);
+  framed.setOccludedProb(res.occluded_prob);
+  framed.setReadyProb(res.ready_prob);
+  framed.setNotReadyProb(res.not_ready_prob);
   if (send_raw_pred) {
     framed.setRawPredictions(raw_pred.asBytes());
   }

-  pm.send("driverStateV2", msg);
+  pm.send("driverState", msg);
 }

 void dmonitoring_free(DMonitoringModelState* s) {
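
The restored preprocessing packs the 320x640 I420 input into six half-resolution channels (four Y phases, then U, then V) and normalizes every byte through the s->tensor lookup table built in dmonitoring_init: (x - 128) * 0.0078125 = (x - 128) / 128, mapping 0..255 onto roughly [-1, 1). A self-contained sketch of the same packing on a toy 4x4 frame (dimensions and pixel data invented for illustration):

// toy reimplementation of the y|y|y|y|u|v packing restored above
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int W = 4, H = 4;                 // toy frame; the model uses 320x640
  std::vector<uint8_t> y(W * H), u((W / 2) * (H / 2), 128), v((W / 2) * (H / 2), 128);
  for (int i = 0; i < W * H; i++) y[i] = i * 16;

  float tensor[256];                      // same LUT as dmonitoring_init builds
  for (int x = 0; x < 256; ++x) tensor[x] = (x - 128.f) * 0.0078125f;

  const int plane = (W / 2) * (H / 2);    // elements per packed channel
  std::vector<float> net_input(6 * plane);
  for (int r = 0; r < H / 2; r++) {
    for (int c = 0; c < W / 2; c++) {
      const int o = r * (W / 2) + c;
      net_input[o + 0 * plane] = tensor[y[(2 * r) * W + 2 * c]];          // Y_ul
      net_input[o + 1 * plane] = tensor[y[(2 * r + 1) * W + 2 * c]];      // Y_dl
      net_input[o + 2 * plane] = tensor[y[(2 * r) * W + 2 * c + 1]];      // Y_ur
      net_input[o + 3 * plane] = tensor[y[(2 * r + 1) * W + 2 * c + 1]];  // Y_dr
      net_input[o + 4 * plane] = tensor[u[r * (W / 2) + c]];              // U
      net_input[o + 5 * plane] = tensor[v[r * (W / 2) + c]];              // V
    }
  }
  printf("first packed value: %f\n", net_input[0]);  // (0 - 128) / 128 = -1.0
  return 0;
}

This space-to-depth style packing lets the network read the full-resolution Y plane at the chroma grid size without discarding pixels, which is why the input buffer is (MODEL_WIDTH/2) * (MODEL_HEIGHT/2) * 6 floats.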
