UFO 1.0.0
An Efficient Probabilistic 3D Mapping Framework That Embraces the Unknown
Loading...
Searching...
No Matches
map.hpp
1
42#ifndef UFO_VIZ_RENDERABLE_MAP_HPP
43#define UFO_VIZ_RENDERABLE_MAP_HPP
44
45// UFO
46#include <ufo/compute/compute.hpp>
47// #include <ufo/map/map/map.hpp>
48#include <ufo/utility/type_traits.hpp>
49#include <ufo/vision/camera.hpp>
50#include <ufo/viz/renderable.hpp>
51
52// STL
53#include <cstdlib>
54#include <iostream>
55
56namespace ufo
57{
58template <class Map>
60{
61 private:
62 struct Sample {
63 std::uint32_t step_size;
64 std::uint32_t offset;
65
66 bool operator==(Sample const& rhs) const
67 {
68 return step_size == rhs.step_size && offset == rhs.offset;
69 }
70
71 bool operator!=(Sample const& rhs) const { return !(*this == rhs); }
72 };
73
	// Per-frame uniform block uploaded to the ray-trace compute shader.
	// Member order and the explicit `_pad*` members are layout-significant
	// (presumably mirroring the WGSL uniform struct — confirm against the
	// shader); the overall 16-byte-multiple size is checked by the
	// static_assert below.
	struct Uniform {
		Mat4x4f projection;  // inverse of the camera projection matrix (set in createUniform)
		Mat4x4f view;  // inverse of the camera view matrix (set in createUniform)
		Vec2u dim;  // render dimensions as (cols, rows); dim.x * dim.y pixels total
		float near_clip;
		float far_clip;
		Sample sample;  // sub-sampling step/offset for the dispatch
		float _pad0[2];  // padding for 16-byte alignment; not compared below
		TreeIndex node;  // root node index of the map (set in createUniform)
		float _pad1[2];  // padding for 16-byte alignment; not compared below
		Vec3f node_center;  // world-space center of `node`
		float node_half_length;  // half side length of `node` (FIXME in createUniform: never assigned)

		// Compares only the meaningful fields — padding is deliberately
		// excluded, so indeterminate padding bytes cannot make two
		// logically-equal uniforms compare unequal.
		bool operator==(Uniform const& rhs) const
		{
			// clang-format off
			return
				rhs.projection == projection &&
				rhs.view == view &&
				rhs.dim == dim &&
				rhs.near_clip == near_clip &&
				rhs.far_clip == far_clip &&
				rhs.sample == sample &&
				rhs.node == node &&
				rhs.node_center == node_center &&
				rhs.node_half_length == node_half_length;
			// clang-format on
		}

		bool operator!=(Uniform const& rhs) const { return !(*this == rhs); }
	};
	// Have the compiler check byte alignment
	static_assert(sizeof(Uniform) % 16 == 0);
107
	// A single ray-trace result: the hit tree node and the distance along
	// the ray. Presumably written by the compute shader into the hits
	// storage buffer (that path is still commented out in update() —
	// confirm once enabled). `_pad0` pads the struct to a 16-byte multiple.
	struct Hit {
		TreeIndex node;
		float distance;
		float _pad0;  // padding only; carries no data
	};
	// Have the compiler check byte alignment
	static_assert(sizeof(Hit) % 16 == 0);
115
116 public:
	/// Construct a renderable wrapper for `map`.
	/// NOTE(review): the argument is currently NOT stored — the member
	/// initializer is commented out, so map_ is default-constructed.
	/// Confirm intended behavior once the TODO is implemented.
	RenderableMap(Map const& map) // : map_(map)
	{
		// TODO: Implement
	}

	/// Copy constructor.
	/// NOTE(review): copies nothing yet — all members (including GPU
	/// handles) are left at their defaults. clone() relies on this
	/// constructor, so clones are currently empty shells.
	RenderableMap(RenderableMap const& other) // TODO: Implement
	{
		// TODO: Implement
	}

	// GPU handles are released via release(), not here.
	~RenderableMap() override = default;
128
	/// Create the GPU resources needed for rendering: the map's GPU state,
	/// the bind-group layout, pipeline layout, compute pipeline, and the
	/// uniform buffer.
	/// @param device         WebGPU device used to initialize the map's GPU side.
	/// @param texture_format forwarded to createBindGroupLayout, which
	///                       currently ignores it (parameter commented out there).
	void init(WGPUDevice device, WGPUTextureFormat texture_format) override
	{
		map_.gpuInit(device);

		compute_bind_group_layout_ = createBindGroupLayout(map_.gpuDevice(), texture_format);
		compute_pipeline_layout_ =
		    createPipelineLayout(map_.gpuDevice(), compute_bind_group_layout_);
		compute_pipeline_ = createComputePipeline(map_.gpuDevice(), compute_pipeline_layout_);

		// Uniform buffer: written via copy (CopyDst) and bound as a uniform
		// in the compute pass.
		uniform_buffer_ =
		    compute::createBuffer(map_.gpuDevice(), sizeof(uniform_),
		                          WGPUBufferUsage_CopyDst | WGPUBufferUsage_Uniform, false);
	}
142
143 void release() override
144 {
145 if (nullptr != compute_bind_group_) {
146 wgpuBindGroupRelease(compute_bind_group_);
147 compute_bind_group_ = nullptr;
148 }
149 if (nullptr != compute_pipeline_layout_) {
150 wgpuPipelineLayoutRelease(compute_pipeline_layout_);
151 compute_pipeline_layout_ = nullptr;
152 }
153 if (nullptr != compute_bind_group_layout_) {
154 wgpuBindGroupLayoutRelease(compute_bind_group_layout_);
155 compute_bind_group_layout_ = nullptr;
156 }
157
158 // TODO: Buffers
159
160 if (nullptr != compute_pipeline_) {
161 wgpuComputePipelineRelease(compute_pipeline_);
162 compute_pipeline_ = nullptr;
163 }
164 }
165
166 void update(WGPUDevice device, WGPUCommandEncoder encoder,
167 WGPUTextureView render_texture, WGPUTextureView depth_texture,
168 Camera const& camera) override
169 {
170 Uniform uniform = createUniform(map_, camera);
171
172 // TODO: This should return number of bytes that should be written, so we can know if
173 // the map has changed and so we can estimate how much time it will take to transfer
174 // the data
175 map_.gpuUpdateBuffers();
176
177 if (uniform_ != uniform) {
178 uniform_ = uniform;
179 hits_node_.resize(uniform.dim.x * uniform.dim.y);
180 hits_depth_.resize(uniform.dim.x * uniform.dim.y);
181
182 if (nullptr != hits_staging_buffer_) {
183 wgpuBufferRelease(hits_staging_buffer_);
184 }
185
186 if (nullptr != hits_storage_buffer_) {
187 wgpuBufferRelease(hits_storage_buffer_);
188 }
189
190 if (nullptr != compute_bind_group_) {
191 wgpuBindGroupRelease(compute_bind_group_);
192 }
193
194 // hits_staging_buffer_ = compute::createBuffer(
195 // map_.gpuDevice(), hits_.size() * sizeof(typename
196 // decltype(hits_)::value_type), WGPUBufferUsage_MapRead |
197 // WGPUBufferUsage_CopyDst, false);
198
199 // hits_storage_buffer_ = compute::createBuffer(
200 // map_.gpuDevice(), hits_.size() * sizeof(typename
201 // decltype(hits_)::value_type), WGPUBufferUsage_Storage |
202 // WGPUBufferUsage_CopySrc, false);
203
204 compute_bind_group_ = createBindGroup(device);
205 }
206
207 WGPUComputePassEncoder compute_pass_encoder =
208 wgpuCommandEncoderBeginComputePass(encoder, nullptr);
209
210 wgpuComputePassEncoderSetPipeline(compute_pass_encoder, compute_pipeline_);
211 wgpuComputePassEncoderSetBindGroup(compute_pass_encoder, 0, compute_bind_group_, 0,
212 nullptr);
213
214 std::uint32_t invocation_count_x = uniform.dim.x / uniform_.sample.step_size;
215 std::uint32_t invocation_count_y = uniform.dim.y / uniform_.sample.step_size;
216
217 // TODO: Do not hardcode here
218 std::uint32_t workgroup_size_x = 8;
219 std::uint32_t workgroup_size_y = 4;
220 std::uint32_t workgroup_count_x =
221 (invocation_count_x + workgroup_size_x - 1) / workgroup_size_x;
222 std::uint32_t workgroup_count_y =
223 (invocation_count_y + workgroup_size_y - 1) / workgroup_size_y;
224
225 wgpuComputePassEncoderDispatchWorkgroups(compute_pass_encoder, workgroup_count_x,
226 workgroup_count_y, 1);
227
228 wgpuComputePassEncoderEnd(compute_pass_encoder);
229 wgpuComputePassEncoderRelease(compute_pass_encoder);
230
231 // wgpuCommandEncoderCopyBufferToBuffer(
232 // encoder, hits_storage_buffer_, 0, hits_staging_buffer_, 0,
233 // hits_.size() * sizeof(typename decltype(hits_)::value_type));
234
235 WGPUCommandBuffer command_buffer = wgpuCommandEncoderFinish(encoder, nullptr);
236 assert(command_buffer);
237
238 wgpuQueueSubmit(map_.gpuQueue(), 1, &command_buffer);
239
240 // wgpuBufferMapAsync(hits_staging_buffer_, WGPUMapMode_Read, 0,
241 // hits_.size() * sizeof(typename decltype(hits_)::value_type),
242 // handle_buffer_map, nullptr);
243 // wgpuDevicePoll(device, true, NULL);
244
245 // Hit* buf = static_cast<Hit*>(wgpuBufferGetMappedRange(
246 // hits_staging_buffer_, 0,
247 // hits_.size() * sizeof(typename decltype(hits_)::value_type)));
248 // assert(buf);
249
250 // std::memcpy(hits_.data(), buf,
251 // hits_.size() * sizeof(typename decltype(hits_)::value_type));
252
253 wgpuBufferUnmap(hits_staging_buffer_);
254 wgpuCommandBufferRelease(command_buffer);
255 // wgpuCommandEncoderRelease(command_encoder);
256 }
257
	/// Draw GUI controls for this renderable. Not yet implemented — this is
	/// intentionally a no-op for now.
	void onGui() override
	{
		// TODO: Implement
	}
262
263 [[nodiscard]] RenderableMap* clone() const override { return new RenderableMap(*this); }
264
265 private:
	/// Create the bind-group layout describing the four bindings used by
	/// the ray-trace compute shader. Binding indices must match both
	/// createBindGroup() and the shader.
	/// @param device the device to create the layout on (texture_format is
	///               currently unused).
	[[nodiscard]] WGPUBindGroupLayout createBindGroupLayout(
	    WGPUDevice device, WGPUTextureFormat /* texture_format */)
	{
		std::array<WGPUBindGroupLayoutEntry, 4> binding_layout{};

		// Hits output buffer. (The old comment said "Output texture", but
		// this entry is declared as a storage *buffer* and is bound to
		// hits_storage_buffer_ in createBindGroup().)
		compute::setDefault(binding_layout[0]);
		binding_layout[0].binding = 0;
		binding_layout[0].visibility = WGPUShaderStage_Compute;
		binding_layout[0].buffer.type = WGPUBufferBindingType_Storage;

		// Uniform block (see struct Uniform)
		compute::setDefault(binding_layout[1]);
		binding_layout[1].binding = 1;
		binding_layout[1].visibility = WGPUShaderStage_Compute;
		binding_layout[1].buffer.type = WGPUBufferBindingType_Uniform;
		binding_layout[1].buffer.minBindingSize = sizeof(uniform_);

		// Tree buffer (read-only storage; bound to map_.gpuTreeBuffer())
		compute::setDefault(binding_layout[2]);
		binding_layout[2].binding = 2;
		binding_layout[2].visibility = WGPUShaderStage_Compute;
		binding_layout[2].buffer.type = WGPUBufferBindingType_ReadOnlyStorage;

		// Occupancy buffer (read-only storage; bound to map_.gpuOccupancyBuffer())
		compute::setDefault(binding_layout[3]);
		binding_layout[3].binding = 3;
		binding_layout[3].visibility = WGPUShaderStage_Compute;
		binding_layout[3].buffer.type = WGPUBufferBindingType_ReadOnlyStorage;

		// Create a bind group layout
		WGPUBindGroupLayoutDescriptor bind_group_layout_desc{};
		bind_group_layout_desc.label = "";
		bind_group_layout_desc.nextInChain = nullptr;
		bind_group_layout_desc.entryCount = binding_layout.size();
		bind_group_layout_desc.entries = binding_layout.data();

		return wgpuDeviceCreateBindGroupLayout(device, &bind_group_layout_desc);
	}
305
306 [[nodiscard]] WGPUPipelineLayout createPipelineLayout(
307 WGPUDevice device, WGPUBindGroupLayout bind_group_layout)
308 {
309 WGPUPipelineLayoutDescriptor desc{};
310 desc.nextInChain = nullptr;
311 desc.bindGroupLayoutCount = 1;
312 desc.bindGroupLayouts = &bind_group_layout;
313 return wgpuDeviceCreatePipelineLayout(device, &desc);
314 }
315
316 [[nodiscard]] WGPUComputePipeline createComputePipeline(
317 WGPUDevice device, WGPUPipelineLayout pipeline_layout)
318 {
319 WGPUShaderModule shader_module;
320 if constexpr (2 == Map::dimensions()) {
321 shader_module =
322 compute::loadShaderModule(device, UFOVIZ_SHADER_DIR "/map_ray_trace_2d.wgsl");
323 } else if constexpr (3 == Map::dimensions()) {
324 shader_module =
325 compute::loadShaderModule(device, UFOVIZ_SHADER_DIR "/map_ray_trace_3d.wgsl");
326 } else if constexpr (4 == Map::dimensions()) {
327 shader_module =
328 compute::loadShaderModule(device, UFOVIZ_SHADER_DIR "/map_ray_trace_4d.wgsl");
329 } else {
330 static_assert(dependent_false_v<Map>, "Non-supported number of dimensions");
331 }
332
333 if (nullptr == shader_module) {
334 std::cerr << "Could not load shader!" << std::endl;
335 abort();
336 }
337
338 WGPUComputePipelineDescriptor desc{};
339 desc.nextInChain = nullptr;
340 desc.compute.constantCount = 0; // TODO: Change
341 desc.compute.constants = nullptr;
342 desc.compute.entryPoint = "main";
343 desc.compute.module = shader_module;
344 desc.layout = pipeline_layout;
345
346 WGPUComputePipeline pipeline = wgpuDeviceCreateComputePipeline(device, &desc);
347
348 wgpuShaderModuleRelease(shader_module);
349
350 return pipeline;
351 }
352
	/// Create the bind group that wires the concrete buffers to the binding
	/// slots declared in createBindGroupLayout().
	[[nodiscard]] WGPUBindGroup createBindGroup(WGPUDevice device)
	{
		// Create a binding
		std::array<WGPUBindGroupEntry, 4> binding{};

		// Hits buffer
		// NOTE(review): hits_storage_buffer_ is never created yet (its
		// creation in update() is commented out) and `size` is left at 0
		// here — this entry is WIP.
		binding[0].nextInChain = nullptr;
		binding[0].binding = 0;
		binding[0].buffer = hits_storage_buffer_;
		binding[0].offset = 0;
		// binding[0].size = hits_.size() * sizeof(typename
		// decltype(hits_)::value_type);

		// Uniform
		binding[1].nextInChain = nullptr;
		binding[1].binding = 1;
		binding[1].buffer = uniform_buffer_;
		binding[1].offset = 0;
		binding[1].size = sizeof(uniform_);

		// Tree
		binding[2].nextInChain = nullptr;
		binding[2].binding = 2;
		binding[2].buffer = map_.gpuTreeBuffer();
		binding[2].offset = 0;
		binding[2].size = map_.gpuTreeBufferSize();

		// Occupancy
		binding[3].nextInChain = nullptr;
		binding[3].binding = 3;
		binding[3].buffer = map_.gpuOccupancyBuffer();
		binding[3].offset = 0;
		binding[3].size = map_.gpuOccupancyBufferSize();

		// A bind group contains one or multiple bindings
		WGPUBindGroupDescriptor desc{};
		desc.nextInChain = nullptr;
		desc.layout = compute_bind_group_layout_;
		desc.entryCount = binding.size();
		desc.entries = binding.data();
		return wgpuDeviceCreateBindGroup(device, &desc);
	}
395
396 [[nodiscard]] static Uniform createUniform(Map const& map, Camera const& camera)
397 {
398 Uniform uniform;
399 uniform.projection = inverse(camera.projection());
400 uniform.view = inverse(camera.view());
401 uniform.dim = Vec2u(camera.cols, camera.rows);
402 uniform.near_clip = camera.near_clip;
403 uniform.far_clip = camera.far_clip;
404 uniform.node = map.index();
405 uniform.node_center = map.center(uniform.node);
406 // FIXME: uniform.node_half_length = map.halfLength(uniform.node);
407 return uniform;
408 }
409
 private:
	// The map being rendered. NOTE(review): the constructors never assign
	// this from their argument yet (initializer commented out there).
	Map map_;

	// Compute pipeline state — created in init(), released in release().
	WGPUBindGroupLayout compute_bind_group_layout_ = nullptr;
	WGPUPipelineLayout compute_pipeline_layout_ = nullptr;
	WGPUComputePipeline compute_pipeline_ = nullptr;
	// Recreated in update() whenever the uniform changes.
	WGPUBindGroup compute_bind_group_ = nullptr;

	// GPU buffers. The hits buffers are WIP: their creation in update() is
	// commented out, so both currently stay nullptr.
	WGPUBuffer hits_staging_buffer_ = nullptr;
	WGPUBuffer hits_storage_buffer_ = nullptr;
	WGPUBuffer uniform_buffer_ = nullptr;  // created in init()

	// NOTE(review): unused in this file — presumably for a future
	// render-texture output path; confirm before removing.
	WGPUTexture texture_ = nullptr;
	WGPUTextureView texture_view_ = nullptr;

	// Last uniform state; compared in update() to detect changes.
	Uniform uniform_;

	// CPU-side result storage, resized to dim.x * dim.y in update().
	std::vector<TreeIndex> hits_node_;
	std::vector<float> hits_depth_;
429};
430} // namespace ufo
431
432#endif // UFO_VIZ_RENDERABLE_MAP_HPP
Coord center() const
Returns the center of the tree (/ root node).
Definition tree.hpp:637
constexpr offset_type offset() const noexcept
Returns the offset of the root node.
Definition tree.hpp:770
constexpr Index index() const noexcept
Returns the index of the root node.
Definition tree.hpp:803
static constexpr std::size_t dimensions() noexcept
Returns the number of dimensions of the tree (i.e., 1 = binary tree, 2 = quadtree, 3 = octree, etc.).
Definition tree.hpp:211
All vision-related classes and functions.
Definition cloud.hpp:49
constexpr M inverse(M const &m) noexcept
Computes the inverse of a square floating-point matrix.
Definition mat.hpp:1097
constexpr auto distance(A const &a, B const &b)
Computes the minimum distance between two shapes.
Definition distance.hpp:61