// planus/builder.rs

1use core::{marker::PhantomData, mem::MaybeUninit};
2
3use crate::{backvec::BackVec, Offset, Primitive, WriteAsOffset};
4
#[derive(Debug)]
/// Builder for serializing flatbuffers.
///
/// # Examples
/// ```
/// use planus::Builder;
/// use planus_example::monster_generated::my_game::sample::Weapon;
/// let mut builder = Builder::new();
/// let weapon = Weapon::create(&mut builder, "Axe", 24);
/// builder.finish(weapon, None);
/// ```
pub struct Builder {
    inner: BackVec,
    // This is a bit complicated. The buffer has support for guaranteeing a
    // specific write gets a specific alignment. It has many writes and thus
    // many promises, so how do we keep track of this across all of those
    // promises, even when writing from the back?
    //
    // The algorithm works by aggregating all of the promises into one big promise.
    // Specifically, we promise that the remaining part of the buffer will always
    // be of size `self.delayed_bytes + self.alignment() * K` where we are free to
    // choose K as we want.
    //
    // Initially we set `delayed_bytes` to 0 and `alignment` to 1, i.e. we have
    // only promised to write `0 + 1 * K` bytes, for any `K` we choose, which will
    // be true no matter how many bytes we write.
    //
    // Whenever we get a new request `(req_size, req_alignment)`, then that
    // `req_size` will be counted towards the previous promises, i.e. we need
    // to decrease `self.delayed_bytes` by `req_size` (taking the result modulo
    // `alignment`). However we also need to fulfil this new promise.
    //
    // To do this, we do two things. 1) We insert sufficient padding, before the
    // current request, to make sure that the current request is compatible with
    // the previous ones. 2) We set `alignment = alignment.max(req_alignment)`.
    //
    // One small wrinkle is that we do not store `alignment` directly for performance
    // reasons. Instead we store `alignment_mask = alignment - 1`, so we can use
    // binary AND (`&`) instead of modulo (`%`).
    //
    // Bytes still owed under the aggregated promise, always kept in the range
    // `0..=alignment_mask`.
    delayed_bytes: usize,
    // `alignment - 1` for the largest alignment requested so far (a power of
    // two minus one, so it can be used as a bit mask).
    alignment_mask: usize,

    #[cfg(debug_assertions)]
    // Bytes missing to be written by a call to prepare_write
    missing_bytes: usize,
}
53
54impl Default for Builder {
55    fn default() -> Self {
56        Self::with_capacity(0)
57    }
58}
59
impl Builder {
    /// Creates a new Builder.
    pub fn new() -> Self {
        Self::with_capacity(0)
    }

    /// Gets the length of the internal buffer in bytes.
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Returns true if the internal buffer is empty.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Creates a new builder with a specific internal capacity already allocated.
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            inner: BackVec::with_capacity(capacity),

            // No alignment promises yet: `0 + 1 * K` bytes, trivially true.
            delayed_bytes: 0,
            alignment_mask: 0,

            #[cfg(debug_assertions)]
            missing_bytes: 0,
        }
    }

    /// Resets the builders internal state and clears the internal buffer.
    pub fn clear(&mut self) {
        self.inner.clear();
        self.delayed_bytes = 0;
        self.alignment_mask = 0;
        #[cfg(debug_assertions)]
        {
            self.missing_bytes = 0;
        }
    }

    #[doc(hidden)]
    /// Reserves room for an upcoming write of exactly `size` bytes with the
    /// alignment `alignment_mask + 1`, inserting whatever zero padding is
    /// needed to keep all previously made alignment promises (see the comment
    /// on [`Builder`] for the algorithm).
    ///
    /// In debug builds, the subsequent [`Builder::write`] calls must supply
    /// exactly `size` bytes before `prepare_write` is called again.
    pub fn prepare_write(&mut self, size: usize, alignment_mask: usize) {
        debug_assert!((alignment_mask + 1) & alignment_mask == 0); // Check that the alignment is a power of two
        #[cfg(debug_assertions)]
        debug_assert_eq!(self.missing_bytes, 0);

        // Count `size` against the aggregated promise, working modulo the
        // current (largest-so-far) alignment; `wrapping_sub` is fine because
        // the result is immediately masked.
        let delayed_bytes = self.delayed_bytes.wrapping_sub(size) & self.alignment_mask;
        // Padding needed so that this write lands on a multiple of the
        // requested alignment (measured from the back of the buffer).
        let needed_padding = delayed_bytes & alignment_mask;
        self.delayed_bytes = delayed_bytes.wrapping_sub(needed_padding);
        // Aggregate the new alignment requirement (masks of powers of two
        // combine with bitwise or).
        self.alignment_mask |= alignment_mask;
        self.inner.reserve(size.wrapping_add(needed_padding));
        // TODO: investigate if it makes sense to use an extend_with_zeros_unchecked for performance, given
        // that we know we have enough space
        self.inner.extend_with_zeros(needed_padding);

        debug_assert_eq!(self.delayed_bytes & alignment_mask, 0);

        #[cfg(debug_assertions)]
        {
            self.missing_bytes = size;
        }
    }

    #[doc(hidden)]
    /// Returns an [`Offset`] for the value most recently written, i.e. the
    /// current buffer length (the buffer grows from the back, so offsets are
    /// measured from the end of the finished buffer).
    pub fn current_offset<T: ?Sized>(&self) -> Offset<T> {
        Offset {
            offset: self.inner.len() as u32,
            phantom: PhantomData,
        }
    }

    #[doc(hidden)]
    /// Writes raw bytes into space previously reserved by
    /// [`Builder::prepare_write`]. In debug builds, panics if more bytes are
    /// written than were reserved.
    pub fn write(&mut self, buffer: &[u8]) {
        #[cfg(debug_assertions)]
        {
            self.missing_bytes = self.missing_bytes.checked_sub(buffer.len()).unwrap();
        }
        // TODO: investigate if it makes sense to use an extend_from_slice_unchecked for performance, given
        // that we know we have enough space
        self.inner.extend_from_slice(buffer);
    }

    #[doc(hidden)]
    /// Reserves an aligned region of `size` bytes and lets `f` fill it in.
    /// `f` receives the offset the region will have in the finished buffer
    /// (the buffer length after this write) and the uninitialized bytes.
    ///
    /// # Safety
    /// `f` must fully initialize all `size` bytes it is handed — presumably a
    /// requirement of `BackVec::extend_write`; confirm against its contract.
    pub unsafe fn write_with(
        &mut self,
        size: usize,
        alignment_mask: usize,
        f: impl FnOnce(u32, &mut [MaybeUninit<u8>]),
    ) {
        self.prepare_write(size, alignment_mask);
        // The offset of this write measured from the end of the buffer, once
        // the `size` bytes have been appended.
        let offset = (self.inner.len() + size) as u32;
        self.inner.extend_write(size, |bytes| f(offset, bytes));
        #[cfg(debug_assertions)]
        {
            self.missing_bytes = self.missing_bytes.checked_sub(size).unwrap();
        }
    }

    #[doc(hidden)]
    /// Prepares a write of a vtable (`vtable_size + 4` bytes, 2-aligned; the
    /// extra 4 bytes appear to be the table's vtable-offset field) and returns
    /// the buffer position the whole table will occupy once its padding and
    /// `object_size` bytes are also written.
    ///
    /// NOTE(review): the padding computation mirrors `prepare_write` without
    /// mutating state; the trailing `+ 4` presumably accounts for the object's
    /// soffset field — confirm against the generated-code callers.
    pub fn get_buffer_position_and_prepare_write(
        &mut self,
        vtable_size: usize,
        object_size: usize,
        object_alignment_mask: usize,
    ) -> usize {
        debug_assert!((object_alignment_mask + 1) & object_alignment_mask == 0); // Check that the alignment is a power of two

        const VTABLE_ALIGNMENT: usize = 2;
        const VTABLE_ALIGNMENT_MASK: usize = VTABLE_ALIGNMENT - 1;
        self.prepare_write(vtable_size + 4, VTABLE_ALIGNMENT_MASK);

        // Predict (without committing) the padding the object write will need.
        let delayed_bytes = self.delayed_bytes.wrapping_sub(object_size) & self.alignment_mask;
        let needed_padding = delayed_bytes & object_alignment_mask;

        self.inner.len() + vtable_size + 4 + needed_padding + object_size + 4
    }

    /// Finish writing the internal buffer and return a byte slice of it.
    ///
    /// This will make sure all alignment requirements are fulfilled and that
    /// the file identifier has been written if specified.
    ///
    /// # Examples
    /// ```
    /// use planus::Builder;
    /// use planus_example::monster_generated::my_game::sample::Weapon;
    /// let mut builder = Builder::new();
    /// let weapon = Weapon::create(&mut builder, "Axe", 24);
    /// builder.finish(weapon, None);
    /// ```
    ///
    /// It can also be used to directly serialize an owned flatbuffers struct
    /// ```
    /// use planus::Builder;
    /// use planus_example::monster_generated::my_game::sample::Weapon;
    /// let mut builder = Builder::new();
    /// let weapon = Weapon { name: Some("Sword".to_string()), damage: 12 };
    /// let data = builder.finish(&weapon, None);
    /// ```
    pub fn finish<T>(
        &mut self,
        root: impl WriteAsOffset<T>,
        file_identifier: Option<[u8; 4]>,
    ) -> &[u8] {
        let root = root.prepare(self);

        if let Some(file_identifier) = file_identifier {
            // TODO: how does alignment interact with file identifiers? Is the alignment with or without the header?
            self.prepare_write(
                8,
                <Offset<T> as Primitive>::ALIGNMENT_MASK.max(self.alignment_mask),
            );
            // Root offset: distance from the start of the finished buffer to
            // the root object (4 bytes for this field itself, then the gap).
            self.write(&(4 + self.inner.len() as u32 - root.offset).to_le_bytes());
            self.write(&file_identifier);
        } else {
            self.prepare_write(
                4,
                <Offset<T> as Primitive>::ALIGNMENT_MASK.max(self.alignment_mask),
            );
            self.write(&(4 + self.inner.len() as u32 - root.offset).to_le_bytes());
        }
        // All promised padding must have been consumed by the final write.
        debug_assert_eq!(self.delayed_bytes, 0);
        self.inner.as_slice()
    }
}
225
#[cfg(test)]
mod tests {
    use alloc::vec::Vec;
    use rand::{thread_rng, Rng};

    use super::*;

    /// Writes randomly sized, randomly aligned chunks and verifies that every
    /// chunk ends up correctly aligned (relative to the end of the buffer) and
    /// unclobbered in the finished output.
    #[test]
    fn test_buffer_random() {
        let mut slice = [0; 128];
        let mut rng = thread_rng();
        // (offset from the back of the buffer, chunk size, requested alignment)
        let mut back_offsets: Vec<(usize, usize, usize)> = Vec::new();

        for _ in 0..50 {
            let mut builder = Builder::new();
            back_offsets.clear();

            for byte in 1..50 {
                let size: usize = rng.gen::<usize>() % slice.len();
                let slice = &mut slice[..size];
                for p in &mut *slice {
                    *p = byte;
                }
                let alignment: usize = 1 << (rng.gen::<u32>() % 5);
                let alignment_mask = alignment - 1;
                // Capture the length *before* `prepare_write` inserts padding.
                // Previously this was read after `prepare_write`, which made
                // the assertion below vacuously true for any padding amount.
                let len_before = builder.inner.len();
                builder.prepare_write(size, alignment_mask);
                builder.write(slice);
                // At most `alignment - 1` padding bytes may be inserted.
                assert!(builder.inner.len() < len_before + slice.len() + alignment);
                back_offsets.push((builder.inner.len(), size, alignment));
            }
            // Finish with a random amount of trailing (front-of-buffer) noise
            // so the chunk offsets are exercised at varying distances.
            let random_padding: usize = rng.gen::<usize>() % slice.len();
            let slice = &mut slice[..random_padding];
            for p in &mut *slice {
                *p = rng.gen();
            }
            builder.prepare_write(random_padding, 1);
            builder.write(slice);
            let buffer = builder.finish(builder.current_offset::<()>(), None);

            for (i, (back_offset, size, alignment)) in back_offsets.iter().enumerate() {
                let byte = (i + 1) as u8;
                // Back offsets are measured from the end of the buffer.
                let offset = buffer.len() - back_offset;
                assert_eq!(offset % alignment, 0);
                assert!(buffer[offset..offset + size].iter().all(|&b| b == byte));
            }
        }
    }

    /// Checks exact padding and layout for a fixed sequence of writes,
    /// including the `delayed_bytes` bookkeeping after each step.
    #[test]
    fn test_buffer_align() {
        let mut builder = Builder::new();
        builder.prepare_write(3, 0);
        builder.write(b"MNO");
        assert_eq!(builder.delayed_bytes, 0);
        builder.prepare_write(4, 1);
        builder.write(b"IJKL");
        assert_eq!(builder.delayed_bytes, 0);
        builder.prepare_write(8, 3);
        builder.write(b"ABCDEFGH");
        assert_eq!(builder.delayed_bytes, 0);
        builder.prepare_write(7, 0);
        builder.write(b"0123456");
        // 4-byte root offset, one padding byte, then the writes back-to-front.
        assert_eq!(
            builder.finish(builder.current_offset::<()>(), None),
            b"\x05\x00\x00\x00\x000123456ABCDEFGHIJKLMNO"
        );

        builder.clear();
        builder.prepare_write(4, 3);
        builder.write(b"IJKL");
        assert_eq!(builder.delayed_bytes, 0);
        builder.prepare_write(1, 0);
        builder.write(b"X");
        assert_eq!(builder.delayed_bytes, 3);
        builder.prepare_write(1, 0);
        builder.write(b"Y");
        assert_eq!(builder.delayed_bytes, 2);
        builder.prepare_write(8, 7);
        builder.write(b"ABCDEFGH");
        assert_eq!(builder.delayed_bytes, 0);
        assert_eq!(
            builder.finish(builder.current_offset::<()>(), None),
            b"\x08\x00\x00\x00\x00\x00\x00\x00ABCDEFGH\x00\x00YXIJKL"
        );
    }
}