Commit db972be2 authored by jean-airoldie, committed by Robert Winslow

[rust] Ran rustfmt against library code (#5389)

parent e304f8c1
@@ -21,13 +21,13 @@ use std::marker::PhantomData;
 use std::ptr::write_bytes;
 use std::slice::from_raw_parts;
 
-use endian_scalar::{read_scalar_at, emplace_scalar};
+use endian_scalar::{emplace_scalar, read_scalar_at};
 use primitives::*;
 use push::{Push, PushAlignment};
 use table::Table;
-use vtable::{VTable, field_index_to_field_offset};
-use vtable_writer::VTableWriter;
 use vector::{SafeSliceAccess, Vector};
+use vtable::{field_index_to_field_offset, VTable};
+use vtable_writer::VTableWriter;
 
 pub const N_SMALLVEC_STRING_VECTOR_CAPACITY: usize = 16;
@@ -69,8 +69,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     pub fn new_with_capacity(size: usize) -> Self {
         // we need to check the size here because we create the backing buffer
         // directly, bypassing the typical way of using grow_owned_buf:
-        assert!(size <= FLATBUFFERS_MAX_BUFFER_SIZE,
-                "cannot initialize buffer bigger than 2 gigabytes");
+        assert!(
+            size <= FLATBUFFERS_MAX_BUFFER_SIZE,
+            "cannot initialize buffer bigger than 2 gigabytes"
+        );
         FlatBufferBuilder {
             owned_buf: vec![0u8; size],
@@ -104,7 +106,9 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
         {
             let to_clear = self.owned_buf.len() - self.head;
             let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
-            unsafe { write_bytes(ptr, 0, to_clear); }
+            unsafe {
+                write_bytes(ptr, 0, to_clear);
+            }
         }
         self.head = self.owned_buf.len();
@@ -173,7 +177,9 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// Users probably want to use `push_slot` to add values after calling this.
     #[inline]
     pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
-        self.assert_not_nested("start_table can not be called when a table or vector is under construction");
+        self.assert_not_nested(
+            "start_table can not be called when a table or vector is under construction",
+        );
         self.nested = true;
 
         WIPOffset::new(self.used_space() as UOffsetT)
@@ -183,7 +189,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     ///
     /// Asserts that the builder is in a nested state.
     #[inline]
-    pub fn end_table(&mut self, off: WIPOffset<TableUnfinishedWIPOffset>) -> WIPOffset<TableFinishedWIPOffset> {
+    pub fn end_table(
+        &mut self,
+        off: WIPOffset<TableUnfinishedWIPOffset>,
+    ) -> WIPOffset<TableFinishedWIPOffset> {
         self.assert_nested("end_table");
         let o = self.write_vtable(off);
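As context for this low-level API (normally driven by generated code), here is a minimal sketch of building a one-field table by hand. The slot offset `4` follows from `field_index_to_field_offset(0)`; this example is illustrative and not part of the commit:

```rust
extern crate flatbuffers;

fn main() {
    let mut b = flatbuffers::FlatBufferBuilder::new();
    let start = b.start_table();
    // field index 0 lives at vtable byte offset 4; a value equal to the
    // default (here 0) would be skipped rather than written.
    b.push_slot::<u32>(4, 42u32, 0);
    let table = b.end_table(start);
    b.finish_minimal(table);
}
```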
@@ -203,7 +212,9 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// function will want to use `push` to add values.
     #[inline]
     pub fn start_vector<T: Push>(&mut self, num_items: usize) {
-        self.assert_not_nested("start_vector can not be called when a table or vector is under construction");
+        self.assert_not_nested(
+            "start_vector can not be called when a table or vector is under construction",
+        );
         self.nested = true;
         self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
     }
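The manual vector path mentioned in the doc comments looks roughly like this (a sketch; `b` is a `FlatBufferBuilder` as in the previous example, and elements are pushed in reverse because the buffer is written back-to-front):

```rust
// builds the vector [1, 2, 3]
b.start_vector::<u32>(3);
for &x in [1u32, 2, 3].iter().rev() {
    b.push(x);
}
let v = b.end_vector::<u32>(3);
```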
@@ -227,14 +238,18 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// The wire format represents this as a zero-terminated byte vector.
     #[inline]
     pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
-        self.assert_not_nested("create_string can not be called when a table or vector is under construction");
+        self.assert_not_nested(
+            "create_string can not be called when a table or vector is under construction",
+        );
         WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
     }
 
     /// Create a zero-terminated byte vector.
     #[inline]
     pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
-        self.assert_not_nested("create_byte_string can not be called when a table or vector is under construction");
+        self.assert_not_nested(
+            "create_byte_string can not be called when a table or vector is under construction",
+        );
         self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
         self.push(0u8);
         self.push_bytes_unprefixed(data);
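For reference, a usage sketch of the two string entry points (illustrative only):

```rust
let mut b = flatbuffers::FlatBufferBuilder::new_with_capacity(1024);
let s = b.create_string("hello");         // WIPOffset<&str>
let raw = b.create_byte_string(b"hello"); // WIPOffset<&[u8]>, also zero-terminated
```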
@@ -249,8 +264,13 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// always safe, on any platform: bool, u8, i8, and any
     /// FlatBuffers-generated struct.
     #[inline]
-    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(&'a mut self, items: &'b [T]) -> WIPOffset<Vector<'fbb, T>> {
-        self.assert_not_nested("create_vector_direct can not be called when a table or vector is under construction");
+    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
+        &'a mut self,
+        items: &'b [T],
+    ) -> WIPOffset<Vector<'fbb, T>> {
+        self.assert_not_nested(
+            "create_vector_direct can not be called when a table or vector is under construction",
+        );
         let elem_size = T::size();
         self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
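Because `u8` is one of the `SafeSliceAccess` types named in the doc comment, the whole slice can be copied in as raw bytes (a sketch, reusing the builder `b` from above):

```rust
let payload: &[u8] = &[1, 2, 3, 4];
// one bulk copy instead of a per-element loop:
let v = b.create_vector_direct(payload);
```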
@@ -269,12 +289,18 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// Speed-sensitive users may wish to reduce memory usage by creating the
     /// vector manually: use `start_vector`, `push`, and `end_vector`.
     #[inline]
-    pub fn create_vector_of_strings<'a, 'b>(&'a mut self, xs: &'b [&'b str]) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
+    pub fn create_vector_of_strings<'a, 'b>(
+        &'a mut self,
+        xs: &'b [&'b str],
+    ) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
         self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
         // internally, smallvec can be a stack-allocated or heap-allocated vector:
         // if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
-        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> = smallvec::SmallVec::with_capacity(xs.len());
-        unsafe { offsets.set_len(xs.len()); }
+        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
+            smallvec::SmallVec::with_capacity(xs.len());
+        unsafe {
+            offsets.set_len(xs.len());
+        }
 
         // note that this happens in reverse, because the buffer is built back-to-front:
         for (i, &s) in xs.iter().enumerate().rev() {
@@ -289,7 +315,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// Speed-sensitive users may wish to reduce memory usage by creating the
     /// vector manually: use `start_vector`, `push`, and `end_vector`.
     #[inline]
-    pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(&'a mut self, items: &'b [T]) -> WIPOffset<Vector<'fbb, T::Output>> {
+    pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
+        &'a mut self,
+        items: &'b [T],
+    ) -> WIPOffset<Vector<'fbb, T::Output>> {
         let elem_size = T::size();
         self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
         for i in (0..items.len()).rev() {
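For comparison with the manual path, the convenience wrappers in use (a sketch; both iterate in reverse internally, so callers see elements in their original order):

```rust
let nums = b.create_vector(&[1i32, 2, 3]);
let names = b.create_vector_of_strings(&["ann", "bob"]);
```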
@@ -315,10 +344,12 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     ///
     /// This is somewhat low-level and is mostly used by the generated code.
     #[inline]
-    pub fn required(&self,
-                    tab_revloc: WIPOffset<TableFinishedWIPOffset>,
-                    slot_byte_loc: VOffsetT,
-                    assert_msg_name: &'static str) {
+    pub fn required(
+        &self,
+        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
+        slot_byte_loc: VOffsetT,
+        assert_msg_name: &'static str,
+    ) {
         let idx = self.used_space() - tab_revloc.value() as usize;
         let tab = Table::new(&self.owned_buf[self.head..], idx);
         let o = tab.vtable().get(slot_byte_loc) as usize;
@@ -366,7 +397,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     }
 
     /// Write the VTable, if it is new.
-    fn write_vtable(&mut self, table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>) -> WIPOffset<VTableWIPOffset> {
+    fn write_vtable(
+        &mut self,
+        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
+    ) -> WIPOffset<VTableWIPOffset> {
         self.assert_nested("write_vtable");
 
         // Write the vtable offset, which is the start of any Table.
@@ -433,9 +467,11 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
             // serialize every FieldLoc to the vtable:
             for &fl in self.field_locs.iter() {
                 let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
-                debug_assert_eq!(vtfw.get_field_offset(fl.id),
-                                 0,
-                                 "tried to write a vtable field multiple times");
+                debug_assert_eq!(
+                    vtfw.get_field_offset(fl.id),
+                    0,
+                    "tried to write a vtable field multiple times"
+                );
                 vtfw.write_field_offset(fl.id, pos);
             }
         }
@@ -461,8 +497,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
             let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
             let saw = read_scalar_at::<UOffsetT>(&self.owned_buf, n);
             debug_assert_eq!(saw, 0xF0F0F0F0);
-            emplace_scalar::<SOffsetT>(&mut self.owned_buf[n..n + SIZE_SOFFSET],
-                                       vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT);
+            emplace_scalar::<SOffsetT>(
+                &mut self.owned_buf[n..n + SIZE_SOFFSET],
+                vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
+            );
         }
 
         self.field_locs.clear();
@@ -473,7 +511,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     #[inline]
     fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
         for &revloc in self.written_vtable_revpos.iter().rev() {
-            let o = VTable::init(&self.owned_buf[..], self.head + self.used_space() - revloc as usize);
+            let o = VTable::init(
+                &self.owned_buf[..],
+                self.head + self.used_space() - revloc as usize,
+            );
             if needle == o {
                 return Some(revloc);
             }
@@ -510,18 +551,24 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
         // finally, zero out the old end data.
         {
            let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
-            unsafe { write_bytes(ptr, 0, middle); }
+            unsafe {
+                write_bytes(ptr, 0, middle);
+            }
         }
     }
 
     // with or without a size prefix changes how we load the data, so finish*
     // functions are split along those lines.
-    fn finish_with_opts<T>(&mut self,
-                           root: WIPOffset<T>,
-                           file_identifier: Option<&str>,
-                           size_prefixed: bool) {
+    fn finish_with_opts<T>(
+        &mut self,
+        root: WIPOffset<T>,
+        file_identifier: Option<&str>,
+        size_prefixed: bool,
+    ) {
         self.assert_not_finished("buffer cannot be finished when it is already finished");
-        self.assert_not_nested("buffer cannot be finished when a table or vector is under construction");
+        self.assert_not_nested(
+            "buffer cannot be finished when a table or vector is under construction",
+        );
         self.written_vtable_revpos.clear();
 
         let to_align = {
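The public `finish` and `finish_size_prefixed` wrappers both funnel into `finish_with_opts`, and the reader has to match the writer. A sketch using a bare string root to keep the example free of generated code:

```rust
let mut b = flatbuffers::FlatBufferBuilder::new();
let root = b.create_string("hi");
b.finish(root, None); // or: b.finish_size_prefixed(root, None)
let buf = b.finished_data();

// must match: get_root for finish, get_size_prefixed_root for the prefixed form
let s = flatbuffers::get_root::<&str>(buf);
assert_eq!(s, "hi");
```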
@@ -589,8 +636,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
         if self.unused_ready_space() >= want {
             return want;
         }
-        assert!(want <= FLATBUFFERS_MAX_BUFFER_SIZE,
-                "cannot grow buffer beyond 2 gigabytes");
+        assert!(
+            want <= FLATBUFFERS_MAX_BUFFER_SIZE,
+            "cannot grow buffer beyond 2 gigabytes"
+        );
         while self.unused_ready_space() < want {
             self.grow_owned_buf();
@@ -605,7 +654,13 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     fn assert_nested(&self, fn_name: &'static str) {
         // we don't assert that self.field_locs.len() >0 because the vtable
         // could be empty (e.g. for empty tables, or for all-default values).
-        debug_assert!(self.nested, format!("incorrect FlatBufferBuilder usage: {} must be called while in a nested state", fn_name));
+        debug_assert!(
+            self.nested,
+            format!(
+                "incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
+                fn_name
+            )
+        );
     }
     #[inline]
     fn assert_not_nested(&self, msg: &'static str) {
@@ -619,7 +674,6 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     fn assert_not_finished(&self, msg: &'static str) {
         debug_assert!(!self.finished, msg);
     }
 }
-
 
 /// Compute the length of the vtable needed to represent the provided FieldLocs.
@@ -629,8 +683,8 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
 fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
     let max_voffset = field_locs.iter().map(|fl| fl.id).max();
     match max_voffset {
-        None => { field_index_to_field_offset(0) as usize }
-        Some(mv) => { mv as usize + SIZE_VOFFSET }
+        None => field_index_to_field_offset(0) as usize,
+        Some(mv) => mv as usize + SIZE_VOFFSET,
     }
 }
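To make the arithmetic concrete (a worked example, not part of the diff): `VOffsetT` is two bytes, and `field_index_to_field_offset(id)` works out to `2 * (id + 2)` because the first two `u16` slots of every vtable hold the vtable's byte length and the table's byte length. So a table with no fields still needs `field_index_to_field_offset(0) = 4` bytes, and one whose highest occupied slot is field index 2 (byte offset 8) needs `8 + SIZE_VOFFSET = 10` bytes.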
@@ -34,7 +34,7 @@ pub trait EndianScalar: Sized + PartialEq + Copy + Clone {
 /// Macro for implementing a no-op endian conversion. This is used for types
 /// that are one byte wide.
 macro_rules! impl_endian_scalar_noop {
-    ($ty:ident) => (
+    ($ty:ident) => {
         impl EndianScalar for $ty {
             #[inline]
             fn to_little_endian(self) -> Self {
@@ -45,7 +45,7 @@ macro_rules! impl_endian_scalar_noop {
                 self
             }
         }
-    )
+    };
 }
 
 /// Macro for implementing an endian conversion using the stdlib `to_le` and
@@ -53,7 +53,7 @@ macro_rules! impl_endian_scalar_noop {
 /// floats, because the `to_le` and `from_le` are not implemented for them in
 /// the stdlib.
 macro_rules! impl_endian_scalar_stdlib_le_conversion {
-    ($ty:ident) => (
+    ($ty:ident) => {
         impl EndianScalar for $ty {
             #[inline]
             fn to_little_endian(self) -> Self {
@@ -64,7 +64,7 @@ macro_rules! impl_endian_scalar_stdlib_le_conversion {
                 Self::from_le(self)
             }
         }
-    )
+    };
 }
 
 impl_endian_scalar_noop!(bool);
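For reference, `impl_endian_scalar_noop!(bool)` expands to roughly the following (hand-expanded here for illustration):

```rust
impl EndianScalar for bool {
    #[inline]
    fn to_little_endian(self) -> Self {
        self // one-byte types have no byte order to swap
    }
    #[inline]
    fn from_little_endian(self) -> Self {
        self
    }
}
```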
@@ -177,4 +177,3 @@ pub fn read_scalar<T: EndianScalar>(s: &[u8]) -> T {
     x.from_little_endian()
 }
-
@@ -39,12 +39,14 @@ mod vtable;
 mod vtable_writer;
 
 pub use builder::FlatBufferBuilder;
-pub use endian_scalar::{EndianScalar, emplace_scalar, read_scalar, read_scalar_at, byte_swap_f32, byte_swap_f64};
+pub use endian_scalar::{
+    byte_swap_f32, byte_swap_f64, emplace_scalar, read_scalar, read_scalar_at, EndianScalar,
+};
 pub use follow::{Follow, FollowStart};
 pub use primitives::*;
 pub use push::Push;
-pub use table::{Table, buffer_has_identifier, get_root, get_size_prefixed_root};
-pub use vector::{SafeSliceAccess, Vector, follow_cast_ref};
+pub use table::{buffer_has_identifier, get_root, get_size_prefixed_root, Table};
+pub use vector::{follow_cast_ref, SafeSliceAccess, Vector};
 pub use vtable::field_index_to_field_offset;
 
 // TODO(rw): Unify `create_vector` and `create_vector_direct` by using
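Everything above is reachable from the crate root; for example, a small sketch of the scalar helpers re-exported here:

```rust
use flatbuffers::{emplace_scalar, read_scalar_at};

let mut buf = [0u8; 4];
emplace_scalar::<u32>(&mut buf, 42);
assert_eq!(read_scalar_at::<u32>(&buf, 0), 42);
```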
@@ -274,7 +274,7 @@ impl<'a, T: Follow<'a> + 'a> Follow<'a> for SkipFileIdentifier<T> {
 /// EndianScalar, but implementing Follow that way causes a conflict with
 /// other impls.
 macro_rules! impl_follow_for_endian_scalar {
-    ($ty:ident) => (
+    ($ty:ident) => {
         impl<'a> Follow<'a> for $ty {
             type Inner = $ty;
             #[inline(always)]
@@ -282,7 +282,7 @@ macro_rules! impl_follow_for_endian_scalar {
                 read_scalar_at::<$ty>(buf, loc)
             }
         }
-    )
+    };
 }
 
 impl_follow_for_endian_scalar!(bool);
@@ -55,7 +55,7 @@ impl PushAlignment {
 
 /// Macro to implement Push for EndianScalar types.
 macro_rules! impl_push_for_endian_scalar {
-    ($ty:ident) => (
+    ($ty:ident) => {
         impl Push for $ty {
             type Output = $ty;
@@ -63,9 +63,8 @@ macro_rules! impl_push_for_endian_scalar {
         fn push(&self, dst: &mut [u8], _rest: &[u8]) {
             emplace_scalar::<$ty>(dst, *self);
         }
     }
-    )
+    };
 }
-
 
 impl_push_for_endian_scalar!(bool);
@@ -134,4 +134,3 @@ impl<'a, T: Follow<'a> + 'a> Follow<'a> for Vector<'a, T> {
         Vector::new(buf, loc)
     }
 }
-
@@ -34,10 +34,7 @@ impl<'a> PartialEq for VTable<'a> {
 impl<'a> VTable<'a> {
     pub fn init(buf: &'a [u8], loc: usize) -> Self {
-        VTable {
-            buf: buf,
-            loc: loc,
-        }
+        VTable { buf: buf, loc: loc }
     }
 
     pub fn num_fields(&self) -> usize {
         (self.num_bytes() / SIZE_VOFFSET) - 2
@@ -72,7 +69,6 @@ impl<'a> VTable<'a> {
     }
 }
 
-
 #[allow(dead_code)]
 pub fn field_index_to_field_offset(field_id: VOffsetT) -> VOffsetT {
     // Should correspond to what end_table() below builds up.
@@ -82,4 +82,3 @@ impl<'a> VTableWriter<'a> {
         }
     }
 }
-