Commit db972be2 authored by jean-airoldie, committed by Robert Winslow

[rust] Ran rustfmt against library code (#5389)

parent e304f8c1
@@ -21,13 +21,13 @@ use std::marker::PhantomData;
 use std::ptr::write_bytes;
 use std::slice::from_raw_parts;
 
-use endian_scalar::{read_scalar_at, emplace_scalar};
+use endian_scalar::{emplace_scalar, read_scalar_at};
 use primitives::*;
 use push::{Push, PushAlignment};
 use table::Table;
-use vtable::{VTable, field_index_to_field_offset};
-use vtable_writer::VTableWriter;
 use vector::{SafeSliceAccess, Vector};
+use vtable::{field_index_to_field_offset, VTable};
+use vtable_writer::VTableWriter;
 
 pub const N_SMALLVEC_STRING_VECTOR_CAPACITY: usize = 16;
@@ -69,8 +69,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     pub fn new_with_capacity(size: usize) -> Self {
         // we need to check the size here because we create the backing buffer
         // directly, bypassing the typical way of using grow_owned_buf:
-        assert!(size <= FLATBUFFERS_MAX_BUFFER_SIZE,
-                "cannot initialize buffer bigger than 2 gigabytes");
+        assert!(
+            size <= FLATBUFFERS_MAX_BUFFER_SIZE,
+            "cannot initialize buffer bigger than 2 gigabytes"
+        );
 
         FlatBufferBuilder {
             owned_buf: vec![0u8; size],
@@ -104,7 +106,9 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
         {
             let to_clear = self.owned_buf.len() - self.head;
             let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
-            unsafe { write_bytes(ptr, 0, to_clear); }
+            unsafe {
+                write_bytes(ptr, 0, to_clear);
+            }
         }
 
         self.head = self.owned_buf.len();
@@ -173,7 +177,9 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// Users probably want to use `push_slot` to add values after calling this.
     #[inline]
     pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
-        self.assert_not_nested("start_table can not be called when a table or vector is under construction");
+        self.assert_not_nested(
+            "start_table can not be called when a table or vector is under construction",
+        );
         self.nested = true;
 
         WIPOffset::new(self.used_space() as UOffsetT)
@@ -183,7 +189,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     ///
     /// Asserts that the builder is in a nested state.
    #[inline]
-    pub fn end_table(&mut self, off: WIPOffset<TableUnfinishedWIPOffset>) -> WIPOffset<TableFinishedWIPOffset> {
+    pub fn end_table(
+        &mut self,
+        off: WIPOffset<TableUnfinishedWIPOffset>,
+    ) -> WIPOffset<TableFinishedWIPOffset> {
         self.assert_nested("end_table");
         let o = self.write_vtable(off);
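
As the doc comments above note, generated code normally drives `start_table`/`end_table`, calling `push_slot` in between. A minimal hand-rolled sketch of that flow, with a hypothetical one-field layout (`push_slot`, `finish_minimal`, and `finished_data` are the crate's public API, not part of this diff):

    // Hypothetical hand-built table: field index 0 holds a u32.
    use flatbuffers::{field_index_to_field_offset, FlatBufferBuilder};

    let mut fbb = FlatBufferBuilder::new();
    let start = fbb.start_table();
    // write 42 into slot 0; the slot is elided when the value equals the default (0)
    fbb.push_slot::<u32>(field_index_to_field_offset(0), 42, 0);
    let root = fbb.end_table(start);
    fbb.finish_minimal(root);
    let bytes: &[u8] = fbb.finished_data();
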
@@ -203,7 +212,9 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// function will want to use `push` to add values.
     #[inline]
     pub fn start_vector<T: Push>(&mut self, num_items: usize) {
-        self.assert_not_nested("start_vector can not be called when a table or vector is under construction");
+        self.assert_not_nested(
+            "start_vector can not be called when a table or vector is under construction",
+        );
         self.nested = true;
         self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
     }
@@ -227,14 +238,18 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// The wire format represents this as a zero-terminated byte vector.
     #[inline]
     pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
-        self.assert_not_nested("create_string can not be called when a table or vector is under construction");
+        self.assert_not_nested(
+            "create_string can not be called when a table or vector is under construction",
+        );
         WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
     }
 
     /// Create a zero-terminated byte vector.
     #[inline]
     pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
-        self.assert_not_nested("create_byte_string can not be called when a table or vector is under construction");
+        self.assert_not_nested(
+            "create_byte_string can not be called when a table or vector is under construction",
+        );
         self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
         self.push(0u8);
         self.push_bytes_unprefixed(data);
@@ -249,8 +264,13 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// always safe, on any platform: bool, u8, i8, and any
     /// FlatBuffers-generated struct.
     #[inline]
-    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(&'a mut self, items: &'b [T]) -> WIPOffset<Vector<'fbb, T>> {
-        self.assert_not_nested("create_vector_direct can not be called when a table or vector is under construction");
+    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
+        &'a mut self,
+        items: &'b [T],
+    ) -> WIPOffset<Vector<'fbb, T>> {
+        self.assert_not_nested(
+            "create_vector_direct can not be called when a table or vector is under construction",
+        );
         let elem_size = T::size();
         self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
@@ -269,12 +289,18 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// Speed-sensitive users may wish to reduce memory usage by creating the
     /// vector manually: use `start_vector`, `push`, and `end_vector`.
     #[inline]
-    pub fn create_vector_of_strings<'a, 'b>(&'a mut self, xs: &'b [&'b str]) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
+    pub fn create_vector_of_strings<'a, 'b>(
+        &'a mut self,
+        xs: &'b [&'b str],
+    ) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
         self.assert_not_nested("create_vector_of_strings can not be called when a table or vector is under construction");
         // internally, smallvec can be a stack-allocated or heap-allocated vector:
         // if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
-        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> = smallvec::SmallVec::with_capacity(xs.len());
-        unsafe { offsets.set_len(xs.len()); }
+        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
+            smallvec::SmallVec::with_capacity(xs.len());
+        unsafe {
+            offsets.set_len(xs.len());
+        }
 
         // note that this happens in reverse, because the buffer is built back-to-front:
         for (i, &s) in xs.iter().enumerate().rev() {
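
The manual alternative mentioned in the doc comment above (`start_vector`, `push`, `end_vector`) follows the same back-to-front discipline as the reverse loops in these hunks. A minimal sketch, assuming `fbb` is a `FlatBufferBuilder` in scope and equivalent to `fbb.create_vector(&[1u32, 2, 3])`:

    let items = [1u32, 2, 3];
    fbb.start_vector::<u32>(items.len());
    // push in reverse, because the buffer is built back-to-front
    for &x in items.iter().rev() {
        fbb.push(x);
    }
    let v = fbb.end_vector::<u32>(items.len());
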
@@ -289,7 +315,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     /// Speed-sensitive users may wish to reduce memory usage by creating the
     /// vector manually: use `start_vector`, `push`, and `end_vector`.
     #[inline]
-    pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(&'a mut self, items: &'b [T]) -> WIPOffset<Vector<'fbb, T::Output>> {
+    pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
+        &'a mut self,
+        items: &'b [T],
+    ) -> WIPOffset<Vector<'fbb, T::Output>> {
         let elem_size = T::size();
         self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
         for i in (0..items.len()).rev() {
@@ -315,10 +344,12 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     ///
     /// This is somewhat low-level and is mostly used by the generated code.
     #[inline]
-    pub fn required(&self,
-                    tab_revloc: WIPOffset<TableFinishedWIPOffset>,
-                    slot_byte_loc: VOffsetT,
-                    assert_msg_name: &'static str) {
+    pub fn required(
+        &self,
+        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
+        slot_byte_loc: VOffsetT,
+        assert_msg_name: &'static str,
+    ) {
         let idx = self.used_space() - tab_revloc.value() as usize;
         let tab = Table::new(&self.owned_buf[self.head..], idx);
         let o = tab.vtable().get(slot_byte_loc) as usize;
@@ -366,7 +397,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     }
 
     /// Write the VTable, if it is new.
-    fn write_vtable(&mut self, table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>) -> WIPOffset<VTableWIPOffset> {
+    fn write_vtable(
+        &mut self,
+        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
+    ) -> WIPOffset<VTableWIPOffset> {
         self.assert_nested("write_vtable");
 
         // Write the vtable offset, which is the start of any Table.
@@ -433,9 +467,11 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
             // serialize every FieldLoc to the vtable:
             for &fl in self.field_locs.iter() {
                 let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
-                debug_assert_eq!(vtfw.get_field_offset(fl.id),
-                                 0,
-                                 "tried to write a vtable field multiple times");
+                debug_assert_eq!(
+                    vtfw.get_field_offset(fl.id),
+                    0,
+                    "tried to write a vtable field multiple times"
+                );
                 vtfw.write_field_offset(fl.id, pos);
             }
         }
@@ -461,8 +497,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
             let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
             let saw = read_scalar_at::<UOffsetT>(&self.owned_buf, n);
             debug_assert_eq!(saw, 0xF0F0F0F0);
-            emplace_scalar::<SOffsetT>(&mut self.owned_buf[n..n + SIZE_SOFFSET],
-                                       vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT);
+            emplace_scalar::<SOffsetT>(
+                &mut self.owned_buf[n..n + SIZE_SOFFSET],
+                vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
+            );
         }
 
         self.field_locs.clear();
@@ -473,7 +511,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     #[inline]
     fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
         for &revloc in self.written_vtable_revpos.iter().rev() {
-            let o = VTable::init(&self.owned_buf[..], self.head + self.used_space() - revloc as usize);
+            let o = VTable::init(
+                &self.owned_buf[..],
+                self.head + self.used_space() - revloc as usize,
+            );
             if needle == o {
                 return Some(revloc);
             }
@@ -510,18 +551,24 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
         // finally, zero out the old end data.
         {
             let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
-            unsafe { write_bytes(ptr, 0, middle); }
+            unsafe {
+                write_bytes(ptr, 0, middle);
+            }
         }
     }
 
     // with or without a size prefix changes how we load the data, so finish*
     // functions are split along those lines.
-    fn finish_with_opts<T>(&mut self,
-                           root: WIPOffset<T>,
-                           file_identifier: Option<&str>,
-                           size_prefixed: bool) {
+    fn finish_with_opts<T>(
+        &mut self,
+        root: WIPOffset<T>,
+        file_identifier: Option<&str>,
+        size_prefixed: bool,
+    ) {
         self.assert_not_finished("buffer cannot be finished when it is already finished");
-        self.assert_not_nested("buffer cannot be finished when a table or vector is under construction");
+        self.assert_not_nested(
+            "buffer cannot be finished when a table or vector is under construction",
+        );
         self.written_vtable_revpos.clear();
 
         let to_align = {
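
`finish_with_opts` is the shared core; the public wrappers differ only in the `size_prefixed` flag. A sketch of the two call paths, assuming `root` came from `end_table` (the `finish`, `finish_size_prefixed`, and `finished_data` names are the crate's public API, not shown in this diff):

    fbb.finish(root, None); // no size prefix: finish_with_opts(root, None, false)
    // or, on a fresh builder:
    // fbb.finish_size_prefixed(root, None); // prefixed: finish_with_opts(root, None, true)
    let bytes: &[u8] = fbb.finished_data();
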
@@ -589,8 +636,10 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
         if self.unused_ready_space() >= want {
             return want;
         }
-        assert!(want <= FLATBUFFERS_MAX_BUFFER_SIZE,
-                "cannot grow buffer beyond 2 gigabytes");
+        assert!(
+            want <= FLATBUFFERS_MAX_BUFFER_SIZE,
+            "cannot grow buffer beyond 2 gigabytes"
+        );
 
         while self.unused_ready_space() < want {
             self.grow_owned_buf();
@@ -605,7 +654,13 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     fn assert_nested(&self, fn_name: &'static str) {
         // we don't assert that self.field_locs.len() >0 because the vtable
         // could be empty (e.g. for empty tables, or for all-default values).
-        debug_assert!(self.nested, format!("incorrect FlatBufferBuilder usage: {} must be called while in a nested state", fn_name));
+        debug_assert!(
+            self.nested,
+            format!(
+                "incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
+                fn_name
+            )
+        );
     }
 
     #[inline]
     fn assert_not_nested(&self, msg: &'static str) {
@@ -619,7 +674,6 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
     fn assert_not_finished(&self, msg: &'static str) {
         debug_assert!(!self.finished, msg);
     }
-
 }
 
 /// Compute the length of the vtable needed to represent the provided FieldLocs.
@@ -629,8 +683,8 @@ impl<'fbb> FlatBufferBuilder<'fbb> {
 fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
     let max_voffset = field_locs.iter().map(|fl| fl.id).max();
     match max_voffset {
-        None => { field_index_to_field_offset(0) as usize }
-        Some(mv) => { mv as usize + SIZE_VOFFSET }
+        None => field_index_to_field_offset(0) as usize,
+        Some(mv) => mv as usize + SIZE_VOFFSET,
     }
 }
...
@@ -34,7 +34,7 @@ pub trait EndianScalar: Sized + PartialEq + Copy + Clone {
 /// Macro for implementing a no-op endian conversion. This is used for types
 /// that are one byte wide.
 macro_rules! impl_endian_scalar_noop {
-    ($ty:ident) => (
+    ($ty:ident) => {
         impl EndianScalar for $ty {
             #[inline]
             fn to_little_endian(self) -> Self {
@@ -45,7 +45,7 @@ macro_rules! impl_endian_scalar_noop {
                 self
             }
         }
-    )
+    };
 }
 
 /// Macro for implementing an endian conversion using the stdlib `to_le` and
@@ -53,7 +53,7 @@ macro_rules! impl_endian_scalar_noop {
 /// floats, because the `to_le` and `from_le` are not implemented for them in
 /// the stdlib.
 macro_rules! impl_endian_scalar_stdlib_le_conversion {
-    ($ty:ident) => (
+    ($ty:ident) => {
         impl EndianScalar for $ty {
             #[inline]
             fn to_little_endian(self) -> Self {
@@ -64,7 +64,7 @@ macro_rules! impl_endian_scalar_stdlib_le_conversion {
                 Self::from_le(self)
             }
         }
-    )
+    };
 }
 
 impl_endian_scalar_noop!(bool);
@@ -177,4 +177,3 @@ pub fn read_scalar<T: EndianScalar>(s: &[u8]) -> T {
     x.from_little_endian()
 }
-
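
The helpers built on this trait do the byte-level work elsewhere in the diff (`emplace_scalar` writes a scalar as little-endian bytes, `read_scalar_at` reads one back at an offset). A round-trip sketch using those two re-exports:

    use flatbuffers::{emplace_scalar, read_scalar_at};

    let mut buf = [0u8; 4];
    emplace_scalar::<u32>(&mut buf, 0xDEADBEEF); // writes little-endian bytes
    let x = read_scalar_at::<u32>(&buf, 0);      // reads them back
    assert_eq!(x, 0xDEADBEEF);
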
@@ -39,12 +39,14 @@ mod vtable;
 mod vtable_writer;
 
 pub use builder::FlatBufferBuilder;
-pub use endian_scalar::{EndianScalar, emplace_scalar, read_scalar, read_scalar_at, byte_swap_f32, byte_swap_f64};
+pub use endian_scalar::{
+    byte_swap_f32, byte_swap_f64, emplace_scalar, read_scalar, read_scalar_at, EndianScalar,
+};
 pub use follow::{Follow, FollowStart};
 pub use primitives::*;
 pub use push::Push;
-pub use table::{Table, buffer_has_identifier, get_root, get_size_prefixed_root};
-pub use vector::{SafeSliceAccess, Vector, follow_cast_ref};
+pub use table::{buffer_has_identifier, get_root, get_size_prefixed_root, Table};
+pub use vector::{follow_cast_ref, SafeSliceAccess, Vector};
 pub use vtable::field_index_to_field_offset;
 
 // TODO(rw): Unify `create_vector` and `create_vector_direct` by using
...
@@ -274,7 +274,7 @@ impl<'a, T: Follow<'a> + 'a> Follow<'a> for SkipFileIdentifier<T> {
 /// EndianScalar, but implementing Follow that way causes a conflict with
 /// other impls.
 macro_rules! impl_follow_for_endian_scalar {
-    ($ty:ident) => (
+    ($ty:ident) => {
         impl<'a> Follow<'a> for $ty {
             type Inner = $ty;
             #[inline(always)]
@@ -282,7 +282,7 @@ macro_rules! impl_follow_for_endian_scalar {
                 read_scalar_at::<$ty>(buf, loc)
             }
         }
-    )
+    };
 }
 
 impl_follow_for_endian_scalar!(bool);
...
@@ -55,7 +55,7 @@ impl PushAlignment {
 /// Macro to implement Push for EndianScalar types.
 macro_rules! impl_push_for_endian_scalar {
-    ($ty:ident) => (
+    ($ty:ident) => {
         impl Push for $ty {
             type Output = $ty;
@@ -63,9 +63,8 @@ macro_rules! impl_push_for_endian_scalar {
             fn push(&self, dst: &mut [u8], _rest: &[u8]) {
                 emplace_scalar::<$ty>(dst, *self);
             }
-
         }
-    )
+    };
 }
 
 impl_push_for_endian_scalar!(bool);
...
@@ -134,4 +134,3 @@ impl<'a, T: Follow<'a> + 'a> Follow<'a> for Vector<'a, T> {
         Vector::new(buf, loc)
     }
 }
-
@@ -34,10 +34,7 @@ impl<'a> PartialEq for VTable<'a> {
 impl<'a> VTable<'a> {
     pub fn init(buf: &'a [u8], loc: usize) -> Self {
-        VTable {
-            buf: buf,
-            loc: loc,
-        }
+        VTable { buf: buf, loc: loc }
     }
 
     pub fn num_fields(&self) -> usize {
         (self.num_bytes() / SIZE_VOFFSET) - 2
@@ -72,7 +69,6 @@ impl<'a> VTable<'a> {
     }
 }
 
-
 #[allow(dead_code)]
 pub fn field_index_to_field_offset(field_id: VOffsetT) -> VOffsetT {
     // Should correspond to what end_table() below builds up.
...
@@ -82,4 +82,3 @@ impl<'a> VTableWriter<'a> {
         }
     }
 }
-