use std::{
cmp,
mem::{size_of, transmute_copy},
};
use errors::{ParquetError, Result};
use util::{
bit_util::{self, BitReader, BitWriter},
memory::ByteBufferPtr,
};
/// Maximum number of 8-value groups a single bit-packed run may hold. The
/// group count is stored (shifted left by one) in a single indicator byte,
/// which caps it at 2^6 = 64 groups here.
const MAX_GROUPS_PER_BIT_PACKED_RUN: usize = 1 << 6;
/// Maximum number of values in one bit-packed run (64 groups * 8 values).
const MAX_VALUES_PER_BIT_PACKED_RUN: usize = MAX_GROUPS_PER_BIT_PACKED_RUN * 8;
/// Default writer buffer size in bytes.
/// NOTE(review): not referenced anywhere in this file — presumably used by
/// other modules; confirm it is not vestigial.
const MAX_WRITER_BUF_SIZE: usize = 1 << 10;
/// Encoder for the Parquet hybrid RLE / bit-packed encoding.
///
/// Values are accumulated eight at a time; a group in which one value has
/// repeated at least 8 times is emitted as an RLE run, everything else is
/// appended to a bit-packed run.
pub struct RleEncoder {
    // Number of bits needed to encode one value. Must be in [0, 64].
    bit_width: u8,
    // Underlying bit-level writer that owns the output buffer.
    bit_writer: BitWriter,
    // Whether the output buffer can take no more data.
    // NOTE(review): read and cleared in this file but never set to `true`
    // here — confirm whether this flag is managed elsewhere or vestigial.
    buffer_full: bool,
    // Byte size of the largest possible single run for `bit_width`
    // (see `min_buffer_size`); the buffer must be at least this large.
    max_run_byte_size: usize,
    // The most recent (up to 8) values seen, pending encoding.
    buffered_values: [u64; 8],
    // Number of valid entries in `buffered_values`.
    num_buffered_values: usize,
    // The value currently being tracked for repetition.
    current_value: u64,
    // How many times `current_value` has occurred consecutively.
    repeat_count: usize,
    // Number of values written into the currently open bit-packed run.
    bit_packed_count: usize,
    // Byte offset of the open bit-packed run's indicator byte, to be
    // backpatched when the run closes; -1 when no run is open.
    indicator_byte_pos: i64,
}
impl RleEncoder {
    /// Creates an encoder that allocates its own output buffer of
    /// `buffer_len` bytes.
    pub fn new(bit_width: u8, buffer_len: usize) -> Self {
        let buffer = vec![0; buffer_len];
        RleEncoder::new_from_buf(bit_width, buffer, 0)
    }

    /// Creates an encoder writing into `buffer`, starting at byte offset
    /// `start`.
    ///
    /// # Panics
    /// Panics if `bit_width > 64` or if `buffer` is smaller than
    /// `min_buffer_size(bit_width)` (the space needed for one worst-case run).
    pub fn new_from_buf(bit_width: u8, buffer: Vec<u8>, start: usize) -> Self {
        assert!(bit_width <= 64, "bit_width ({}) out of range.", bit_width);
        let max_run_byte_size = RleEncoder::min_buffer_size(bit_width);
        assert!(
            buffer.len() >= max_run_byte_size,
            "buffer length {} must be greater than {}",
            buffer.len(),
            max_run_byte_size
        );
        let bit_writer = BitWriter::new_from_buf(buffer, start);
        RleEncoder {
            bit_width,
            bit_writer,
            buffer_full: false,
            max_run_byte_size,
            buffered_values: [0; 8],
            num_buffered_values: 0,
            current_value: 0,
            repeat_count: 0,
            bit_packed_count: 0,
            // -1: no bit-packed run currently open.
            indicator_byte_pos: -1,
        }
    }

    /// Returns the minimal buffer size (in bytes) that can hold a single
    /// worst-case run for `bit_width`: the larger of one full bit-packed run
    /// and one maximal RLE run.
    pub fn min_buffer_size(bit_width: u8) -> usize {
        // 1 indicator byte + the fully bit-packed payload of a maximal run.
        let max_bit_packed_run_size = 1
            + bit_util::ceil(
                (MAX_VALUES_PER_BIT_PACKED_RUN * bit_width as usize) as i64,
                8,
            );
        // Worst-case VLQ repeat count + the repeated value, byte-aligned.
        let max_rle_run_size =
            bit_util::MAX_VLQ_BYTE_LEN + bit_util::ceil(bit_width as i64, 8) as usize;
        ::std::cmp::max(max_bit_packed_run_size as usize, max_rle_run_size)
    }

    /// Returns a conservative upper bound on the encoded size of
    /// `num_values` values: the larger of the all-bit-packed and the all-RLE
    /// worst case.
    pub fn max_buffer_size(bit_width: u8, num_values: usize) -> usize {
        // All bit-packed: one indicator byte per group of 8 values plus
        // `bit_width` payload bytes per group (8 * bit_width bits).
        let bytes_per_run = bit_width;
        let num_runs = bit_util::ceil(num_values as i64, 8) as usize;
        let bit_packed_max_size = num_runs + num_runs * bytes_per_run as usize;
        // All RLE: one minimal run (1 header byte + byte-aligned value) per
        // group of 8 values.
        let min_rle_run_size = 1 + bit_util::ceil(bit_width as i64, 8) as usize;
        let rle_max_size = bit_util::ceil(num_values as i64, 8) as usize * min_rle_run_size;
        ::std::cmp::max(bit_packed_max_size, rle_max_size) as usize
    }

    /// Buffers one value for encoding. Only the low `bit_width` bits of
    /// `value` are significant.
    ///
    /// Returns `Ok(true)` if the value was accepted, `Ok(false)` if the
    /// buffer is full, and `Err` if an underlying write failed.
    #[inline]
    pub fn put(&mut self, value: u64) -> Result<bool> {
        // NOTE(review): `buffer_full` is never set to `true` in this file,
        // so this early-out cannot trigger from these code paths — confirm.
        if self.buffer_full {
            return Ok(false);
        }
        if self.current_value == value {
            self.repeat_count += 1;
            if self.repeat_count > 8 {
                // Already inside a long repeated run: the value is absorbed
                // by the repeat count alone, no need to buffer it.
                return Ok(true);
            }
        } else {
            if self.repeat_count >= 8 {
                // The previous value repeated often enough for an RLE run;
                // emit it before tracking the new value.
                assert_eq!(self.bit_packed_count, 0);
                self.flush_rle_run()?;
            }
            self.repeat_count = 1;
            self.current_value = value;
        }
        self.buffered_values[self.num_buffered_values] = value;
        self.num_buffered_values += 1;
        if self.num_buffered_values == 8 {
            // A full group of 8 is ready; decide between RLE and bit-packing.
            assert_eq!(self.bit_packed_count % 8, 0);
            self.flush_buffered_values()?;
        }
        Ok(true)
    }

    /// Returns the bytes written so far (without flushing pending state).
    #[inline]
    pub fn buffer(&self) -> &[u8] { self.bit_writer.buffer() }

    /// Returns the number of bytes written so far.
    #[inline]
    pub fn len(&self) -> usize { self.bit_writer.bytes_written() }

    /// Flushes any pending run and consumes the encoder, returning the
    /// encoded bytes.
    #[inline]
    pub fn consume(mut self) -> Result<Vec<u8>> {
        self.flush()?;
        Ok(self.bit_writer.consume())
    }

    /// Flushes any pending run and borrows the encoded bytes; the encoder
    /// remains usable afterwards.
    #[inline]
    pub fn flush_buffer(&mut self) -> Result<&[u8]> {
        self.flush()?;
        Ok(self.bit_writer.flush_buffer())
    }

    /// Resets the encoder to its initial state, reusing the buffer.
    #[inline]
    pub fn clear(&mut self) {
        self.bit_writer.clear();
        self.buffer_full = false;
        self.num_buffered_values = 0;
        self.current_value = 0;
        self.repeat_count = 0;
        self.bit_packed_count = 0;
        self.indicator_byte_pos = -1;
    }

    /// Flushes all buffered state as a final run.
    ///
    /// If everything pending is a single repeated value it is written as an
    /// RLE run; otherwise the last partial group is zero-padded to 8 values
    /// and written bit-packed.
    #[inline]
    pub fn flush(&mut self) -> Result<()> {
        if self.bit_packed_count > 0 || self.repeat_count > 0 || self.num_buffered_values > 0
        {
            let all_repeat = self.bit_packed_count == 0
                && (self.repeat_count == self.num_buffered_values
                    || self.num_buffered_values == 0);
            if self.repeat_count > 0 && all_repeat {
                self.flush_rle_run()?;
            } else {
                if self.num_buffered_values > 0 {
                    // Zero-pad the last, partial group so it forms a
                    // complete group of 8 for bit-packing.
                    while self.num_buffered_values < 8 {
                        self.buffered_values[self.num_buffered_values] = 0;
                        self.num_buffered_values += 1;
                    }
                }
                self.bit_packed_count += self.num_buffered_values;
                self.flush_bit_packed_run(true)?;
                self.repeat_count = 0;
            }
        }
        Ok(())
    }

    /// Writes the current repeated value as one RLE run: a VLQ header
    /// `repeat_count << 1 | 0`, then the value padded to whole bytes.
    #[inline]
    fn flush_rle_run(&mut self) -> Result<()> {
        assert!(self.repeat_count > 0);
        // Low bit 0 marks an RLE run (1 would mark bit-packed).
        let indicator_value = self.repeat_count << 1 | 0;
        let mut result = self.bit_writer.put_vlq_int(indicator_value as u64);
        result &= self.bit_writer.put_aligned(
            self.current_value,
            bit_util::ceil(self.bit_width as i64, 8) as usize,
        );
        if !result {
            return Err(general_err!("Failed to write RLE run"));
        }
        self.num_buffered_values = 0;
        self.repeat_count = 0;
        Ok(())
    }

    /// Appends the buffered values to the open bit-packed run (opening one
    /// if needed by reserving a placeholder indicator byte). When
    /// `update_indicator_byte` is true, the placeholder is backpatched with
    /// `(num_groups << 1) | 1` and the run is closed.
    #[inline]
    fn flush_bit_packed_run(&mut self, update_indicator_byte: bool) -> Result<()> {
        if self.indicator_byte_pos < 0 {
            // Open a new run: reserve one byte for the indicator, filled in
            // once the final group count is known.
            self.indicator_byte_pos = self.bit_writer.skip(1)? as i64;
        }
        for i in 0..self.num_buffered_values {
            let _ = self
                .bit_writer
                .put_value(self.buffered_values[i], self.bit_width as usize);
        }
        self.num_buffered_values = 0;
        if update_indicator_byte {
            // The count is stored in groups of 8; low bit 1 marks bit-packed.
            let num_groups = self.bit_packed_count / 8;
            let indicator_byte = ((num_groups << 1) | 1) as u8;
            if !self.bit_writer.put_aligned_offset(
                indicator_byte,
                1,
                self.indicator_byte_pos as usize,
            ) {
                return Err(general_err!("Not enough space to write indicator byte"));
            }
            self.indicator_byte_pos = -1;
            self.bit_packed_count = 0;
        }
        Ok(())
    }

    /// Decides how to emit the 8 freshly buffered values: if the whole
    /// group is one repeated value, keep counting towards an RLE run
    /// (closing any open bit-packed run first); otherwise append the group
    /// to the bit-packed run, closing it when it reaches its maximum size.
    #[inline]
    fn flush_buffered_values(&mut self) -> Result<()> {
        if self.repeat_count >= 8 {
            // Prefer RLE: drop the buffered copies (the repeat count carries
            // them) and close any open bit-packed run.
            self.num_buffered_values = 0;
            if self.bit_packed_count > 0 {
                assert_eq!(self.bit_packed_count % 8, 0);
                self.flush_bit_packed_run(true)?
            }
            return Ok(());
        }
        self.bit_packed_count += self.num_buffered_values;
        let num_groups = self.bit_packed_count / 8;
        if num_groups + 1 >= MAX_GROUPS_PER_BIT_PACKED_RUN {
            // The run is about to exceed the largest group count the
            // indicator byte can hold; close it now.
            assert!(self.indicator_byte_pos >= 0);
            self.flush_bit_packed_run(true)?;
        } else {
            self.flush_bit_packed_run(false)?;
        }
        self.repeat_count = 0;
        Ok(())
    }
}
/// Decoder for the Parquet hybrid RLE / bit-packed encoding.
pub struct RleDecoder {
    // Number of bits used to encode each value.
    bit_width: u8,
    // Reader over the raw encoded bytes; `None` until `set_data` is called.
    bit_reader: Option<BitReader>,
    // Scratch buffer of dictionary indices for `get_batch_with_dict`;
    // allocated lazily together with `bit_reader`.
    index_buf: Option<[i32; 1024]>,
    // Values remaining in the current RLE run.
    rle_left: u32,
    // Values remaining in the current bit-packed run.
    bit_packed_left: u32,
    // The repeated value of the current RLE run, if one is active.
    current_value: Option<u64>,
}
impl RleDecoder {
    /// Creates a decoder for values encoded with `bit_width` bits each.
    /// `set_data` must be called before decoding.
    pub fn new(bit_width: u8) -> Self {
        RleDecoder {
            bit_width,
            rle_left: 0,
            bit_packed_left: 0,
            bit_reader: None,
            index_buf: None,
            current_value: None,
        }
    }

    /// Sets (or replaces) the encoded data and primes the first run header.
    pub fn set_data(&mut self, data: ByteBufferPtr) {
        if let Some(ref mut bit_reader) = self.bit_reader {
            bit_reader.reset(data);
        } else {
            self.bit_reader = Some(BitReader::new(data));
            self.index_buf = Some([0; 1024]);
        }
        // Clear any leftover run state from previously decoded data so a
        // reused decoder cannot mix a stale partial run with the new buffer.
        self.rle_left = 0;
        self.bit_packed_left = 0;
        self.current_value = None;
        // Empty input simply leaves the decoder with no values to return.
        let _ = self.reload();
    }

    /// Decodes a single value, or returns `Ok(None)` when the data is
    /// exhausted.
    ///
    /// `T` must be at most 8 bytes wide; the decoded `u64` is reinterpreted
    /// as `T`. NOTE(review): this reinterpretation assumes the low bytes of
    /// the `u64` come first (little-endian layout) — confirm for
    /// big-endian targets.
    #[inline]
    pub fn get<T: Default>(&mut self) -> Result<Option<T>> {
        assert!(size_of::<T>() <= 8);
        while self.rle_left == 0 && self.bit_packed_left == 0 {
            if !self.reload() {
                return Ok(None);
            }
        }
        let value = if self.rle_left > 0 {
            // SAFETY: size_of::<T>() <= 8 is asserted above, so reading
            // size_of::<T>() bytes from the stored u64 stays in bounds.
            let rle_value = unsafe {
                transmute_copy::<u64, T>(
                    self
                        .current_value
                        .as_ref()
                        .expect("current_value should be Some"),
                )
            };
            self.rle_left -= 1;
            rle_value
        } else {
            let bit_reader = self.bit_reader.as_mut().expect("bit_reader should be Some");
            let bit_packed_value = bit_reader
                .get_value(self.bit_width as usize)
                .ok_or_else(|| eof_err!("Not enough data for 'bit_packed_value'"))?;
            self.bit_packed_left -= 1;
            bit_packed_value
        };
        Ok(Some(value))
    }

    /// Decodes up to `buffer.len()` values into `buffer`, returning how many
    /// were written (fewer only when the data runs out).
    #[inline]
    pub fn get_batch<T: Default>(&mut self, buffer: &mut [T]) -> Result<usize> {
        assert!(self.bit_reader.is_some());
        assert!(size_of::<T>() <= 8);
        let mut values_read = 0;
        while values_read < buffer.len() {
            if self.rle_left > 0 {
                assert!(self.current_value.is_some());
                let num_values = cmp::min(buffer.len() - values_read, self.rle_left as usize);
                for i in 0..num_values {
                    // SAFETY: size_of::<T>() <= 8 is asserted above.
                    let repeated_value = unsafe {
                        transmute_copy::<u64, T>(self.current_value.as_ref().unwrap())
                    };
                    buffer[values_read + i] = repeated_value;
                }
                self.rle_left -= num_values as u32;
                values_read += num_values;
            } else if self.bit_packed_left > 0 {
                let num_values =
                    cmp::min(buffer.len() - values_read, self.bit_packed_left as usize);
                if let Some(ref mut bit_reader) = self.bit_reader {
                    // The reader may deliver fewer values than requested if
                    // the underlying data is truncated.
                    let num_values = bit_reader.get_batch::<T>(
                        &mut buffer[values_read..values_read + num_values],
                        self.bit_width as usize,
                    );
                    self.bit_packed_left -= num_values as u32;
                    values_read += num_values;
                }
            } else if !self.reload() {
                break;
            }
        }
        Ok(values_read)
    }

    /// Decodes up to `max_values` dictionary indices and materializes them
    /// through `dict` into `buffer`. Returns the number of values written.
    ///
    /// # Panics
    /// Panics if `buffer.len() < max_values` or if a decoded index is out of
    /// range for `dict`.
    #[inline]
    pub fn get_batch_with_dict<T>(
        &mut self,
        dict: &[T],
        buffer: &mut [T],
        max_values: usize,
    ) -> Result<usize>
    where
        T: Default + Clone,
    {
        assert!(buffer.len() >= max_values);
        let mut values_read = 0;
        while values_read < max_values {
            if self.rle_left > 0 {
                assert!(self.current_value.is_some());
                let num_values = cmp::min(max_values - values_read, self.rle_left as usize);
                let dict_idx = self.current_value.unwrap() as usize;
                for i in 0..num_values {
                    buffer[values_read + i] = dict[dict_idx].clone();
                }
                self.rle_left -= num_values as u32;
                values_read += num_values;
            } else if self.bit_packed_left > 0 {
                // Copy the scratch index buffer out ([i32; 1024] is Copy) so
                // `self.bit_reader` can be borrowed mutably below.
                let mut index_buf = self.index_buf.expect("set_data should have been called");
                if let Some(ref mut bit_reader) = self.bit_reader {
                    loop {
                        // Re-clamp every chunk: the caller's budget and the
                        // current run both shrink as values are consumed.
                        // (Clamping only once before the loop could read past
                        // `max_values` and underflow `bit_packed_left` when a
                        // run spans multiple 1024-index chunks.)
                        let limit = cmp::min(
                            cmp::min(max_values - values_read, self.bit_packed_left as usize),
                            index_buf.len(),
                        );
                        if limit == 0 {
                            break;
                        }
                        let num_values = bit_reader
                            .get_batch::<i32>(&mut index_buf[..limit], self.bit_width as usize);
                        for i in 0..num_values {
                            buffer[values_read + i] = dict[index_buf[i] as usize].clone();
                        }
                        self.bit_packed_left -= num_values as u32;
                        values_read += num_values;
                        if num_values < limit {
                            // Underlying data ran out before the run ended.
                            break;
                        }
                    }
                }
            } else if !self.reload() {
                break;
            }
        }
        Ok(values_read)
    }

    /// Reads the next run header, setting either `rle_left` (plus
    /// `current_value`) or `bit_packed_left`. Returns `false` when the data
    /// is exhausted.
    #[inline]
    fn reload(&mut self) -> bool {
        assert!(self.bit_reader.is_some());
        if let Some(ref mut bit_reader) = self.bit_reader {
            if let Some(indicator_value) = bit_reader.get_vlq_int() {
                if indicator_value & 1 == 1 {
                    // Bit-packed run: header stores the number of 8-value
                    // groups.
                    self.bit_packed_left = ((indicator_value >> 1) * 8) as u32;
                } else {
                    // RLE run: header stores the repeat count, followed by
                    // the repeated value padded to whole bytes.
                    self.rle_left = (indicator_value >> 1) as u32;
                    let value_width = bit_util::ceil(self.bit_width as i64, 8);
                    self.current_value = bit_reader.get_aligned::<u64>(value_width as usize);
                    assert!(self.current_value.is_some());
                }
                return true;
            }
        }
        false
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use rand::{
        self,
        distributions::{Distribution, Standard},
        thread_rng, Rng, SeedableRng,
    };
    use util::memory::ByteBufferPtr;

    // Widest bit width exercised by the round-trip tests.
    const MAX_WIDTH: usize = 32;

    #[test]
    fn test_rle_decode_int32() {
        // Header 0x03 = (1 << 1) | 1: one bit-packed group of 8 values; the
        // remaining bytes hold 0..=7 packed with bit width 3.
        let data = ByteBufferPtr::new(vec![0x03, 0x88, 0xC6, 0xFA]);
        let mut decoder: RleDecoder = RleDecoder::new(3);
        decoder.set_data(data);
        let mut buffer = vec![0; 8];
        let expected = vec![0, 1, 2, 3, 4, 5, 6, 7];
        let result = decoder.get_batch::<i32>(&mut buffer);
        assert!(result.is_ok());
        assert_eq!(buffer, expected);
    }

    #[test]
    fn test_rle_consume_flush_buffer() {
        // `flush_buffer` (borrowing) and `consume` (taking) must produce the
        // same encoded bytes for identical inputs.
        let data = vec![1, 1, 1, 2, 2, 3, 3, 3];
        let mut encoder1 = RleEncoder::new(3, 256);
        let mut encoder2 = RleEncoder::new(3, 256);
        for value in data {
            encoder1.put(value as u64).unwrap();
            encoder2.put(value as u64).unwrap();
        }
        let res1 = encoder1.flush_buffer().unwrap();
        let res2 = encoder2.consume().unwrap();
        assert_eq!(res1, &res2[..]);
    }

    #[test]
    fn test_rle_decode_bool() {
        // data1: two RLE runs — 0x64 = (50 << 1) | 0, i.e. 50x value 0x01
        // then 50x value 0x00.
        let data1 = ByteBufferPtr::new(vec![0x64, 0x01, 0x64, 0x00]);
        // data2: one bit-packed run — 0x1B = (13 << 1) | 1, i.e. 13 groups
        // (104 values) of alternating bits (0xAA = 0b10101010, LSB first).
        let data2 = ByteBufferPtr::new(vec![
            0x1B, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0x0A,
        ]);
        let mut decoder: RleDecoder = RleDecoder::new(1);
        decoder.set_data(data1);
        let mut buffer = vec![false; 100];
        let mut expected = vec![];
        for i in 0..100 {
            if i < 50 {
                expected.push(true);
            } else {
                expected.push(false);
            }
        }
        let result = decoder.get_batch::<bool>(&mut buffer);
        assert!(result.is_ok());
        assert_eq!(buffer, expected);

        // Reuse the same decoder for the bit-packed data.
        decoder.set_data(data2);
        let mut buffer = vec![false; 100];
        let mut expected = vec![];
        for i in 0..100 {
            if i % 2 == 0 {
                expected.push(false);
            } else {
                expected.push(true);
            }
        }
        let result = decoder.get_batch::<bool>(&mut buffer);
        assert!(result.is_ok());
        assert_eq!(buffer, expected);
    }

    #[test]
    fn test_rle_decode_with_dict_int32() {
        // Three RLE runs of dictionary indices 0, 1, 2 with repeat counts
        // 3, 4 and 5.
        let dict = vec![10, 20, 30];
        let data = ByteBufferPtr::new(vec![0x06, 0x00, 0x08, 0x01, 0x0A, 0x02]);
        let mut decoder: RleDecoder = RleDecoder::new(3);
        decoder.set_data(data);
        let mut buffer = vec![0; 12];
        let expected = vec![10, 10, 10, 20, 20, 20, 20, 30, 30, 30, 30, 30];
        let result = decoder.get_batch_with_dict::<i32>(&dict, &mut buffer, 12);
        assert!(result.is_ok());
        assert_eq!(buffer, expected);

        // Bit-packed indices into a string dictionary.
        let dict = vec!["aaa", "bbb", "ccc", "ddd", "eee", "fff"];
        let data = ByteBufferPtr::new(vec![0x03, 0x63, 0xC7, 0x8E, 0x03, 0x65, 0x0B]);
        let mut decoder: RleDecoder = RleDecoder::new(3);
        decoder.set_data(data);
        let mut buffer = vec![""; 12];
        let expected = vec![
            "ddd", "eee", "fff", "ddd", "eee", "fff", "ddd", "eee", "fff", "eee", "fff", "fff",
        ];
        let result =
            decoder.get_batch_with_dict::<&str>(dict.as_slice(), buffer.as_mut_slice(), 12);
        assert!(result.is_ok());
        assert_eq!(buffer, expected);
    }

    // Encodes `values` with `bit_width`, optionally checks the exact encoded
    // bytes and total length (`expected_len == -1` skips the length check),
    // then decodes back both one-by-one (`get`) and in bulk (`get_batch`).
    fn validate_rle(
        values: &[i64],
        bit_width: u8,
        expected_encoding: Option<&[u8]>,
        expected_len: i32,
    )
    {
        let buffer_len = 64 * 1024;
        let mut encoder = RleEncoder::new(bit_width, buffer_len);
        for v in values {
            let result = encoder.put(*v as u64);
            assert!(result.is_ok());
        }
        let buffer = ByteBufferPtr::new(encoder.consume().expect("Expect consume() OK"));
        if expected_len != -1 {
            assert_eq!(buffer.len(), expected_len as usize);
        }
        match expected_encoding {
            Some(b) => assert_eq!(buffer.as_ref(), b),
            _ => (),
        }

        // Verify one-at-a-time read.
        let mut decoder = RleDecoder::new(bit_width);
        decoder.set_data(buffer.all());
        for v in values {
            let val: i64 = decoder
                .get()
                .expect("get() should be OK")
                .expect("get() should return more value");
            assert_eq!(val, *v);
        }

        // Verify batch read on the same data.
        decoder.set_data(buffer);
        let mut values_read: Vec<i64> = vec![0; values.len()];
        decoder
            .get_batch(&mut values_read[..])
            .expect("get_batch() should be OK");
        assert_eq!(&values_read[..], values);
    }

    #[test]
    fn test_rle_specific_sequences() {
        let mut expected_buffer = Vec::new();
        let mut values = Vec::new();
        // 50 zeros then 50 ones: two RLE runs regardless of bit width.
        for _ in 0..50 {
            values.push(0);
        }
        for _ in 0..50 {
            values.push(1);
        }
        expected_buffer.push(50 << 1);
        expected_buffer.push(0);
        expected_buffer.push(50 << 1);
        expected_buffer.push(1);
        // For widths 1..8 each run is 2 bytes (header + 1-byte value).
        for width in 1..9 {
            validate_rle(&values[..], width, Some(&expected_buffer[..]), 4);
        }
        // For wider widths only the total length is checked.
        for width in 9..MAX_WIDTH + 1 {
            validate_rle(
                &values[..],
                width as u8,
                None,
                2 * (1 + bit_util::ceil(width as i64, 8) as i32),
            );
        }

        // Alternating 0/1 for 101 values: forces bit-packed encoding with a
        // zero-padded final group.
        values.clear();
        expected_buffer.clear();
        for i in 0..101 {
            values.push(i % 2);
        }
        // NOTE(review): group count computed from 100 rather than 101 —
        // both give ceil(...) = 13 groups here, so the expectation holds.
        let num_groups = bit_util::ceil(100, 8) as u8;
        expected_buffer.push(((num_groups << 1) as u8) | 1);
        for _ in 1..(100 / 8) + 1 {
            expected_buffer.push(0b10101010);
        }
        // Last group: values 0,1,0,1,0 (LSB first) padded with zeros.
        expected_buffer.push(0b00001010);
        validate_rle(
            &values,
            1,
            Some(&expected_buffer[..]),
            1 + num_groups as i32,
        );
        for width in 2..MAX_WIDTH + 1 {
            let num_values = bit_util::ceil(100, 8) * 8;
            validate_rle(
                &values,
                width as u8,
                None,
                1 + bit_util::ceil(width as i64 * num_values, 8) as i32,
            );
        }
    }

    // Round-trips `num_vals` values at the given width. `value == -1` uses a
    // ramp of v % 2^bit_width; otherwise every value is `value`.
    fn test_rle_values(bit_width: usize, num_vals: usize, value: i32) {
        let mod_val = if bit_width == 64 {
            1
        } else {
            1u64 << bit_width
        };
        let mut values: Vec<i64> = vec![];
        for v in 0..num_vals {
            let val = if value == -1 {
                v as i64 % mod_val as i64
            } else {
                value as i64
            };
            values.push(val);
        }
        validate_rle(&values, bit_width as u8, None, -1);
    }

    #[test]
    fn test_values() {
        for width in 1..MAX_WIDTH + 1 {
            test_rle_values(width, 1, -1);
            test_rle_values(width, 1024, -1);
            test_rle_values(width, 1024, 0);
            test_rle_values(width, 1024, 1);
        }
    }

    #[test]
    fn test_rle_specific_roundtrip() {
        // Round-trip with the smallest legal buffer for width 1.
        let bit_width = 1;
        let buffer_len = RleEncoder::min_buffer_size(bit_width);
        let values: Vec<i16> = vec![0, 1, 1, 1, 1, 0, 0, 0, 0, 1];
        let mut encoder = RleEncoder::new(bit_width, buffer_len);
        for v in &values {
            assert!(encoder.put(*v as u64).expect("put() should be OK"));
        }
        let buffer = encoder.consume().expect("consume() should be OK");
        let mut decoder = RleDecoder::new(bit_width);
        decoder.set_data(ByteBufferPtr::new(buffer));
        let mut actual_values: Vec<i16> = vec![0; values.len()];
        decoder
            .get_batch(&mut actual_values)
            .expect("get_batch() should be OK");
        assert_eq!(actual_values, values);
    }

    // Like `validate_rle` but for i32 values and without byte-level checks.
    fn test_round_trip(values: &[i32], bit_width: u8) {
        let buffer_len = 64 * 1024;
        let mut encoder = RleEncoder::new(bit_width, buffer_len);
        for v in values {
            let result = encoder.put(*v as u64).expect("put() should be OK");
            assert!(result, "put() should not return false");
        }
        let buffer = ByteBufferPtr::new(encoder.consume().expect("consume() should be OK"));

        // Verify one-at-a-time read.
        let mut decoder = RleDecoder::new(bit_width);
        decoder.set_data(buffer.all());
        for v in values {
            let val = decoder
                .get::<i32>()
                .expect("get() should be OK")
                .expect("get() should return value");
            assert_eq!(val, *v);
        }

        // Verify batch read with a fresh decoder.
        let mut decoder = RleDecoder::new(bit_width);
        decoder.set_data(buffer);
        let mut values_read: Vec<i32> = vec![0; values.len()];
        decoder
            .get_batch(&mut values_read[..])
            .expect("get_batch() should be OK");
        assert_eq!(&values_read[..], values);
    }

    #[test]
    fn test_random() {
        // Round-trips randomly sized runs of alternating 0/1 values.
        // Uses rand's pre-0.6 API: `StdRng::from_seed([u8; 32])` and the
        // two-argument `gen_range(low, high)`.
        let seed_len = 32;
        let niters = 50;
        let ngroups = 1000;
        let max_group_size = 15;
        let mut values = vec![];
        for _ in 0..niters {
            values.clear();
            let mut rng = thread_rng();
            let seed_vec: Vec<u8> = Standard.sample_iter(&mut rng).take(seed_len).collect();
            let mut seed = [0u8; 32];
            seed.copy_from_slice(&seed_vec[0..seed_len]);
            let mut gen = rand::StdRng::from_seed(seed);
            let mut parity = false;
            for _ in 0..ngroups {
                let mut group_size = gen.gen_range::<u32>(1, 20);
                if group_size > max_group_size {
                    group_size = 1;
                }
                for _ in 0..group_size {
                    values.push(parity as i32);
                }
                parity = !parity;
            }
            // NOTE(review): the width is derived from the value *count*,
            // not the maximum value — confirm this over-wide width is
            // intentional (it is still a valid round-trip).
            let bit_width = bit_util::num_required_bits(values.len() as u64);
            assert!(bit_width < 64);
            test_round_trip(&values[..], bit_width as u8);
        }
    }
}