Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Delta #1

Open
wants to merge 6 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions parquet/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,7 @@ hashbrown = { version = "0.14", default-features = false }
twox-hash = { version = "1.6", default-features = false }
paste = { version = "1.0" }
half = { version = "2.1", default-features = false, features = ["num-traits"] }
ring = { version = "0.17", default-features = false, features = ["std"]}

[dev-dependencies]
base64 = { version = "0.22", default-features = false, features = ["std"] }
Expand Down Expand Up @@ -104,6 +105,7 @@ experimental = []
async = ["futures", "tokio"]
# Enable object_store integration
object_store = ["dep:object_store", "async"]
#encryption = ["aes-gcm", "base64"]

[[example]]
name = "read_parquet"
Expand Down
39 changes: 38 additions & 1 deletion parquet/src/arrow/arrow_reader/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@ use crate::file::footer;
use crate::file::page_index::index_reader;
pub use filter::{ArrowPredicate, ArrowPredicateFn, RowFilter};
pub use selection::{RowSelection, RowSelector};
use crate::encryption::ciphers;
use crate::encryption::ciphers::FileDecryptionProperties;

/// A generic builder for constructing sync or async arrow parquet readers. This is not intended
/// to be used directly, instead you should use the specialization for the type of reader
Expand Down Expand Up @@ -250,7 +252,12 @@ impl ArrowReaderMetadata {
///
/// See [`ParquetRecordBatchReaderBuilder::new_with_metadata`] for how this can be used
pub fn load<T: ChunkReader>(reader: &T, options: ArrowReaderOptions) -> Result<Self> {
let mut metadata = footer::parse_metadata(reader)?;
Self::load_with_decryption(reader, options, FileDecryptionProperties::builder().build())
}

pub fn load_with_decryption<T: ChunkReader>(reader: &T, options: ArrowReaderOptions,
file_decryption_properties: FileDecryptionProperties) -> Result<Self> {
let mut metadata = footer::parse_metadata_with_decryption(reader, file_decryption_properties)?;
if options.page_index {
let column_index = metadata
.row_groups()
Expand Down Expand Up @@ -358,6 +365,11 @@ impl<T: ChunkReader + 'static> ParquetRecordBatchReaderBuilder<T> {
Ok(Self::new_with_metadata(reader, metadata))
}

/// Create a [`ParquetRecordBatchReaderBuilder`] for an encrypted file.
///
/// Same as [`Self::try_new_with_options`], but uses `file_decryption_properties`
/// to decrypt the file footer / metadata while loading it.
pub fn try_new_with_decryption(reader: T, options: ArrowReaderOptions, file_decryption_properties: FileDecryptionProperties) -> Result<Self> {
    let metadata = ArrowReaderMetadata::load_with_decryption(&reader, options, file_decryption_properties)?;
    Ok(Self::new_with_metadata(reader, metadata))
}

/// Create a [`ParquetRecordBatchReaderBuilder`] from the provided [`ArrowReaderMetadata`]
///
/// This allows loading metadata once and using it to create multiple builders with
Expand Down Expand Up @@ -606,6 +618,13 @@ impl ParquetRecordBatchReader {
.build()
}

/// Create a new [`ParquetRecordBatchReader`] from the provided chunk reader
/// and batch size, decrypting the file with `file_decryption_properties`.
///
/// Note: builds the reader with default [`ArrowReaderOptions`]; use
/// [`ParquetRecordBatchReaderBuilder::try_new_with_decryption`] directly for
/// more control.
pub fn try_new_with_decryption<T: ChunkReader + 'static>(reader: T, batch_size: usize,
                                                         file_decryption_properties: FileDecryptionProperties) -> Result<Self> {
    ParquetRecordBatchReaderBuilder::try_new_with_decryption(reader, Default::default(), file_decryption_properties)?
        .with_batch_size(batch_size)
        .build()
}

/// Create a new [`ParquetRecordBatchReader`] from the provided [`RowGroups`]
///
/// Note: this is a low-level interface see [`ParquetRecordBatchReader::try_new`] for a
Expand Down Expand Up @@ -770,6 +789,7 @@ mod tests {
BoolType, ByteArray, ByteArrayType, DataType, FixedLenByteArray, FixedLenByteArrayType,
FloatType, Int32Type, Int64Type, Int96Type,
};
use crate::encryption::ciphers;
use crate::errors::Result;
use crate::file::properties::{EnabledStatistics, WriterProperties, WriterVersion};
use crate::file::writer::SerializedFileWriter;
Expand Down Expand Up @@ -1410,6 +1430,23 @@ mod tests {
assert!(col.value(2).is_nan());
}

// Smoke test: open a uniformly-encrypted test file (single footer key used for
// all modules) and build a record batch reader from it. Only footer decryption
// is exercised so far; the decoded contents are not yet asserted.
#[test]
fn test_uniform_encryption() {
    let path = format!(
        "{}/uniform_encryption.parquet.encrypted",
        arrow::util::test_util::parquet_test_data(),
    );
    let file = File::open(path).unwrap();
    // 16-byte AES-128 key; presumably matches the key the test file was
    // written with — TODO confirm / source from a shared fixture
    let key_code: &[u8] = "0123456789012345".as_bytes();
    // todo: also cover non-uniform (per-column key) encryption
    let decryption_properties = ciphers::FileDecryptionProperties::builder()
        .with_footer_key(key_code.to_vec())
        .build();
    let record_reader = ParquetRecordBatchReader::try_new_with_decryption(file, 128, decryption_properties).unwrap();
    // todo check contents
}

#[test]
fn test_read_float32_float64_byte_stream_split() {
let path = format!(
Expand Down
282 changes: 282 additions & 0 deletions parquet/src/encryption/ciphers.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,282 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Encryption implementation specific to Parquet, as described
//! in the [spec](https://github.com/apache/parquet-format/blob/master/Encryption.md).

use ring::aead::{Aad, LessSafeKey, NonceSequence, UnboundKey, AES_128_GCM};
use ring::rand::{SecureRandom, SystemRandom};
use crate::errors::{ParquetError, Result};

/// Encrypts a single Parquet module (footer, page, header, ...) in one shot.
pub trait BlockEncryptor {
    /// Encrypts `plaintext` bound to the additional authenticated data `aad`,
    /// returning the serialized module: a 4-byte little-endian length prefix,
    /// the nonce, the ciphertext, and the authentication tag.
    fn encrypt(&mut self, plaintext: &[u8], aad: &[u8]) -> Vec<u8>;
}

/// Decrypts a single Parquet module produced by a [`BlockEncryptor`].
pub trait BlockDecryptor {
    /// Decrypts `length_and_ciphertext` (4-byte length prefix, nonce,
    /// ciphertext, tag) using `aad`, returning the plaintext.
    fn decrypt(&self, length_and_ciphertext: &[u8], aad: &[u8]) -> Vec<u8>;
}

/// Masks a `u128` down to its low 96 bits (12 bytes) — the width of an AES-GCM nonce.
const RIGHT_TWELVE: u128 = 0x0000_0000_ffff_ffff_ffff_ffff_ffff_ffff;
/// AES-GCM nonce length in bytes.
const NONCE_LEN: usize = 12;
/// AES-GCM authentication tag length in bytes.
const TAG_LEN: usize = 16;
/// Length in bytes of the little-endian size prefix on each encrypted module.
const SIZE_LEN: usize = 4;

/// A randomly-seeded 96-bit counter nonce sequence: each block encryption gets
/// a unique nonce, and the sequence reports exhaustion once the counter's low
/// 96 bits wrap back around to the starting value.
struct CounterNonce {
    // Random starting value; only its low 96 bits are compared against.
    start: u128,
    // Next value to hand out; incremented once per block.
    counter: u128,
}

impl CounterNonce {
    /// Creates a sequence seeded from `rng`; the first nonce handed out is
    /// `start + 1` (masked to 96 bits when serialized).
    ///
    /// # Panics
    /// Panics if the system random number generator fails to produce bytes.
    pub fn new(rng: &SystemRandom) -> Self {
        let mut buf = [0; 16];
        // Nonce uniqueness is the entire security of GCM; fail loudly rather
        // than proceed with an unseeded counter.
        rng.fill(&mut buf)
            .expect("SystemRandom failed to produce a nonce seed");

        // Since this is a random seed value, endianess doesn't matter at all,
        // and we can use whatever is platform-native.
        let start = u128::from_ne_bytes(buf) & RIGHT_TWELVE;
        let counter = start.wrapping_add(1);

        Self { start, counter }
    }

    /// One accessor for the nonce bytes to avoid potentially flipping endianess
    #[inline]
    pub fn get_bytes(&self) -> [u8; NONCE_LEN] {
        // The slice is exactly NONCE_LEN bytes, so the conversion cannot fail.
        self.counter.to_le_bytes()[0..NONCE_LEN].try_into().unwrap()
    }
}

impl NonceSequence for CounterNonce {
    fn advance(&mut self) -> Result<ring::aead::Nonce, ring::error::Unspecified> {
        // If we've wrapped around, we've exhausted this nonce sequence:
        // the counter's low 96 bits have cycled back to the starting value,
        // so the next nonce would repeat an earlier one.
        if (self.counter & RIGHT_TWELVE) == (self.start & RIGHT_TWELVE) {
            Err(ring::error::Unspecified)
        } else {
            // Otherwise, just advance and return the new value
            let buf: [u8; NONCE_LEN] = self.get_bytes();
            self.counter = self.counter.wrapping_add(1);
            Ok(ring::aead::Nonce::assume_unique_for_key(buf))
        }
    }
}

/// AES-128-GCM module encryptor backed by `ring`, pairing a key with a
/// counter-based nonce sequence.
pub(crate) struct RingGcmBlockEncryptor {
    // `LessSafeKey` because nonce uniqueness is managed by `nonce_sequence`,
    // not by ring's `SealingKey` wrapper.
    key: LessSafeKey,
    nonce_sequence: CounterNonce,
}

impl RingGcmBlockEncryptor {
    // todo TBD: some KMS systems produce data keys, need to be able to pass them to Encryptor.
    // todo TBD: for other KMSs, we will create data keys inside arrow-rs, making sure to use SystemRandom
    /// Create a new `RingGcmBlockEncryptor` with a given key and random nonce.
    /// The nonce will advance appropriately with each block encryption and
    /// return an error if it wraps around.
    ///
    /// # Panics
    /// Panics if `key_bytes` is not a valid AES-128 key (16 bytes).
    pub(crate) fn new(key_bytes: &[u8]) -> Self {
        let rng = SystemRandom::new();

        // todo support other key sizes
        let key = UnboundKey::new(&AES_128_GCM, key_bytes)
            .expect("AES-128-GCM requires a 16-byte key");
        let nonce = CounterNonce::new(&rng);

        Self {
            key: LessSafeKey::new(key),
            nonce_sequence: nonce,
        }
    }
}

impl BlockEncryptor for RingGcmBlockEncryptor {
    fn encrypt(&mut self, plaintext: &[u8], aad: &[u8]) -> Vec<u8> {
        let nonce = self.nonce_sequence.advance().unwrap();
        // Everything after the size prefix: nonce + ciphertext + tag.
        let ciphertext_len = plaintext.len() + NONCE_LEN + TAG_LEN;
        // Serialized layout per the Parquet encryption spec
        // (https://github.com/apache/parquet-format/blob/master/Encryption.md#51-encrypted-module-serialization):
        // [4-byte little-endian length][nonce][ciphertext][tag]
        let mut result = Vec::with_capacity(SIZE_LEN + ciphertext_len);
        result.extend_from_slice((ciphertext_len as i32).to_le_bytes().as_ref());
        result.extend_from_slice(nonce.as_ref());
        result.extend_from_slice(plaintext);

        // Encrypt the plaintext region in place; ring returns the tag
        // separately, which is then appended to complete the module.
        let tag = self
            .key
            .seal_in_place_separate_tag(nonce, Aad::from(aad), &mut result[SIZE_LEN + NONCE_LEN..])
            .unwrap();
        result.extend_from_slice(tag.as_ref());

        result
    }
}

/// AES-128-GCM module decryptor backed by `ring`.
pub(crate) struct RingGcmBlockDecryptor {
    // `LessSafeKey` is fine here: decryption does not consume nonces.
    key: LessSafeKey,
}

impl RingGcmBlockDecryptor {
    /// Creates a decryptor for a single AES-128-GCM key.
    ///
    /// # Panics
    /// Panics if `key_bytes` is not a valid AES-128 key (16 bytes).
    pub(crate) fn new(key_bytes: &[u8]) -> Self {
        // todo support other key sizes
        let key = UnboundKey::new(&AES_128_GCM, key_bytes)
            .expect("AES-128-GCM requires a 16-byte key");

        Self {
            key: LessSafeKey::new(key),
        }
    }
}

impl BlockDecryptor for RingGcmBlockDecryptor {
    fn decrypt(&self, length_and_ciphertext: &[u8], aad: &[u8]) -> Vec<u8> {
        // Working buffer holds ciphertext + tag (everything after the size
        // prefix and nonce); `open_in_place` needs the tag at the end.
        // Note: the original sized this for the plaintext only (subtracting
        // TAG_LEN) and then extended past capacity, forcing a reallocation.
        let mut result =
            Vec::with_capacity(length_and_ciphertext.len() - SIZE_LEN - NONCE_LEN);
        result.extend_from_slice(&length_and_ciphertext[SIZE_LEN + NONCE_LEN..]);

        let nonce = ring::aead::Nonce::try_assume_unique_for_key(
            &length_and_ciphertext[SIZE_LEN..SIZE_LEN + NONCE_LEN],
        )
        .unwrap();

        self.key
            .open_in_place(nonce, Aad::from(aad), &mut result)
            .unwrap();

        // `open_in_place` decrypts in place and leaves the plaintext in the
        // leading bytes; the trailing TAG_LEN bytes are the (now verified)
        // tag and must not be returned as part of the plaintext.
        result.truncate(result.len() - TAG_LEN);

        result
    }
}

/// Module type codes used as the single-byte AAD suffix component identifying
/// which kind of Parquet module an encrypted buffer belongs to, per the
/// Parquet encryption spec's AAD definition.
pub(crate) enum ModuleType {
    Footer = 0,
    ColumnMetaData = 1,
    DataPage = 2,
    DictionaryPage = 3,
    DataPageHeader = 4,
    DictionaryPageHeader = 5,
    ColumnIndex = 6,
    OffsetIndex = 7,
    BloomFilterHeader = 8,
    BloomFilterBitset = 9,
}

/// Builds the AAD for the file footer: the file AAD prefix followed by the
/// footer module type byte. Footer AAD carries no row group / column / page
/// ordinals, so `-1` sentinels are passed for all three.
pub fn create_footer_aad(file_aad: &[u8]) -> Result<Vec<u8>> {
    create_module_aad(file_aad, ModuleType::Footer, -1, -1, -1)
}

/// Builds the module AAD: the file AAD prefix, the module type byte, and —
/// depending on the module type — little-endian `u16` row group, column, and
/// page ordinals.
///
/// * Footer modules take no ordinals.
/// * Data pages and data page headers take all three ordinals.
/// * Every other module type takes row group and column ordinals only.
///
/// Callers pass `-1` for any ordinal the module type does not use.
///
/// # Errors
/// Returns an error if a required ordinal is negative or exceeds `u16::MAX`.
pub fn create_module_aad(file_aad: &[u8], module_type: ModuleType, row_group_ordinal: i32,
                         column_ordinal: i32, page_ordinal: i32) -> Result<Vec<u8>> {

    let module_buf = [module_type as u8];

    // Footer AAD: file prefix + module type byte only.
    if module_buf[0] == (ModuleType::Footer as u8) {
        let mut aad = Vec::with_capacity(file_aad.len() + 1);
        aad.extend_from_slice(file_aad);
        aad.extend_from_slice(module_buf.as_ref());
        return Ok(aad)
    }

    if row_group_ordinal < 0 {
        return Err(general_err!("Wrong row group ordinal: {}", row_group_ordinal));
    }
    if row_group_ordinal > u16::MAX as i32 {
        return Err(general_err!("Encrypted parquet files can't have more than {} row groups: {}",
            u16::MAX, row_group_ordinal));
    }

    if column_ordinal < 0 {
        return Err(general_err!("Wrong column ordinal: {}", column_ordinal));
    }
    if column_ordinal > u16::MAX as i32 {
        return Err(general_err!("Encrypted parquet files can't have more than {} columns: {}",
            u16::MAX, column_ordinal));
    }

    // Non-page modules: file prefix + type byte + row group + column ordinals.
    if module_buf[0] != (ModuleType::DataPageHeader as u8) &&
        module_buf[0] != (ModuleType::DataPage as u8) {
        let mut aad = Vec::with_capacity(file_aad.len() + 5);
        aad.extend_from_slice(file_aad);
        aad.extend_from_slice(module_buf.as_ref());
        aad.extend_from_slice((row_group_ordinal as u16).to_le_bytes().as_ref());
        aad.extend_from_slice((column_ordinal as u16).to_le_bytes().as_ref());
        return Ok(aad)
    }

    if page_ordinal < 0 {
        // Fixed: this message previously said "column ordinal".
        return Err(general_err!("Wrong page ordinal: {}", page_ordinal));
    }
    if page_ordinal > u16::MAX as i32 {
        return Err(general_err!("Encrypted parquet files can't have more than {} pages in a chunk: {}",
            u16::MAX, page_ordinal));
    }

    // Data page / data page header: all three ordinals.
    let mut aad = Vec::with_capacity(file_aad.len() + 7);
    aad.extend_from_slice(file_aad);
    aad.extend_from_slice(module_buf.as_ref());
    aad.extend_from_slice((row_group_ordinal as u16).to_le_bytes().as_ref());
    aad.extend_from_slice((column_ordinal as u16).to_le_bytes().as_ref());
    aad.extend_from_slice((page_ordinal as u16).to_le_bytes().as_ref());
    Ok(aad)
}

/// User-supplied decryption configuration for reading an encrypted Parquet
/// file. Construct via [`FileDecryptionProperties::builder`].
pub struct FileDecryptionProperties {
    // Key for the footer (and, in uniform encryption, all modules);
    // `None` when no footer key has been configured.
    footer_key: Option<Vec<u8>>
}

impl FileDecryptionProperties {
    /// Returns a builder with no keys configured.
    pub fn builder() -> DecryptionPropertiesBuilder {
        DecryptionPropertiesBuilder::with_defaults()
    }
}

/// Builder for [`FileDecryptionProperties`].
pub struct DecryptionPropertiesBuilder {
    // Footer key to carry into the built properties; starts as `None`.
    footer_key: Option<Vec<u8>>
}

impl DecryptionPropertiesBuilder {
    /// Creates a builder with no keys configured.
    pub fn with_defaults() -> Self {
        Self { footer_key: None }
    }

    /// Sets the key used to decrypt the file footer.
    pub fn with_footer_key(mut self, value: Vec<u8>) -> Self {
        self.footer_key = Some(value);
        self
    }

    /// Consumes the builder and produces the final [`FileDecryptionProperties`].
    pub fn build(self) -> FileDecryptionProperties {
        let Self { footer_key } = self;
        FileDecryptionProperties { footer_key }
    }
}

/// Holds the decryption properties for a file together with the decryptor
/// used for its footer.
pub struct FileDecryptor {
    decryption_properties: FileDecryptionProperties,
    // todo decr: change to BlockDecryptor
    footer_decryptor: RingGcmBlockDecryptor
}

impl FileDecryptor {
    /// Creates a `FileDecryptor` from the given properties, instantiating the
    /// footer decryptor from the configured footer key.
    ///
    /// # Panics
    /// Panics if no footer key was set on `decryption_properties`.
    pub(crate) fn new(decryption_properties: FileDecryptionProperties) -> Self {
        // todo decr: if no key available yet (not set in properties, will be retrieved from metadata)
        // Borrow the key instead of cloning it: RingGcmBlockDecryptor::new only
        // needs a &[u8], so the extra Vec allocation was unnecessary.
        let footer_decryptor = RingGcmBlockDecryptor::new(
            decryption_properties
                .footer_key
                .as_deref()
                .expect("footer key must be set in the decryption properties"),
        );
        Self {
            footer_decryptor,
            decryption_properties,
        }
    }

    // todo decr: change to BlockDecryptor
    /// Consumes `self`, returning the footer decryptor.
    pub(crate) fn get_footer_decryptor(self) -> RingGcmBlockDecryptor {
        self.footer_decryptor
    }
}
Loading