- Replace form_urlencoded with RFC 3986 compliant URI encoding - Implement aws_uri_encode() matching AWS SigV4 spec exactly - Unreserved chars (A-Z,a-z,0-9,-,_,.,~) not encoded - All other chars percent-encoded with uppercase hex - Preserve slashes in paths, encode in query params - Normalize empty paths to '/' per AWS spec - Fix test expectations (body hash, HMAC values) - Add comprehensive SigV4 signature determinism test This fixes the canonicalization mismatch that caused signature validation failures in T047. Auth can now be enabled for production. Refs: T058.S1
494 lines
14 KiB
Rust
494 lines
14 KiB
Rust
//! Raft log storage implementation
//!
//! This module provides persistent storage for Raft log entries using RocksDB.
|
use crate::{cf, meta_keys, RocksStore};
use chainfire_types::error::StorageError;
use rocksdb::WriteBatch;
use serde::{Deserialize, Serialize};
use std::ops::RangeBounds;
use tracing::{debug, trace, warn};
|
|
|
/// Log entry index type.
///
/// Stored as a big-endian `u64` key in the LOGS column family so that
/// byte-wise key order matches numeric order (see `LogStorage::encode_index`).
pub type LogIndex = u64;

/// Raft term type.
pub type Term = u64;
|
|
|
|
/// Log ID combining term and index.
///
/// Ordering is derived from field declaration order: log IDs compare first by
/// `term`, then by `index`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct LogId {
    /// Raft term associated with the entry.
    pub term: Term,
    /// Position of the entry in the log.
    pub index: LogIndex,
}
|
|
|
|
impl LogId {
|
|
pub fn new(term: Term, index: LogIndex) -> Self {
|
|
Self { term, index }
|
|
}
|
|
}
|
|
|
|
impl Default for LogId {
|
|
fn default() -> Self {
|
|
Self { term: 0, index: 0 }
|
|
}
|
|
}
|
|
|
|
/// A log entry stored in the Raft log.
///
/// Generic over the application data type `D` carried by normal entries.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogEntry<D> {
    /// Term and index identifying this entry's position in the log.
    pub log_id: LogId,
    /// The entry's content: blank, application data, or a membership change.
    pub payload: EntryPayload<D>,
}
|
|
|
|
/// Payload of a log entry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum EntryPayload<D> {
    /// A blank entry for leader establishment.
    Blank,
    /// A normal data entry carrying application data of type `D`.
    Normal(D),
    /// Membership change entry holding the new set of node IDs.
    Membership(Vec<u64>), // Just node IDs for simplicity
}
|
|
|
|
impl<D> LogEntry<D> {
|
|
pub fn blank(log_id: LogId) -> Self {
|
|
Self {
|
|
log_id,
|
|
payload: EntryPayload::Blank,
|
|
}
|
|
}
|
|
|
|
pub fn normal(log_id: LogId, data: D) -> Self {
|
|
Self {
|
|
log_id,
|
|
payload: EntryPayload::Normal(data),
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Persisted vote information.
///
/// Serialized with bincode into the META column family under
/// `meta_keys::VOTE` (see `LogStorage::save_vote` / `read_vote`).
#[derive(Debug, Clone, Copy, Serialize, Deserialize, Default)]
pub struct Vote {
    /// Raft term associated with this vote.
    pub term: Term,
    /// Node voted for, if any.
    pub node_id: Option<u64>,
    /// Whether the vote is committed.
    pub committed: bool,
}
|
|
|
|
/// Log storage state.
#[derive(Debug, Clone, Default)]
pub struct LogState {
    /// Last purged log ID; `None` if nothing has been purged yet.
    pub last_purged_log_id: Option<LogId>,
    /// Last log ID in storage; `None` when the log is empty and nothing
    /// has been purged.
    pub last_log_id: Option<LogId>,
}
|
|
|
|
/// Raft log storage backed by RocksDB.
///
/// Log entries live in the LOGS column family keyed by big-endian index;
/// vote and purge metadata live in the META column family.
pub struct LogStorage {
    /// Handle to the underlying RocksDB store.
    store: RocksStore,
}
|
|
|
|
impl LogStorage {
|
|
/// Create a new log storage
|
|
pub fn new(store: RocksStore) -> Self {
|
|
Self { store }
|
|
}
|
|
|
|
/// Encode log index as bytes for storage
|
|
fn encode_index(index: LogIndex) -> [u8; 8] {
|
|
index.to_be_bytes()
|
|
}
|
|
|
|
/// Decode log index from bytes
|
|
fn decode_index(bytes: &[u8]) -> LogIndex {
|
|
let arr: [u8; 8] = bytes.try_into().unwrap_or_default();
|
|
LogIndex::from_be_bytes(arr)
|
|
}
|
|
|
|
/// Get log state (first and last log IDs)
|
|
pub fn get_log_state(&self) -> Result<LogState, StorageError> {
|
|
let cf = self
|
|
.store
|
|
.cf_handle(cf::LOGS)
|
|
.ok_or_else(|| StorageError::RocksDb("LOGS cf not found".into()))?;
|
|
|
|
// Get first and last entries
|
|
let mut iter = self
|
|
.store
|
|
.db()
|
|
.iterator_cf(&cf, rocksdb::IteratorMode::Start);
|
|
|
|
let _first = iter.next();
|
|
let last_purged_log_id = self.get_last_purged_log_id()?;
|
|
|
|
// Get last log ID
|
|
let mut last_iter = self
|
|
.store
|
|
.db()
|
|
.iterator_cf(&cf, rocksdb::IteratorMode::End);
|
|
|
|
let last_log_id = if let Some(Ok((_, value))) = last_iter.next() {
|
|
// Skip empty or corrupt entries - treat as empty log
|
|
if value.is_empty() {
|
|
last_purged_log_id
|
|
} else {
|
|
match bincode::deserialize::<LogEntry<Vec<u8>>>(&value) {
|
|
Ok(entry) => Some(entry.log_id),
|
|
Err(e) => {
|
|
eprintln!("Warning: Failed to deserialize log entry: {}, treating as empty log", e);
|
|
last_purged_log_id
|
|
}
|
|
}
|
|
}
|
|
} else {
|
|
last_purged_log_id
|
|
};
|
|
|
|
Ok(LogState {
|
|
last_purged_log_id,
|
|
last_log_id,
|
|
})
|
|
}
|
|
|
|
/// Save vote to persistent storage
|
|
pub fn save_vote(&self, vote: Vote) -> Result<(), StorageError> {
|
|
let cf = self
|
|
.store
|
|
.cf_handle(cf::META)
|
|
.ok_or_else(|| StorageError::RocksDb("META cf not found".into()))?;
|
|
|
|
let bytes =
|
|
bincode::serialize(&vote).map_err(|e| StorageError::Serialization(e.to_string()))?;
|
|
|
|
self.store
|
|
.db()
|
|
.put_cf(&cf, meta_keys::VOTE, bytes)
|
|
.map_err(|e| StorageError::RocksDb(e.to_string()))?;
|
|
|
|
debug!(?vote, "Saved vote");
|
|
Ok(())
|
|
}
|
|
|
|
/// Read vote from persistent storage
|
|
pub fn read_vote(&self) -> Result<Option<Vote>, StorageError> {
|
|
let cf = self
|
|
.store
|
|
.cf_handle(cf::META)
|
|
.ok_or_else(|| StorageError::RocksDb("META cf not found".into()))?;
|
|
|
|
match self
|
|
.store
|
|
.db()
|
|
.get_cf(&cf, meta_keys::VOTE)
|
|
.map_err(|e| StorageError::RocksDb(e.to_string()))?
|
|
{
|
|
Some(bytes) => {
|
|
let vote: Vote = bincode::deserialize(&bytes)
|
|
.map_err(|e| StorageError::Serialization(e.to_string()))?;
|
|
Ok(Some(vote))
|
|
}
|
|
None => Ok(None),
|
|
}
|
|
}
|
|
|
|
/// Append log entries
|
|
pub fn append<D: Serialize>(&self, entries: &[LogEntry<D>]) -> Result<(), StorageError> {
|
|
if entries.is_empty() {
|
|
return Ok(());
|
|
}
|
|
|
|
let cf = self
|
|
.store
|
|
.cf_handle(cf::LOGS)
|
|
.ok_or_else(|| StorageError::RocksDb("LOGS cf not found".into()))?;
|
|
|
|
let mut batch = WriteBatch::default();
|
|
|
|
for entry in entries {
|
|
let key = Self::encode_index(entry.log_id.index);
|
|
let value = bincode::serialize(entry)
|
|
.map_err(|e| StorageError::Serialization(e.to_string()))?;
|
|
batch.put_cf(&cf, key, value);
|
|
}
|
|
|
|
self.store
|
|
.db()
|
|
.write(batch)
|
|
.map_err(|e| StorageError::RocksDb(e.to_string()))?;
|
|
|
|
debug!(
|
|
first = entries.first().map(|e| e.log_id.index),
|
|
last = entries.last().map(|e| e.log_id.index),
|
|
count = entries.len(),
|
|
"Appended log entries"
|
|
);
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// Get log entries in a range
|
|
pub fn get_log_entries<D: for<'de> Deserialize<'de>>(
|
|
&self,
|
|
range: impl RangeBounds<LogIndex>,
|
|
) -> Result<Vec<LogEntry<D>>, StorageError> {
|
|
let cf = self
|
|
.store
|
|
.cf_handle(cf::LOGS)
|
|
.ok_or_else(|| StorageError::RocksDb("LOGS cf not found".into()))?;
|
|
|
|
let start = match range.start_bound() {
|
|
std::ops::Bound::Included(&idx) => idx,
|
|
std::ops::Bound::Excluded(&idx) => idx + 1,
|
|
std::ops::Bound::Unbounded => 0,
|
|
};
|
|
|
|
let end = match range.end_bound() {
|
|
std::ops::Bound::Included(&idx) => Some(idx + 1),
|
|
std::ops::Bound::Excluded(&idx) => Some(idx),
|
|
std::ops::Bound::Unbounded => None,
|
|
};
|
|
|
|
let mut entries = Vec::new();
|
|
let iter = self.store.db().iterator_cf(
|
|
&cf,
|
|
rocksdb::IteratorMode::From(&Self::encode_index(start), rocksdb::Direction::Forward),
|
|
);
|
|
|
|
for item in iter {
|
|
let (key, value) = item.map_err(|e| StorageError::RocksDb(e.to_string()))?;
|
|
|
|
let idx = Self::decode_index(&key);
|
|
if let Some(end_idx) = end {
|
|
if idx >= end_idx {
|
|
break;
|
|
}
|
|
}
|
|
|
|
let entry: LogEntry<D> = bincode::deserialize(&value)
|
|
.map_err(|e| StorageError::Serialization(e.to_string()))?;
|
|
entries.push(entry);
|
|
}
|
|
|
|
trace!(start, ?end, count = entries.len(), "Get log entries");
|
|
Ok(entries)
|
|
}
|
|
|
|
/// Truncate log from the given index (inclusive)
|
|
pub fn truncate(&self, from_index: LogIndex) -> Result<(), StorageError> {
|
|
let cf = self
|
|
.store
|
|
.cf_handle(cf::LOGS)
|
|
.ok_or_else(|| StorageError::RocksDb("LOGS cf not found".into()))?;
|
|
|
|
let mut batch = WriteBatch::default();
|
|
|
|
let iter = self.store.db().iterator_cf(
|
|
&cf,
|
|
rocksdb::IteratorMode::From(
|
|
&Self::encode_index(from_index),
|
|
rocksdb::Direction::Forward,
|
|
),
|
|
);
|
|
|
|
for item in iter {
|
|
let (key, _) = item.map_err(|e| StorageError::RocksDb(e.to_string()))?;
|
|
batch.delete_cf(&cf, key);
|
|
}
|
|
|
|
self.store
|
|
.db()
|
|
.write(batch)
|
|
.map_err(|e| StorageError::RocksDb(e.to_string()))?;
|
|
|
|
debug!(from_index, "Truncated log");
|
|
Ok(())
|
|
}
|
|
|
|
/// Purge log entries up to the given index (inclusive)
|
|
pub fn purge(&self, up_to_index: LogIndex) -> Result<(), StorageError> {
|
|
let cf = self
|
|
.store
|
|
.cf_handle(cf::LOGS)
|
|
.ok_or_else(|| StorageError::RocksDb("LOGS cf not found".into()))?;
|
|
|
|
// First, get the log ID of the entry we're purging to
|
|
let entries: Vec<LogEntry<Vec<u8>>> = self.get_log_entries(up_to_index..=up_to_index)?;
|
|
let last_purged = entries.first().map(|e| e.log_id);
|
|
|
|
let mut batch = WriteBatch::default();
|
|
|
|
let iter = self
|
|
.store
|
|
.db()
|
|
.iterator_cf(&cf, rocksdb::IteratorMode::Start);
|
|
|
|
for item in iter {
|
|
let (key, _) = item.map_err(|e| StorageError::RocksDb(e.to_string()))?;
|
|
let idx = Self::decode_index(&key);
|
|
if idx > up_to_index {
|
|
break;
|
|
}
|
|
batch.delete_cf(&cf, key);
|
|
}
|
|
|
|
// Save last purged log ID
|
|
if let Some(log_id) = last_purged {
|
|
let meta_cf = self
|
|
.store
|
|
.cf_handle(cf::META)
|
|
.ok_or_else(|| StorageError::RocksDb("META cf not found".into()))?;
|
|
let bytes = bincode::serialize(&log_id)
|
|
.map_err(|e| StorageError::Serialization(e.to_string()))?;
|
|
batch.put_cf(&meta_cf, b"last_purged", bytes);
|
|
}
|
|
|
|
self.store
|
|
.db()
|
|
.write(batch)
|
|
.map_err(|e| StorageError::RocksDb(e.to_string()))?;
|
|
|
|
debug!(up_to_index, "Purged log");
|
|
Ok(())
|
|
}
|
|
|
|
/// Get last purged log ID
|
|
fn get_last_purged_log_id(&self) -> Result<Option<LogId>, StorageError> {
|
|
let cf = self
|
|
.store
|
|
.cf_handle(cf::META)
|
|
.ok_or_else(|| StorageError::RocksDb("META cf not found".into()))?;
|
|
|
|
match self
|
|
.store
|
|
.db()
|
|
.get_cf(&cf, b"last_purged")
|
|
.map_err(|e| StorageError::RocksDb(e.to_string()))?
|
|
{
|
|
Some(bytes) => {
|
|
if bytes.is_empty() {
|
|
return Ok(None);
|
|
}
|
|
match bincode::deserialize::<LogId>(&bytes) {
|
|
Ok(log_id) => Ok(Some(log_id)),
|
|
Err(e) => {
|
|
eprintln!("Warning: Failed to deserialize last_purged: {}, treating as None", e);
|
|
Ok(None)
|
|
}
|
|
}
|
|
}
|
|
None => Ok(None),
|
|
}
|
|
}
|
|
}
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    /// Create a storage backed by a fresh temporary directory.
    ///
    /// The `TempDir` guard is returned alongside the storage and must be kept
    /// alive by the caller. Previously the guard was dropped when this
    /// function returned, which deleted the directory (and the RocksDB files
    /// inside it) while the database handle was still open.
    fn create_test_storage() -> (tempfile::TempDir, LogStorage) {
        let dir = tempdir().unwrap();
        let store = RocksStore::new(dir.path()).unwrap();
        (dir, LogStorage::new(store))
    }

    #[test]
    fn test_vote_persistence() {
        let (_dir, storage) = create_test_storage();

        let vote = Vote {
            term: 5,
            node_id: Some(1),
            committed: true,
        };

        storage.save_vote(vote).unwrap();
        let loaded = storage.read_vote().unwrap().unwrap();

        assert_eq!(loaded.term, 5);
        assert_eq!(loaded.node_id, Some(1));
        assert!(loaded.committed);
    }

    #[test]
    fn test_append_and_get_entries() {
        let (_dir, storage) = create_test_storage();

        let entries = vec![
            LogEntry::<Vec<u8>>::blank(LogId::new(1, 1)),
            LogEntry::normal(LogId::new(1, 2), b"data1".to_vec()),
            LogEntry::normal(LogId::new(1, 3), b"data2".to_vec()),
        ];

        storage.append(&entries).unwrap();

        let loaded: Vec<LogEntry<Vec<u8>>> = storage.get_log_entries(1..=3).unwrap();
        assert_eq!(loaded.len(), 3);
        assert_eq!(loaded[0].log_id.index, 1);
        assert_eq!(loaded[2].log_id.index, 3);
    }

    #[test]
    fn test_log_state() {
        let (_dir, storage) = create_test_storage();

        // Initially empty.
        let state = storage.get_log_state().unwrap();
        assert!(state.last_log_id.is_none());

        // Add entries; the last log ID must reflect the newest entry.
        let entries = vec![
            LogEntry::<Vec<u8>>::blank(LogId::new(1, 1)),
            LogEntry::normal(LogId::new(1, 2), b"data".to_vec()),
        ];
        storage.append(&entries).unwrap();

        let state = storage.get_log_state().unwrap();
        assert_eq!(state.last_log_id, Some(LogId::new(1, 2)));
    }

    #[test]
    fn test_truncate() {
        let (_dir, storage) = create_test_storage();

        let entries = vec![
            LogEntry::<Vec<u8>>::blank(LogId::new(1, 1)),
            LogEntry::normal(LogId::new(1, 2), b"data1".to_vec()),
            LogEntry::normal(LogId::new(1, 3), b"data2".to_vec()),
            LogEntry::normal(LogId::new(1, 4), b"data3".to_vec()),
        ];
        storage.append(&entries).unwrap();

        // Truncate from index 3: entries 3 and 4 are removed.
        storage.truncate(3).unwrap();

        let loaded: Vec<LogEntry<Vec<u8>>> = storage.get_log_entries(1..=4).unwrap();
        assert_eq!(loaded.len(), 2);
        assert_eq!(loaded.last().unwrap().log_id.index, 2);
    }

    #[test]
    fn test_purge() {
        let (_dir, storage) = create_test_storage();

        let entries = vec![
            LogEntry::<Vec<u8>>::blank(LogId::new(1, 1)),
            LogEntry::normal(LogId::new(1, 2), b"data1".to_vec()),
            LogEntry::normal(LogId::new(1, 3), b"data2".to_vec()),
            LogEntry::normal(LogId::new(1, 4), b"data3".to_vec()),
        ];
        storage.append(&entries).unwrap();

        // Purge up to index 2: entries 1 and 2 are removed and recorded.
        storage.purge(2).unwrap();

        let loaded: Vec<LogEntry<Vec<u8>>> = storage.get_log_entries(1..=4).unwrap();
        assert_eq!(loaded.len(), 2);
        assert_eq!(loaded.first().unwrap().log_id.index, 3);

        let state = storage.get_log_state().unwrap();
        assert_eq!(state.last_purged_log_id, Some(LogId::new(1, 2)));
    }
}
|