Skip to content

Commit

Permalink
Merge #386
Browse files Browse the repository at this point in the history
386: [Closes #383] Refactoring bio.rs and log.rs r=jeehoonkang a=Medowhill

Closes #383

* `read_head`에서 `buf_unforget` 대신 `disk.read` 사용. `Arena::unforget` 삭제.
* `Buf`가 `ManuallyDrop<BufUnlocked>`를 가지고 있게 하여 `transmute` 없이 `unlock` 구현.
* 필요없는 `unsafe` 제거. `unsafe` 블록에 주석 추가.
* 가능한 경우 포인터 대신 slice 사용.

Co-authored-by: Jaemin Hong <hjm0901@gmail.com>
  • Loading branch information
kaist-cp-bors[bot] and Medowhill authored Feb 2, 2021
2 parents a15fa0c + a278468 commit 5a78547
Show file tree
Hide file tree
Showing 7 changed files with 155 additions and 201 deletions.
47 changes: 0 additions & 47 deletions kernel-rs/src/arena.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,6 @@ pub trait Arena: Sized {
/// The guard type for arena.
type Guard<'s>;

/// Creates handle from condition without increasing reference count.
fn unforget<C: Fn(&Self::Data) -> bool>(&self, c: C) -> Option<Self::Handle>;

/// Find or alloc.
fn find_or_alloc<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>(
&self,
Expand Down Expand Up @@ -122,21 +119,6 @@ impl<T: 'static + ArenaObject, const CAPACITY: usize> Arena for Spinlock<ArrayAr
type Handle = ArrayPtr<T>;
type Guard<'s> = SpinlockGuard<'s, ArrayArena<T, CAPACITY>>;

/// Returns a handle to the first live entry (`refcnt != 0`) whose data
/// satisfies `c`, without increasing the reference count.
fn unforget<C: Fn(&Self::Data) -> bool>(&self, c: C) -> Option<Self::Handle> {
    let mut guard = self.lock();
    guard
        .entries
        .iter_mut()
        .find(|entry| entry.refcnt != 0 && c(&entry.data))
        .map(|entry| Self::Handle {
            ptr: entry,
            _marker: PhantomData,
        })
}

fn find_or_alloc<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>(
&self,
c: C,
Expand Down Expand Up @@ -283,29 +265,6 @@ impl<T: 'static + ArenaObject, const CAPACITY: usize> Arena for Spinlock<MruAren
type Handle = MruPtr<T>;
type Guard<'s> = SpinlockGuard<'s, MruArena<T, CAPACITY>>;

/// Returns a handle to a cached entry whose data satisfies `c`, WITHOUT
/// increasing its reference count (hence "unforget").
fn unforget<C: Fn(&Self::Data) -> bool>(&self, c: C) -> Option<Self::Handle> {
    let this = self.lock();

    // Is the block already cached? Walk the intrusive MRU list starting at
    // the sentinel `head`; stop once the cursor wraps back around to it.
    let mut list_entry = this.head.next();
    while list_entry as *const _ != &this.head as *const _ {
        // SAFETY(review): recovers the containing MruEntry<T> from its
        // embedded list link by subtracting LIST_ENTRY_OFFSET — sound only
        // while every non-head link is embedded in an MruEntry of this
        // arena at that fixed offset. TODO confirm against MruEntry layout.
        let entry = unsafe {
            &mut *((list_entry as *const _ as usize - Self::LIST_ENTRY_OFFSET)
                as *mut MruEntry<T>)
        };
        if c(&entry.data) {
            // A matching cached entry must already be referenced elsewhere.
            debug_assert!(entry.refcnt != 0);
            return Some(Self::Handle {
                ptr: entry,
                _marker: PhantomData,
            });
        }
        list_entry = list_entry.next();
    }

    None
}

fn find_or_alloc<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>(
&self,
c: C,
Expand Down Expand Up @@ -453,12 +412,6 @@ impl<A: Arena, T: Clone + Deref<Target = A>> Arena for T {
type Handle = Rc<A, T>;
type Guard<'s> = <A as Arena>::Guard<'s>;

/// Delegates to the underlying arena's `unforget`, wrapping the resulting
/// handle (if any) together with a clone of this tag.
fn unforget<C: Fn(&Self::Data) -> bool>(&self, c: C) -> Option<Self::Handle> {
    let tag = self.clone();
    let handle = tag.deref().unforget(c)?;
    Some(Self::Handle {
        tag,
        inner: ManuallyDrop::new(handle),
    })
}

fn find_or_alloc<C: Fn(&Self::Data) -> bool, N: FnOnce(&mut Self::Data)>(
&self,
c: C,
Expand Down
70 changes: 30 additions & 40 deletions kernel-rs/src/bio.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ use crate::{
};

use array_macro::array;
use core::mem;
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, DerefMut};

pub struct BufEntry {
Expand Down Expand Up @@ -93,48 +93,51 @@ impl BufInner {

pub type Bcache = Spinlock<MruArena<BufEntry, NBUF>>;

/// We can consider it as BufEntry.
pub type BufUnlocked<'s> = Rc<Bcache, &'s Bcache>;

/// # Safety
///
/// (inner: BufEntry).inner is locked.
pub struct Buf<'s> {
inner: BufUnlocked<'s>,
}

impl<'s> Deref for Buf<'s> {
type Target = BufUnlocked<'s>;

fn deref(&self) -> &Self::Target {
&self.inner
}
}

impl DerefMut for Buf<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
inner: ManuallyDrop<BufUnlocked<'s>>,
}

impl<'s> Buf<'s> {
/// Shared access to the inner `BufInner`.
pub fn deref_inner(&self) -> &BufInner {
    // It is safe because inner.inner is locked (Buf's type invariant).
    unsafe { self.inner.inner.get_mut_unchecked() }
}

/// Exclusive access to the inner `BufInner`.
pub fn deref_inner_mut(&mut self) -> &mut BufInner {
    // It is safe because inner.inner is locked and &mut self is exclusive.
    unsafe { self.inner.inner.get_mut_unchecked() }
}

pub fn unlock(self) -> BufUnlocked<'s> {
unsafe {
self.inner.inner.unlock();
mem::transmute(self)
}
/// Releases the lock and downgrades this `Buf` into a `BufUnlocked`,
/// consuming `self` without running its destructor.
pub fn unlock(mut self) -> BufUnlocked<'s> {
    // It is safe because this method consumes self and self.inner will not
    // be used again.
    let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
    // It is safe because this method consumes self.
    unsafe { inner.inner.unlock() };
    // Skip Buf's Drop impl, which would otherwise unlock a second time.
    mem::forget(self);
    inner
}
}

impl Deref for Buf<'_> {
    type Target = BufEntry;

    fn deref(&self) -> &Self::Target {
        // Coerces &ManuallyDrop<BufUnlocked> to &BufEntry via the Deref
        // chain ManuallyDrop -> BufUnlocked -> BufEntry.
        &self.inner
    }
}

impl Drop for Buf<'_> {
fn drop(&mut self) {
unsafe {
self.inner.inner.unlock();
}
// It is safe because self will be dropped and self.inner will not be
// used again.
unsafe { ManuallyDrop::take(&mut self.inner).inner.unlock() };
}
}

Expand All @@ -161,26 +164,13 @@ impl Bcache {

unsafe { Rc::from_unchecked(self, inner) }
}

/// Retrieves BufUnlocked without increasing reference count.
pub fn buf_unforget(&self, dev: u32, blockno: u32) -> Option<BufUnlocked<'_>> {
let inner = self.unforget(|buf| buf.dev == dev && buf.blockno == blockno)?;

Some(unsafe { Rc::from_unchecked(self, inner) })
}
}

impl<'s> BufUnlocked<'s> {
pub fn lock(self) -> Buf<'s> {
mem::forget(self.inner.lock());
Buf { inner: self }
}

pub fn deref_inner(&self) -> &BufInner {
unsafe { self.inner.get_mut_unchecked() }
}

pub fn deref_mut_inner(&mut self) -> &mut BufInner {
unsafe { self.inner.get_mut_unchecked() }
Buf {
inner: ManuallyDrop::new(self),
}
}
}
46 changes: 22 additions & 24 deletions kernel-rs/src/fs/inode.rs
Original file line number Diff line number Diff line change
Expand Up @@ -314,7 +314,7 @@ impl InodeGuard<'_> {
// * dip is inside bp.data.
// * dip will not be read.
let dip = unsafe {
&mut *(bp.deref_mut_inner().data.as_mut_ptr() as *mut Dinode)
&mut *(bp.deref_inner_mut().data.as_mut_ptr() as *mut Dinode)
.add(self.inum as usize % IPB)
};

Expand Down Expand Up @@ -342,11 +342,11 @@ impl InodeGuard<'_> {
}
}

dip.nlink = inner.nlink;
dip.size = inner.size;
dip.addr_direct.copy_from_slice(&inner.addr_direct);
dip.addr_indirect = inner.addr_indirect;
unsafe { tx.write(bp) };
(*dip).nlink = inner.nlink;
(*dip).size = inner.size;
(*dip).addr_direct.copy_from_slice(&inner.addr_direct);
(*dip).addr_indirect = inner.addr_indirect;
tx.write(bp);
}

/// Truncate inode (discard contents).
Expand All @@ -355,7 +355,7 @@ impl InodeGuard<'_> {
let dev = self.dev;
for addr in &mut self.deref_inner_mut().addr_direct {
if *addr != 0 {
unsafe { tx.bfree(dev, *addr) };
tx.bfree(dev, *addr);
*addr = 0;
}
}
Expand All @@ -366,15 +366,15 @@ impl InodeGuard<'_> {
.disk
.read(dev, self.deref_inner().addr_indirect);
// It is safe because u32 does not have internal structure.
let (prefix, data, _) = unsafe { bp.deref_mut_inner().data.align_to_mut::<u32>() };
let (prefix, data, _) = unsafe { bp.deref_inner_mut().data.align_to_mut::<u32>() };
debug_assert_eq!(prefix.len(), 0, "itrunc: Buf data unaligned");
for a in data {
if *a != 0 {
unsafe { tx.bfree(dev, *a) };
tx.bfree(dev, *a);
}
}
drop(bp);
unsafe { tx.bfree(dev, self.deref_inner().addr_indirect) };
tx.bfree(dev, self.deref_inner().addr_indirect);
self.deref_inner_mut().addr_indirect = 0
}

Expand Down Expand Up @@ -449,14 +449,14 @@ impl InodeGuard<'_> {
}
let mut tot: u32 = 0;
while tot < n {
let mut bp = kernel()
let bp = kernel()
.file_system
.disk
.read(self.dev, self.bmap(off as usize / BSIZE));
let m = core::cmp::min(n - tot, BSIZE as u32 - off % BSIZE as u32);
let begin = (off % BSIZE as u32) as usize;
let end = begin + m as usize;
f(tot, &bp.deref_mut_inner().data[begin..end])?;
f(tot, &bp.deref_inner().data[begin..end])?;
tot += m;
off += m;
}
Expand Down Expand Up @@ -563,12 +563,10 @@ impl InodeGuard<'_> {
let m = core::cmp::min(n - tot, BSIZE as u32 - off % BSIZE as u32);
let begin = (off % BSIZE as u32) as usize;
let end = begin + m as usize;
if f(tot, &mut bp.deref_mut_inner().data[begin..end]).is_err() {
if f(tot, &mut bp.deref_inner_mut().data[begin..end]).is_err() {
break;
}
unsafe {
tx.write(bp);
}
tx.write(bp);
tot += m;
off += m;
}
Expand Down Expand Up @@ -606,7 +604,7 @@ impl InodeGuard<'_> {
if bn < NDIRECT {
let mut addr = inner.addr_direct[bn];
if addr == 0 {
addr = unsafe { tx_opt.expect("bmap: out of range").balloc(self.dev) };
addr = tx_opt.expect("bmap: out of range").balloc(self.dev);
self.deref_inner_mut().addr_direct[bn] = addr;
}
addr
Expand All @@ -616,19 +614,19 @@ impl InodeGuard<'_> {

let mut indirect = inner.addr_indirect;
if indirect == 0 {
indirect = unsafe { tx_opt.expect("bmap: out of range").balloc(self.dev) };
indirect = tx_opt.expect("bmap: out of range").balloc(self.dev);
self.deref_inner_mut().addr_indirect = indirect;
}

let mut bp = kernel().file_system.disk.read(self.dev, indirect);
let (prefix, data, _) = unsafe { bp.deref_mut_inner().data.align_to_mut::<u32>() };
let (prefix, data, _) = unsafe { bp.deref_inner_mut().data.align_to_mut::<u32>() };
debug_assert_eq!(prefix.len(), 0, "bmap: Buf data unaligned");
let mut addr = data[bn];
if addr == 0 {
let tx = tx_opt.expect("bmap: out of range");
addr = unsafe { tx.balloc(self.dev) };
addr = tx.balloc(self.dev);
data[bn] = addr;
unsafe { tx.write(bp) };
tx.write(bp);
}
addr
}
Expand Down Expand Up @@ -706,7 +704,7 @@ impl Inode {

// It is safe because dip is inside bp.data.
let dip = unsafe {
(bp.deref_mut_inner().data.as_mut_ptr() as *mut Dinode)
(bp.deref_inner_mut().data.as_mut_ptr() as *mut Dinode)
.add(self.inum as usize % IPB)
};
// It is safe because i16 does not have internal structure.
Expand Down Expand Up @@ -809,7 +807,7 @@ impl Itable {
const_assert!(mem::align_of::<BufData>() % mem::align_of::<Dinode>() == 0);
// It is safe because dip is inside bp.data.
let dip = unsafe {
(bp.deref_mut_inner().data.as_mut_ptr() as *mut Dinode).add(inum as usize % IPB)
(bp.deref_inner_mut().data.as_mut_ptr() as *mut Dinode).add(inum as usize % IPB)
};
// It is safe because i16 does not have internal structure.
let t = unsafe { *(dip as *const i16) };
Expand All @@ -833,7 +831,7 @@ impl Itable {
}

// mark it allocated on the disk
unsafe { tx.write(bp) };
tx.write(bp);
return self.get_inode(dev, inum);
}
}
Expand Down
Loading

0 comments on commit 5a78547

Please sign in to comment.