net: convert sk_buff_fclones.fclone_ref from atomic_t to refcount_t
The refcount_t type and its corresponding API should be used
instead of atomic_t when the variable serves as a reference
counter. This avoids accidental refcounter overflows that could
lead to use-after-free situations.

Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Hans Liljestrand <ishkamiel@gmail.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: David Windsor <dwindsor@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
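
For context, the refcount_t API (include/linux/refcount.h) mirrors the atomic_t calls being replaced, but it saturates instead of wrapping on overflow and warns when it detects misuse. A minimal sketch of the pattern, using a hypothetical struct obj rather than anything from this commit:

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t ref;		/* replaces atomic_t ref */
};

static struct obj *obj_get(struct obj *o)
{
	refcount_inc(&o->ref);	/* saturates and WARNs rather than wrapping to 0 */
	return o;
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->ref))	/* true only for the last reference */
		kfree(o);
}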
Authored by ereshetova, committed by davem330 on Jul 1, 2017
1 parent 6335479 · commit 2638595
Showing 2 changed files with 7 additions and 7 deletions.
4 changes: 2 additions & 2 deletions include/linux/skbuff.h
@@ -915,7 +915,7 @@ struct sk_buff_fclones {

struct sk_buff skb2;

-	atomic_t fclone_ref;
+	refcount_t fclone_ref;
};

/**
@@ -935,7 +935,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
fclones = container_of(skb, struct sk_buff_fclones, skb1);

return skb->fclone == SKB_FCLONE_ORIG &&
-	       atomic_read(&fclones->fclone_ref) > 1 &&
+	       refcount_read(&fclones->fclone_ref) > 1 &&
fclones->skb2.sk == sk;
}

10 changes: 5 additions & 5 deletions net/core/skbuff.c
@@ -268,7 +268,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,

kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
skb->fclone = SKB_FCLONE_ORIG;
-		atomic_set(&fclones->fclone_ref, 1);
+		refcount_set(&fclones->fclone_ref, 1);

fclones->skb2.fclone = SKB_FCLONE_CLONE;
}
@@ -629,15 +629,15 @@ static void kfree_skbmem(struct sk_buff *skb)
* This test would have no chance to be true for the clone,
* while here, branch prediction will be good.
*/
-		if (atomic_read(&fclones->fclone_ref) == 1)
+		if (refcount_read(&fclones->fclone_ref) == 1)
goto fastpath;
break;

default: /* SKB_FCLONE_CLONE */
fclones = container_of(skb, struct sk_buff_fclones, skb2);
break;
}
-	if (!atomic_dec_and_test(&fclones->fclone_ref))
+	if (!refcount_dec_and_test(&fclones->fclone_ref))
return;
fastpath:
kmem_cache_free(skbuff_fclone_cache, fclones);
@@ -1027,9 +1027,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
return NULL;

if (skb->fclone == SKB_FCLONE_ORIG &&
-	    atomic_read(&fclones->fclone_ref) == 1) {
+	    refcount_read(&fclones->fclone_ref) == 1) {
n = &fclones->skb2;
-		atomic_set(&fclones->fclone_ref, 2);
+		refcount_set(&fclones->fclone_ref, 2);
} else {
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
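Taken together, the three net/core/skbuff.c hunks cover the whole life of the shared counter. A condensed, non-literal summary of the flow after this commit, using identifiers from the diff above:

/* __alloc_skb(): the fclone pair starts with one reference (the original skb) */
refcount_set(&fclones->fclone_ref, 1);

/* skb_clone(): if skb2 is unused, hand it out; two skbs now share the pair */
if (skb->fclone == SKB_FCLONE_ORIG &&
    refcount_read(&fclones->fclone_ref) == 1) {
	n = &fclones->skb2;
	refcount_set(&fclones->fclone_ref, 2);
}

/* kfree_skbmem(): free the pair only when the last reference drops */
if (refcount_dec_and_test(&fclones->fclone_ref))
	kmem_cache_free(skbuff_fclone_cache, fclones);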
