checkpoint

choppsv1 committed Sep 9, 2023
1 parent 4068fb2 commit 931ebd1
Showing 1 changed file with 52 additions and 14 deletions.

66 changes: 52 additions & 14 deletions net/xfrm/xfrm_iptfs.c
@@ -1798,6 +1798,26 @@ static struct sk_buff *iptfs_alloc_header_skb(void)
}
#endif

/**
 * iptfs_alloc_header_for_skb() - allocate an skb to hold the headers for a
 * cloned data skb
 * @data_skb: the data skb the new header skb will be paired with
 * @data_len: bytes of header data the new skb must be able to hold
 *
 * Return: the new skb, or NULL on allocation failure.
 */
static struct sk_buff *iptfs_alloc_header_for_skb(struct sk_buff *data_skb,
						  uint data_len)
{
	struct sk_buff *skb;
	uint resv = XFRM_IPTFS_MIN_HEADROOM;

	skb = alloc_skb(data_len + resv, GFP_ATOMIC);
	if (!skb) {
		/* skb is NULL on this path, so take the dev from data_skb */
		XFRM_INC_STATS(dev_net(data_skb->dev), LINUX_MIB_XFRMINERROR);
		pr_err_ratelimited("failed to alloc skb\n");
		return NULL;
	}
	pr_devinf("resv=%u data_len=%u skb %p\n", resv, data_len, skb);
	skb_reserve(skb, resv);
	return skb;
}

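A minimal usage sketch of how this helper might pair with a cloned data skb. The caller below (iptfs_share_with_header) and its hdr_len parameter are illustrative assumptions, not part of this patch; skb_put_data() is the standard kernel API.

/* Hypothetical caller: the payload would stay shared via a clone of
 * data_skb, while this fragment gets its own private header copy.
 */
static struct sk_buff *iptfs_share_with_header(struct sk_buff *data_skb,
					       uint hdr_len)
{
	struct sk_buff *head;

	head = iptfs_alloc_header_for_skb(data_skb, hdr_len);
	if (!head)
		return NULL;

	/* Copy the protocol headers into the private header skb */
	skb_put_data(head, data_skb->data, hdr_len);
	return head;
}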
static int iptfs_xfrm_output(struct sk_buff *skb, uint remaining)
{
int err;
@@ -1964,11 +1984,38 @@ static int iptfs_copy_create_frags(struct sk_buff **skbp,
return 0;
}

static bool iptfs_first_should_copy(struct sk_buff *first_skb, uint mtu)
{
uint frag_copy_max;

	/* If fewer than frag_copy_max bytes of the packet remain, copy
	 * those tail bytes: copying them is more efficient than cloning.
	 */
frag_copy_max = mtu <= IPTFS_FRAG_COPY_MAX ? mtu : IPTFS_FRAG_COPY_MAX;
if (first_skb->len - mtu < frag_copy_max)
return true;

	/* A linear skb should use our more efficient clone algorithm */
if (!skb_is_nonlinear(first_skb))
return false;

	/* Copy if the skb has fragment lists: these should be uncommon,
	 * and dealing with both lists and page fragments is not worth the
	 * complexity.
	 */
if (skb_shinfo(first_skb)->frag_list)
return true;

	/* At this point the skb has page frags (nr_frags) which we could
	 * split and share.
	 */

/* For now we also copy under all other conditions */
return true;
}
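As a worked example of the tail-copy threshold above (IPTFS_FRAG_COPY_MAX's actual value is defined elsewhere in this file; 256 is only an assumed placeholder): with mtu = 1400, frag_copy_max = min(1400, 256) = 256. A 1500-byte first skb leaves 1500 - 1400 = 100 tail bytes, and 100 < 256, so the function returns true and the tail is copied. A 4000-byte skb leaves 2600 tail bytes and instead falls through to the linearity and frag_list checks.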

static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs,
uint mtu)
{
struct sk_buff *skb = *skbp;
uint frag_copy_max;
int err;

/*
@@ -2004,20 +2051,9 @@ static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs,

BUG_ON(xtfs->cfg.dont_frag);

/* If we have less than frag_copy_max for remaining packet we copy
* those tail bytes as it is more efficient.
*
* Also if we have skb fragment lists, as these should be uncommon and
* dealing with lists and page fragments is not worth the complexity
* given that.
*/

frag_copy_max = mtu <= IPTFS_FRAG_COPY_MAX ? mtu : IPTFS_FRAG_COPY_MAX;
if (1 || (skb->len - mtu < frag_copy_max) ||
(skb_is_nonlinear(skb) && skb_shinfo(skb)->frag_list)) {
/* create frags with copies */
if (iptfs_first_should_copy(skb, mtu))
return iptfs_copy_create_frags(skbp, xtfs, mtu);
}

#if 0

/* A user packet has come in from an interface with larger
@@ -2045,6 +2081,7 @@ static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs,
 * testing; if this is a problem, try a really complex page-sharing
 * scheme if we have to. This is not a common code path, though.
 */
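/* The "really complex page sharing" alluded to above is left
 * unimplemented. As a rough illustration only (this snippet is
 * hypothetical and not part of this patch; __skb_frag_ref() and
 * skb_frag_size() are the standard kernel helpers), sharing one of
 * 'from's page frags with 'to' instead of copying the bytes would look
 * roughly like:
 *
 *	skb_frag_t *frag = &skb_shinfo(from)->frags[i];
 *
 *	__skb_frag_ref(frag);	// extra page ref so both skbs hold it
 *	skb_shinfo(to)->frags[skb_shinfo(to)->nr_frags++] = *frag;
 *	to->len += skb_frag_size(frag);
 *	to->data_len += skb_frag_size(frag);
 *	to->truesize += skb_frag_size(frag);
 */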
/* XXX we don't want to reach here with a non-linear skb for the clone path below */
if (skb_is_nonlinear(skb)) {
pr_info_once("LINEARIZE: skb len %u\n", skb->len);
err = __skb_linearize(skb);
@@ -2075,6 +2112,7 @@ static int iptfs_first_skb(struct sk_buff **skbp, struct xfrm_iptfs_data *xtfs,
head_skb->dev = skb->dev;
memcpy(head_skb->cb, skb->cb, sizeof(skb->cb));
skb_dst_copy(head_skb, skb);
/* XXX need to inc secpath refcnt */
__skb_ext_copy(head_skb, skb);
__nf_copy(head_skb, skb, false);
}
