add jumpstart to cubic
kazuho committed Nov 6, 2023
1 parent 0e54658 commit 9b23a60
Showing 3 changed files with 37 additions and 10 deletions.
include/quicly/cc.h: 3 changes (2 additions, 1 deletion)
@@ -313,7 +313,8 @@ inline void quicly_cc_jumpstart_on_first_loss(quicly_cc_t *cc, uint64_t lost_pn,
cc->cwnd = cc->cwnd_initial;
if (cc->jumpstart.exit_pn == UINT64_MAX)
cc->jumpstart.exit_pn = lost_pn;
- *beta = 1; /* jumpstart makes accurate guess of CWND - there is no need to reduce CWND */
+ if (beta != NULL)
+     *beta = 1; /* jumpstart makes accurate guess of CWND - there is no need to reduce CWND */
}
}
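
With the NULL check in place, callers that do not use a caller-supplied multiplicative-decrease factor can simply pass NULL. Below is a minimal caller sketch, illustrative only and not part of this commit; it mirrors the call sites in lib/cc-pico.c and lib/cc-cubic.c later in this diff, and assumes quicly/cc.h declares QUICLY_RENO_BETA and the quicly_cc_t fields used here.

#include "quicly/cc.h"

/* illustrative sketch: the two ways the helper above is invoked in this commit */
static void first_loss_sketch(quicly_cc_t *cc, uint64_t lost_pn)
{
    /* pico-style: pass &beta so that jumpstart may override the reduction factor with 1 */
    double beta = QUICLY_RENO_BETA;
    if (cc->ssthresh == UINT32_MAX)
        quicly_cc_jumpstart_on_first_loss(cc, lost_pn, &beta);
    cc->cwnd *= beta;

    /* cubic-style: pass NULL; only CWND is restored and the jumpstart exit packet recorded */
    if (cc->ssthresh == UINT32_MAX)
        quicly_cc_jumpstart_on_first_loss(cc, lost_pn, NULL);
}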

lib/cc-cubic.c: 19 changes (15 additions, 4 deletions)
@@ -65,9 +65,13 @@ static void cubic_on_acked(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t
uint64_t next_pn, int64_t now, uint32_t max_udp_payload_size)
{
assert(inflight >= bytes);
- /* Do not increase congestion window while in recovery. */
- if (largest_acked < cc->recovery_end)
+ /* Do not increase congestion window while in recovery (but jumpstart may do something different). */
+ if (largest_acked < cc->recovery_end) {
+     quicly_cc_jumpstart_on_acked(cc, 1, bytes, largest_acked, inflight, next_pn);
return;
+ }

+ quicly_cc_jumpstart_on_acked(cc, 0, bytes, largest_acked, inflight, next_pn);
+
/* Slow start. */
if (cc->cwnd < cc->ssthresh) {
@@ -114,6 +118,10 @@ static void cubic_on_lost(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t b
return;
cc->recovery_end = next_pn;

+ /* if loss was detected before all acks for jumpstart were received, restore the original CWND */
+ if (cc->ssthresh == UINT32_MAX)
+     quicly_cc_jumpstart_on_first_loss(cc, lost_pn, NULL /* do we want to adopt beta == 1 as other CCs do? */);
+
++cc->num_loss_episodes;
if (cc->cwnd_exiting_slow_start == 0)
cc->cwnd_exiting_slow_start = cc->cwnd;
@@ -168,6 +176,8 @@ static void cubic_reset(quicly_cc_t *cc, uint32_t initcwnd)
cc->cwnd = cc->cwnd_initial = cc->cwnd_maximum = initcwnd;
cc->ssthresh = cc->cwnd_minimum = UINT32_MAX;
cc->pacer_multiplier = QUICLY_PACER_CALC_MULTIPLIER(2);
+
+ quicly_cc_jumpstart_reset(cc);
}

static int cubic_on_switch(quicly_cc_t *cc)
@@ -193,6 +203,7 @@ static void cubic_init(quicly_init_cc_t *self, quicly_cc_t *cc, uint32_t initcwn
cubic_reset(cc, initcwnd);
}

- quicly_cc_type_t quicly_cc_type_cubic = {
-     "cubic", &quicly_cc_cubic_init, cubic_on_acked, cubic_on_lost, cubic_on_persistent_congestion, cubic_on_sent, cubic_on_switch};
+ quicly_cc_type_t quicly_cc_type_cubic = {"cubic", &quicly_cc_cubic_init, cubic_on_acked,
+     cubic_on_lost, cubic_on_persistent_congestion, cubic_on_sent,
+     cubic_on_switch, quicly_cc_jumpstart_enter};
quicly_init_cc_t quicly_cc_cubic_init = {cubic_init};
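
Both cubic and pico now share the same ACK-path shape: an ACK that falls inside the current recovery episode is reported to jumpstart (first flag set to 1) and the window is otherwise left alone, while an ACK outside recovery is reported with 0 before the controller's normal growth logic runs. The sketch below condenses that shared pattern; the helper's declaration is not shown in this diff, so its exact prototype and the meaning of the flag are inferred from these call sites.

#include "quicly/cc.h"

/* illustrative sketch of the shared on_acked dispatch added to cubic and pico */
static void on_acked_dispatch_sketch(quicly_cc_t *cc, uint32_t bytes, uint64_t largest_acked,
                                     uint32_t inflight, uint64_t next_pn)
{
    if (largest_acked < cc->recovery_end) {
        /* in recovery: let jumpstart observe the ACK, but do not grow CWND */
        quicly_cc_jumpstart_on_acked(cc, 1, bytes, largest_acked, inflight, next_pn);
        return;
    }
    /* not in recovery: notify jumpstart, then fall through to the controller's own growth logic */
    quicly_cc_jumpstart_on_acked(cc, 0, bytes, largest_acked, inflight, next_pn);
    /* ... slow start / congestion avoidance of the specific controller would follow here ... */
}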
lib/cc-pico.c: 25 changes (20 additions, 5 deletions)
@@ -66,9 +66,13 @@ static void pico_on_acked(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t b
{
assert(inflight >= bytes);

- /* Do not increase congestion window while in recovery. */
- if (largest_acked < cc->recovery_end)
+ /* Do not increase congestion window while in recovery (but jumpstart may do something different). */
+ if (largest_acked < cc->recovery_end) {
+     quicly_cc_jumpstart_on_acked(cc, 1, bytes, largest_acked, inflight, next_pn);
return;
+ }

+ quicly_cc_jumpstart_on_acked(cc, 0, bytes, largest_acked, inflight, next_pn);
+
cc->state.pico.stash += bytes;

@@ -96,6 +100,10 @@ static void pico_on_acked(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t b
static void pico_on_lost(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t bytes, uint64_t lost_pn, uint64_t next_pn,
int64_t now, uint32_t max_udp_payload_size)
{
+ /* when exiting slow start, use the inverse of the exponential growth ratio, as loss is detected 1 RTT later, at which point
+  * CWND has overshot by as much as the growth ratio */
+ double beta = cc->ssthresh == UINT32_MAX ? 0.5 : QUICLY_RENO_BETA;
+
quicly_cc__update_ecn_episodes(cc, bytes, lost_pn);

/* Nothing to do if loss is in recovery window. */
@@ -106,6 +114,10 @@ static void pico_on_lost(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t by
/* switch pacer to congestion avoidance mode the moment loss is observed */
cc->pacer_multiplier = QUICLY_PACER_CALC_MULTIPLIER(1.2);

+ /* if loss was detected before all acks for jumpstart were received, restore the original CWND */
+ if (cc->ssthresh == UINT32_MAX)
+     quicly_cc_jumpstart_on_first_loss(cc, lost_pn, &beta);
+
++cc->num_loss_episodes;
if (cc->cwnd_exiting_slow_start == 0)
cc->cwnd_exiting_slow_start = cc->cwnd;
@@ -114,7 +126,7 @@ static void pico_on_lost(quicly_cc_t *cc, const quicly_loss_t *loss, uint32_t by
cc->state.pico.bytes_per_mtu_increase = calc_bytes_per_mtu_increase(cc->cwnd, loss->rtt.smoothed, max_udp_payload_size);

/* Reduce congestion window. */
- cc->cwnd *= QUICLY_RENO_BETA;
+ cc->cwnd *= beta;
if (cc->cwnd < QUICLY_MIN_CWND * max_udp_payload_size)
cc->cwnd = QUICLY_MIN_CWND * max_udp_payload_size;
cc->ssthresh = cc->cwnd;
@@ -151,6 +163,8 @@ static void pico_reset(quicly_cc_t *cc, uint32_t initcwnd)
.pacer_multiplier = QUICLY_PACER_CALC_MULTIPLIER(2),
};
pico_init_pico_state(cc, 0);
+
+ quicly_cc_jumpstart_reset(cc);
}

static int pico_on_switch(quicly_cc_t *cc)
@@ -180,6 +194,7 @@ static void pico_init(quicly_init_cc_t *self, quicly_cc_t *cc, uint32_t initcwnd
pico_reset(cc, initcwnd);
}

- quicly_cc_type_t quicly_cc_type_pico = {
-     "pico", &quicly_cc_pico_init, pico_on_acked, pico_on_lost, pico_on_persistent_congestion, pico_on_sent, pico_on_switch};
+ quicly_cc_type_t quicly_cc_type_pico = {"pico", &quicly_cc_pico_init, pico_on_acked,
+     pico_on_lost, pico_on_persistent_congestion, pico_on_sent,
+     pico_on_switch, quicly_cc_jumpstart_enter};
quicly_init_cc_t quicly_cc_pico_init = {pico_init};
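
One way to read pico's new beta computation: while still in slow start (cc->ssthresh == UINT32_MAX), CWND keeps growing exponentially for the roughly one RTT it takes to detect a loss, so by detection time it has overshot by about the growth ratio (2x); multiplying by 0.5, the inverse of that ratio, brings it back to roughly its value when the lost packet was sent. As a purely illustrative example, a flow whose CWND was about 400 packets when the dropped packet left the sender may have doubled to about 800 by the time the loss is reported, and 800 * 0.5 lands back near 400. After slow start the usual QUICLY_RENO_BETA reduction applies, and when the loss ends a jumpstart episode, quicly_cc_jumpstart_on_first_loss may push beta back to 1, since the CWND it restores is already treated as an accurate estimate.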
