Merge pull request from GHSA-9h2v-5p8c-4wrr
fix state exhaustion vulnerability
kazuho committed Dec 12, 2023
2 parents 43f86e5 + 4b2f102 commit db5d54a
Showing 1 changed file with 33 additions and 51 deletions.
lib/quicly.c (84 changes: 33 additions & 51 deletions)
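
Context for the diff below: before this patch, every received PATH_CHALLENGE frame was queued as a heap-allocated list node until the connection next had a chance to send, so a peer could inflate per-connection memory without bound; the patch replaces the queue with a single flag-plus-buffer slot that remembers only the most recent challenge. A minimal standalone model of the fixed scheme (illustrative names; only the `send_` and `data` fields mirror the diff):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DATA_LEN 8 /* PATH_CHALLENGE carries exactly 8 bytes (RFC 9000) */

struct path_response {
    uint8_t send_;          /* nonzero while a PATH_RESPONSE is owed */
    uint8_t data[DATA_LEN]; /* payload of the most recent challenge */
};

/* every incoming PATH_CHALLENGE overwrites the slot; nothing is allocated */
static void on_path_challenge(struct path_response *pr, const uint8_t *data)
{
    memcpy(pr->data, data, DATA_LEN);
    pr->send_ = 1;
}

int main(void)
{
    struct path_response pr = {0};
    on_path_challenge(&pr, (const uint8_t *)"AAAAAAAA");
    on_path_challenge(&pr, (const uint8_t *)"BBBBBBBB"); /* replaces the first */
    printf("pending=%d data=%.8s\n", pr.send_, (const char *)pr.data);
    return 0;
}

Whatever the peer does, per-connection state stays at one flag and eight bytes.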
@@ -109,12 +109,6 @@ struct st_quicly_cipher_context_t {
     ptls_cipher_context_t *header_protection;
 };
 
-struct st_quicly_pending_path_challenge_t {
-    struct st_quicly_pending_path_challenge_t *next;
-    uint8_t is_response;
-    uint8_t data[QUICLY_PATH_CHALLENGE_DATA_LEN];
-};
-
 struct st_quicly_pn_space_t {
     /**
      * acks to be sent to remote peer
@@ -279,8 +273,9 @@ struct st_quicly_conn_t {
          *
          */
         struct {
-            struct st_quicly_pending_path_challenge_t *head, **tail_ref;
-        } path_challenge;
+            uint8_t send_;
+            uint8_t data[QUICLY_PATH_CHALLENGE_DATA_LEN];
+        } path_response;
         /**
          *
          */
@@ -936,25 +931,6 @@ void quicly_stream_sync_recvbuf(quicly_stream_t *stream, size_t shift_amount)
     }
 }
 
-static int schedule_path_challenge_frame(quicly_conn_t *conn, int is_response, const uint8_t *data)
-{
-    struct st_quicly_pending_path_challenge_t *pending;
-
-    if ((pending = malloc(sizeof(struct st_quicly_pending_path_challenge_t))) == NULL)
-        return PTLS_ERROR_NO_MEMORY;
-
-    pending->next = NULL;
-    pending->is_response = is_response;
-    memcpy(pending->data, data, QUICLY_PATH_CHALLENGE_DATA_LEN);
-
-    *conn->egress.path_challenge.tail_ref = pending;
-    conn->egress.path_challenge.tail_ref = &pending->next;
-
-    conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
-
-    return 0;
-}
-
 /**
  * calculate how many CIDs we provide to the remote peer
  */
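
The deleted helper above is where the unbounded state came from: each call made a fresh malloc and appended to a list that was drained only when the connection next sent, so challenges arriving while the connection could not (or did not) send accumulated indefinitely. A standalone sketch of that failure mode (hypothetical model, not quicly code):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DATA_LEN 8

struct pending {
    struct pending *next;
    uint8_t data[DATA_LEN];
};

int main(void)
{
    struct pending *head = NULL, **tail_ref = &head;
    size_t total = 0;

    /* a peer floods PATH_CHALLENGE frames faster than the queue drains */
    for (size_t i = 0; i < 1000000; ++i) {
        struct pending *p = malloc(sizeof *p);
        if (p == NULL)
            abort();
        p->next = NULL;
        memset(p->data, 0xAA, DATA_LEN);
        *tail_ref = p;      /* append, exactly like the removed code */
        tail_ref = &p->next;
        total += sizeof *p;
    }
    printf("one connection now pins >= %zu bytes\n", total);

    while (head != NULL) {  /* cleanup */
        struct pending *p = head;
        head = p->next;
        free(p);
    }
    return 0;
}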
@@ -1699,11 +1675,6 @@ void quicly_free(quicly_conn_t *conn)
     quicly_maxsender_dispose(&conn->ingress.max_data.sender);
     quicly_maxsender_dispose(&conn->ingress.max_streams.uni);
     quicly_maxsender_dispose(&conn->ingress.max_streams.bidi);
-    while (conn->egress.path_challenge.head != NULL) {
-        struct st_quicly_pending_path_challenge_t *pending = conn->egress.path_challenge.head;
-        conn->egress.path_challenge.head = pending->next;
-        free(pending);
-    }
     quicly_loss_dispose(&conn->egress.loss);
 
     kh_destroy(quicly_stream_t, conn->streams);
@@ -2248,7 +2219,6 @@ static quicly_conn_t *create_connection(quicly_context_t *ctx, uint32_t protocol
     conn->egress.max_udp_payload_size = conn->super.ctx->initial_egress_max_udp_payload_size;
     init_max_streams(&conn->egress.max_streams.uni);
     init_max_streams(&conn->egress.max_streams.bidi);
-    conn->egress.path_challenge.tail_ref = &conn->egress.path_challenge.head;
     conn->egress.ack_frequency.update_at = INT64_MAX;
     conn->egress.send_ack_at = INT64_MAX;
     conn->super.ctx->init_cc->cb(conn->super.ctx->init_cc, &conn->egress.cc, initcwnd, conn->stash.now);
@@ -4487,6 +4457,25 @@ static int send_retire_connection_id(quicly_conn_t *conn, quicly_send_context_t
     return 0;
 }
 
+static int send_path_challenge(quicly_conn_t *conn, quicly_send_context_t *s, int is_response, const uint8_t *data)
+{
+    int ret;
+
+    if ((ret = do_allocate_frame(conn, s, QUICLY_PATH_CHALLENGE_FRAME_CAPACITY, ALLOCATE_FRAME_TYPE_ACK_ELICITING_NO_CC)) != 0)
+        return ret;
+
+    s->dst = quicly_encode_path_challenge_frame(s->dst, is_response, data);
+    s->target.full_size = 1; /* ensure that the path can transfer full-size packets */
+
+    if (!is_response) {
+        ++conn->super.stats.num_frames_sent.path_challenge;
+    } else {
+        ++conn->super.stats.num_frames_sent.path_response;
+    }
+
+    return 0;
+}
+
 static int update_traffic_key_cb(ptls_update_traffic_key_t *self, ptls_t *tls, int is_enc, size_t epoch, const void *secret)
 {
     quicly_conn_t *conn = *ptls_get_data_ptr(tls);
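
The new helper emits both frame directions through quicly_encode_path_challenge_frame and marks the packet full-size, since datagrams carrying path-validation frames must be expanded to confirm the path can carry full-size packets. The wire format involved is tiny; the sketch below is an assumption of what such an encoder does, based on RFC 9000 (PATH_CHALLENGE is frame type 0x1a, PATH_RESPONSE is 0x1b, each followed by 8 bytes of opaque data), not quicly's actual implementation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PATH_CHALLENGE_DATA_LEN 8

/* frame type octets from RFC 9000, Sections 19.17 and 19.18 */
#define FRAME_TYPE_PATH_CHALLENGE 0x1a
#define FRAME_TYPE_PATH_RESPONSE 0x1b

static uint8_t *encode_path_challenge(uint8_t *dst, int is_response, const uint8_t *data)
{
    *dst++ = is_response ? FRAME_TYPE_PATH_RESPONSE : FRAME_TYPE_PATH_CHALLENGE;
    memcpy(dst, data, PATH_CHALLENGE_DATA_LEN);
    return dst + PATH_CHALLENGE_DATA_LEN;
}

int main(void)
{
    uint8_t buf[1 + PATH_CHALLENGE_DATA_LEN];
    const uint8_t challenge[PATH_CHALLENGE_DATA_LEN] = {1, 2, 3, 4, 5, 6, 7, 8};
    uint8_t *end = encode_path_challenge(buf, 1, challenge);
    printf("encoded %zu bytes, type 0x%02x\n", (size_t)(end - buf), buf[0]);
    return 0;
}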
@@ -4592,23 +4581,10 @@ static int send_other_control_frames(quicly_conn_t *conn, quicly_send_context_t
     int ret;
 
     /* respond to all pending received PATH_CHALLENGE frames */
-    if (conn->egress.path_challenge.head != NULL) {
-        do {
-            struct st_quicly_pending_path_challenge_t *c = conn->egress.path_challenge.head;
-            if ((ret = do_allocate_frame(conn, s, QUICLY_PATH_CHALLENGE_FRAME_CAPACITY, ALLOCATE_FRAME_TYPE_NON_ACK_ELICITING)) !=
-                0)
-                return ret;
-            s->dst = quicly_encode_path_challenge_frame(s->dst, c->is_response, c->data);
-            if (c->is_response) {
-                ++conn->super.stats.num_frames_sent.path_response;
-            } else {
-                ++conn->super.stats.num_frames_sent.path_challenge;
-            }
-            conn->egress.path_challenge.head = c->next;
-            free(c);
-        } while (conn->egress.path_challenge.head != NULL);
-        conn->egress.path_challenge.tail_ref = &conn->egress.path_challenge.head;
-        s->target.full_size = 1; /* datagrams carrying PATH_CHALLENGE / PATH_RESPONSE have to be full-sized */
+    if (conn->egress.path_response.send_) {
+        if ((ret = send_path_challenge(conn, s, 1, conn->egress.path_response.data)) != 0)
+            return ret;
+        conn->egress.path_response.send_ = 0;
     }
 
     /* MAX_STREAMS */
@@ -5552,7 +5528,13 @@ static int handle_path_challenge_frame(quicly_conn_t *conn, struct st_quicly_han
 
     if ((ret = quicly_decode_path_challenge_frame(&state->src, state->end, &frame)) != 0)
         return ret;
-    return schedule_path_challenge_frame(conn, 1, frame.data);
+
+    /* schedule the emission of PATH_RESPONSE frame */
+    memcpy(conn->egress.path_response.data, frame.data, QUICLY_PATH_CHALLENGE_DATA_LEN);
+    conn->egress.path_response.send_ = 1;
+    conn->egress.pending_flows |= QUICLY_PENDING_FLOW_OTHERS_BIT;
+
+    return 0;
 }
 
 static int handle_path_response_frame(quicly_conn_t *conn, struct st_quicly_handle_payload_state_t *state)
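
Note that the rewritten handler sends nothing itself: it records the payload, raises send_, and sets QUICLY_PENDING_FLOW_OTHERS_BIT so that send_other_control_frames picks the response up on the next send opportunity. A minimal sketch of that receive-records/send-drains pattern, with hypothetical names (PENDING_OTHERS stands in for quicly's flag bit):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DATA_LEN 8
#define PENDING_OTHERS (1u << 0) /* stand-in for QUICLY_PENDING_FLOW_OTHERS_BIT */

struct conn {
    uint32_t pending_flows;
    struct {
        uint8_t send_;
        uint8_t data[DATA_LEN];
    } path_response;
};

/* receive path: record the challenge and flag that the send path has work */
static void handle_path_challenge(struct conn *c, const uint8_t *data)
{
    memcpy(c->path_response.data, data, DATA_LEN);
    c->path_response.send_ = 1;
    c->pending_flows |= PENDING_OTHERS;
}

/* send path: emit at most one response, then clear the state */
static void send_other_control(struct conn *c)
{
    if ((c->pending_flows & PENDING_OTHERS) != 0 && c->path_response.send_) {
        printf("PATH_RESPONSE %.8s\n", (const char *)c->path_response.data);
        c->path_response.send_ = 0;
    }
    c->pending_flows &= ~PENDING_OTHERS; /* simplification: the real bit covers other frame types too */
}

int main(void)
{
    struct conn c = {0};
    handle_path_challenge(&c, (const uint8_t *)"12345678");
    send_other_control(&c); /* prints: PATH_RESPONSE 12345678 */
    return 0;
}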