Skip to content

Commit

Permalink
Fix refleaks
Browse files — browse the repository at this point in the history
  • Loading branch information
mgmacias95 committed May 20, 2023
1 parent f1a5090 commit 7fb58b0
Show file tree
Hide file tree
Showing 5 changed files with 32 additions and 9 deletions.
4 changes: 2 additions & 2 deletions Parser/pegen.c
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,7 @@ int
_PyPegen_fill_token(Parser *p)
{
struct token new_token;
new_token.metadata = NULL;
_PyToken_Init(&new_token);
int type = _PyTokenizer_Get(p->tok, &new_token);

// Record and skip '# type: ignore' comments
Expand Down Expand Up @@ -251,7 +251,7 @@ _PyPegen_fill_token(Parser *p)
Token *t = p->tokens[p->fill];
return initialize_token(p, t, &new_token, type);
error:
Py_XDECREF(new_token.metadata);
_PyToken_Free(&new_token);
return -1;
}

Expand Down
4 changes: 2 additions & 2 deletions Parser/pegen_errors.c
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ _PyPegen_tokenize_full_source_to_check_for_errors(Parser *p) {

int ret = 0;
struct token new_token;
new_token.metadata = NULL;
_PyToken_Init(&new_token);

for (;;) {
switch (_PyTokenizer_Get(p->tok, &new_token)) {
Expand Down Expand Up @@ -193,7 +193,7 @@ _PyPegen_tokenize_full_source_to_check_for_errors(Parser *p) {


exit:
Py_XDECREF(new_token.metadata);
_PyToken_Free(&new_token);
// If we're in an f-string, we want the syntax error in the expression part
// to propagate, so that tokenizer errors (like expecting '}') that happen afterwards
// do not swallow it.
Expand Down
15 changes: 15 additions & 0 deletions Parser/tokenizer.c
Original file line number Diff line number Diff line change
Expand Up @@ -982,6 +982,16 @@ _PyTokenizer_Free(struct tok_state *tok)
PyMem_Free(tok);
}

void
_PyToken_Free(struct token *token) {
Py_XDECREF(token->metadata);
}

/* Initialize *token so that _PyToken_Free() is safe to call on it.
 * Only the metadata field holds an owned reference, so clearing it is
 * sufficient; the other fields are assumed to be filled in by the
 * tokenizer before use (NOTE(review): not zeroed here — confirm callers
 * never read them before _PyTokenizer_Get populates them). */
void
_PyToken_Init(struct token *token) {
token->metadata = NULL;
}

static int
tok_readline_raw(struct tok_state *tok)
{
Expand Down Expand Up @@ -1973,6 +1983,7 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t

struct tok_state ahead_tok;
struct token ahead_token;
_PyToken_Init(&ahead_token);
int ahead_tok_kind;

memcpy(&ahead_tok, tok, sizeof(ahead_tok));
Expand All @@ -1988,8 +1999,10 @@ tok_get_normal_mode(struct tok_state *tok, tokenizer_mode* current_tok, struct t
returning a plain NAME token, return ASYNC. */
tok->async_def_indent = tok->indent;
tok->async_def = 1;
_PyToken_Free(&ahead_token);
return MAKE_TOKEN(ASYNC);
}
_PyToken_Free(&ahead_token);
}
}

Expand Down Expand Up @@ -2823,7 +2836,9 @@ _PyTokenizer_FindEncodingFilename(int fd, PyObject *filename)
// if fetching the encoding shows a warning.
tok->report_warnings = 0;
while (tok->lineno < 2 && tok->done == E_OK) {
_PyToken_Init(&token);
_PyTokenizer_Get(tok, &token);
_PyToken_Free(&token);
}
fclose(fp);
if (tok->encoding) {
Expand Down
2 changes: 2 additions & 0 deletions Parser/tokenizer.h
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,8 @@ extern struct tok_state *_PyTokenizer_FromUTF8(const char *, int);
extern struct tok_state *_PyTokenizer_FromFile(FILE *, const char*,
const char *, const char *);
extern void _PyTokenizer_Free(struct tok_state *);
extern void _PyToken_Free(struct token *);
extern void _PyToken_Init(struct token *);
extern int _PyTokenizer_Get(struct tok_state *, struct token *);

#define tok_dump _Py_tok_dump
Expand Down
16 changes: 11 additions & 5 deletions Python/Python-tokenize.c
Original file line number Diff line number Diff line change
Expand Up @@ -162,18 +162,21 @@ _tokenizer_error(struct tok_state *tok)
static PyObject *
tokenizeriter_next(tokenizeriterobject *it)
{
PyObject* result = NULL;
struct token token;
_PyToken_Init(&token);

int type = _PyTokenizer_Get(it->tok, &token);
if (type == ERRORTOKEN) {
if(!PyErr_Occurred()) {
_tokenizer_error(it->tok);
assert(PyErr_Occurred());
}
return NULL;
goto exit;
}
if (type == ERRORTOKEN || type == ENDMARKER) {
PyErr_SetString(PyExc_StopIteration, "EOF");
return NULL;
goto exit;
}
PyObject *str = NULL;
if (token.start == NULL || token.end == NULL) {
Expand All @@ -183,14 +186,14 @@ tokenizeriter_next(tokenizeriterobject *it)
str = PyUnicode_FromStringAndSize(token.start, token.end - token.start);
}
if (str == NULL) {
return NULL;
goto exit;
}

Py_ssize_t size = it->tok->inp - it->tok->buf;
PyObject *line = PyUnicode_DecodeUTF8(it->tok->buf, size, "replace");
if (line == NULL) {
Py_DECREF(str);
return NULL;
goto exit;
}
const char *line_start = ISSTRINGLIT(type) ? it->tok->multi_line_start : it->tok->line_start;
Py_ssize_t lineno = ISSTRINGLIT(type) ? it->tok->first_lineno : it->tok->lineno;
Expand All @@ -204,7 +207,10 @@ tokenizeriter_next(tokenizeriterobject *it)
end_col_offset = _PyPegen_byte_offset_to_character_offset(line, token.end - it->tok->line_start);
}

return Py_BuildValue("(NinnnnN)", str, type, lineno, end_lineno, col_offset, end_col_offset, line);
result = Py_BuildValue("(NinnnnN)", str, type, lineno, end_lineno, col_offset, end_col_offset, line);
exit:
_PyToken_Free(&token);
return result;
}

static void
Expand Down

0 comments on commit 7fb58b0

Please sign in to comment.