Handle escaped braces in f-strings
To use a literal curly brace in an f-string, you must escape it by doubling it. For example:

  >>> k = 1
  >>> f'{{{k}'
  '{1'

Saving this as a script and running it through the 'tokenize' module
highlights something odd about the token offsets:

  ❯ python -m tokenize wow.py
  0,0-0,0:            ENCODING       'utf-8'
  1,0-1,1:            NAME           'k'
  1,2-1,3:            OP             '='
  1,4-1,5:            NUMBER         '1'
  1,5-1,6:            NEWLINE        '\n'
  2,0-2,2:            FSTRING_START  "f'"
  2,2-2,3:            FSTRING_MIDDLE '{'     # <-- here...
  2,4-2,5:            OP             '{'     # <-- and here
  2,5-2,6:            NAME           'k'
  2,6-2,7:            OP             '}'
  2,7-2,8:            FSTRING_END    "'"
  2,8-2,9:            NEWLINE        '\n'
  3,0-3,0:            ENDMARKER      ''

The FSTRING_MIDDLE token we get holds the escaped/post-parse single
curly brace rather than the raw double curly brace. However, while the
end index of this token accounts for the parsed form, the start index
of the next token does not (put another way, the column jumps from 3 to
4). This gap triggers some existing, unrelated code that we need to
bypass. Do just that.

Signed-off-by: Stephen Finucane <stephen@that.guru>
Closes: #1948
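
For illustration (not part of the original commit message), here is a minimal sketch of the offset gap described above, assuming Python 3.12+, where f-strings are tokenized into FSTRING_START/FSTRING_MIDDLE/FSTRING_END tokens:

  import io
  import tokenize

  source = "k = 1\nf'{{{k}'\n"
  tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
  fstring_middle = getattr(tokenize, "FSTRING_MIDDLE", None)
  for prev, cur in zip(tokens, tokens[1:]):
      if prev.type == fstring_middle:
          # prev.string is the unescaped '{' and prev.end is (2, 3), but the
          # next token starts at (2, 4): the raw '{{' was two characters wide,
          # so one column is unaccounted for.
          print(prev.end, "->", cur.start, repr(prev.string))

On 3.12+ this prints "(2, 3) -> (2, 4) '{'"; on earlier versions the whole f-string is a single STRING token and nothing is printed.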
stephenfin authored and asottile committed Aug 4, 2024
1 parent 2a811cc commit bdcd5c2
Showing 2 changed files with 42 additions and 1 deletion.
8 changes: 7 additions & 1 deletion src/flake8/processor.py
@@ -203,7 +203,13 @@ def build_logical_line_tokens(self) -> _Logical:  # noqa: C901
             if token_type == tokenize.STRING:
                 text = mutate_string(text)
             elif token_type == FSTRING_MIDDLE:  # pragma: >=3.12 cover
-                text = "x" * len(text)
+                # A curly brace in an FSTRING_MIDDLE token must be an escaped
+                # curly brace. Both 'text' and 'end' will account for the
+                # escaped version of the token (i.e. a single brace) rather
+                # than the raw double brace version, so we must counteract this
+                brace_offset = text.count("{") + text.count("}")
+                text = "x" * (len(text) + brace_offset)
+                end = (end[0], end[1] + brace_offset)
             if previous_row:
                 (start_row, start_column) = start
                 if previous_row != start_row:
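
For context, a self-contained sketch of what the adjustment above does (an illustration only, not flake8's actual API; the helper name is hypothetical):

  def redact_fstring_middle(text, end):
      # 'text' is the already-unescaped FSTRING_MIDDLE text ('{' for a raw
      # '{{'), so widen both the redacted placeholder and the end column to
      # line up with the raw source again.
      brace_offset = text.count("{") + text.count("}")
      return "x" * (len(text) + brace_offset), (end[0], end[1] + brace_offset)

  # The FSTRING_MIDDLE '{' ending at column 3 becomes 'xx' ending at
  # column 4, matching the two raw characters '{{' in the source line.
  assert redact_fstring_middle("{", (2, 3)) == ("xx", (2, 4))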
35 changes: 35 additions & 0 deletions tests/integration/test_plugins.py
@@ -1,6 +1,8 @@
 """Integration tests for plugin loading."""
 from __future__ import annotations
 
+import sys
+
 import pytest
 
 from flake8.main.cli import main
@@ -261,3 +263,36 @@ def test_logical_line_plugin(tmpdir, capsys):
"""
out, err = capsys.readouterr()
assert out == expected


def test_escaping_of_fstrings_in_string_redacter(tmpdir, capsys):
cfg_s = f"""\
[flake8]
extend-ignore = F
[flake8:local-plugins]
extension =
T = {yields_logical_line.__module__}:{yields_logical_line.__name__}
"""

cfg = tmpdir.join("tox.ini")
cfg.write(cfg_s)

src = """\
f'{{"{hello}": "{world}"}}'
"""
t_py = tmpdir.join("t.py")
t_py.write_binary(src.encode())

with tmpdir.as_cwd():
assert main(("t.py", "--config", str(cfg))) == 1

if sys.version_info >= (3, 12): # pragma: >=3.12 cover
expected = """\
t.py:1:1: T001 "f'xxx{hello}xxxx{world}xxx'"
"""
else: # pragma: <3.12 cover
expected = """\
t.py:1:1: T001 "f'xxxxxxxxxxxxxxxxxxxxxxxx'"
"""
out, err = capsys.readouterr()
assert out == expected
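
The yields_logical_line plugin referenced in the config is defined earlier in this test module and is not part of this hunk; a plausible minimal version (an assumption, not the file's actual code) would simply report the logical line it receives:

  def yields_logical_line(logical_line):
      # Hypothetical stand-in: emit the (redacted) logical line so the test
      # can assert on it, producing output like the T001 lines above.
      yield 0, f"T001 {logical_line!r}"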
