Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use support.sleeping_retry() and support.busy_retry() #93848

Merged
merged 1 commit into from
Jun 15, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 7 additions & 7 deletions Lib/test/_test_eintr.py
Original file line number Diff line number Diff line change
Expand Up @@ -403,11 +403,9 @@ def check_sigwait(self, wait_func):
old_mask = signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
self.addCleanup(signal.pthread_sigmask, signal.SIG_UNBLOCK, [signum])

t0 = time.monotonic()
proc = self.subprocess(code)
with kill_on_error(proc):
wait_func(signum)
dt = time.monotonic() - t0
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch with the unused variables. I wonder why they are here though.

Perhaps the intention was to include something like self.assertGreaterEqual(dt, self.sleep_time) at the bottom of check_sigwait?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch with the unused variables. I wonder why they are here though.

Copy/paste mistake. I likely wrote this code a few years ago.


self.assertEqual(proc.wait(), 0)

Expand Down Expand Up @@ -497,16 +495,18 @@ def _lock(self, lock_func, lock_name):
proc = self.subprocess(code)
with kill_on_error(proc):
with open(os_helper.TESTFN, 'wb') as f:
while True: # synchronize the subprocess
dt = time.monotonic() - start_time
if dt > 60.0:
raise Exception("failed to sync child in %.1f sec" % dt)
# synchronize the subprocess
start_time = time.monotonic()
for _ in support.sleeping_retry(60.0, error=False):
try:
lock_func(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
lock_func(f, fcntl.LOCK_UN)
time.sleep(0.01)
except BlockingIOError:
break
else:
dt = time.monotonic() - start_time
raise Exception("failed to sync child in %.1f sec" % dt)

# the child locked the file just a moment ago for 'sleep_time' seconds
# that means that the lock below will block for 'sleep_time' minus some
# potential context switch delay
Expand Down
13 changes: 6 additions & 7 deletions Lib/test/signalinterproctester.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,16 +28,15 @@ def wait_signal(self, child, signame):
# (if set)
child.wait()

timeout = support.SHORT_TIMEOUT
deadline = time.monotonic() + timeout

while time.monotonic() < deadline:
start_time = time.monotonic()
for _ in support.busy_retry(support.SHORT_TIMEOUT, error=False):
if self.got_signals[signame]:
return
signal.pause()

self.fail('signal %s not received after %s seconds'
% (signame, timeout))
else:
dt = time.monotonic() - start_time
self.fail('signal %s not received after %.1f seconds'
% (signame, dt))

def subprocess_send_signal(self, pid, signame):
code = 'import os, signal; os.kill(%s, signal.%s)' % (pid, signame)
Expand Down
35 changes: 15 additions & 20 deletions Lib/test/support/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -2072,31 +2072,26 @@ def wait_process(pid, *, exitcode, timeout=None):

if timeout is None:
timeout = SHORT_TIMEOUT
t0 = time.monotonic()
sleep = 0.001
max_sleep = 0.1
while True:

start_time = time.monotonic()
for _ in sleeping_retry(timeout, error=False):
pid2, status = os.waitpid(pid, os.WNOHANG)
if pid2 != 0:
break
# process is still running

dt = time.monotonic() - t0
if dt > SHORT_TIMEOUT:
try:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
except OSError:
# Ignore errors like ChildProcessError or PermissionError
pass

raise AssertionError(f"process {pid} is still running "
f"after {dt:.1f} seconds")
# retry: the process is still running
else:
try:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
except OSError:
# Ignore errors like ChildProcessError or PermissionError
pass

sleep = min(sleep * 2, max_sleep)
time.sleep(sleep)
dt = time.monotonic() - start_time
raise AssertionError(f"process {pid} is still running "
f"after {dt:.1f} seconds")
else:
# Windows implementation
# Windows implementation: doesn't support timeout :-(
pid2, status = os.waitpid(pid, 0)

exitcode2 = os.waitstatus_to_exitcode(status)
Expand Down
18 changes: 8 additions & 10 deletions Lib/test/support/threading_helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,19 +88,17 @@ def wait_threads_exit(timeout=None):
yield
finally:
start_time = time.monotonic()
deadline = start_time + timeout
while True:
for _ in support.sleeping_retry(timeout, error=False):
support.gc_collect()
count = _thread._count()
if count <= old_count:
break
if time.monotonic() > deadline:
dt = time.monotonic() - start_time
msg = (f"wait_threads() failed to cleanup {count - old_count} "
f"threads after {dt:.1f} seconds "
f"(count: {count}, old count: {old_count})")
raise AssertionError(msg)
time.sleep(0.010)
support.gc_collect()
else:
dt = time.monotonic() - start_time
msg = (f"wait_threads() failed to cleanup {count - old_count} "
f"threads after {dt:.1f} seconds "
f"(count: {count}, old count: {old_count})")
raise AssertionError(msg)


def join_thread(thread, timeout=None):
Expand Down
11 changes: 5 additions & 6 deletions Lib/test/test_asyncio/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,13 +109,12 @@ async def once():


def run_until(loop, pred, timeout=support.SHORT_TIMEOUT):
deadline = time.monotonic() + timeout
while not pred():
if timeout is not None:
timeout = deadline - time.monotonic()
if timeout <= 0:
raise futures.TimeoutError()
for _ in support.busy_retry(timeout, error=False):
if pred():
break
loop.run_until_complete(tasks.sleep(0.001))
else:
raise futures.TimeoutError()


def run_once(loop):
Expand Down
5 changes: 3 additions & 2 deletions Lib/test/test_asyncore.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,8 +76,7 @@ def capture_server(evt, buf, serv):
pass
else:
n = 200
start = time.monotonic()
while n > 0 and time.monotonic() - start < 3.0:
for _ in support.busy_retry(3.0, error=False):
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
Expand All @@ -86,6 +85,8 @@ def capture_server(evt, buf, serv):
buf.write(data.replace(b'\n', b''))
if b'\n' in data:
break
if n <= 0:
break
time.sleep(0.01)

conn.close()
Expand Down
13 changes: 7 additions & 6 deletions Lib/test/test_logging.py
Original file line number Diff line number Diff line change
Expand Up @@ -3602,7 +3602,6 @@ def do_queuehandler_configuration(self, qspec, lspec):
if lspec is not None:
cd['handlers']['ah']['listener'] = lspec
qh = None
delay = 0.01
try:
self.apply_config(cd)
qh = logging.getHandlerByName('ah')
Expand All @@ -3612,12 +3611,14 @@ def do_queuehandler_configuration(self, qspec, lspec):
logging.debug('foo')
logging.info('bar')
logging.warning('baz')

# Need to let the listener thread finish its work
deadline = time.monotonic() + support.LONG_TIMEOUT
while not qh.listener.queue.empty():
time.sleep(delay)
if time.monotonic() > deadline:
self.fail("queue not empty")
while support.sleeping_retry(support.LONG_TIMEOUT, error=False):
if qh.listener.queue.empty():
break
else:
self.fail("queue not empty")

with open(fn, encoding='utf-8') as f:
data = f.read().splitlines()
self.assertEqual(data, ['foo', 'bar', 'baz'])
Expand Down