# Lib/test/test_concurrent_futures/test_shutdown.py
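"""Tests for the shutdown behaviour of concurrent.futures executors.

Covers ThreadPoolExecutor and ProcessPoolExecutor (fork, forkserver and
spawn start methods): explicit shutdown(), shutdown(wait=False),
shutdown(cancel_futures=True), implicit shutdown at interpreter exit, and
regression tests for shutdown-related hangs.
"""
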
import signal
import sys
import threading
import time
import unittest
from concurrent import futures

from test import support
from test.support.script_helper import assert_python_ok

from .util import (
    BaseTestCase, ThreadPoolMixin, ProcessPoolForkMixin,
    ProcessPoolForkserverMixin, ProcessPoolSpawnMixin,
    create_executor_tests, setup_module)


def sleep_and_print(t, msg):
    time.sleep(t)
    print(msg)
    sys.stdout.flush()


class ExecutorShutdownTest:
    def test_run_after_shutdown(self):
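        # submit() must raise RuntimeError after shutdown().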
        self.executor.shutdown()
        self.assertRaises(RuntimeError,
                          self.executor.submit,
                          pow, 2, 5)

    def test_interpreter_shutdown(self):
        # Test the atexit hook for shutdown of worker threads and processes
        rc, out, err = assert_python_ok('-c', """if 1:
            from concurrent.futures import {executor_type}
            from time import sleep
            from test.test_concurrent_futures.test_shutdown import sleep_and_print
            if __name__ == "__main__":
                context = '{context}'
                if context == "":
                    t = {executor_type}(5)
                else:
                    from multiprocessing import get_context
                    context = get_context(context)
                    t = {executor_type}(5, mp_context=context)
                t.submit(sleep_and_print, 1.0, "apple")
            """.format(executor_type=self.executor_type.__name__,
                       context=getattr(self, "ctx", "")))
        # Errors in atexit hooks don't change the process exit code; check
        # stderr manually.
        self.assertFalse(err)
        self.assertEqual(out.strip(), b"apple")

    def test_submit_after_interpreter_shutdown(self):
        # Test the atexit hook for shutdown of worker threads and processes
        rc, out, err = assert_python_ok('-c', """if 1:
            import atexit
            @atexit.register
            def run_last():
                try:
                    t.submit(id, None)
                except RuntimeError:
                    print("runtime-error")
                    raise
            from concurrent.futures import {executor_type}
            if __name__ == "__main__":
                context = '{context}'
                if not context:
                    t = {executor_type}(5)
                else:
                    from multiprocessing import get_context
                    context = get_context(context)
                    t = {executor_type}(5, mp_context=context)
                    t.submit(id, 42).result()
            """.format(executor_type=self.executor_type.__name__,
                       context=getattr(self, "ctx", "")))
        # Errors in atexit hooks don't change the process exit code; check
        # stderr manually.
        self.assertIn("RuntimeError: cannot schedule new futures", err.decode())
        self.assertEqual(out.strip(), b"runtime-error")

    def test_hang_issue12364(self):
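        # bpo-12364: shutdown() followed by fetching the results of the
        # submitted futures must complete rather than hang.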
        fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
        self.executor.shutdown()
        for f in fs:
            f.result()

    def test_cancel_futures(self):
        assert self.worker_count <= 5, "test needs few workers"
        fs = [self.executor.submit(time.sleep, .1) for _ in range(50)]
        self.executor.shutdown(cancel_futures=True)
        # We can't guarantee the exact number of cancellations, but we can
        # guarantee that *some* were cancelled. With few workers, many of
        # the submitted futures should have been cancelled.
        cancelled = [fut for fut in fs if fut.cancelled()]
        self.assertGreater(len(cancelled), 20)

        # Ensure the other futures were able to finish.
        # Use "not fut.cancelled()" instead of "fut.done()" to include futures
        # that may have been left in a pending state.
        others = [fut for fut in fs if not fut.cancelled()]
        for fut in others:
            self.assertTrue(fut.done(), msg=f"{fut._state=}")
            self.assertIsNone(fut.exception())

        # Similar to the number of cancelled futures, we can't guarantee the
        # exact number that completed. But, we can guarantee that at least
        # one finished.
        self.assertGreater(len(others), 0)

    def test_hang_gh83386(self):
        """shutdown(wait=False) doesn't hang at exit with running futures.

        See https://github.com/python/cpython/issues/83386.
        """
        if self.executor_type == futures.ProcessPoolExecutor:
            raise unittest.SkipTest(
                "Hangs, see https://github.com/python/cpython/issues/83386")

        rc, out, err = assert_python_ok('-c', """if True:
            from concurrent.futures import {executor_type}
            from test.test_concurrent_futures.test_shutdown import sleep_and_print
            if __name__ == "__main__":
                if {context!r}:
                    import multiprocessing
                    multiprocessing.set_start_method({context!r})
                t = {executor_type}(max_workers=3)
                t.submit(sleep_and_print, 1.0, "apple")
                t.shutdown(wait=False)
            """.format(executor_type=self.executor_type.__name__,
                       context=getattr(self, 'ctx', None)))
        self.assertFalse(err)
        self.assertEqual(out.strip(), b"apple")

    def test_hang_gh94440(self):
        """shutdown(wait=True) doesn't hang when a future was submitted and
        quickly canceled right before shutdown.

        See https://github.com/python/cpython/issues/94440.
        """
        if not hasattr(signal, 'alarm'):
            raise unittest.SkipTest(
                "Tested platform does not support the alarm signal")

        def timeout(_signum, _frame):
            raise RuntimeError("timed out waiting for shutdown")

        kwargs = {}
        if getattr(self, 'ctx', None):
            kwargs['mp_context'] = self.get_context()
        executor = self.executor_type(max_workers=1, **kwargs)
        executor.submit(int).result()
        old_handler = signal.signal(signal.SIGALRM, timeout)
        try:
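            # SIGALRM acts as a watchdog: if shutdown(wait=True) hangs, the
            # handler raises RuntimeError instead of stalling the test run.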
            signal.alarm(5)
            executor.submit(int).cancel()
            executor.shutdown(wait=True)
        finally:
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old_handler)


class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
    def test_threads_terminate(self):
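        # Worker threads must exit once their tasks finish and shutdown()
        # has been called, so the joins below must not hang.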
        def acquire_lock(lock):
            lock.acquire()

        sem = threading.Semaphore(0)
        for i in range(3):
            self.executor.submit(acquire_lock, sem)
        self.assertEqual(len(self.executor._threads), 3)
        for i in range(3):
            sem.release()
        self.executor.shutdown()
        for t in self.executor._threads:
            t.join()

    def test_context_manager_shutdown(self):
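        # Leaving the with block calls shutdown(wait=True), so the explicit
        # joins below are expected to return promptly.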
        with futures.ThreadPoolExecutor(max_workers=5) as e:
            executor = e
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])

        for t in executor._threads:
            t.join()

    def test_del_shutdown(self):
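        # No explicit shutdown(): dropping the last reference to the executor
        # must be enough for its worker threads to exit, so the joins below
        # must not hang.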
        executor = futures.ThreadPoolExecutor(max_workers=5)
        res = executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor

        for t in threads:
            t.join()

        # Make sure the results were all computed before the
        # executor got shut down.
        assert all([r == abs(v) for r, v in zip(res, range(-5, 5))])

    def test_shutdown_no_wait(self):
        # Ensure that the executor cleans up the threads when calling
        # shutdown with wait=False
        executor = futures.ThreadPoolExecutor(max_workers=5)
        res = executor.map(abs, range(-5, 5))
        threads = executor._threads
        executor.shutdown(wait=False)
        for t in threads:
            t.join()

        # Make sure the results were all computed before the
        # executor got shut down.
        assert all([r == abs(v) for r, v in zip(res, range(-5, 5))])

    def test_thread_names_assigned(self):
        executor = futures.ThreadPoolExecutor(
            max_workers=5, thread_name_prefix='SpecialPool')
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        support.gc_collect()  # For PyPy or other GCs.

        for t in threads:
            self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
            t.join()

    def test_thread_names_default(self):
        executor = futures.ThreadPoolExecutor(max_workers=5)
        executor.map(abs, range(-5, 5))
        threads = executor._threads
        del executor
        support.gc_collect()  # For PyPy or other GCs.

        for t in threads:
            # Ensure that our default name is reasonably sane and unique when
            # no thread_name_prefix was supplied.
            self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
            t.join()

    def test_cancel_futures_wait_false(self):
        # Can only be reliably tested for TPE, since PPE often hangs with
        # `wait=False` (even without *cancel_futures*).
        rc, out, err = assert_python_ok('-c', """if True:
            from concurrent.futures import ThreadPoolExecutor
            from test.test_concurrent_futures.test_shutdown import sleep_and_print
            if __name__ == "__main__":
                t = ThreadPoolExecutor()
                t.submit(sleep_and_print, .1, "apple")
                t.shutdown(wait=False, cancel_futures=True)
            """)
        # Errors in atexit hooks don't change the process exit code; check
        # stderr manually.
        self.assertFalse(err)
        # gh-116682: stdout may be empty if shutdown happens before task
        # starts executing.
        self.assertIn(out.strip(), [b"apple", b""])


class ProcessPoolShutdownTest(ExecutorShutdownTest):
    def test_processes_terminate(self):
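        # Worker processes must exit once their tasks finish and shutdown()
        # has been called, so the joins below must not hang.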
        def acquire_lock(lock):
            lock.acquire()

        mp_context = self.get_context()
        if mp_context.get_start_method(allow_none=False) == "fork":
            # The fork start method pre-spawns all worker processes rather
            # than starting them on demand.
            expected_num_processes = self.worker_count
        else:
            expected_num_processes = 3

        sem = mp_context.Semaphore(0)
        for _ in range(3):
            self.executor.submit(acquire_lock, sem)
        self.assertEqual(len(self.executor._processes), expected_num_processes)
        for _ in range(3):
            sem.release()
        processes = self.executor._processes
        self.executor.shutdown()

        for p in processes.values():
            p.join()

    def test_context_manager_shutdown(self):
        with futures.ProcessPoolExecutor(
                max_workers=5, mp_context=self.get_context()) as e:
            processes = e._processes
            self.assertEqual(list(e.map(abs, range(-5, 5))),
                             [5, 4, 3, 2, 1, 0, 1, 2, 3, 4])

        for p in processes.values():
            p.join()

    def test_del_shutdown(self):
        executor = futures.ProcessPoolExecutor(
                max_workers=5, mp_context=self.get_context())
        res = executor.map(abs, range(-5, 5))
        processes = executor._processes
        call_queue = executor._call_queue
        executor_manager_thread = executor._executor_manager_thread
        del executor
        support.gc_collect()  # For PyPy or other GCs.

        # Make sure that all the executor resources were properly cleaned by
        # the shutdown process
        executor_manager_thread.join()
        for p in processes.values():
            p.join()
        call_queue.join_thread()

        # Make sure the results were all computed before the
        # executor got shut down.
        assert all([r == abs(v) for r, v in zip(res, range(-5, 5))])

    def test_shutdown_no_wait(self):
        # Ensure that the executor cleans up the processes when calling
        # shutdown with wait=False
        executor = futures.ProcessPoolExecutor(
                max_workers=5, mp_context=self.get_context())
        res = executor.map(abs, range(-5, 5))
        processes = executor._processes
        call_queue = executor._call_queue
        executor_manager_thread = executor._executor_manager_thread
        executor.shutdown(wait=False)

        # Make sure that all the executor resources were properly cleaned by
        # the shutdown process
        executor_manager_thread.join()
        for p in processes.values():
            p.join()
        call_queue.join_thread()

        # Make sure the results were all computed before the executor got
        # shut down.
        assert all([r == abs(v) for r, v in zip(res, range(-5, 5))])

    @classmethod
    def _failing_task_gh_132969(cls, n):
        raise ValueError("failing task")

    @classmethod
    def _good_task_gh_132969(cls, n):
        time.sleep(0.1 * n)
        return n

    def _run_test_issue_gh_132969(self, max_workers):
        # max_workers=2 will repro exception
        # max_workers=4 will repro exception and then hang

        # Repro conditions
        #   max_tasks_per_child=1
        #   a task ends abnormally
        #   shutdown(wait=False) is called
        start_method = self.get_context().get_start_method()
        if (start_method == "fork" or
                (start_method == "forkserver" and sys.platform.startswith("win"))):
            self.skipTest(f"Skipping test for {start_method = }")
        executor = futures.ProcessPoolExecutor(
                max_workers=max_workers,
                max_tasks_per_child=1,
                mp_context=self.get_context())
        f1 = executor.submit(ProcessPoolShutdownTest._good_task_gh_132969, 1)
        f2 = executor.submit(ProcessPoolShutdownTest._failing_task_gh_132969, 2)
        f3 = executor.submit(ProcessPoolShutdownTest._good_task_gh_132969, 3)
        result = 0
        try:
            result += f1.result()
            result += f2.result()
            result += f3.result()
        except ValueError:
            # stop processing results upon first exception
            pass

        # Ensure that the executor cleans up after shutdown is called
        # with wait=False
        executor_manager_thread = executor._executor_manager_thread
        executor.shutdown(wait=False)
        time.sleep(0.2)
        executor_manager_thread.join()
        return result

    def test_shutdown_gh_132969_case_1(self):
        # gh-132969: test that exception "object of type 'NoneType' has no len()"
        # is not raised when shutdown(wait=False) is called.
        result = self._run_test_issue_gh_132969(2)
        self.assertEqual(result, 1)

    def test_shutdown_gh_132969_case_2(self):
        # gh-132969: test that process does not hang and
        # exception "object of type 'NoneType' has no len()" is not raised
        # when shutdown(wait=False) is called.
        result = self._run_test_issue_gh_132969(4)
        self.assertEqual(result, 1)


create_executor_tests(globals(), ProcessPoolShutdownTest,
                      executor_mixins=(ProcessPoolForkMixin,
                                       ProcessPoolForkserverMixin,
                                       ProcessPoolSpawnMixin))


def setUpModule():
    setup_module()


if __name__ == "__main__":
    unittest.main()