[RFC] Decompose server class #8468

Draft · wants to merge 6 commits into base: main

Commit f8ccddf ("minor fixes")
GitHub Actions / Unit Test Results failed Jul 10, 2024 in 0s

2 errors, 326 fail, 116 skipped, 3 639 pass in 14h 35m 48s

    29 files  +28      |     29 suites +28     | 14h 35m 48s ⏱️ +14h 35m 22s
 4 083 tests  +4 039   |  3 639 ✅ +3 631      |   116 💤 +80     |   326 ❌ +326    | 2 🔥 +2
50 647 runs   +50 603  | 44 569 ✅ +44 561     | 2 288 💤 +2 252  | 3 781 ❌ +3 781  | 9 🔥 +9

Results for commit f8ccddf. ± Comparison against earlier commit 04f5797.
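
To reproduce these failures locally, a pytest selection like the sketch below should pick up both failing groups (the module paths and test names are taken from the annotations that follow; tests marked @pytest.mark.slow may additionally need the suite's opt-in flag for slow tests):

    # Hedged sketch: select the failing tests from this report with pytest's -k filter.
    import pytest

    pytest.main(
        [
            "distributed/cli/tests/test_dask_worker.py",
            "distributed/comm/tests/test_ws.py",
            "-k",
            "test_contact_listen_address"
            " or test_respect_host_listen_address"
            " or test_http_and_comm_server",
        ]
    )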

Annotations

Check warning on line 0 in distributed.cli.tests.test_dask_worker

9 out of 14 runs failed: test_contact_listen_address[tcp://0.0.0.0:---nanny] (distributed.cli.tests.test_dask_worker)

artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 3s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
Raw output
AttributeError: 'Worker' object has no attribute 'listener'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:36297', workers: 0, cores: 0, tasks: 0>
nanny = '--nanny', listen_address = 'tcp://0.0.0.0:35917'

    @pytest.mark.slow
    @pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
    @pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
    @pytest.mark.parametrize("listen_address", ["tcp://0.0.0.0:", "tcp://127.0.0.2:"])
    @gen_cluster(client=True, nthreads=[])
    async def test_contact_listen_address(c, s, nanny, listen_address):
        port = open_port()
        listen_address += str(port)
        with popen(
            [
                "dask",
                "worker",
                s.address,
                nanny,
                "--no-dashboard",
                "--contact-address",
                f"tcp://127.0.0.2:{port}",
                "--listen-address",
                listen_address,
            ]
        ):
            await c.wait_for_workers(1)
            info = c.scheduler_info()
            assert info["workers"].keys() == {f"tcp://127.0.0.2:{port}"}
    
            # roundtrip works
            assert await c.submit(lambda x: x + 1, 10) == 11
    
            def func(dask_worker):
                return dask_worker.listener.listen_address
    
>           assert await c.run(func) == {f"tcp://127.0.0.2:{port}": listen_address}

distributed/cli/tests/test_dask_worker.py:428: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/client.py:2912: in _run
    raise exc
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

>   return dask_worker.listener.listen_address
E   AttributeError: 'Worker' object has no attribute 'listener'

distributed/cli/tests/test_dask_worker.py:426: AttributeError
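
All of the test_dask_worker failures in this report share the same root cause: the helper passed to Client.run reads dask_worker.listener.listen_address, and after this PR's decomposition the Worker object apparently no longer exposes a listener attribute directly. Below is a hedged sketch of a more defensive helper; the dask_worker.server.listener path is an assumption about where the decomposed server class might keep the listener and is not confirmed by this log:

    # Hedged sketch: tolerate both the old and a hypothetical post-decomposition layout.
    def func(dask_worker):
        # old layout: the listener lives directly on the Worker
        listener = getattr(dask_worker, "listener", None)
        if listener is None:
            # hypothetical new layout: the listener lives on a nested server object
            listener = getattr(getattr(dask_worker, "server", None), "listener", None)
        assert listener is not None, "no listener found on worker"
        return listener.listen_address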

Check warning on line 0 in distributed.cli.tests.test_dask_worker

9 out of 14 runs failed: test_contact_listen_address[tcp://0.0.0.0:---no-nanny] (distributed.cli.tests.test_dask_worker)

artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
Raw output
AttributeError: 'Worker' object has no attribute 'listener'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:33441', workers: 0, cores: 0, tasks: 0>
nanny = '--no-nanny', listen_address = 'tcp://0.0.0.0:48915'

    @pytest.mark.slow
    @pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
    @pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
    @pytest.mark.parametrize("listen_address", ["tcp://0.0.0.0:", "tcp://127.0.0.2:"])
    @gen_cluster(client=True, nthreads=[])
    async def test_contact_listen_address(c, s, nanny, listen_address):
        port = open_port()
        listen_address += str(port)
        with popen(
            [
                "dask",
                "worker",
                s.address,
                nanny,
                "--no-dashboard",
                "--contact-address",
                f"tcp://127.0.0.2:{port}",
                "--listen-address",
                listen_address,
            ]
        ):
            await c.wait_for_workers(1)
            info = c.scheduler_info()
            assert info["workers"].keys() == {f"tcp://127.0.0.2:{port}"}
    
            # roundtrip works
            assert await c.submit(lambda x: x + 1, 10) == 11
    
            def func(dask_worker):
                return dask_worker.listener.listen_address
    
>           assert await c.run(func) == {f"tcp://127.0.0.2:{port}": listen_address}

distributed/cli/tests/test_dask_worker.py:428: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/client.py:2912: in _run
    raise exc
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

>   return dask_worker.listener.listen_address
E   AttributeError: 'Worker' object has no attribute 'listener'

distributed/cli/tests/test_dask_worker.py:426: AttributeError

Check warning on line 0 in distributed.cli.tests.test_dask_worker

9 out of 14 runs failed: test_contact_listen_address[tcp://127.0.0.2:---nanny] (distributed.cli.tests.test_dask_worker)

artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 3s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
Raw output
AttributeError: 'Worker' object has no attribute 'listener'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:39575', workers: 0, cores: 0, tasks: 0>
nanny = '--nanny', listen_address = 'tcp://127.0.0.2:43035'

    @pytest.mark.slow
    @pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
    @pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
    @pytest.mark.parametrize("listen_address", ["tcp://0.0.0.0:", "tcp://127.0.0.2:"])
    @gen_cluster(client=True, nthreads=[])
    async def test_contact_listen_address(c, s, nanny, listen_address):
        port = open_port()
        listen_address += str(port)
        with popen(
            [
                "dask",
                "worker",
                s.address,
                nanny,
                "--no-dashboard",
                "--contact-address",
                f"tcp://127.0.0.2:{port}",
                "--listen-address",
                listen_address,
            ]
        ):
            await c.wait_for_workers(1)
            info = c.scheduler_info()
            assert info["workers"].keys() == {f"tcp://127.0.0.2:{port}"}
    
            # roundtrip works
            assert await c.submit(lambda x: x + 1, 10) == 11
    
            def func(dask_worker):
                return dask_worker.listener.listen_address
    
>           assert await c.run(func) == {f"tcp://127.0.0.2:{port}": listen_address}

distributed/cli/tests/test_dask_worker.py:428: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/client.py:2912: in _run
    raise exc
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

>   return dask_worker.listener.listen_address
E   AttributeError: 'Worker' object has no attribute 'listener'

distributed/cli/tests/test_dask_worker.py:426: AttributeError

Check warning on line 0 in distributed.cli.tests.test_dask_worker

9 out of 14 runs failed: test_contact_listen_address[tcp://127.0.0.2:---no-nanny] (distributed.cli.tests.test_dask_worker)

artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
Raw output
AttributeError: 'Worker' object has no attribute 'listener'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:39169', workers: 0, cores: 0, tasks: 0>
nanny = '--no-nanny', listen_address = 'tcp://127.0.0.2:52429'

    @pytest.mark.slow
    @pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
    @pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
    @pytest.mark.parametrize("listen_address", ["tcp://0.0.0.0:", "tcp://127.0.0.2:"])
    @gen_cluster(client=True, nthreads=[])
    async def test_contact_listen_address(c, s, nanny, listen_address):
        port = open_port()
        listen_address += str(port)
        with popen(
            [
                "dask",
                "worker",
                s.address,
                nanny,
                "--no-dashboard",
                "--contact-address",
                f"tcp://127.0.0.2:{port}",
                "--listen-address",
                listen_address,
            ]
        ):
            await c.wait_for_workers(1)
            info = c.scheduler_info()
            assert info["workers"].keys() == {f"tcp://127.0.0.2:{port}"}
    
            # roundtrip works
            assert await c.submit(lambda x: x + 1, 10) == 11
    
            def func(dask_worker):
                return dask_worker.listener.listen_address
    
>           assert await c.run(func) == {f"tcp://127.0.0.2:{port}": listen_address}

distributed/cli/tests/test_dask_worker.py:428: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/client.py:2912: in _run
    raise exc
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

>   return dask_worker.listener.listen_address
E   AttributeError: 'Worker' object has no attribute 'listener'

distributed/cli/tests/test_dask_worker.py:426: AttributeError

Check warning on line 0 in distributed.cli.tests.test_dask_worker

9 out of 14 runs failed: test_respect_host_listen_address[127.0.0.2---nanny] (distributed.cli.tests.test_dask_worker)

artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 3s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
Raw output
AttributeError: 'Worker' object has no attribute 'listener'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:39279', workers: 0, cores: 0, tasks: 0>
nanny = '--nanny', host = '127.0.0.2'

    @pytest.mark.slow
    @pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
    @pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
    @pytest.mark.parametrize("host", ["127.0.0.2", "0.0.0.0"])
    @gen_cluster(client=True, nthreads=[])
    async def test_respect_host_listen_address(c, s, nanny, host):
        with popen(["dask", "worker", s.address, nanny, "--no-dashboard", "--host", host]):
            await c.wait_for_workers(1)
    
            # roundtrip works
            assert await c.submit(lambda x: x + 1, 10) == 11
    
            def func(dask_worker):
                return dask_worker.listener.listen_address
    
>           listen_addresses = await c.run(func)

distributed/cli/tests/test_dask_worker.py:483: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/client.py:2912: in _run
    raise exc
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

>   return dask_worker.listener.listen_address
E   AttributeError: 'Worker' object has no attribute 'listener'

distributed/cli/tests/test_dask_worker.py:481: AttributeError

Check warning on line 0 in distributed.cli.tests.test_dask_worker

9 out of 14 runs failed: test_respect_host_listen_address[127.0.0.2---no-nanny] (distributed.cli.tests.test_dask_worker)

artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
Raw output
AttributeError: 'Worker' object has no attribute 'listener'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:44499', workers: 0, cores: 0, tasks: 0>
nanny = '--no-nanny', host = '127.0.0.2'

    @pytest.mark.slow
    @pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
    @pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
    @pytest.mark.parametrize("host", ["127.0.0.2", "0.0.0.0"])
    @gen_cluster(client=True, nthreads=[])
    async def test_respect_host_listen_address(c, s, nanny, host):
        with popen(["dask", "worker", s.address, nanny, "--no-dashboard", "--host", host]):
            await c.wait_for_workers(1)
    
            # roundtrip works
            assert await c.submit(lambda x: x + 1, 10) == 11
    
            def func(dask_worker):
                return dask_worker.listener.listen_address
    
>           listen_addresses = await c.run(func)

distributed/cli/tests/test_dask_worker.py:483: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/client.py:2912: in _run
    raise exc
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

>   return dask_worker.listener.listen_address
E   AttributeError: 'Worker' object has no attribute 'listener'

distributed/cli/tests/test_dask_worker.py:481: AttributeError

Check warning on line 0 in distributed.cli.tests.test_dask_worker

9 out of 14 runs failed: test_respect_host_listen_address[0.0.0.0---nanny] (distributed.cli.tests.test_dask_worker)

artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 3s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
Raw output
AttributeError: 'Worker' object has no attribute 'listener'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:42299', workers: 0, cores: 0, tasks: 0>
nanny = '--nanny', host = '0.0.0.0'

    @pytest.mark.slow
    @pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
    @pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
    @pytest.mark.parametrize("host", ["127.0.0.2", "0.0.0.0"])
    @gen_cluster(client=True, nthreads=[])
    async def test_respect_host_listen_address(c, s, nanny, host):
        with popen(["dask", "worker", s.address, nanny, "--no-dashboard", "--host", host]):
            await c.wait_for_workers(1)
    
            # roundtrip works
            assert await c.submit(lambda x: x + 1, 10) == 11
    
            def func(dask_worker):
                return dask_worker.listener.listen_address
    
>           listen_addresses = await c.run(func)

distributed/cli/tests/test_dask_worker.py:483: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/client.py:2912: in _run
    raise exc
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

>   return dask_worker.listener.listen_address
E   AttributeError: 'Worker' object has no attribute 'listener'

distributed/cli/tests/test_dask_worker.py:481: AttributeError

Check warning on line 0 in distributed.cli.tests.test_dask_worker

9 out of 14 runs failed: test_respect_host_listen_address[0.0.0.0---no-nanny] (distributed.cli.tests.test_dask_worker)

artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
Raw output
AttributeError: 'Worker' object has no attribute 'listener'
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:36203', workers: 0, cores: 0, tasks: 0>
nanny = '--no-nanny', host = '0.0.0.0'

    @pytest.mark.slow
    @pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
    @pytest.mark.parametrize("nanny", ["--nanny", "--no-nanny"])
    @pytest.mark.parametrize("host", ["127.0.0.2", "0.0.0.0"])
    @gen_cluster(client=True, nthreads=[])
    async def test_respect_host_listen_address(c, s, nanny, host):
        with popen(["dask", "worker", s.address, nanny, "--no-dashboard", "--host", host]):
            await c.wait_for_workers(1)
    
            # roundtrip works
            assert await c.submit(lambda x: x + 1, 10) == 11
    
            def func(dask_worker):
                return dask_worker.listener.listen_address
    
>           listen_addresses = await c.run(func)

distributed/cli/tests/test_dask_worker.py:483: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/client.py:2912: in _run
    raise exc
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

>   return dask_worker.listener.listen_address
E   AttributeError: 'Worker' object has no attribute 'listener'

distributed/cli/tests/test_dask_worker.py:481: AttributeError

Check warning on line 0 in distributed.comm.tests.test_ws

All 14 runs failed: test_http_and_comm_server[True-ws://-None-8787] (distributed.comm.tests.test_ws)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
RuntimeError: Scheduler failed to start.
self = <Scheduler 'not-running', workers: 0, cores: 0, tasks: 0>

    @final
    async def start(self):
        async with self._startup_lock:
            if self.status == Status.failed:
                assert self.__startup_exc is not None
                raise self.__startup_exc
            elif self.status != Status.init:
                return self
            timeout = getattr(self, "death_timeout", None)
    
            async def _close_on_failure(exc: Exception) -> None:
                await self.close(reason=f"failure-to-start-{str(type(exc))}")
                self.status = Status.failed
                self.__startup_exc = exc
    
            try:
>               await wait_for(self.start_unsafe(), timeout=timeout)

distributed/node.py:518: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/utils.py:1954: in wait_for
    return await asyncio.wait_for(fut, timeout)
../../../miniconda3/envs/dask-distributed/lib/python3.9/asyncio/tasks.py:442: in wait_for
    return await fut
distributed/scheduler.py:4098: in start_unsafe
    await self.server.listen(
distributed/core.py:364: in listen
    listener = await listen(
distributed/comm/core.py:256: in _
    await self.start()
distributed/comm/ws.py:402: in start
    self.server.listen(self.port)
../../../miniconda3/envs/dask-distributed/lib/python3.9/site-packages/tornado/tcpserver.py:151: in listen
    sockets = bind_sockets(port, address=address)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

port = 8787, address = None, family = <AddressFamily.AF_UNSPEC: 0>
backlog = 128, flags = <AddressInfo.AI_PASSIVE: 1>, reuse_port = False

    def bind_sockets(
        port: int,
        address: str = None,
        family: socket.AddressFamily = socket.AF_UNSPEC,
        backlog: int = _DEFAULT_BACKLOG,
        flags: int = None,
        reuse_port: bool = False,
    ) -> List[socket.socket]:
        """Creates listening sockets bound to the given port and address.
    
        Returns a list of socket objects (multiple sockets are returned if
        the given address maps to multiple IP addresses, which is most common
        for mixed IPv4 and IPv6 use).
    
        Address may be either an IP address or hostname.  If it's a hostname,
        the server will listen on all IP addresses associated with the
        name.  Address may be an empty string or None to listen on all
        available interfaces.  Family may be set to either `socket.AF_INET`
        or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
        both will be used if available.
    
        The ``backlog`` argument has the same meaning as for
        `socket.listen() <socket.socket.listen>`.
    
        ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
        ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
    
        ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket
        in the list. If your platform doesn't support this option ValueError will
        be raised.
        """
        if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
            raise ValueError("the platform doesn't support SO_REUSEPORT")
    
        sockets = []
        if address == "":
            address = None
        if not socket.has_ipv6 and family == socket.AF_UNSPEC:
            # Python can be compiled with --disable-ipv6, which causes
            # operations on AF_INET6 sockets to fail, but does not
            # automatically exclude those results from getaddrinfo
            # results.
            # http://bugs.python.org/issue16208
            family = socket.AF_INET
        if flags is None:
            flags = socket.AI_PASSIVE
        bound_port = None
        unique_addresses = set()  # type: set
        for res in sorted(
            socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags),
            key=lambda x: x[0],
        ):
            if res in unique_addresses:
                continue
    
            unique_addresses.add(res)
    
            af, socktype, proto, canonname, sockaddr = res
            if (
                sys.platform == "darwin"
                and address == "localhost"
                and af == socket.AF_INET6
                and sockaddr[3] != 0
            ):
                # Mac OS X includes a link-local address fe80::1%lo0 in the
                # getaddrinfo results for 'localhost'.  However, the firewall
                # doesn't understand that this is a local address and will
                # prompt for access (often repeatedly, due to an apparent
                # bug in its ability to remember granting access to an
                # application). Skip these addresses.
                continue
            try:
                sock = socket.socket(af, socktype, proto)
            except socket.error as e:
                if errno_from_exception(e) == errno.EAFNOSUPPORT:
                    continue
                raise
            set_close_exec(sock.fileno())
            if os.name != "nt":
                try:
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                except socket.error as e:
                    if errno_from_exception(e) != errno.ENOPROTOOPT:
                        # Hurd doesn't support SO_REUSEADDR.
                        raise
            if reuse_port:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            if af == socket.AF_INET6:
                # On linux, ipv6 sockets accept ipv4 too by default,
                # but this makes it impossible to bind to both
                # 0.0.0.0 in ipv4 and :: in ipv6.  On other systems,
                # separate sockets *must* be used to listen for both ipv4
                # and ipv6.  For consistency, always disable ipv4 on our
                # ipv6 sockets and use a separate ipv4 socket when needed.
                #
                # Python 2.x on windows doesn't have IPPROTO_IPV6.
                if hasattr(socket, "IPPROTO_IPV6"):
                    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    
            # automatic port allocation with port=None
            # should bind on the same port on IPv4 and IPv6
            host, requested_port = sockaddr[:2]
            if requested_port == 0 and bound_port is not None:
                sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
    
            sock.setblocking(False)
>           sock.bind(sockaddr)
E           OSError: [Errno 98] Address already in use

../../../miniconda3/envs/dask-distributed/lib/python3.9/site-packages/tornado/netutil.py:174: OSError

The above exception was the direct cause of the following exception:

dashboard = True, protocol = 'ws://', security = None, port = 8787

    @pytest.mark.parametrize(
        "dashboard,protocol,security,port",
        [
            (True, "ws://", None, 8787),
            (True, "wss://", True, 8787),
            (False, "ws://", None, 8787),
            (False, "wss://", True, 8787),
            (True, "ws://", None, 8786),
            (True, "wss://", True, 8786),
            (False, "ws://", None, 8786),
            (False, "wss://", True, 8786),
        ],
    )
    @gen_test()
    async def test_http_and_comm_server(dashboard, protocol, security, port):
        if security:
            xfail_ssl_issue5601()
            pytest.importorskip("cryptography")
            security = Security.temporary()
>       async with Scheduler(
            protocol=protocol, dashboard=dashboard, port=port, security=security
        ) as s:

distributed/comm/tests/test_ws.py:156: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/node.py:213: in __aenter__
    await self
distributed/node.py:207: in _
    await self.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <Scheduler 'not-running', workers: 0, cores: 0, tasks: 0>

    @final
    async def start(self):
        async with self._startup_lock:
            if self.status == Status.failed:
                assert self.__startup_exc is not None
                raise self.__startup_exc
            elif self.status != Status.init:
                return self
            timeout = getattr(self, "death_timeout", None)
    
            async def _close_on_failure(exc: Exception) -> None:
                await self.close(reason=f"failure-to-start-{str(type(exc))}")
                self.status = Status.failed
                self.__startup_exc = exc
    
            try:
                await wait_for(self.start_unsafe(), timeout=timeout)
            except asyncio.TimeoutError as exc:
                await _close_on_failure(exc)
                raise asyncio.TimeoutError(
                    f"{type(self).__name__} start timed out after {timeout}s."
                ) from exc
            except Exception as exc:
                await _close_on_failure(exc)
>               raise RuntimeError(f"{type(self).__name__} failed to start.") from exc
E               RuntimeError: Scheduler failed to start.

distributed/node.py:526: RuntimeError
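
The test_ws failures are a different problem: the Scheduler never starts because tornado's bind_sockets cannot bind the hard-coded port 8787 (OSError, Errno 98, Address already in use), most likely because something else in the CI run, or a listener leaked by an earlier test, is still holding that port. Below is a hedged sketch, not the project's fix, of starting a Scheduler on a port the OS reports as free; it reuses the open_port helper already used by test_contact_listen_address above, and still has a small race between probing and binding:

    # Hedged sketch: start a Scheduler on a free port instead of hard-coding 8787/8786.
    import asyncio

    from distributed import Scheduler
    from distributed.utils import open_port


    async def start_on_free_port():
        port = open_port()  # ask the OS for an unused TCP port
        async with Scheduler(protocol="ws://", dashboard=False, port=port) as s:
            print("scheduler listening at", s.address)


    asyncio.run(start_on_free_port())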

Check warning on line 0 in distributed.comm.tests.test_ws

All 14 runs failed: test_http_and_comm_server[True-wss://-True-8787] (distributed.comm.tests.test_ws)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
RuntimeError: Scheduler failed to start.
self = <Scheduler 'not-running', workers: 0, cores: 0, tasks: 0>

    @final
    async def start(self):
        async with self._startup_lock:
            if self.status == Status.failed:
                assert self.__startup_exc is not None
                raise self.__startup_exc
            elif self.status != Status.init:
                return self
            timeout = getattr(self, "death_timeout", None)
    
            async def _close_on_failure(exc: Exception) -> None:
                await self.close(reason=f"failure-to-start-{str(type(exc))}")
                self.status = Status.failed
                self.__startup_exc = exc
    
            try:
>               await wait_for(self.start_unsafe(), timeout=timeout)

distributed/node.py:518: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/utils.py:1954: in wait_for
    return await asyncio.wait_for(fut, timeout)
../../../miniconda3/envs/dask-distributed/lib/python3.9/asyncio/tasks.py:442: in wait_for
    return await fut
distributed/scheduler.py:4098: in start_unsafe
    await self.server.listen(
distributed/core.py:364: in listen
    listener = await listen(
distributed/comm/core.py:256: in _
    await self.start()
distributed/comm/ws.py:402: in start
    self.server.listen(self.port)
../../../miniconda3/envs/dask-distributed/lib/python3.9/site-packages/tornado/tcpserver.py:151: in listen
    sockets = bind_sockets(port, address=address)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

port = 8787, address = None, family = <AddressFamily.AF_UNSPEC: 0>
backlog = 128, flags = <AddressInfo.AI_PASSIVE: 1>, reuse_port = False

    def bind_sockets(
        port: int,
        address: str = None,
        family: socket.AddressFamily = socket.AF_UNSPEC,
        backlog: int = _DEFAULT_BACKLOG,
        flags: int = None,
        reuse_port: bool = False,
    ) -> List[socket.socket]:
        """Creates listening sockets bound to the given port and address.
    
        Returns a list of socket objects (multiple sockets are returned if
        the given address maps to multiple IP addresses, which is most common
        for mixed IPv4 and IPv6 use).
    
        Address may be either an IP address or hostname.  If it's a hostname,
        the server will listen on all IP addresses associated with the
        name.  Address may be an empty string or None to listen on all
        available interfaces.  Family may be set to either `socket.AF_INET`
        or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
        both will be used if available.
    
        The ``backlog`` argument has the same meaning as for
        `socket.listen() <socket.socket.listen>`.
    
        ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
        ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
    
        ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket
        in the list. If your platform doesn't support this option ValueError will
        be raised.
        """
        if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
            raise ValueError("the platform doesn't support SO_REUSEPORT")
    
        sockets = []
        if address == "":
            address = None
        if not socket.has_ipv6 and family == socket.AF_UNSPEC:
            # Python can be compiled with --disable-ipv6, which causes
            # operations on AF_INET6 sockets to fail, but does not
            # automatically exclude those results from getaddrinfo
            # results.
            # http://bugs.python.org/issue16208
            family = socket.AF_INET
        if flags is None:
            flags = socket.AI_PASSIVE
        bound_port = None
        unique_addresses = set()  # type: set
        for res in sorted(
            socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags),
            key=lambda x: x[0],
        ):
            if res in unique_addresses:
                continue
    
            unique_addresses.add(res)
    
            af, socktype, proto, canonname, sockaddr = res
            if (
                sys.platform == "darwin"
                and address == "localhost"
                and af == socket.AF_INET6
                and sockaddr[3] != 0
            ):
                # Mac OS X includes a link-local address fe80::1%lo0 in the
                # getaddrinfo results for 'localhost'.  However, the firewall
                # doesn't understand that this is a local address and will
                # prompt for access (often repeatedly, due to an apparent
                # bug in its ability to remember granting access to an
                # application). Skip these addresses.
                continue
            try:
                sock = socket.socket(af, socktype, proto)
            except socket.error as e:
                if errno_from_exception(e) == errno.EAFNOSUPPORT:
                    continue
                raise
            set_close_exec(sock.fileno())
            if os.name != "nt":
                try:
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                except socket.error as e:
                    if errno_from_exception(e) != errno.ENOPROTOOPT:
                        # Hurd doesn't support SO_REUSEADDR.
                        raise
            if reuse_port:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            if af == socket.AF_INET6:
                # On linux, ipv6 sockets accept ipv4 too by default,
                # but this makes it impossible to bind to both
                # 0.0.0.0 in ipv4 and :: in ipv6.  On other systems,
                # separate sockets *must* be used to listen for both ipv4
                # and ipv6.  For consistency, always disable ipv4 on our
                # ipv6 sockets and use a separate ipv4 socket when needed.
                #
                # Python 2.x on windows doesn't have IPPROTO_IPV6.
                if hasattr(socket, "IPPROTO_IPV6"):
                    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    
            # automatic port allocation with port=None
            # should bind on the same port on IPv4 and IPv6
            host, requested_port = sockaddr[:2]
            if requested_port == 0 and bound_port is not None:
                sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
    
            sock.setblocking(False)
>           sock.bind(sockaddr)
E           OSError: [Errno 98] Address already in use

../../../miniconda3/envs/dask-distributed/lib/python3.9/site-packages/tornado/netutil.py:174: OSError

The above exception was the direct cause of the following exception:

dashboard = True, protocol = 'wss://'
security = Security(require_encryption=True, tls_ca_file=Temporary (In-memory), tls_client_cert=Temporary (In-memory), tls_client..., tls_scheduler_key=Temporary (In-memory), tls_worker_cert=Temporary (In-memory), tls_worker_key=Temporary (In-memory))
port = 8787

    @pytest.mark.parametrize(
        "dashboard,protocol,security,port",
        [
            (True, "ws://", None, 8787),
            (True, "wss://", True, 8787),
            (False, "ws://", None, 8787),
            (False, "wss://", True, 8787),
            (True, "ws://", None, 8786),
            (True, "wss://", True, 8786),
            (False, "ws://", None, 8786),
            (False, "wss://", True, 8786),
        ],
    )
    @gen_test()
    async def test_http_and_comm_server(dashboard, protocol, security, port):
        if security:
            xfail_ssl_issue5601()
            pytest.importorskip("cryptography")
            security = Security.temporary()
>       async with Scheduler(
            protocol=protocol, dashboard=dashboard, port=port, security=security
        ) as s:

distributed/comm/tests/test_ws.py:156: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/node.py:213: in __aenter__
    await self
distributed/node.py:207: in _
    await self.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <Scheduler 'not-running', workers: 0, cores: 0, tasks: 0>

    @final
    async def start(self):
        async with self._startup_lock:
            if self.status == Status.failed:
                assert self.__startup_exc is not None
                raise self.__startup_exc
            elif self.status != Status.init:
                return self
            timeout = getattr(self, "death_timeout", None)
    
            async def _close_on_failure(exc: Exception) -> None:
                await self.close(reason=f"failure-to-start-{str(type(exc))}")
                self.status = Status.failed
                self.__startup_exc = exc
    
            try:
                await wait_for(self.start_unsafe(), timeout=timeout)
            except asyncio.TimeoutError as exc:
                await _close_on_failure(exc)
                raise asyncio.TimeoutError(
                    f"{type(self).__name__} start timed out after {timeout}s."
                ) from exc
            except Exception as exc:
                await _close_on_failure(exc)
>               raise RuntimeError(f"{type(self).__name__} failed to start.") from exc
E               RuntimeError: Scheduler failed to start.

distributed/node.py:526: RuntimeError

Check warning on line 0 in distributed.comm.tests.test_ws

All 14 runs failed: test_http_and_comm_server[False-ws://-None-8787] (distributed.comm.tests.test_ws)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
RuntimeError: Scheduler failed to start.
self = <Scheduler 'not-running', workers: 0, cores: 0, tasks: 0>

    @final
    async def start(self):
        async with self._startup_lock:
            if self.status == Status.failed:
                assert self.__startup_exc is not None
                raise self.__startup_exc
            elif self.status != Status.init:
                return self
            timeout = getattr(self, "death_timeout", None)
    
            async def _close_on_failure(exc: Exception) -> None:
                await self.close(reason=f"failure-to-start-{str(type(exc))}")
                self.status = Status.failed
                self.__startup_exc = exc
    
            try:
>               await wait_for(self.start_unsafe(), timeout=timeout)

distributed/node.py:518: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/utils.py:1954: in wait_for
    return await asyncio.wait_for(fut, timeout)
../../../miniconda3/envs/dask-distributed/lib/python3.9/asyncio/tasks.py:442: in wait_for
    return await fut
distributed/scheduler.py:4098: in start_unsafe
    await self.server.listen(
distributed/core.py:364: in listen
    listener = await listen(
distributed/comm/core.py:256: in _
    await self.start()
distributed/comm/ws.py:402: in start
    self.server.listen(self.port)
../../../miniconda3/envs/dask-distributed/lib/python3.9/site-packages/tornado/tcpserver.py:151: in listen
    sockets = bind_sockets(port, address=address)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

port = 8787, address = None, family = <AddressFamily.AF_UNSPEC: 0>
backlog = 128, flags = <AddressInfo.AI_PASSIVE: 1>, reuse_port = False

    def bind_sockets(
        port: int,
        address: str = None,
        family: socket.AddressFamily = socket.AF_UNSPEC,
        backlog: int = _DEFAULT_BACKLOG,
        flags: int = None,
        reuse_port: bool = False,
    ) -> List[socket.socket]:
        """Creates listening sockets bound to the given port and address.
    
        Returns a list of socket objects (multiple sockets are returned if
        the given address maps to multiple IP addresses, which is most common
        for mixed IPv4 and IPv6 use).
    
        Address may be either an IP address or hostname.  If it's a hostname,
        the server will listen on all IP addresses associated with the
        name.  Address may be an empty string or None to listen on all
        available interfaces.  Family may be set to either `socket.AF_INET`
        or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
        both will be used if available.
    
        The ``backlog`` argument has the same meaning as for
        `socket.listen() <socket.socket.listen>`.
    
        ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
        ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
    
        ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket
        in the list. If your platform doesn't support this option ValueError will
        be raised.
        """
        if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
            raise ValueError("the platform doesn't support SO_REUSEPORT")
    
        sockets = []
        if address == "":
            address = None
        if not socket.has_ipv6 and family == socket.AF_UNSPEC:
            # Python can be compiled with --disable-ipv6, which causes
            # operations on AF_INET6 sockets to fail, but does not
            # automatically exclude those results from getaddrinfo
            # results.
            # http://bugs.python.org/issue16208
            family = socket.AF_INET
        if flags is None:
            flags = socket.AI_PASSIVE
        bound_port = None
        unique_addresses = set()  # type: set
        for res in sorted(
            socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags),
            key=lambda x: x[0],
        ):
            if res in unique_addresses:
                continue
    
            unique_addresses.add(res)
    
            af, socktype, proto, canonname, sockaddr = res
            if (
                sys.platform == "darwin"
                and address == "localhost"
                and af == socket.AF_INET6
                and sockaddr[3] != 0
            ):
                # Mac OS X includes a link-local address fe80::1%lo0 in the
                # getaddrinfo results for 'localhost'.  However, the firewall
                # doesn't understand that this is a local address and will
                # prompt for access (often repeatedly, due to an apparent
                # bug in its ability to remember granting access to an
                # application). Skip these addresses.
                continue
            try:
                sock = socket.socket(af, socktype, proto)
            except socket.error as e:
                if errno_from_exception(e) == errno.EAFNOSUPPORT:
                    continue
                raise
            set_close_exec(sock.fileno())
            if os.name != "nt":
                try:
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                except socket.error as e:
                    if errno_from_exception(e) != errno.ENOPROTOOPT:
                        # Hurd doesn't support SO_REUSEADDR.
                        raise
            if reuse_port:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            if af == socket.AF_INET6:
                # On linux, ipv6 sockets accept ipv4 too by default,
                # but this makes it impossible to bind to both
                # 0.0.0.0 in ipv4 and :: in ipv6.  On other systems,
                # separate sockets *must* be used to listen for both ipv4
                # and ipv6.  For consistency, always disable ipv4 on our
                # ipv6 sockets and use a separate ipv4 socket when needed.
                #
                # Python 2.x on windows doesn't have IPPROTO_IPV6.
                if hasattr(socket, "IPPROTO_IPV6"):
                    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    
            # automatic port allocation with port=None
            # should bind on the same port on IPv4 and IPv6
            host, requested_port = sockaddr[:2]
            if requested_port == 0 and bound_port is not None:
                sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
    
            sock.setblocking(False)
>           sock.bind(sockaddr)
E           OSError: [Errno 98] Address already in use

../../../miniconda3/envs/dask-distributed/lib/python3.9/site-packages/tornado/netutil.py:174: OSError

The above exception was the direct cause of the following exception:

dashboard = False, protocol = 'ws://', security = None, port = 8787

    @pytest.mark.parametrize(
        "dashboard,protocol,security,port",
        [
            (True, "ws://", None, 8787),
            (True, "wss://", True, 8787),
            (False, "ws://", None, 8787),
            (False, "wss://", True, 8787),
            (True, "ws://", None, 8786),
            (True, "wss://", True, 8786),
            (False, "ws://", None, 8786),
            (False, "wss://", True, 8786),
        ],
    )
    @gen_test()
    async def test_http_and_comm_server(dashboard, protocol, security, port):
        if security:
            xfail_ssl_issue5601()
            pytest.importorskip("cryptography")
            security = Security.temporary()
>       async with Scheduler(
            protocol=protocol, dashboard=dashboard, port=port, security=security
        ) as s:

distributed/comm/tests/test_ws.py:156: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/node.py:213: in __aenter__
    await self
distributed/node.py:207: in _
    await self.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <Scheduler 'not-running', workers: 0, cores: 0, tasks: 0>

    @final
    async def start(self):
        async with self._startup_lock:
            if self.status == Status.failed:
                assert self.__startup_exc is not None
                raise self.__startup_exc
            elif self.status != Status.init:
                return self
            timeout = getattr(self, "death_timeout", None)
    
            async def _close_on_failure(exc: Exception) -> None:
                await self.close(reason=f"failure-to-start-{str(type(exc))}")
                self.status = Status.failed
                self.__startup_exc = exc
    
            try:
                await wait_for(self.start_unsafe(), timeout=timeout)
            except asyncio.TimeoutError as exc:
                await _close_on_failure(exc)
                raise asyncio.TimeoutError(
                    f"{type(self).__name__} start timed out after {timeout}s."
                ) from exc
            except Exception as exc:
                await _close_on_failure(exc)
>               raise RuntimeError(f"{type(self).__name__} failed to start.") from exc
E               RuntimeError: Scheduler failed to start.

distributed/node.py:526: RuntimeError

Check warning on line 0 in distributed.comm.tests.test_ws

All 14 runs failed: test_http_and_comm_server[False-wss://-True-8787] (distributed.comm.tests.test_ws)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
RuntimeError: Scheduler failed to start.
self = <Scheduler 'not-running', workers: 0, cores: 0, tasks: 0>

    @final
    async def start(self):
        async with self._startup_lock:
            if self.status == Status.failed:
                assert self.__startup_exc is not None
                raise self.__startup_exc
            elif self.status != Status.init:
                return self
            timeout = getattr(self, "death_timeout", None)
    
            async def _close_on_failure(exc: Exception) -> None:
                await self.close(reason=f"failure-to-start-{str(type(exc))}")
                self.status = Status.failed
                self.__startup_exc = exc
    
            try:
>               await wait_for(self.start_unsafe(), timeout=timeout)

distributed/node.py:518: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/utils.py:1954: in wait_for
    return await asyncio.wait_for(fut, timeout)
../../../miniconda3/envs/dask-distributed/lib/python3.9/asyncio/tasks.py:442: in wait_for
    return await fut
distributed/scheduler.py:4098: in start_unsafe
    await self.server.listen(
distributed/core.py:364: in listen
    listener = await listen(
distributed/comm/core.py:256: in _
    await self.start()
distributed/comm/ws.py:402: in start
    self.server.listen(self.port)
../../../miniconda3/envs/dask-distributed/lib/python3.9/site-packages/tornado/tcpserver.py:151: in listen
    sockets = bind_sockets(port, address=address)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

port = 8787, address = None, family = <AddressFamily.AF_UNSPEC: 0>
backlog = 128, flags = <AddressInfo.AI_PASSIVE: 1>, reuse_port = False

    def bind_sockets(
        port: int,
        address: str = None,
        family: socket.AddressFamily = socket.AF_UNSPEC,
        backlog: int = _DEFAULT_BACKLOG,
        flags: int = None,
        reuse_port: bool = False,
    ) -> List[socket.socket]:
        """Creates listening sockets bound to the given port and address.
    
        Returns a list of socket objects (multiple sockets are returned if
        the given address maps to multiple IP addresses, which is most common
        for mixed IPv4 and IPv6 use).
    
        Address may be either an IP address or hostname.  If it's a hostname,
        the server will listen on all IP addresses associated with the
        name.  Address may be an empty string or None to listen on all
        available interfaces.  Family may be set to either `socket.AF_INET`
        or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
        both will be used if available.
    
        The ``backlog`` argument has the same meaning as for
        `socket.listen() <socket.socket.listen>`.
    
        ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
        ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
    
        ``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket
        in the list. If your platform doesn't support this option ValueError will
        be raised.
        """
        if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
            raise ValueError("the platform doesn't support SO_REUSEPORT")
    
        sockets = []
        if address == "":
            address = None
        if not socket.has_ipv6 and family == socket.AF_UNSPEC:
            # Python can be compiled with --disable-ipv6, which causes
            # operations on AF_INET6 sockets to fail, but does not
            # automatically exclude those results from getaddrinfo
            # results.
            # http://bugs.python.org/issue16208
            family = socket.AF_INET
        if flags is None:
            flags = socket.AI_PASSIVE
        bound_port = None
        unique_addresses = set()  # type: set
        for res in sorted(
            socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags),
            key=lambda x: x[0],
        ):
            if res in unique_addresses:
                continue
    
            unique_addresses.add(res)
    
            af, socktype, proto, canonname, sockaddr = res
            if (
                sys.platform == "darwin"
                and address == "localhost"
                and af == socket.AF_INET6
                and sockaddr[3] != 0
            ):
                # Mac OS X includes a link-local address fe80::1%lo0 in the
                # getaddrinfo results for 'localhost'.  However, the firewall
                # doesn't understand that this is a local address and will
                # prompt for access (often repeatedly, due to an apparent
                # bug in its ability to remember granting access to an
                # application). Skip these addresses.
                continue
            try:
                sock = socket.socket(af, socktype, proto)
            except socket.error as e:
                if errno_from_exception(e) == errno.EAFNOSUPPORT:
                    continue
                raise
            set_close_exec(sock.fileno())
            if os.name != "nt":
                try:
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                except socket.error as e:
                    if errno_from_exception(e) != errno.ENOPROTOOPT:
                        # Hurd doesn't support SO_REUSEADDR.
                        raise
            if reuse_port:
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
            if af == socket.AF_INET6:
                # On linux, ipv6 sockets accept ipv4 too by default,
                # but this makes it impossible to bind to both
                # 0.0.0.0 in ipv4 and :: in ipv6.  On other systems,
                # separate sockets *must* be used to listen for both ipv4
                # and ipv6.  For consistency, always disable ipv4 on our
                # ipv6 sockets and use a separate ipv4 socket when needed.
                #
                # Python 2.x on windows doesn't have IPPROTO_IPV6.
                if hasattr(socket, "IPPROTO_IPV6"):
                    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
    
            # automatic port allocation with port=None
            # should bind on the same port on IPv4 and IPv6
            host, requested_port = sockaddr[:2]
            if requested_port == 0 and bound_port is not None:
                sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
    
            sock.setblocking(False)
>           sock.bind(sockaddr)
E           OSError: [Errno 98] Address already in use

../../../miniconda3/envs/dask-distributed/lib/python3.9/site-packages/tornado/netutil.py:174: OSError

The above exception was the direct cause of the following exception:

dashboard = False, protocol = 'wss://'
security = Security(require_encryption=True, tls_ca_file=Temporary (In-memory), tls_client_cert=Temporary (In-memory), tls_client..., tls_scheduler_key=Temporary (In-memory), tls_worker_cert=Temporary (In-memory), tls_worker_key=Temporary (In-memory))
port = 8787

    @pytest.mark.parametrize(
        "dashboard,protocol,security,port",
        [
            (True, "ws://", None, 8787),
            (True, "wss://", True, 8787),
            (False, "ws://", None, 8787),
            (False, "wss://", True, 8787),
            (True, "ws://", None, 8786),
            (True, "wss://", True, 8786),
            (False, "ws://", None, 8786),
            (False, "wss://", True, 8786),
        ],
    )
    @gen_test()
    async def test_http_and_comm_server(dashboard, protocol, security, port):
        if security:
            xfail_ssl_issue5601()
            pytest.importorskip("cryptography")
            security = Security.temporary()
>       async with Scheduler(
            protocol=protocol, dashboard=dashboard, port=port, security=security
        ) as s:

distributed/comm/tests/test_ws.py:156: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
distributed/node.py:213: in __aenter__
    await self
distributed/node.py:207: in _
    await self.start()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <Scheduler 'not-running', workers: 0, cores: 0, tasks: 0>

    @final
    async def start(self):
        async with self._startup_lock:
            if self.status == Status.failed:
                assert self.__startup_exc is not None
                raise self.__startup_exc
            elif self.status != Status.init:
                return self
            timeout = getattr(self, "death_timeout", None)
    
            async def _close_on_failure(exc: Exception) -> None:
                await self.close(reason=f"failure-to-start-{str(type(exc))}")
                self.status = Status.failed
                self.__startup_exc = exc
    
            try:
                await wait_for(self.start_unsafe(), timeout=timeout)
            except asyncio.TimeoutError as exc:
                await _close_on_failure(exc)
                raise asyncio.TimeoutError(
                    f"{type(self).__name__} start timed out after {timeout}s."
                ) from exc
            except Exception as exc:
                await _close_on_failure(exc)
>               raise RuntimeError(f"{type(self).__name__} failed to start.") from exc
E               RuntimeError: Scheduler failed to start.

distributed/node.py:526: RuntimeError
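
Note on the failure above: the RuntimeError is only a wrapper; the root cause is the OSError: [Errno 98] Address already in use raised inside tornado's bind_sockets. With both the comm port and the dashboard configured on 8787, the startup path appears to bind the port a second time instead of reusing the already-running HTTP server. A minimal sketch, independent of Dask, showing how a second bind on a port that is already held produces exactly this errno (the port number is only illustrative):

    import socket

    # The first socket takes ownership of the port.
    first = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    first.bind(("127.0.0.1", 8787))
    first.listen()

    # A second bind on the same address/port fails with EADDRINUSE,
    # the same OSError reported in the traceback above.
    second = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        second.bind(("127.0.0.1", 8787))
    except OSError as exc:
        print(exc)  # Errno 98 on Linux (48 on macOS, 10048 on Windows)
    finally:
        second.close()
        first.close()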

Check warning on line 0 in distributed.comm.tests.test_ws

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

All 14 runs failed: test_http_and_comm_server[True-ws://-None-8786] (distributed.comm.tests.test_ws)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
AttributeError: 'Scheduler' object has no attribute 'listener'
dashboard = True, protocol = 'ws://', security = None, port = 8786

    @pytest.mark.parametrize(
        "dashboard,protocol,security,port",
        [
            (True, "ws://", None, 8787),
            (True, "wss://", True, 8787),
            (False, "ws://", None, 8787),
            (False, "wss://", True, 8787),
            (True, "ws://", None, 8786),
            (True, "wss://", True, 8786),
            (False, "ws://", None, 8786),
            (False, "wss://", True, 8786),
        ],
    )
    @gen_test()
    async def test_http_and_comm_server(dashboard, protocol, security, port):
        if security:
            xfail_ssl_issue5601()
            pytest.importorskip("cryptography")
            security = Security.temporary()
        async with Scheduler(
            protocol=protocol, dashboard=dashboard, port=port, security=security
        ) as s:
            if port == 8787:
                assert s.http_server is s.listener.server
            else:
>               assert s.http_server is not s.listener.server
E               AttributeError: 'Scheduler' object has no attribute 'listener'

distributed/comm/tests/test_ws.py:162: AttributeError
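
This AttributeError (repeated in the three remaining 8786 parametrizations below) indicates that the listener is no longer an attribute of Scheduler itself. The earlier traceback (distributed/scheduler.py:4098: await self.server.listen(...)) suggests listening now happens on a composed server object. A small sketch of how the assertion could tolerate either layout, assuming the composed object is reachable as scheduler.server and exposes .listener (an assumed attribute path, not confirmed by this log):

    def get_listener(scheduler):
        """Return the comm listener, wherever the decomposition left it.

        Falls back from the historical ``scheduler.listener`` attribute to a
        hypothetical ``scheduler.server.listener`` path; the latter is an
        assumption based on the traceback above, not a documented API.
        """
        if hasattr(scheduler, "listener"):
            return scheduler.listener
        return scheduler.server.listener

The failing assertions would then read assert s.http_server is get_listener(s).server (and the negated form for non-default ports).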

Check warning on line 0 in distributed.comm.tests.test_ws

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

All 14 runs failed: test_http_and_comm_server[True-wss://-True-8786] (distributed.comm.tests.test_ws)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
AttributeError: 'Scheduler' object has no attribute 'listener'
dashboard = True, protocol = 'wss://'
security = Security(require_encryption=True, tls_ca_file=Temporary (In-memory), tls_client_cert=Temporary (In-memory), tls_client..., tls_scheduler_key=Temporary (In-memory), tls_worker_cert=Temporary (In-memory), tls_worker_key=Temporary (In-memory))
port = 8786

    @pytest.mark.parametrize(
        "dashboard,protocol,security,port",
        [
            (True, "ws://", None, 8787),
            (True, "wss://", True, 8787),
            (False, "ws://", None, 8787),
            (False, "wss://", True, 8787),
            (True, "ws://", None, 8786),
            (True, "wss://", True, 8786),
            (False, "ws://", None, 8786),
            (False, "wss://", True, 8786),
        ],
    )
    @gen_test()
    async def test_http_and_comm_server(dashboard, protocol, security, port):
        if security:
            xfail_ssl_issue5601()
            pytest.importorskip("cryptography")
            security = Security.temporary()
        async with Scheduler(
            protocol=protocol, dashboard=dashboard, port=port, security=security
        ) as s:
            if port == 8787:
                assert s.http_server is s.listener.server
            else:
>               assert s.http_server is not s.listener.server
E               AttributeError: 'Scheduler' object has no attribute 'listener'

distributed/comm/tests/test_ws.py:162: AttributeError

Check warning on line 0 in distributed.comm.tests.test_ws

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

All 14 runs failed: test_http_and_comm_server[False-ws://-None-8786] (distributed.comm.tests.test_ws)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
AttributeError: 'Scheduler' object has no attribute 'listener'
dashboard = False, protocol = 'ws://', security = None, port = 8786

    @pytest.mark.parametrize(
        "dashboard,protocol,security,port",
        [
            (True, "ws://", None, 8787),
            (True, "wss://", True, 8787),
            (False, "ws://", None, 8787),
            (False, "wss://", True, 8787),
            (True, "ws://", None, 8786),
            (True, "wss://", True, 8786),
            (False, "ws://", None, 8786),
            (False, "wss://", True, 8786),
        ],
    )
    @gen_test()
    async def test_http_and_comm_server(dashboard, protocol, security, port):
        if security:
            xfail_ssl_issue5601()
            pytest.importorskip("cryptography")
            security = Security.temporary()
        async with Scheduler(
            protocol=protocol, dashboard=dashboard, port=port, security=security
        ) as s:
            if port == 8787:
                assert s.http_server is s.listener.server
            else:
>               assert s.http_server is not s.listener.server
E               AttributeError: 'Scheduler' object has no attribute 'listener'

distributed/comm/tests/test_ws.py:162: AttributeError

Check warning on line 0 in distributed.comm.tests.test_ws

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

All 14 runs failed: test_http_and_comm_server[False-wss://-True-8786] (distributed.comm.tests.test_ws)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
AttributeError: 'Scheduler' object has no attribute 'listener'
dashboard = False, protocol = 'wss://'
security = Security(require_encryption=True, tls_ca_file=Temporary (In-memory), tls_client_cert=Temporary (In-memory), tls_client..., tls_scheduler_key=Temporary (In-memory), tls_worker_cert=Temporary (In-memory), tls_worker_key=Temporary (In-memory))
port = 8786

    @pytest.mark.parametrize(
        "dashboard,protocol,security,port",
        [
            (True, "ws://", None, 8787),
            (True, "wss://", True, 8787),
            (False, "ws://", None, 8787),
            (False, "wss://", True, 8787),
            (True, "ws://", None, 8786),
            (True, "wss://", True, 8786),
            (False, "ws://", None, 8786),
            (False, "wss://", True, 8786),
        ],
    )
    @gen_test()
    async def test_http_and_comm_server(dashboard, protocol, security, port):
        if security:
            xfail_ssl_issue5601()
            pytest.importorskip("cryptography")
            security = Security.temporary()
        async with Scheduler(
            protocol=protocol, dashboard=dashboard, port=port, security=security
        ) as s:
            if port == 8787:
                assert s.http_server is s.listener.server
            else:
>               assert s.http_server is not s.listener.server
E               AttributeError: 'Scheduler' object has no attribute 'listener'

distributed/comm/tests/test_ws.py:162: AttributeError

Check warning on line 0 in distributed.deploy.tests.test_local

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

All 14 runs failed: test_local_cluster_supports_blocked_handlers (distributed.deploy.tests.test_local)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
assert "'run_function' handler has been explicitly disallowed in Scheduler" in "The 'run_function' handler has been explicitly disallowed in Server, possibly due to security concerns."
 +  where "The 'run_function' handler has been explicitly disallowed in Server, possibly due to security concerns." = str(ValueError("The 'run_function' handler has been explicitly disallowed in Server, possibly due to security concerns."))
 +    where ValueError("The 'run_function' handler has been explicitly disallowed in Server, possibly due to security concerns.") = <ExceptionInfo ValueError("The 'run_function' handler has been explicitly disallowed in Server, possibly due to security concerns.") tblen=11>.value
loop = <tornado.platform.asyncio.AsyncIOMainLoop object at 0x7fb7b492b6d0>

    def test_local_cluster_supports_blocked_handlers(loop):
        with LocalCluster(
            blocked_handlers=["run_function"],
            n_workers=0,
            loop=loop,
            dashboard_address=":0",
        ) as c:
            with Client(c) as client:
                with pytest.raises(ValueError) as exc:
                    client.run_on_scheduler(lambda x: x, 42)
    
>       assert "'run_function' handler has been explicitly disallowed in Scheduler" in str(
            exc.value
        )
E       assert "'run_function' handler has been explicitly disallowed in Scheduler" in "The 'run_function' handler has been explicitly disallowed in Server, possibly due to security concerns."
E        +  where "The 'run_function' handler has been explicitly disallowed in Server, possibly due to security concerns." = str(ValueError("The 'run_function' handler has been explicitly disallowed in Server, possibly due to security concerns."))
E        +    where ValueError("The 'run_function' handler has been explicitly disallowed in Server, possibly due to security concerns.") = <ExceptionInfo ValueError("The 'run_function' handler has been explicitly disallowed in Server, possibly due to security concerns.") tblen=11>.value

distributed/deploy/tests/test_local.py:67: AssertionError
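
The handler is still being blocked; only the class name in the error text changed from "Scheduler" to "Server" after the decomposition. If the new wording is intended, the assertion could match either name; a minimal sketch of such a check (the message string is copied from the output above):

    import re

    msg = (
        "The 'run_function' handler has been explicitly disallowed "
        "in Server, possibly due to security concerns."
    )

    # Accept either the old ("Scheduler") or the new ("Server") class name.
    assert re.search(
        r"'run_function' handler has been explicitly disallowed in (Scheduler|Server)",
        msg,
    )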

Check warning on line 0 in distributed.deploy.tests.test_local

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

1 out of 14 runs failed: test_defaults_5 (distributed.deploy.tests.test_local)

artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]

Check warning on line 0 in distributed.diagnostics.tests.test_install_plugin

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

All 14 runs failed: test_package_install_restarts_on_nanny (distributed.diagnostics.tests.test_install_plugin)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 30s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 30s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 30s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 30s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 30s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 30s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 30s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 30s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 30s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 30s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
asyncio.exceptions.TimeoutError: Test timeout (30) hit after 30.00105986699998s.
========== Test stack trace starts here ==========
Stack for <Task pending name='Task-27171' coro=<test_package_install_restarts_on_nanny() running at /home/runner/work/distributed/distributed/distributed/diagnostics/tests/test_install_plugin.py:155> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7fb7b54443d0>()]>> (most recent call last):
  File "/home/runner/work/distributed/distributed/distributed/diagnostics/tests/test_install_plugin.py", line 155, in test_package_install_restarts_on_nanny
    await asyncio.sleep(0.01)
args = (), kwds = {}

    @wraps(func)
    def inner(*args, **kwds):
        with self._recreate_cm():
>           return func(*args, **kwds)

../../../miniconda3/envs/dask-distributed/lib/python3.9/contextlib.py:79: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
../../../miniconda3/envs/dask-distributed/lib/python3.9/contextlib.py:79: in inner
    return func(*args, **kwds)
distributed/utils_test.py:1102: in test_func
    return _run_and_close_tornado(async_fn_outer)
distributed/utils_test.py:378: in _run_and_close_tornado
    return asyncio_run(inner_fn(), loop_factory=get_loop_factory())
distributed/compatibility.py:236: in asyncio_run
    return loop.run_until_complete(main)
../../../miniconda3/envs/dask-distributed/lib/python3.9/asyncio/base_events.py:647: in run_until_complete
    return future.result()
distributed/utils_test.py:375: in inner_fn
    return await async_fn(*args, **kwargs)
distributed/utils_test.py:1099: in async_fn_outer
    return await utils_wait_for(async_fn(), timeout=timeout * 2)
distributed/utils.py:1954: in wait_for
    return await asyncio.wait_for(fut, timeout)
../../../miniconda3/envs/dask-distributed/lib/python3.9/asyncio/tasks.py:479: in wait_for
    return fut.result()
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

    async def async_fn():
        result = None
        with dask.config.set(config):
            async with (
                _cluster_factory() as (s, workers),
                _client_factory(s) as c,
            ):
                args = [s] + workers
                if c is not None:
                    args = [c] + args
                try:
                    coro = func(*args, *outer_args, **kwargs)
                    task = asyncio.create_task(coro)
                    coro2 = utils_wait_for(
                        asyncio.shield(task), timeout=deadline.remaining
                    )
                    result = await coro2
                    validate_state(s, *workers)
    
                except asyncio.TimeoutError:
                    assert task
                    elapsed = deadline.elapsed
                    buffer = io.StringIO()
                    # This stack indicates where the coro/test is suspended
                    task.print_stack(file=buffer)
    
                    if cluster_dump_directory:
                        await dump_cluster_state(
                            s=s,
                            ws=workers,
                            output_dir=cluster_dump_directory,
                            func_name=func.__name__,
                        )
    
                    task.cancel()
                    while not task.cancelled():
                        await asyncio.sleep(0.01)
    
                    # Hopefully, the hang has been caused by inconsistent
                    # state, which should be much more meaningful than the
                    # timeout
                    validate_state(s, *workers)
    
                    # Remove as much of the traceback as possible; it's
                    # uninteresting boilerplate from utils_test and asyncio
                    # and not from the code being tested.
>                   raise asyncio.TimeoutError(
                        f"Test timeout ({timeout}) hit after {elapsed}s.\n"
                        "========== Test stack trace starts here ==========\n"
                        f"{buffer.getvalue()}"
                    ) from None
E                   asyncio.exceptions.TimeoutError: Test timeout (30) hit after 30.00105986699998s.
E                   ========== Test stack trace starts here ==========
E                   Stack for <Task pending name='Task-27171' coro=<test_package_install_restarts_on_nanny() running at /home/runner/work/distributed/distributed/distributed/diagnostics/tests/test_install_plugin.py:155> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x7fb7b54443d0>()]>> (most recent call last):
E                     File "/home/runner/work/distributed/distributed/distributed/diagnostics/tests/test_install_plugin.py", line 155, in test_package_install_restarts_on_nanny
E                       await asyncio.sleep(0.01)

distributed/utils_test.py:1041: TimeoutError
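
The timeout is hit while the test body is parked in an unbounded polling loop (await asyncio.sleep(0.01) at test_install_plugin.py:155), presumably waiting for the nanny-restarted workers to come back, which never happens here. A generic sketch of bounding such a poll so the failure surfaces as a timeout of the awaited condition rather than of the whole test (condition is a placeholder, not part of the test):

    import asyncio

    async def wait_until(condition, timeout=30, interval=0.01):
        """Poll ``condition`` until it is truthy or ``timeout`` seconds pass."""

        async def _poll():
            while not condition():
                await asyncio.sleep(interval)

        # wait_for cancels the polling task and raises TimeoutError on expiry.
        await asyncio.wait_for(_poll(), timeout)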

Check warning on line 0 in distributed.diagnostics.tests.test_memory_sampler

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

All 14 runs failed: test_async (distributed.diagnostics.tests.test_memory_sampler)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: 'NoneType' object is not subscriptable
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:34615', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:46111', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:39789', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>

    @gen_cluster(client=True)
    async def test_async(c, s, a, b):
        ms = MemorySampler()
        async with ms.sample("foo", measure="managed", interval=0.1):
            f = c.submit(lambda: 1)
            await f
            await asyncio.sleep(0.5)
    
>       assert ms.samples["foo"][0][1] == 0
E       TypeError: 'NoneType' object is not subscriptable

distributed/diagnostics/tests/test_memory_sampler.py:20: TypeError
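
This failure and the remaining MemorySampler failures below share one symptom: ms.samples[<label>] is None once the sampling context exits, i.e. nothing was recorded for that label (plausibly because the scheduler-side sampling call now returns None after the decomposition; that is an inference from the tests, not confirmed by this log). A small debug sketch, assuming ms.samples behaves like a plain dict as the subscript access in the tests suggests, that surfaces the missing series explicitly instead of the TypeError:

    def first_sample(ms, label="foo"):
        """Return the first (timestamp, value) pair recorded for ``label``.

        Fails with a clear message when no samples were stored, instead of the
        TypeError seen above when ``ms.samples[label]`` is ``None``.
        """
        samples = ms.samples.get(label)
        if samples is None:
            raise AssertionError(f"no samples were recorded for label {label!r}")
        return samples[0]

The failing line would then read assert first_sample(ms)[1] == 0.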

Check warning on line 0 in distributed.diagnostics.tests.test_memory_sampler

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

All 14 runs failed: test_sync (distributed.diagnostics.tests.test_memory_sampler)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 4s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 4s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 2s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 2s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: 'NoneType' object is not subscriptable
client = <Client: 'tcp://127.0.0.1:46125' processes=2 threads=2, memory=31.21 GiB>

    def test_sync(client):
        ms = MemorySampler()
        with ms.sample("foo", measure="managed", interval=0.1):
            f = client.submit(lambda: 1)
            f.result()
            time.sleep(0.5)
    
>       assert ms.samples["foo"][0][1] == 0
E       TypeError: 'NoneType' object is not subscriptable

distributed/diagnostics/tests/test_memory_sampler.py:34: TypeError

Check warning on line 0 in distributed.diagnostics.tests.test_memory_sampler

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

All 14 runs failed: test_at_least_one_sample (distributed.diagnostics.tests.test_memory_sampler)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 0s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: object of type 'NoneType' has no len()
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:35657', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:39547', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:42489', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>

    @gen_cluster(client=True)  # MemorySampler internally fetches the client
    async def test_at_least_one_sample(c, s, a, b):
        """The first sample is taken immediately
        Also test omitting the label
        """
        ms = MemorySampler()
        async with ms.sample():
            pass
>       assert len(next(iter(ms.samples.values()))) == 1
E       TypeError: object of type 'NoneType' has no len()

distributed/diagnostics/tests/test_memory_sampler.py:46: TypeError

Check warning on line 0 in distributed.diagnostics.tests.test_memory_sampler

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

All 14 runs failed: test_multi_sample (distributed.diagnostics.tests.test_memory_sampler)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-numpy-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: object of type 'NoneType' has no len()
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:45357', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:32833', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:43797', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>

    @pytest.mark.slow
    @gen_cluster(client=True)
    async def test_multi_sample(c, s, a, b):
        ms = MemorySampler()
        s1 = ms.sample("managed", measure="managed", interval=0.15)
        s2 = ms.sample("process", interval=0.2)
        async with s1, s2:
            idle_mem = s.memory.process
            f = c.submit(lambda: "x" * 100 * 2**20)  # 100 MiB
            await f
            while s.memory.process < idle_mem + 80 * 2**20:
                # Wait for heartbeat
                await asyncio.sleep(0.01)
            await asyncio.sleep(0.6)
    
        m = ms.samples["managed"]
        p = ms.samples["process"]
>       assert len(m) >= 2
E       TypeError: object of type 'NoneType' has no len()

distributed/diagnostics/tests/test_memory_sampler.py:66: TypeError

Check warning on line 0 in distributed.diagnostics.tests.test_memory_sampler

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

12 out of 14 runs failed: test_pandas_multiseries[False] (distributed.diagnostics.tests.test_memory_sampler)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: 'NoneType' object is not subscriptable
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:35367', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:36039', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:35981', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
align = False

    @pytest.mark.slow
    @gen_cluster(client=True)
    @pytest.mark.parametrize("align", [False, True])
    async def test_pandas_multiseries(c, s, a, b, align):
        """Test that multiple series are upsampled and aligned to each other"""
        pd = pytest.importorskip("pandas")
    
        ms = MemorySampler()
        for label, interval, final_sleep in (("foo", 0.15, 1.0), ("bar", 0.2, 0.6)):
            async with ms.sample(label, measure="managed", interval=interval):
                x = c.submit(lambda: 1, key="x")
                await x
                await asyncio.sleep(final_sleep)
            del x
            while "x" in s.tasks:
                await asyncio.sleep(0.01)
    
        for label in ("foo", "bar"):
>           assert ms.samples[label][0][1] == 0
E           TypeError: 'NoneType' object is not subscriptable

distributed/diagnostics/tests/test_memory_sampler.py:126: TypeError

Check warning on line 0 in distributed.diagnostics.tests.test_memory_sampler

See this annotation in the file changed.

@github-actions github-actions / Unit Test Results

12 out of 14 runs failed: test_pandas_multiseries[True] (distributed.diagnostics.tests.test_memory_sampler)

artifacts/macos-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.10-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.11-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.12-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-default-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_expr-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-3.9-no_queue-notci1/pytest.xml [took 1s]
artifacts/ubuntu-latest-mindeps-pandas-notci1/pytest.xml [took 1s]
artifacts/windows-latest-3.10-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.11-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.12-default-notci1/pytest.xml [took 0s]
artifacts/windows-latest-3.9-default-notci1/pytest.xml [took 0s]
Raw output
TypeError: 'NoneType' object is not subscriptable
c = <Client: No scheduler connected>
s = <Scheduler 'tcp://127.0.0.1:43487', workers: 0, cores: 0, tasks: 0>
a = <Worker 'tcp://127.0.0.1:35337', name: 0, status: closed, stored: 0, running: 0/1, ready: 0, comm: 0, waiting: 0>
b = <Worker 'tcp://127.0.0.1:33983', name: 1, status: closed, stored: 0, running: 0/2, ready: 0, comm: 0, waiting: 0>
align = True

    @pytest.mark.slow
    @gen_cluster(client=True)
    @pytest.mark.parametrize("align", [False, True])
    async def test_pandas_multiseries(c, s, a, b, align):
        """Test that multiple series are upsampled and aligned to each other"""
        pd = pytest.importorskip("pandas")
    
        ms = MemorySampler()
        for label, interval, final_sleep in (("foo", 0.15, 1.0), ("bar", 0.2, 0.6)):
            async with ms.sample(label, measure="managed", interval=interval):
                x = c.submit(lambda: 1, key="x")
                await x
                await asyncio.sleep(final_sleep)
            del x
            while "x" in s.tasks:
                await asyncio.sleep(0.01)
    
        for label in ("foo", "bar"):
>           assert ms.samples[label][0][1] == 0
E           TypeError: 'NoneType' object is not subscriptable

distributed/diagnostics/tests/test_memory_sampler.py:126: TypeError