-
Notifications
You must be signed in to change notification settings - Fork 33
fix: prevent deadlock during Wayland client destruction #819
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: master
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,7 +1,7 @@ | ||
| // Copyright (C) 2023 JiDe Zhang <zhangjide@deepin.org>. | ||
| // Copyright (C) 2023-2026 JiDe Zhang <zhangjide@deepin.org>. | ||
| // SPDX-License-Identifier: Apache-2.0 OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only | ||
|
|
||
| #include <QObject> | ||
| #define private public | ||
| #include <QCoreApplication> | ||
| #include <private/qhighdpiscaling_p.h> | ||
|
|
@@ -23,12 +23,13 @@ | |
| #include <QEvent> | ||
| #include <QCoreApplication> | ||
| #include <QAbstractEventDispatcher> | ||
| #include <QSocketNotifier> | ||
| #include <QMutex> | ||
| #include <QDebug> | ||
| #include <QScopedValueRollback> | ||
| #include <QProcess> | ||
| #include <QLocalServer> | ||
| #include <QLocalSocket> | ||
| #include <private/qthread_p.h> | ||
| #include <private/qguiapplication_p.h> | ||
| #include <qpa/qplatformthemefactory_p.h> | ||
|
|
@@ -104,18 +105,26 @@ | |
| loop = wl_display_get_event_loop(display->handle()); | ||
| int fd = wl_event_loop_get_fd(loop); | ||
|
|
||
| auto processWaylandEvents = [this] { | ||
| sockNot.reset(new QSocketNotifier(fd, QSocketNotifier::Read)); | ||
| QObject::connect(sockNot.get(), &QSocketNotifier::activated, q, [this] { | ||
| if (isProcessingEvents) | ||
| return; | ||
|
|
||
| QScopedValueRollback<bool> guard(isProcessingEvents, true); | ||
|
|
||
| int ret = wl_event_loop_dispatch(loop, 0); | ||
| if (ret) | ||
| fprintf(stderr, "wl_event_loop_dispatch error: %d\n", ret); | ||
| wl_display_flush_clients(display->handle()); | ||
| }; | ||
|
|
||
| sockNot.reset(new QSocketNotifier(fd, QSocketNotifier::Read)); | ||
| QObject::connect(sockNot.get(), &QSocketNotifier::activated, q, processWaylandEvents); | ||
| }); | ||
|
|
||
| // Match upstream wl_display_run order: flush before dispatch. | ||
| QAbstractEventDispatcher *dispatcher = QThread::currentThread()->eventDispatcher(); | ||
| QObject::connect(dispatcher, &QAbstractEventDispatcher::aboutToBlock, q, processWaylandEvents); | ||
| QObject::connect(dispatcher, &QAbstractEventDispatcher::aboutToBlock, q, [this] { | ||
| if (isProcessingEvents) | ||
| return; | ||
|
|
||
| safeFlushClients(); | ||
| }); | ||
|
|
||
| for (auto socket : std::as_const(sockets)) | ||
| initSocket(socket); | ||
|
|
@@ -127,6 +136,12 @@ | |
| { | ||
| W_Q(WServer); | ||
|
|
||
| // Disconnect event handlers BEFORE destroying clients to prevent | ||
| // callbacks from firing during client destruction. | ||
| sockNot.reset(); | ||
| if (auto *dispatcher = QThread::currentThread()->eventDispatcher()) | ||
| QObject::disconnect(dispatcher, nullptr, q, nullptr); | ||
|
|
||
| if (display) | ||
| wl_display_destroy_clients(*display); | ||
|
|
||
|
|
@@ -137,9 +152,22 @@ | |
| (*i)->destroy(q); | ||
| delete *i; | ||
| } | ||
| } | ||
|
|
||
| sockNot.reset(); | ||
| QThread::currentThread()->eventDispatcher()->disconnect(q); | ||
| // Replace wl_display_flush_clients: its wl_list_for_each_safe loop deadlocks | ||
| // when destroy-signal cascades make the pre-saved next node self-linked. | ||
| void WServerPrivate::safeFlushClients() | ||
| { | ||
| struct wl_list *head = wl_display_get_client_list(display->handle()); | ||
|
Comment on lines
+159
to
+161
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. issue (bug_risk): Guard against Since |
||
| struct wl_list *node = head->next; | ||
| while (node != head) { | ||
| // Self-linked node: already destroyed, stop to avoid infinite loop. | ||
| if (node->next == node) | ||
| break; | ||
| struct wl_list *next = node->next; | ||
| wl_client_flush(wl_client_from_link(node)); | ||
| node = next; | ||
| } | ||
| } | ||
|
|
||
| void WServerPrivate::initSocket(WSocket *socketServer) | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
issue (complexity): Consider extracting the shared re-entrancy guard into a helper (e.g., withEventProcessingGuard) to centralize the logic and avoid duplicated checks in both lambdas.
You can factor out the re-entrancy guard to reduce duplication and make the control flow clearer without changing behavior.
Right now both lambdas do:
You can keep the current semantics (activated → dispatch only, aboutToBlock → flush only) but centralize the guard logic:
Then wire the handlers as:
This:
- Removes the duplicated `isProcessingEvents` checks and `QScopedValueRollback` boilerplate.