Skip to content
Closed
Show file tree
Hide file tree
Changes from 4 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@

- Per-instance unmaskView propagates to child views (#7733)
- **Warning:** If you relied on children of an unmasked view still being individually redacted, verify your Session Replay redaction after updating. An explicit `maskView(_:)` on a descendant still takes precedence.
- Eliminate binary image cache lock contention during SDK startup (#7766)

## 9.8.0

Expand Down
86 changes: 52 additions & 34 deletions Sources/SentryCrash/Recording/SentryCrashBinaryImageCache.c
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
#include <mach-o/dyld.h>
#include <mach-o/dyld_images.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
Expand Down Expand Up @@ -66,22 +67,20 @@ typedef struct SentryCrashBinaryImageNode {
struct SentryCrashBinaryImageNode *next;
} SentryCrashBinaryImageNode;

static SentryCrashBinaryImageNode rootNode = { 0 };
static SentryCrashBinaryImageNode *tailNode = NULL;
static pthread_mutex_t binaryImagesMutex = PTHREAD_MUTEX_INITIALIZER;

static sentrycrashbic_cacheChangeCallback imageAddedCallback = NULL;
static sentrycrashbic_cacheChangeCallback imageRemovedCallback = NULL;
static SentryCrashBinaryImageNode rootNode = { 0 };
static _Atomic(SentryCrashBinaryImageNode *) tailNode = NULL;
static _Atomic(sentrycrashbic_cacheChangeCallback) imageAddedCallback = NULL;
static _Atomic(sentrycrashbic_cacheChangeCallback) imageRemovedCallback = NULL;

static void
binaryImageAdded(const struct mach_header *header, intptr_t slide)
{
pthread_mutex_lock(&binaryImagesMutex);
if (tailNode == NULL) {
pthread_mutex_unlock(&binaryImagesMutex);
// Quick check without the mutex β€” tailNode is atomic so this is safe.
// If cache was stopped (NULL), skip all work including dladdr.
if (atomic_load_explicit(&tailNode, memory_order_acquire) == NULL) {
return;
}
pthread_mutex_unlock(&binaryImagesMutex);
Dl_info info;
if (!dladdr(header, &info) || info.dli_fname == NULL) {
return;
Expand All @@ -99,18 +98,23 @@ binaryImageAdded(const struct mach_header *header, intptr_t slide)
newNode->next = NULL;
_will_add_image();
pthread_mutex_lock(&binaryImagesMutex);
// Recheck tailNode as it could be null when
// stopped from another thread.
if (tailNode != NULL) {
tailNode->next = newNode;
tailNode = tailNode->next;
// Recheck tailNode under mutex β€” it could have been set to NULL by
// stopCache() between our atomic check above and this point.
SentryCrashBinaryImageNode *currentTail = atomic_load_explicit(&tailNode, memory_order_relaxed);
if (currentTail != NULL) {
currentTail->next = newNode;
atomic_store_explicit(&tailNode, newNode, memory_order_release);
} else {
free(newNode);
newNode = NULL;
}
pthread_mutex_unlock(&binaryImagesMutex);
if (newNode && imageAddedCallback) {
imageAddedCallback(&newNode->image);
if (newNode) {
sentrycrashbic_cacheChangeCallback addCb
= atomic_load_explicit(&imageAddedCallback, memory_order_acquire);
if (addCb) {
addCb(&newNode->image);
}
}
}

Expand All @@ -122,8 +126,10 @@ binaryImageRemoved(const struct mach_header *header, intptr_t slide)
while (nextNode != NULL) {
if (nextNode->image.address == (uint64_t)header) {
nextNode->available = false;
if (imageRemovedCallback) {
imageRemovedCallback(&nextNode->image);
sentrycrashbic_cacheChangeCallback rmCb
Comment thread
denrase marked this conversation as resolved.
Outdated
= atomic_load_explicit(&imageRemovedCallback, memory_order_acquire);
if (rmCb) {
rmCb(&nextNode->image);
}
break;
}
Expand All @@ -143,7 +149,7 @@ sentrycrashbic_iterateOverImages(sentrycrashbic_imageIteratorCallback callback,

// If tailNode is null it means the cache was stopped, therefore we end the iteration.
// This will minimize any race condition effect without the need for locks.
while (nextNode != NULL && tailNode != NULL) {
while (nextNode != NULL && atomic_load_explicit(&tailNode, memory_order_acquire) != NULL) {
if (nextNode->available) {
callback(&nextNode->image, context);
}
Expand Down Expand Up @@ -192,7 +198,7 @@ void
sentrycrashbic_startCache(void)
{
pthread_mutex_lock(&binaryImagesMutex);
if (tailNode != NULL) {
if (atomic_load_explicit(&tailNode, memory_order_relaxed) != NULL) {
// Already initialized
pthread_mutex_unlock(&binaryImagesMutex);
return;
Expand All @@ -201,10 +207,10 @@ sentrycrashbic_startCache(void)
if (sentrycrashbic_shouldAddDyld()) {
sentrycrashdl_initialize();
SentryCrashBinaryImageNode *dyldNode = sentrycrashbic_getDyldNode();
tailNode = dyldNode;
atomic_store_explicit(&tailNode, dyldNode, memory_order_release);
rootNode.next = dyldNode;
} else {
tailNode = &rootNode;
atomic_store_explicit(&tailNode, &rootNode, memory_order_release);
rootNode.next = NULL;
}
pthread_mutex_unlock(&binaryImagesMutex);
Expand All @@ -219,14 +225,14 @@ void
sentrycrashbic_stopCache(void)
{
pthread_mutex_lock(&binaryImagesMutex);
if (tailNode == NULL) {
if (atomic_load_explicit(&tailNode, memory_order_relaxed) == NULL) {
pthread_mutex_unlock(&binaryImagesMutex);
return;
}

SentryCrashBinaryImageNode *node = rootNode.next;
rootNode.next = NULL;
tailNode = NULL;
atomic_store_explicit(&tailNode, NULL, memory_order_release);

while (node != NULL) {
SentryCrashBinaryImageNode *nextNode = node->next;
Expand All @@ -237,26 +243,38 @@ sentrycrashbic_stopCache(void)
pthread_mutex_unlock(&binaryImagesMutex);
}

// Adapter for sentrycrashbic_iterateOverImages(): the iterator's opaque
// context is the user's change callback, so each visited image is simply
// forwarded to it unchanged.
static void
initialReportToCallback(SentryCrashBinaryImage *image, void *context)
{
    ((sentrycrashbic_cacheChangeCallback)context)(image);
}

void
sentrycrashbic_registerAddedCallback(sentrycrashbic_cacheChangeCallback callback)
{
imageAddedCallback = callback;
atomic_store_explicit(&imageAddedCallback, callback, memory_order_release);
if (callback) {
// Snapshot the current tail under the mutex, then iterate outside it.
// This avoids holding the mutex for the entire iteration, which would
// block any concurrent binaryImageAdded() calls (e.g. from dlopen on
// another thread during SDK startup). Nodes are never freed during
// normal operation, so the snapshot pointer remains valid.
pthread_mutex_lock(&binaryImagesMutex);
sentrycrashbic_iterateOverImages(&initialReportToCallback, callback);
SentryCrashBinaryImageNode *snapshotTail
= atomic_load_explicit(&tailNode, memory_order_acquire);
Comment thread
cursor[bot] marked this conversation as resolved.
pthread_mutex_unlock(&binaryImagesMutex);

if (snapshotTail != NULL) {
SentryCrashBinaryImageNode *node = &rootNode;
while (node != NULL && atomic_load_explicit(&tailNode, memory_order_acquire) != NULL) {
if (node->available) {
callback(&node->image);
}
if (node == snapshotTail) {
break;
}
node = node->next;
}
Comment thread
sentry[bot] marked this conversation as resolved.
Comment thread
cursor[bot] marked this conversation as resolved.
}
}
}

// Registers (or clears, when `callback` is NULL) the listener invoked after a
// binary image is marked unavailable in the cache.
//
// The pointer is published with a single atomic release store so that readers
// loading it with acquire ordering observe a fully formed function pointer
// without taking `binaryImagesMutex`. The earlier plain assignment that
// preceded the atomic store was a redundant duplicate write and is removed.
void
sentrycrashbic_registerRemovedCallback(sentrycrashbic_cacheChangeCallback callback)
{
    atomic_store_explicit(&imageRemovedCallback, callback, memory_order_release);
}
81 changes: 44 additions & 37 deletions Sources/Swift/Core/Helper/SentryBinaryImageCache.swift
Original file line number Diff line number Diff line change
Expand Up @@ -27,40 +27,45 @@ import Foundation
*/
@objc(SentryBinaryImageCache)
@_spi(Private) public final class SentryBinaryImageCache: NSObject {
@objc public internal(set) var cache: [SentryBinaryImageInfo]?
private var cache: [SentryBinaryImageInfo]?
private var isDebug: Bool = false
// Use a recursive lock to allow the same thread to enter again
private let lock = NSRecursiveLock()
private let lock = NSLock()
/// When true, `cache` has unsorted appends and must be sorted before binary search.
private var needsSort: Bool = false

@objc public func start(_ isDebug: Bool) {
lock.synchronized {
self.isDebug = isDebug
self.cache = []
sentrycrashbic_registerAddedCallback { imagePtr in
guard let imagePtr else {
SentrySDKLog.warning("The image is NULL. Can't add NULL to cache.")
return
}
let image = imagePtr.pointee
SentryDependencyContainer.sharedInstance().binaryImageCache.binaryImageAdded(
imageName: image.name, vmAddress: image.vmAddress, address: image.address, size: image.size, uuid: image.uuid)
self.needsSort = false
}

// Register callbacks outside the lock β€” they acquire it independently.
sentrycrashbic_registerAddedCallback { imagePtr in
guard let imagePtr else {
SentrySDKLog.warning("The image is NULL. Can't add NULL to cache.")
return
}
sentrycrashbic_registerRemovedCallback { imagePtr in
guard let imagePtr else {
SentrySDKLog.warning("The image is NULL. Can't add NULL to cache.")
return
}
let image = imagePtr.pointee
SentryDependencyContainer.sharedInstance().binaryImageCache.binaryImageRemoved(image.address)
let image = imagePtr.pointee
SentryDependencyContainer.sharedInstance().binaryImageCache.binaryImageAdded(
imageName: image.name, vmAddress: image.vmAddress, address: image.address, size: image.size, uuid: image.uuid)
}
sentrycrashbic_registerRemovedCallback { imagePtr in
guard let imagePtr else {
SentrySDKLog.warning("The image is NULL. Can't add NULL to cache.")
return
}
let image = imagePtr.pointee
SentryDependencyContainer.sharedInstance().binaryImageCache.binaryImageRemoved(image.address)
}
}

@objc public func stop() {
sentrycrashbic_registerAddedCallback(nil)
sentrycrashbic_registerRemovedCallback(nil)
lock.synchronized {
sentrycrashbic_registerAddedCallback(nil)
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Non-atomic start/stop creates callback-cache state mismatch

Medium Severity

Splitting callback registration out of lock.synchronized in start() and stop() creates race windows. If stop() runs between start()'s lock release and callback registration, the callbacks get registered after stop() already cleared cache to nil. The callbacks remain active but guard self.cache != nil causes all images to be silently dropped, leaving a permanently empty cache. The previous code was atomicβ€”both operations were inside one lock.synchronized block. Since SentrySDK.close() can be called from any thread, this is reachable in production.

Additional Locations (1)
Fix in CursorΒ Fix in Web

sentrycrashbic_registerRemovedCallback(nil)
self.cache = nil
self.needsSort = false
}
}

Expand Down Expand Up @@ -88,27 +93,17 @@ import Foundation
size: size
)

var shouldValidate = false
lock.synchronized {
guard let cache = self.cache else { return }

// Binary search insertion to maintain sorted order by address
var left = 0
var right = cache.count
guard self.cache != nil else { return }

while left < right {
let mid = (left + right) / 2
let compareImage = cache[mid]
if newImage.address < compareImage.address {
right = mid
} else {
left = mid + 1
}
}

self.cache?.insert(newImage, at: left)
// O(1) append β€” sorting is deferred until a lookup or removal needs it.
self.cache?.append(newImage)
self.needsSort = true
shouldValidate = self.isDebug
}

if isDebug {
if shouldValidate {
// This validation adds some overhead with each class present in the image, so we only
// run this when debug is enabled. A non main queue is used to avoid affecting the UI.
LoadValidator.checkForDuplicatedSDK(imageName: nameString,
Expand All @@ -131,6 +126,7 @@ import Foundation
@objc
public func binaryImageRemoved(_ imageAddress: UInt64) {
lock.synchronized {
sortIfNeeded()
guard let index = indexOfImage(address: imageAddress) else { return }
self.cache?.remove(at: index)
}
Expand All @@ -139,11 +135,20 @@ import Foundation
/// Looks up the cached binary image whose address range matches `address`.
/// Takes `lock`, restores the sorted-by-address invariant if deferred appends
/// invalidated it, then resolves the index via binary search.
/// - Returns: the matching image info, or `nil` when no image contains the
///   address (or the cache is not started).
@objc
public func imageByAddress(_ address: UInt64) -> SentryBinaryImageInfo? {
    lock.synchronized {
        sortIfNeeded()
        return indexOfImage(address: address).flatMap { cache?[$0] }
    }
}

/// Restores the ascending-address ordering of `cache` when deferred O(1)
/// appends have left it unsorted; a no-op when the cache is already sorted.
/// Callers must already hold `lock`.
private func sortIfNeeded() {
    if needsSort {
        cache?.sort(by: { $0.address < $1.address })
        needsSort = false
    }
}

private func indexOfImage(address: UInt64) -> Int? {
guard let cache = self.cache else { return nil }

Expand All @@ -169,6 +174,7 @@ import Foundation
@objc(imagePathsForInAppInclude:)
public func imagePathsFor(inAppInclude: String) -> Set<String> {
lock.synchronized {
// No sort needed β€” this iterates all entries regardless of order.
var imagePaths = Set<String>()

guard let cache = self.cache else { return imagePaths }
Expand All @@ -185,6 +191,7 @@ import Foundation
/// Returns an address-sorted snapshot of every cached binary image.
/// - Returns: all cached images, or an empty array when the cache has not
///   been started (i.e. `cache` is `nil`).
@objc
public func getAllBinaryImages() -> [SentryBinaryImageInfo] {
    lock.synchronized {
        sortIfNeeded()
        guard let cache else { return [] }
        return cache
    }
}
Expand Down
2 changes: 1 addition & 1 deletion Tests/SentryTests/Helper/EnvelopeComparison.swift
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ extension Data {
// Used to support iOS < 16.0
func backwardCompatibleSplit(separator: Data) -> [Data] {
// When the `split` function is available all we need to do is call it.
if #available(iOS 16.0, tvOS 16.0, watchOS 16.0, macOS 13.0, *) {
if #available(iOS 16.0, tvOS 16.0, watchOS 9.0, macOS 13.0, *) {
return split(separator: separator)
}

Expand Down
Loading
Loading