From 6093cb093dc924cd19aebbb99856aeb7b1e989fa Mon Sep 17 00:00:00 2001 From: Ben Hamilton Date: Thu, 19 Feb 2026 11:07:36 -0700 Subject: [PATCH 1/7] feat: Update createImageBitmap() to accept ArrayBuffer or Blob On Android, React Native 0.82 and later suffers from a serious open issue where `file://` URIs to local assets cannot be loaded with `fetch()`: https://github.com/facebook/react-native/issues/54626 That means it's very difficult to load bundled texture assets to use with `react-native-wgpu` in release builds on Android. Development builds work fine, since assets are loaded over HTTP, but release builds directly include assets which resolve to local `file://` URIs like `file:///android_res/drawable/assets_textures_foo.jpg`. Because `react-native-wgpu`'s `createImageBitmap()` currently requires a `Blob`, there's no good workaround for the React Native issue, since the only thing which can create `Blob`s is `fetch()`. (Note: I tried using the workaround from https://github.com/expo/expo/issues/2402#issuecomment-443726662 to create an `XMLHttpRequest` to fetch the `file://` URI, but it fails in React Native 0.82 and later as well.) To resolve this issue, this PR extends `react-native-wgpu`'s `createImageBitmap()` to accept either `Blob` *or* `ArrayBuffer`s containing encoded image bytes (PNG/JPEG/etc.). Specifically, this PR: - Upgrades the C++ version to C++20 for `std::span` support to avoid copies - Adds `createImageBitmapFromData(std::span)` to `PlatformContext` - Refactors the existing `Blob` codepaths to resolve the `Blob` to bytes, then use the `std::span` codepath - Detects `ArrayBuffer` vs `Blob` in `RNWebGPU::createImageBitmap` at runtime - Adds a TypeScript global overload to allow calling `createImageBitmap(ArrayBuffer)` - Updates the `TexturedCube` sample to use the new `ArrayBuffer` codepath I tested this by running the Android and iOS samples in the simulator and on device. 
I did use Claude Code to help draft this PR, but then edited the code myself and tested it by hand (I promise I'm a human being). --- apps/example/package.json | 2 +- apps/example/src/Cube/TexturedCube.tsx | 11 +- packages/webgpu/android/CMakeLists.txt | 3 +- packages/webgpu/android/build.gradle | 2 +- .../android/cpp/AndroidPlatformContext.h | 231 +++++++----------- packages/webgpu/apple/ApplePlatformContext.h | 6 + packages/webgpu/apple/ApplePlatformContext.mm | 127 +++++----- packages/webgpu/cpp/rnwgpu/PlatformContext.h | 8 + packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h | 48 +++- packages/webgpu/react-native-wgpu.podspec | 2 +- .../scripts/build/apple.toolchain.cmake | 4 +- packages/webgpu/src/index.tsx | 5 + 12 files changed, 226 insertions(+), 223 deletions(-) diff --git a/apps/example/package.json b/apps/example/package.json index c7aebf8f1..40955f341 100644 --- a/apps/example/package.json +++ b/apps/example/package.json @@ -12,7 +12,7 @@ "pod:install:ios": "pod install --project-directory=ios", "pod:install:macos": "pod install --project-directory=macos", "build:android": "cd android && ./gradlew assembleDebug --warning-mode all", - "build:ios": "react-native build-ios --scheme Example --mode Debug --extra-params \"-sdk iphonesimulator CC=clang CPLUSPLUS=clang++ LD=clang LDPLUSPLUS=clang++ GCC_OPTIMIZATION_LEVEL=0 GCC_PRECOMPILE_PREFIX_HEADER=YES ASSETCATALOG_COMPILER_OPTIMIZATION=time DEBUG_INFORMATION_FORMAT=dwarf COMPILER_INDEX_STORE_ENABLE=NO\"", + "build:ios": "react-native build-ios --scheme Example --mode Debug --extra-params \"-sdk iphonesimulator CC=clang CPLUSPLUS=clang++ LD=clang LDPLUSPLUS=clang++ GCC_OPTIMIZATION_LEVEL=0 GCC_PRECOMPILE_PREFIX_HEADER=YES ASSETCATALOG_COMPILER_OPTIMIZATION=time DEBUG_INFORMATION_FORMAT=dwarf COMPILER_INDEX_STORE_ENABLE=NO CLANG_CXX_LANGUAGE_STANDARD=c++20\"", "build:macos": "react-native build-macos --scheme Example --mode Debug", "mkdist": "node -e \"require('node:fs').mkdirSync('dist', { recursive: true, mode: 
0o755 })\"", "postinstall": "node -e \"if (process.platform !== 'darwin') { console.log('Skipping iOS pod install on non-macOS environment.'); process.exit(0); } const { execSync } = require('child_process'); execSync('yarn pod:install:ios', { stdio: 'inherit' });\"" diff --git a/apps/example/src/Cube/TexturedCube.tsx b/apps/example/src/Cube/TexturedCube.tsx index 50e34b1ac..7a8475bc5 100644 --- a/apps/example/src/Cube/TexturedCube.tsx +++ b/apps/example/src/Cube/TexturedCube.tsx @@ -102,9 +102,10 @@ export const TexturedCube = () => { // Fetch the image and upload it into a GPUTexture. let cubeTexture: GPUTexture; - { - const response = await fetchAsset(require("../assets/Di-3d.png")); - const imageBitmap = await createImageBitmap(await response.blob()); + try { + const asset = await fetchAsset(require("../assets/Di-3d.png")); + const arrayBuffer = await asset.arrayBuffer(); + const imageBitmap = await createImageBitmap(arrayBuffer); cubeTexture = device.createTexture({ size: [imageBitmap.width, imageBitmap.height, 1], format: "rgba8unorm", @@ -116,8 +117,10 @@ export const TexturedCube = () => { device.queue.copyExternalImageToTexture( { source: imageBitmap }, { texture: cubeTexture }, - [imageBitmap.width, imageBitmap.height], + [imageBitmap.width, imageBitmap.height] ); + } catch (err) { + console.error("Failed to fetch asset", err); } // Create a sampler with linear filtering for smooth interpolation. 
diff --git a/packages/webgpu/android/CMakeLists.txt b/packages/webgpu/android/CMakeLists.txt index cd198bd7d..6e7488b87 100644 --- a/packages/webgpu/android/CMakeLists.txt +++ b/packages/webgpu/android/CMakeLists.txt @@ -2,7 +2,8 @@ cmake_minimum_required(VERSION 3.4.1) project(RNWGPU) set (CMAKE_VERBOSE_MAKEFILE ON) -set (CMAKE_CXX_STANDARD 17) +set (CMAKE_CXX_STANDARD 20) +set (CMAKE_CXX_STANDARD_REQUIRED True) set (PACKAGE_NAME "react-native-wgpu") diff --git a/packages/webgpu/android/build.gradle b/packages/webgpu/android/build.gradle index 51205e3d3..460adc44d 100644 --- a/packages/webgpu/android/build.gradle +++ b/packages/webgpu/android/build.gradle @@ -71,7 +71,7 @@ android { buildConfigField "boolean", "IS_NEW_ARCHITECTURE_ENABLED", "true" externalNativeBuild { cmake { - cppFlags "-fexceptions", "-frtti", "-std=c++1y", "-DONANDROID" + cppFlags "-fexceptions", "-frtti", "-DONANDROID" abiFilters (*reactNativeArchitectures()) arguments '-DANDROID_STL=c++_shared', "-DNODE_MODULES_DIR=${nodeModules}", diff --git a/packages/webgpu/android/cpp/AndroidPlatformContext.h b/packages/webgpu/android/cpp/AndroidPlatformContext.h index 7fe122f04..473cea690 100644 --- a/packages/webgpu/android/cpp/AndroidPlatformContext.h +++ b/packages/webgpu/android/cpp/AndroidPlatformContext.h @@ -23,6 +23,41 @@ class AndroidPlatformContext : public PlatformContext { private: jobject _blobModule; + std::vector resolveBlob(JNIEnv *env, const std::string &blobId, + double offset, double size) { + if (!_blobModule) { + throw std::runtime_error("BlobModule instance is null"); + } + + jclass blobModuleClass = env->GetObjectClass(_blobModule); + if (!blobModuleClass) { + throw std::runtime_error("Couldn't find BlobModule class"); + } + + jmethodID resolveMethod = env->GetMethodID(blobModuleClass, "resolve", + "(Ljava/lang/String;II)[B"); + if (!resolveMethod) { + throw std::runtime_error("Couldn't find resolve method in BlobModule"); + } + + jstring jBlobId = 
env->NewStringUTF(blobId.c_str()); + jbyteArray blobData = (jbyteArray)env->CallObjectMethod( + _blobModule, resolveMethod, jBlobId, static_cast(offset), + static_cast(size)); + env->DeleteLocalRef(jBlobId); + + if (!blobData) { + throw std::runtime_error("Couldn't retrieve blob data"); + } + + jsize len = env->GetArrayLength(blobData); + std::vector data(len); + env->GetByteArrayRegion(blobData, 0, len, + reinterpret_cast(data.data())); + env->DeleteLocalRef(blobData); + return data; + } + public: explicit AndroidPlatformContext(jobject blobModule) : _blobModule(blobModule) {} @@ -52,188 +87,106 @@ class AndroidPlatformContext : public PlatformContext { throw std::runtime_error("Couldn't get JNI environment"); } - // Use the BlobModule instance from _blobModule - if (!_blobModule) { - throw std::runtime_error("BlobModule instance is null"); - } + auto data = resolveBlob(env, blobId, offset, size); + return createImageBitmapFromData(data); + } - // Get the resolve method ID - jclass blobModuleClass = env->GetObjectClass(_blobModule); - if (!blobModuleClass) { - throw std::runtime_error("Couldn't find BlobModule class"); - } + void createImageBitmapAsync( + std::string blobId, double offset, double size, + std::function onSuccess, + std::function onError) override { + std::thread([this, blobId = std::move(blobId), offset, size, + onSuccess = std::move(onSuccess), + onError = std::move(onError)]() { + jni::Environment::ensureCurrentThreadIsAttached(); + try { + JNIEnv *env = facebook::jni::Environment::current(); + if (!env) { + throw std::runtime_error("Couldn't get JNI environment"); + } + auto data = resolveBlob(env, blobId, offset, size); + auto result = createImageBitmapFromData(data); + onSuccess(std::move(result)); + } catch (const std::exception &e) { + onError(e.what()); + } + }).detach(); + } - jmethodID resolveMethod = env->GetMethodID(blobModuleClass, "resolve", - "(Ljava/lang/String;II)[B"); - if (!resolveMethod) { - throw std::runtime_error("Couldn't 
find resolve method in BlobModule"); - } + ImageData createImageBitmapFromData(std::span data) override { + jni::Environment::ensureCurrentThreadIsAttached(); - // Resolve the blob data - jstring jBlobId = env->NewStringUTF(blobId.c_str()); - jbyteArray blobData = (jbyteArray)env->CallObjectMethod( - _blobModule, resolveMethod, jBlobId, static_cast(offset), - static_cast(size)); - env->DeleteLocalRef(jBlobId); + JNIEnv *env = facebook::jni::Environment::current(); + if (!env) { + throw std::runtime_error("Couldn't get JNI environment"); + } - if (!blobData) { - throw std::runtime_error("Couldn't retrieve blob data"); + // Create jbyteArray from the raw bytes + jbyteArray byteArray = env->NewByteArray(static_cast(data.size())); + if (!byteArray) { + throw std::runtime_error("Couldn't allocate byte array"); } + env->SetByteArrayRegion(byteArray, 0, static_cast(data.size()), + reinterpret_cast(data.data())); - // Create a Bitmap from the blob data + // Decode via BitmapFactory jclass bitmapFactoryClass = env->FindClass("android/graphics/BitmapFactory"); jmethodID decodeByteArrayMethod = env->GetStaticMethodID(bitmapFactoryClass, "decodeByteArray", "([BII)Landroid/graphics/Bitmap;"); - jint blobLength = env->GetArrayLength(blobData); + jint length = static_cast(data.size()); jobject bitmap = env->CallStaticObjectMethod( - bitmapFactoryClass, decodeByteArrayMethod, blobData, 0, blobLength); + bitmapFactoryClass, decodeByteArrayMethod, byteArray, 0, length); if (!bitmap) { - env->DeleteLocalRef(blobData); + env->DeleteLocalRef(byteArray); throw std::runtime_error("Couldn't decode image"); } - // Get bitmap info AndroidBitmapInfo bitmapInfo; if (AndroidBitmap_getInfo(env, bitmap, &bitmapInfo) != ANDROID_BITMAP_RESULT_SUCCESS) { - env->DeleteLocalRef(blobData); + env->DeleteLocalRef(byteArray); env->DeleteLocalRef(bitmap); throw std::runtime_error("Couldn't get bitmap info"); } - // Lock the bitmap pixels void *bitmapPixels; if (AndroidBitmap_lockPixels(env, bitmap, 
&bitmapPixels) != ANDROID_BITMAP_RESULT_SUCCESS) { - env->DeleteLocalRef(blobData); + env->DeleteLocalRef(byteArray); env->DeleteLocalRef(bitmap); throw std::runtime_error("Couldn't lock bitmap pixels"); } - // Copy the bitmap data - std::vector imageData(bitmapInfo.height * bitmapInfo.stride); - memcpy(imageData.data(), bitmapPixels, imageData.size()); + ImageData result; + result.width = static_cast(bitmapInfo.width); + result.height = static_cast(bitmapInfo.height); + result.data.resize(bitmapInfo.height * bitmapInfo.stride); + memcpy(result.data.data(), bitmapPixels, result.data.size()); - // Unlock the bitmap pixels AndroidBitmap_unlockPixels(env, bitmap); - // Clean up JNI references - env->DeleteLocalRef(blobData); + env->DeleteLocalRef(byteArray); env->DeleteLocalRef(bitmap); - ImageData result; - result.width = static_cast(bitmapInfo.width); - result.height = static_cast(bitmapInfo.height); - result.data = imageData; return result; } - void createImageBitmapAsync( - std::string blobId, double offset, double size, - std::function onSuccess, + void createImageBitmapFromDataAsync( + std::span data, std::function onSuccess, std::function onError) override { - // Capture blobModule for the background thread - jobject blobModule = _blobModule; - - // Dispatch to a background thread - std::thread([blobModule, blobId = std::move(blobId), offset, size, + std::thread([this, ownedData = std::vector(data.begin(), data.end()), onSuccess = std::move(onSuccess), - onError = std::move(onError)]() { + onError = std::move(onError)]() mutable { jni::Environment::ensureCurrentThreadIsAttached(); - - JNIEnv *env = facebook::jni::Environment::current(); - if (!env) { - onError("Couldn't get JNI environment"); - return; - } - - if (!blobModule) { - onError("BlobModule instance is null"); - return; - } - - // Get the resolve method ID - jclass blobModuleClass = env->GetObjectClass(blobModule); - if (!blobModuleClass) { - onError("Couldn't find BlobModule class"); - return; - } - 
- jmethodID resolveMethod = env->GetMethodID(blobModuleClass, "resolve", - "(Ljava/lang/String;II)[B"); - if (!resolveMethod) { - onError("Couldn't find resolve method in BlobModule"); - return; - } - - // Resolve the blob data - jstring jBlobId = env->NewStringUTF(blobId.c_str()); - jbyteArray blobData = (jbyteArray)env->CallObjectMethod( - blobModule, resolveMethod, jBlobId, static_cast(offset), - static_cast(size)); - env->DeleteLocalRef(jBlobId); - - if (!blobData) { - onError("Couldn't retrieve blob data"); - return; + try { + auto result = createImageBitmapFromData(ownedData); + onSuccess(std::move(result)); + } catch (const std::exception &e) { + onError(e.what()); } - - // Create a Bitmap from the blob data - jclass bitmapFactoryClass = - env->FindClass("android/graphics/BitmapFactory"); - jmethodID decodeByteArrayMethod = - env->GetStaticMethodID(bitmapFactoryClass, "decodeByteArray", - "([BII)Landroid/graphics/Bitmap;"); - jint blobLength = env->GetArrayLength(blobData); - jobject bitmap = env->CallStaticObjectMethod( - bitmapFactoryClass, decodeByteArrayMethod, blobData, 0, blobLength); - - if (!bitmap) { - env->DeleteLocalRef(blobData); - onError("Couldn't decode image"); - return; - } - - // Get bitmap info - AndroidBitmapInfo bitmapInfo; - if (AndroidBitmap_getInfo(env, bitmap, &bitmapInfo) != - ANDROID_BITMAP_RESULT_SUCCESS) { - env->DeleteLocalRef(blobData); - env->DeleteLocalRef(bitmap); - onError("Couldn't get bitmap info"); - return; - } - - // Lock the bitmap pixels - void *bitmapPixels; - if (AndroidBitmap_lockPixels(env, bitmap, &bitmapPixels) != - ANDROID_BITMAP_RESULT_SUCCESS) { - env->DeleteLocalRef(blobData); - env->DeleteLocalRef(bitmap); - onError("Couldn't lock bitmap pixels"); - return; - } - - // Copy the bitmap data - std::vector imageData(bitmapInfo.height * bitmapInfo.stride); - memcpy(imageData.data(), bitmapPixels, imageData.size()); - - // Unlock the bitmap pixels - AndroidBitmap_unlockPixels(env, bitmap); - - // Clean up JNI 
references - env->DeleteLocalRef(blobData); - env->DeleteLocalRef(bitmap); - - ImageData result; - result.width = static_cast(bitmapInfo.width); - result.height = static_cast(bitmapInfo.height); - result.data = std::move(imageData); - - onSuccess(std::move(result)); }).detach(); } }; diff --git a/packages/webgpu/apple/ApplePlatformContext.h b/packages/webgpu/apple/ApplePlatformContext.h index 730de3306..81bdaddf0 100644 --- a/packages/webgpu/apple/ApplePlatformContext.h +++ b/packages/webgpu/apple/ApplePlatformContext.h @@ -20,6 +20,12 @@ class ApplePlatformContext : public PlatformContext { std::string blobId, double offset, double size, std::function onSuccess, std::function onError) override; + + ImageData createImageBitmapFromData(std::span data) override; + + void createImageBitmapFromDataAsync( + std::span data, std::function onSuccess, + std::function onError) override; }; } // namespace rnwgpu diff --git a/packages/webgpu/apple/ApplePlatformContext.mm b/packages/webgpu/apple/ApplePlatformContext.mm index 00ff61a74..7083422ed 100644 --- a/packages/webgpu/apple/ApplePlatformContext.mm +++ b/packages/webgpu/apple/ApplePlatformContext.mm @@ -39,6 +39,10 @@ void checkIfUsingSimulatorWithAPIValidation() { return instance.CreateSurface(&surfaceDescriptor); } +static std::span nsDataToSpan(NSData *data) { + return {static_cast(const_cast(data.bytes)), data.length}; +} + ImageData ApplePlatformContext::createImageBitmap(std::string blobId, double offset, double size) { RCTBlobManager *blobManager = @@ -49,13 +53,44 @@ void checkIfUsingSimulatorWithAPIValidation() { size:(long)size]; if (!blobData) { - throw std::runtime_error("Couldn't retrive blob data"); + throw std::runtime_error("Couldn't retrieve blob data"); + } + + return createImageBitmapFromData(nsDataToSpan(blobData)); +} + +void ApplePlatformContext::createImageBitmapAsync( + std::string blobId, double offset, double size, + std::function onSuccess, + std::function onError) { + // Resolve blob on current 
thread (requires RCTBridge access) + RCTBlobManager *blobManager = + [[RCTBridge currentBridge] moduleForClass:RCTBlobManager.class]; + NSData *blobData = + [blobManager resolve:[NSString stringWithUTF8String:blobId.c_str()] + offset:(long)offset + size:(long)size]; + + if (!blobData) { + onError("Couldn't retrieve blob data"); + return; } + // blobData is alive during this synchronous call; + // createImageBitmapFromDataAsync copies the span before dispatching + createImageBitmapFromDataAsync(nsDataToSpan(blobData), std::move(onSuccess), + std::move(onError)); +} + +ImageData ApplePlatformContext::createImageBitmapFromData( + std::span data) { + NSData *nsData = [NSData dataWithBytes:data.data() + length:data.size()]; + #if !TARGET_OS_OSX - UIImage *image = [UIImage imageWithData:blobData]; + UIImage *image = [UIImage imageWithData:nsData]; #else - NSImage *image = [[NSImage alloc] initWithData:blobData]; + NSImage *image = [[NSImage alloc] initWithData:nsData]; #endif if (!image) { throw std::runtime_error("Couldn't decode image"); @@ -72,94 +107,42 @@ void checkIfUsingSimulatorWithAPIValidation() { size_t height = CGImageGetHeight(cgImage); size_t bitsPerComponent = 8; size_t bytesPerRow = width * 4; - std::vector imageData(height * bytesPerRow); + + ImageData result; + result.width = static_cast(width); + result.height = static_cast(height); + result.data.resize(height * bytesPerRow); + result.format = wgpu::TextureFormat::RGBA8Unorm; CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); CGContextRef context = CGBitmapContextCreate( - imageData.data(), width, height, bitsPerComponent, bytesPerRow, + result.data.data(), width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big); CGContextDrawImage(context, CGRectMake(0, 0, width, height), cgImage); - // Now imageData contains a copy of the bitmap data - CGContextRelease(context); CGColorSpaceRelease(colorSpace); - // Use the copied data - ImageData 
result; - result.width = static_cast(width); - result.height = static_cast(height); - result.data = imageData; - result.format = wgpu::TextureFormat::RGBA8Unorm; return result; } -void ApplePlatformContext::createImageBitmapAsync( - std::string blobId, double offset, double size, - std::function onSuccess, +void ApplePlatformContext::createImageBitmapFromDataAsync( + std::span data, std::function onSuccess, std::function onError) { - // Capture blob data on the current thread (requires RCTBridge access) - RCTBlobManager *blobManager = - [[RCTBridge currentBridge] moduleForClass:RCTBlobManager.class]; - NSData *blobData = - [blobManager resolve:[NSString stringWithUTF8String:blobId.c_str()] - offset:(long)offset - size:(long)size]; + // Copy span data into shared_ptr so the dispatch_async block owns the memory + auto ownedData = + std::make_shared>(data.begin(), data.end()); - if (!blobData) { - onError("Couldn't retrieve blob data"); - return; - } - - // Retain the data for the background block - NSData *retainedData = [blobData copy]; - - // Dispatch heavy image decoding work to a background queue dispatch_async( dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0), ^{ @autoreleasepool { -#if !TARGET_OS_OSX - UIImage *image = [UIImage imageWithData:retainedData]; -#else - NSImage *image = [[NSImage alloc] initWithData:retainedData]; -#endif - if (!image) { - onError("Couldn't decode image"); - return; + try { + auto result = createImageBitmapFromData(*ownedData); + onSuccess(std::move(result)); + } catch (const std::exception &e) { + onError(e.what()); } - -#if !TARGET_OS_OSX - CGImageRef cgImage = image.CGImage; -#else - CGImageRef cgImage = [image CGImageForProposedRect:NULL - context:NULL - hints:NULL]; -#endif - size_t width = CGImageGetWidth(cgImage); - size_t height = CGImageGetHeight(cgImage); - size_t bitsPerComponent = 8; - size_t bytesPerRow = width * 4; - std::vector imageData(height * bytesPerRow); - - CGColorSpaceRef colorSpace = 
CGColorSpaceCreateDeviceRGB(); - CGContextRef context = CGBitmapContextCreate( - imageData.data(), width, height, bitsPerComponent, bytesPerRow, - colorSpace, - kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big); - - CGContextDrawImage(context, CGRectMake(0, 0, width, height), cgImage); - - CGContextRelease(context); - CGColorSpaceRelease(colorSpace); - - ImageData result; - result.width = static_cast(width); - result.height = static_cast(height); - result.data = std::move(imageData); - result.format = wgpu::TextureFormat::RGBA8Unorm; - - onSuccess(std::move(result)); } }); } diff --git a/packages/webgpu/cpp/rnwgpu/PlatformContext.h b/packages/webgpu/cpp/rnwgpu/PlatformContext.h index 5dea59ed1..e95b680fd 100644 --- a/packages/webgpu/cpp/rnwgpu/PlatformContext.h +++ b/packages/webgpu/cpp/rnwgpu/PlatformContext.h @@ -2,6 +2,7 @@ #include #include +#include #include #include @@ -31,6 +32,13 @@ class PlatformContext { std::string blobId, double offset, double size, std::function onSuccess, std::function onError) = 0; + + // Create ImageBitmap from raw encoded image bytes (PNG/JPEG/etc.) 
+ virtual ImageData createImageBitmapFromData(std::span data) = 0; + + virtual void createImageBitmapFromDataAsync( + std::span data, std::function onSuccess, + std::function onError) = 0; }; } // namespace rnwgpu diff --git a/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h b/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h index fbaee35c1..025a2df9a 100644 --- a/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h +++ b/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h @@ -5,6 +5,7 @@ #include "NativeObject.h" +#include "ArrayBuffer.h" #include "Canvas.h" #include "GPU.h" #include "GPUCanvasContext.h" @@ -79,13 +80,54 @@ class RNWebGPU : public NativeObject { const jsi::Value & /*thisVal*/, const jsi::Value *args, size_t count) { if (count < 1) { - throw jsi::JSError(runtime, "createImageBitmap requires a Blob argument"); + throw jsi::JSError(runtime, + "createImageBitmap requires a Blob or ArrayBuffer " + "argument"); } - auto blob = - JSIConverter>::fromJSI(runtime, args[0], false); auto platformContext = _platformContext; auto callInvoker = _callInvoker; + + // Check if the argument is an ArrayBuffer or TypedArray + if (args[0].isObject()) { + auto obj = args[0].getObject(runtime); + if (obj.isArrayBuffer(runtime) || obj.hasProperty(runtime, "buffer")) { + auto arrayBuffer = + JSIConverter>::fromJSI( + runtime, args[0], false); + // Copy bytes on the JS thread — the ArrayBuffer pointer is into + // JS-owned memory that can be GC'd + std::vector dataCopy(arrayBuffer->data(), + arrayBuffer->data() + arrayBuffer->size()); + + return Promise::createPromise( + runtime, + [platformContext, callInvoker, + dataCopy = std::move(dataCopy)]( + jsi::Runtime & /*runtime*/, + std::shared_ptr promise) mutable { + platformContext->createImageBitmapFromDataAsync( + dataCopy, + [callInvoker, promise](ImageData imageData) { + auto imageBitmap = + std::make_shared(imageData); + callInvoker->invokeAsync([promise, imageBitmap]() { + promise->resolve( + JSIConverter>::toJSI( + promise->runtime, imageBitmap)); + }); 
+ }, + [callInvoker, promise](std::string error) { + callInvoker->invokeAsync( + [promise, error]() { promise->reject(error); }); + }); + }); + } + } + + // Fall through to existing Blob path + auto blob = + JSIConverter>::fromJSI(runtime, args[0], false); std::string blobId = blob->blobId; double offset = blob->offset; double size = blob->size; diff --git a/packages/webgpu/react-native-wgpu.podspec b/packages/webgpu/react-native-wgpu.podspec index cbee57bd7..ac01a3b66 100644 --- a/packages/webgpu/react-native-wgpu.podspec +++ b/packages/webgpu/react-native-wgpu.podspec @@ -35,7 +35,7 @@ Pod::Spec.new do |s| s.pod_target_xcconfig = { "HEADER_SEARCH_PATHS" => "\"$(PODS_ROOT)/boost\" \"$(PODS_TARGET_SRCROOT)/cpp\"", "OTHER_CPLUSPLUSFLAGS" => "-DFOLLY_NO_CONFIG -DFOLLY_MOBILE=1 -DFOLLY_USE_LIBCPP=1", - "CLANG_CXX_LANGUAGE_STANDARD" => "c++17" + "CLANG_CXX_LANGUAGE_STANDARD" => "c++20" } s.dependency "React-RCTFabric" s.dependency "React-Codegen" diff --git a/packages/webgpu/scripts/build/apple.toolchain.cmake b/packages/webgpu/scripts/build/apple.toolchain.cmake index 543e41b72..0562c2002 100644 --- a/packages/webgpu/scripts/build/apple.toolchain.cmake +++ b/packages/webgpu/scripts/build/apple.toolchain.cmake @@ -839,6 +839,8 @@ set(CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG "-compatibility_version ") set(CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ") set(CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}") set(CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}") +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED True) if(ARCHS MATCHES "((^|;|, )(arm64|arm64e|x86_64))+") set(CMAKE_C_SIZEOF_DATA_PTR 8) @@ -1174,4 +1176,4 @@ macro(find_host_package) set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH) set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH) set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE BOTH) -endmacro(find_host_package) \ No newline at end of file +endmacro(find_host_package) diff --git 
a/packages/webgpu/src/index.tsx b/packages/webgpu/src/index.tsx index 46875c256..a497a9bf0 100644 --- a/packages/webgpu/src/index.tsx +++ b/packages/webgpu/src/index.tsx @@ -23,4 +23,9 @@ declare global { DecodeToUTF8: (buffer: NodeJS.ArrayBufferView | ArrayBuffer) => string; createImageBitmap: typeof createImageBitmap; }; + + // Extend createImageBitmap to accept ArrayBuffer/TypedArray (encoded image bytes) + function createImageBitmap( + image: ArrayBuffer | ArrayBufferView, + ): Promise; } From 96fadbddeeaa8e84bd35d75b7b87fde5a2ce88bd Mon Sep 17 00:00:00 2001 From: Ben Hamilton Date: Thu, 19 Feb 2026 12:01:09 -0700 Subject: [PATCH 2/7] - Use std::span - Clean up JNI local refs - Rethrow error in TexturedCube test --- apps/example/src/Cube/TexturedCube.tsx | 1 + .../webgpu/android/cpp/AndroidPlatformContext.h | 16 ++++++++++++++-- packages/webgpu/apple/ApplePlatformContext.h | 4 ++-- packages/webgpu/apple/ApplePlatformContext.mm | 8 ++++---- packages/webgpu/cpp/rnwgpu/PlatformContext.h | 4 ++-- 5 files changed, 23 insertions(+), 10 deletions(-) diff --git a/apps/example/src/Cube/TexturedCube.tsx b/apps/example/src/Cube/TexturedCube.tsx index 7a8475bc5..fca301fde 100644 --- a/apps/example/src/Cube/TexturedCube.tsx +++ b/apps/example/src/Cube/TexturedCube.tsx @@ -121,6 +121,7 @@ export const TexturedCube = () => { ); } catch (err) { console.error("Failed to fetch asset", err); + throw err; } // Create a sampler with linear filtering for smooth interpolation. 
diff --git a/packages/webgpu/android/cpp/AndroidPlatformContext.h b/packages/webgpu/android/cpp/AndroidPlatformContext.h index 473cea690..0dfe9a24e 100644 --- a/packages/webgpu/android/cpp/AndroidPlatformContext.h +++ b/packages/webgpu/android/cpp/AndroidPlatformContext.h @@ -36,6 +36,8 @@ class AndroidPlatformContext : public PlatformContext { jmethodID resolveMethod = env->GetMethodID(blobModuleClass, "resolve", "(Ljava/lang/String;II)[B"); + env->DeleteLocalRef(blobModuleClass); + if (!resolveMethod) { throw std::runtime_error("Couldn't find resolve method in BlobModule"); } @@ -113,7 +115,7 @@ class AndroidPlatformContext : public PlatformContext { }).detach(); } - ImageData createImageBitmapFromData(std::span data) override { + ImageData createImageBitmapFromData(std::span data) override { jni::Environment::ensureCurrentThreadIsAttached(); JNIEnv *env = facebook::jni::Environment::current(); @@ -132,12 +134,22 @@ class AndroidPlatformContext : public PlatformContext { // Decode via BitmapFactory jclass bitmapFactoryClass = env->FindClass("android/graphics/BitmapFactory"); + if (!bitmapFactoryClass) { + env->DeleteLocalRef(byteArray); + throw std::runtime_error("Couldn't find BitmapFactory class"); + } jmethodID decodeByteArrayMethod = env->GetStaticMethodID(bitmapFactoryClass, "decodeByteArray", "([BII)Landroid/graphics/Bitmap;"); + if (!decodeByteArrayMethod) { + env->DeleteLocalRef(byteArray); + env->DeleteLocalRef(bitmapFactoryClass); + throw std::runtime_error("Couldn't find decodeByteArray method"); + } jint length = static_cast(data.size()); jobject bitmap = env->CallStaticObjectMethod( bitmapFactoryClass, decodeByteArrayMethod, byteArray, 0, length); + env->DeleteLocalRef(bitmapFactoryClass); if (!bitmap) { env->DeleteLocalRef(byteArray); @@ -175,7 +187,7 @@ class AndroidPlatformContext : public PlatformContext { } void createImageBitmapFromDataAsync( - std::span data, std::function onSuccess, + std::span data, std::function onSuccess, std::function 
onError) override { std::thread([this, ownedData = std::vector(data.begin(), data.end()), onSuccess = std::move(onSuccess), diff --git a/packages/webgpu/apple/ApplePlatformContext.h b/packages/webgpu/apple/ApplePlatformContext.h index 81bdaddf0..24d27c76f 100644 --- a/packages/webgpu/apple/ApplePlatformContext.h +++ b/packages/webgpu/apple/ApplePlatformContext.h @@ -21,10 +21,10 @@ class ApplePlatformContext : public PlatformContext { std::function onSuccess, std::function onError) override; - ImageData createImageBitmapFromData(std::span data) override; + ImageData createImageBitmapFromData(std::span data) override; void createImageBitmapFromDataAsync( - std::span data, std::function onSuccess, + std::span data, std::function onSuccess, std::function onError) override; }; diff --git a/packages/webgpu/apple/ApplePlatformContext.mm b/packages/webgpu/apple/ApplePlatformContext.mm index 7083422ed..ba201033c 100644 --- a/packages/webgpu/apple/ApplePlatformContext.mm +++ b/packages/webgpu/apple/ApplePlatformContext.mm @@ -39,8 +39,8 @@ void checkIfUsingSimulatorWithAPIValidation() { return instance.CreateSurface(&surfaceDescriptor); } -static std::span nsDataToSpan(NSData *data) { - return {static_cast(const_cast(data.bytes)), data.length}; +static std::span nsDataToSpan(NSData *data) { + return {static_cast(data.bytes), data.length}; } ImageData ApplePlatformContext::createImageBitmap(std::string blobId, @@ -83,7 +83,7 @@ void checkIfUsingSimulatorWithAPIValidation() { } ImageData ApplePlatformContext::createImageBitmapFromData( - std::span data) { + std::span data) { NSData *nsData = [NSData dataWithBytes:data.data() length:data.size()]; @@ -128,7 +128,7 @@ void checkIfUsingSimulatorWithAPIValidation() { } void ApplePlatformContext::createImageBitmapFromDataAsync( - std::span data, std::function onSuccess, + std::span data, std::function onSuccess, std::function onError) { // Copy span data into shared_ptr so the dispatch_async block owns the memory auto ownedData = 
diff --git a/packages/webgpu/cpp/rnwgpu/PlatformContext.h b/packages/webgpu/cpp/rnwgpu/PlatformContext.h index e95b680fd..e7a272476 100644 --- a/packages/webgpu/cpp/rnwgpu/PlatformContext.h +++ b/packages/webgpu/cpp/rnwgpu/PlatformContext.h @@ -34,10 +34,10 @@ class PlatformContext { std::function onError) = 0; // Create ImageBitmap from raw encoded image bytes (PNG/JPEG/etc.) - virtual ImageData createImageBitmapFromData(std::span data) = 0; + virtual ImageData createImageBitmapFromData(std::span data) = 0; virtual void createImageBitmapFromDataAsync( - std::span data, std::function onSuccess, + std::span data, std::function onSuccess, std::function onError) = 0; }; From 63084b7a1aa43a61ce6dccd026f2881f2947de46 Mon Sep 17 00:00:00 2001 From: Ben Hamilton Date: Thu, 19 Feb 2026 12:07:47 -0700 Subject: [PATCH 3/7] Avoid extra copy of image data when decoding --- packages/webgpu/apple/ApplePlatformContext.mm | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/packages/webgpu/apple/ApplePlatformContext.mm b/packages/webgpu/apple/ApplePlatformContext.mm index ba201033c..3cf4ac32d 100644 --- a/packages/webgpu/apple/ApplePlatformContext.mm +++ b/packages/webgpu/apple/ApplePlatformContext.mm @@ -84,8 +84,15 @@ void checkIfUsingSimulatorWithAPIValidation() { ImageData ApplePlatformContext::createImageBitmapFromData( std::span data) { - NSData *nsData = [NSData dataWithBytes:data.data() - length:data.size()]; + // This avoids a copy by assuming the UIImage/NSImage constructors + // decode `nsData` eagerly before the memory for the wrapped `data` + // is freed. + // + // Since we get the `CGImageRef` from `image` and then throw + // it away, that's a fairly safe assumption. 
+ NSData *nsData = [NSData dataWithBytesNoCopy:const_cast(data.data()) + length:data.size() + freeWhenDone:NO]; #if !TARGET_OS_OSX UIImage *image = [UIImage imageWithData:nsData]; From 6af5b83283418fb2218b6cde58eb0be37b7b2ca5 Mon Sep 17 00:00:00 2001 From: Ben Hamilton Date: Thu, 19 Feb 2026 12:22:35 -0700 Subject: [PATCH 4/7] - Properly detect ArrayBuffer vs TypedArray - Add unit tests for ArrayBuffer and TypedArray --- packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h | 37 ++++++++-- .../webgpu/src/__tests__/ImageData.spec.ts | 73 +++++++++++++++++++ 2 files changed, 102 insertions(+), 8 deletions(-) diff --git a/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h b/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h index 025a2df9a..8786ba076 100644 --- a/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h +++ b/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h @@ -5,7 +5,6 @@ #include "NativeObject.h" -#include "ArrayBuffer.h" #include "Canvas.h" #include "GPU.h" #include "GPUCanvasContext.h" @@ -88,17 +87,39 @@ class RNWebGPU : public NativeObject { auto platformContext = _platformContext; auto callInvoker = _callInvoker; - // Check if the argument is an ArrayBuffer or TypedArray + // Check if the argument is an ArrayBuffer or ArrayBufferView + // (TypedArray / DataView) if (args[0].isObject()) { auto obj = args[0].getObject(runtime); - if (obj.isArrayBuffer(runtime) || obj.hasProperty(runtime, "buffer")) { - auto arrayBuffer = - JSIConverter>::fromJSI( - runtime, args[0], false); + + const uint8_t *dataPtr = nullptr; + size_t dataSize = 0; + + if (obj.isArrayBuffer(runtime)) { + // Plain ArrayBuffer — use the full buffer + auto &ab = obj.getArrayBuffer(runtime); + dataPtr = ab.data(runtime); + dataSize = ab.size(runtime); + } else if (obj.hasProperty(runtime, "buffer")) { + // TypedArray or DataView — respect byteOffset/byteLength + auto bufferVal = obj.getProperty(runtime, "buffer"); + if (bufferVal.isObject() && + bufferVal.getObject(runtime).isArrayBuffer(runtime)) { + auto &ab = + 
bufferVal.getObject(runtime).getArrayBuffer(runtime); + auto byteOffset = static_cast( + obj.getProperty(runtime, "byteOffset").asNumber()); + auto byteLength = static_cast( + obj.getProperty(runtime, "byteLength").asNumber()); + dataPtr = ab.data(runtime) + byteOffset; + dataSize = byteLength; + } + } + + if (dataPtr != nullptr) { // Copy bytes on the JS thread — the ArrayBuffer pointer is into // JS-owned memory that can be GC'd - std::vector dataCopy(arrayBuffer->data(), - arrayBuffer->data() + arrayBuffer->size()); + std::vector dataCopy(dataPtr, dataPtr + dataSize); return Promise::createPromise( runtime, diff --git a/packages/webgpu/src/__tests__/ImageData.spec.ts b/packages/webgpu/src/__tests__/ImageData.spec.ts index 468b821c2..c51815502 100644 --- a/packages/webgpu/src/__tests__/ImageData.spec.ts +++ b/packages/webgpu/src/__tests__/ImageData.spec.ts @@ -1,3 +1,4 @@ +import fs from "fs"; import path from "path"; import { checkImage, client, encodeImage, decodeImage } from "./setup"; @@ -23,4 +24,76 @@ describe("Image Bitmap", () => { const image = encodeImage(result); checkImage(image, "snapshots/ref.png"); }); + // The following tests exercise the React Native ArrayBuffer/TypedArray + // overload of createImageBitmap, which is not part of the standard web API. 
+ it("createImageBitmap from ArrayBuffer", async () => { + if (client.OS === "web") { + return; + } + const pngBytes = Array.from( + fs.readFileSync(path.join(__dirname, "./assets/Di-3d.png")), + ); + const expected = decodeImage(path.join(__dirname, "./assets/Di-3d.png")); + const result = await client.eval( + async ({ pngData }) => { + const bytes = new Uint8Array(pngData); + const bmp = await createImageBitmap(bytes.buffer); + return { width: bmp.width, height: bmp.height }; + }, + { pngData: pngBytes }, + ); + expect(result.width).toBe(expected.width); + expect(result.height).toBe(expected.height); + }); + it("createImageBitmap from Uint8Array", async () => { + if (client.OS === "web") { + return; + } + const pngBytes = Array.from( + fs.readFileSync(path.join(__dirname, "./assets/Di-3d.png")), + ); + const expected = decodeImage(path.join(__dirname, "./assets/Di-3d.png")); + const result = await client.eval( + async ({ pngData }) => { + const bytes = new Uint8Array(pngData); + const bmp = await createImageBitmap(bytes); + return { width: bmp.width, height: bmp.height }; + }, + { pngData: pngBytes }, + ); + expect(result.width).toBe(expected.width); + expect(result.height).toBe(expected.height); + }); + it("createImageBitmap from Uint8Array subarray (byteOffset/byteLength)", async () => { + if (client.OS === "web") { + return; + } + const pngBytes = Array.from( + fs.readFileSync(path.join(__dirname, "./assets/Di-3d.png")), + ); + const expected = decodeImage(path.join(__dirname, "./assets/Di-3d.png")); + const result = await client.eval( + async ({ pngData }) => { + // Embed PNG bytes at an offset within a larger buffer + const padding = 128; + const totalLength = padding + pngData.length + padding; + const largeBuffer = new ArrayBuffer(totalLength); + const fullView = new Uint8Array(largeBuffer); + // Fill with garbage bytes + fullView.fill(0xff); + // Copy PNG bytes into the middle + const pngView = new Uint8Array(largeBuffer, padding, pngData.length); + for 
(let i = 0; i < pngData.length; i++) { + pngView[i] = pngData[i]; + } + // createImageBitmap must respect byteOffset/byteLength of the view, + // not use the full underlying ArrayBuffer (which has garbage padding) + const bmp = await createImageBitmap(pngView); + return { width: bmp.width, height: bmp.height }; + }, + { pngData: pngBytes }, + ); + expect(result.width).toBe(expected.width); + expect(result.height).toBe(expected.height); + }); }); From 63b2fe17b68c00d79b06b2ab9f66e0bf16ebf3ff Mon Sep 17 00:00:00 2001 From: Ben Hamilton Date: Thu, 19 Feb 2026 12:40:40 -0700 Subject: [PATCH 5/7] Use std::span for ArrayBuffer data for safety --- packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h b/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h index 8786ba076..2da4b3e5d 100644 --- a/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h +++ b/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h @@ -92,14 +92,12 @@ class RNWebGPU : public NativeObject { if (args[0].isObject()) { auto obj = args[0].getObject(runtime); - const uint8_t *dataPtr = nullptr; - size_t dataSize = 0; + std::span data; if (obj.isArrayBuffer(runtime)) { // Plain ArrayBuffer — use the full buffer auto &ab = obj.getArrayBuffer(runtime); - dataPtr = ab.data(runtime); - dataSize = ab.size(runtime); + data = {ab.data(runtime), ab.size(runtime)}; } else if (obj.hasProperty(runtime, "buffer")) { // TypedArray or DataView — respect byteOffset/byteLength auto bufferVal = obj.getProperty(runtime, "buffer"); @@ -111,15 +109,14 @@ class RNWebGPU : public NativeObject { obj.getProperty(runtime, "byteOffset").asNumber()); auto byteLength = static_cast( obj.getProperty(runtime, "byteLength").asNumber()); - dataPtr = ab.data(runtime) + byteOffset; - dataSize = byteLength; + data = {ab.data(runtime) + byteOffset, byteLength}; } } - if (dataPtr != nullptr) { + if (!data.empty()) { // Copy bytes on the JS thread — the ArrayBuffer 
pointer is into // JS-owned memory that can be GC'd - std::vector dataCopy(dataPtr, dataPtr + dataSize); + std::vector dataCopy(data.begin(), data.end()); return Promise::createPromise( runtime, From cd648f008854efffa56fee31a0bd384266e8a67c Mon Sep 17 00:00:00 2001 From: Ben Hamilton Date: Thu, 19 Feb 2026 14:31:20 -0700 Subject: [PATCH 6/7] auto & -> const auto & --- packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h b/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h index 2da4b3e5d..59fe14bd8 100644 --- a/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h +++ b/packages/webgpu/cpp/rnwgpu/api/RNWebGPU.h @@ -96,14 +96,14 @@ class RNWebGPU : public NativeObject { if (obj.isArrayBuffer(runtime)) { // Plain ArrayBuffer — use the full buffer - auto &ab = obj.getArrayBuffer(runtime); + const auto &ab = obj.getArrayBuffer(runtime); data = {ab.data(runtime), ab.size(runtime)}; } else if (obj.hasProperty(runtime, "buffer")) { // TypedArray or DataView — respect byteOffset/byteLength auto bufferVal = obj.getProperty(runtime, "buffer"); if (bufferVal.isObject() && bufferVal.getObject(runtime).isArrayBuffer(runtime)) { - auto &ab = + const auto &ab = bufferVal.getObject(runtime).getArrayBuffer(runtime); auto byteOffset = static_cast( obj.getProperty(runtime, "byteOffset").asNumber()); From 38c76dca9d5b7107edd30636822a9c73a048a0ac Mon Sep 17 00:00:00 2001 From: William Candillon Date: Tue, 24 Feb 2026 08:46:04 +0100 Subject: [PATCH 7/7] :wrench: --- apps/example/ios/Podfile.lock | 4 +- .../webgpu/src/__tests__/ImageData.spec.ts | 97 ++++++++++--------- packages/webgpu/src/__tests__/setup.ts | 12 +++ 3 files changed, 64 insertions(+), 49 deletions(-) diff --git a/apps/example/ios/Podfile.lock b/apps/example/ios/Podfile.lock index 10e49aa98..ccdb59915 100644 --- a/apps/example/ios/Podfile.lock +++ b/apps/example/ios/Podfile.lock @@ -1865,7 +1865,7 @@ PODS: - ReactCommon/turbomodule/core - 
SocketRocket - Yoga - - react-native-wgpu (0.5.3): + - react-native-wgpu (0.5.4): - boost - DoubleConversion - fast_float @@ -2938,7 +2938,7 @@ SPEC CHECKSUMS: React-microtasksnativemodule: 75b6604b667d297292345302cc5bfb6b6aeccc1b react-native-safe-area-context: c00143b4823773bba23f2f19f85663ae89ceb460 react-native-skia: 5bf2b2107cd7f2d806fd364f5e16b1c7554ed3cd - react-native-wgpu: 27d4c1aaa89ba015e8c02d5dbf8abeaa83c4d523 + react-native-wgpu: 5528610fabc9eb435d4ee578b4d6f7c1e133bf56 React-NativeModulesApple: 879fbdc5dcff7136abceb7880fe8a2022a1bd7c3 React-oscompat: 93b5535ea7f7dff46aaee4f78309a70979bdde9d React-perflogger: 5536d2df3d18fe0920263466f7b46a56351c0510 diff --git a/packages/webgpu/src/__tests__/ImageData.spec.ts b/packages/webgpu/src/__tests__/ImageData.spec.ts index c51815502..5b438581b 100644 --- a/packages/webgpu/src/__tests__/ImageData.spec.ts +++ b/packages/webgpu/src/__tests__/ImageData.spec.ts @@ -1,7 +1,13 @@ import fs from "fs"; import path from "path"; -import { checkImage, client, encodeImage, decodeImage } from "./setup"; +import { + checkImage, + client, + encodeImage, + decodeImage, + itSkipsOnWeb, +} from "./setup"; describe("Image Bitmap", () => { it("createImageBitmap (1)", async () => { @@ -26,74 +32,71 @@ describe("Image Bitmap", () => { }); // The following tests exercise the React Native ArrayBuffer/TypedArray // overload of createImageBitmap, which is not part of the standard web API. 
- it("createImageBitmap from ArrayBuffer", async () => { - if (client.OS === "web") { - return; - } + itSkipsOnWeb("createImageBitmap from ArrayBuffer", async () => { const pngBytes = Array.from( fs.readFileSync(path.join(__dirname, "./assets/Di-3d.png")), ); const expected = decodeImage(path.join(__dirname, "./assets/Di-3d.png")); const result = await client.eval( - async ({ pngData }) => { + ({ pngData }) => { const bytes = new Uint8Array(pngData); - const bmp = await createImageBitmap(bytes.buffer); - return { width: bmp.width, height: bmp.height }; + return createImageBitmap(bytes.buffer).then((bmp) => { + return { width: bmp.width, height: bmp.height }; + }); }, { pngData: pngBytes }, ); expect(result.width).toBe(expected.width); expect(result.height).toBe(expected.height); }); - it("createImageBitmap from Uint8Array", async () => { - if (client.OS === "web") { - return; - } + itSkipsOnWeb("createImageBitmap from Uint8Array", async () => { const pngBytes = Array.from( fs.readFileSync(path.join(__dirname, "./assets/Di-3d.png")), ); const expected = decodeImage(path.join(__dirname, "./assets/Di-3d.png")); const result = await client.eval( - async ({ pngData }) => { + ({ pngData }) => { const bytes = new Uint8Array(pngData); - const bmp = await createImageBitmap(bytes); - return { width: bmp.width, height: bmp.height }; - }, - { pngData: pngBytes }, - ); - expect(result.width).toBe(expected.width); - expect(result.height).toBe(expected.height); - }); - it("createImageBitmap from Uint8Array subarray (byteOffset/byteLength)", async () => { - if (client.OS === "web") { - return; - } - const pngBytes = Array.from( - fs.readFileSync(path.join(__dirname, "./assets/Di-3d.png")), - ); - const expected = decodeImage(path.join(__dirname, "./assets/Di-3d.png")); - const result = await client.eval( - async ({ pngData }) => { - // Embed PNG bytes at an offset within a larger buffer - const padding = 128; - const totalLength = padding + pngData.length + padding; - const 
largeBuffer = new ArrayBuffer(totalLength); - const fullView = new Uint8Array(largeBuffer); - // Fill with garbage bytes - fullView.fill(0xff); - // Copy PNG bytes into the middle - const pngView = new Uint8Array(largeBuffer, padding, pngData.length); - for (let i = 0; i < pngData.length; i++) { - pngView[i] = pngData[i]; - } - // createImageBitmap must respect byteOffset/byteLength of the view, - // not use the full underlying ArrayBuffer (which has garbage padding) - const bmp = await createImageBitmap(pngView); - return { width: bmp.width, height: bmp.height }; + return createImageBitmap(bytes).then((bmp) => { + return { width: bmp.width, height: bmp.height }; + }); }, { pngData: pngBytes }, ); expect(result.width).toBe(expected.width); expect(result.height).toBe(expected.height); }); + itSkipsOnWeb( + "createImageBitmap from Uint8Array subarray (byteOffset/byteLength)", + async () => { + const pngBytes = Array.from( + fs.readFileSync(path.join(__dirname, "./assets/Di-3d.png")), + ); + const expected = decodeImage(path.join(__dirname, "./assets/Di-3d.png")); + const result = await client.eval( + ({ pngData }) => { + // Embed PNG bytes at an offset within a larger buffer + const padding = 128; + const totalLength = padding + pngData.length + padding; + const largeBuffer = new ArrayBuffer(totalLength); + const fullView = new Uint8Array(largeBuffer); + // Fill with garbage bytes + fullView.fill(0xff); + // Copy PNG bytes into the middle + const pngView = new Uint8Array(largeBuffer, padding, pngData.length); + for (let i = 0; i < pngData.length; i++) { + pngView[i] = pngData[i]; + } + // createImageBitmap must respect byteOffset/byteLength of the view, + // not use the full underlying ArrayBuffer (which has garbage padding) + return createImageBitmap(pngView).then((bmp) => { + return { width: bmp.width, height: bmp.height }; + }); + }, + { pngData: pngBytes }, + ); + expect(result.width).toBe(expected.width); + expect(result.height).toBe(expected.height); + }, + ); 
}); diff --git a/packages/webgpu/src/__tests__/setup.ts b/packages/webgpu/src/__tests__/setup.ts index cdb8fdde7..3b2694294 100644 --- a/packages/webgpu/src/__tests__/setup.ts +++ b/packages/webgpu/src/__tests__/setup.ts @@ -407,6 +407,18 @@ export const checkImage = ( return 0; }; +export const itSkipsOnWeb = ( + name: string, + fn: () => Promise, +) => { + it(name, async () => { + if (client.OS === "web") { + return; + } + await fn(); + }); +}; + export const decodeImage = (relPath: string): BitmapData => { const p = path.resolve(__dirname, relPath); const data = fs.readFileSync(p);