diff --git a/.github/ISSUE_TEMPLATE/bug_report.yaml b/.github/ISSUE_TEMPLATE/bug_report.yaml
index 840d9c3..24987df 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yaml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yaml
@@ -80,7 +80,7 @@ body:
```csharp
// Your MRE code here
```
- render: shell
+ render: csharp
validations:
required: false
@@ -93,7 +93,7 @@ body:
```
(Paste full error message and stack trace here)
```
- render: shell
+ render: csharp
validations:
required: false
diff --git a/.github/ISSUE_TEMPLATE/documentation_issue.yaml b/.github/ISSUE_TEMPLATE/documentation_issue.yaml
index 1d3591d..d13874c 100644
--- a/.github/ISSUE_TEMPLATE/documentation_issue.yaml
+++ b/.github/ISSUE_TEMPLATE/documentation_issue.yaml
@@ -87,7 +87,7 @@ body:
```csharp
// Improved code example
```
- render: shell
+ render: markdown
validations:
required: false
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yaml
index 0c7ec68..599634f 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yaml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yaml
@@ -65,7 +65,7 @@ body:
// public class NewComponent : SoundComponent { ... }
// public void ExistingComponent.NewMethod(ParameterType param) { ... }
```
- render: shell
+ render: csharp
validations:
required: false
diff --git a/.github/ISSUE_TEMPLATE/performance_issue.yaml b/.github/ISSUE_TEMPLATE/performance_issue.yaml
index 6601c79..03e9cb2 100644
--- a/.github/ISSUE_TEMPLATE/performance_issue.yaml
+++ b/.github/ISSUE_TEMPLATE/performance_issue.yaml
@@ -77,7 +77,7 @@ body:
```csharp
// Your MRE code here
```
- render: shell
+ render: csharp
validations:
required: false
@@ -91,7 +91,7 @@ body:
* Exported profiling sessions (if shareable).
* Key findings from the profiler.
placeholder: Paste profiling data, links, or key findings here.
- render: shell
+ render: markdown
validations:
required: false
diff --git a/.github/ISSUE_TEMPLATE/platform_specific_issue.yaml b/.github/ISSUE_TEMPLATE/platform_specific_issue.yaml
index a8bb206..2f4f81d 100644
--- a/.github/ISSUE_TEMPLATE/platform_specific_issue.yaml
+++ b/.github/ISSUE_TEMPLATE/platform_specific_issue.yaml
@@ -125,7 +125,7 @@ body:
```csharp
// Your MRE code here
```
- render: shell
+ render: csharp
validations:
required: false
@@ -137,7 +137,7 @@ body:
```
(Paste full error message and stack trace here)
```
- render: shell
+ render: csharp
validations:
required: false
diff --git a/.github/ISSUE_TEMPLATE/question.yaml b/.github/ISSUE_TEMPLATE/question.yaml
index 86dd087..db4734c 100644
--- a/.github/ISSUE_TEMPLATE/question.yaml
+++ b/.github/ISSUE_TEMPLATE/question.yaml
@@ -58,7 +58,7 @@ body:
```csharp
// Your code here
```
- render: shell
+ render: csharp
validations:
required: false
diff --git a/.github/ISSUE_TEMPLATE/security_vulnerability_report.yaml b/.github/ISSUE_TEMPLATE/security_vulnerability_report.yaml
index a1f7e81..aef3391 100644
--- a/.github/ISSUE_TEMPLATE/security_vulnerability_report.yaml
+++ b/.github/ISSUE_TEMPLATE/security_vulnerability_report.yaml
@@ -71,7 +71,7 @@ body:
```csharp
// PoC code (if safe to share publicly)
```
- render: shell
+ render: markdown
validations:
required: false
diff --git a/.github/workflows/build-extensions-apm.yml b/.github/workflows/build-extensions-apm.yml
index 3532a66..f258cd1 100644
--- a/.github/workflows/build-extensions-apm.yml
+++ b/.github/workflows/build-extensions-apm.yml
@@ -48,7 +48,7 @@ jobs:
cross_file: "linux-arm64.crossfile"
# macOS builds
- - os: macos-latest
+ - os: macos-15-intel
rid: osx-x64
platform: macOS
arch: x86_64
diff --git a/.github/workflows/build-ffmpeg.yml b/.github/workflows/build-ffmpeg.yml
index e316eb9..5986161 100644
--- a/.github/workflows/build-ffmpeg.yml
+++ b/.github/workflows/build-ffmpeg.yml
@@ -162,7 +162,7 @@ jobs:
matrix:
include:
- arch: x64
- os: macos-13
+ os: macos-15-intel
cmake_arch: x86_64
- arch: arm64
os: macos-14
diff --git a/.github/workflows/build-miniaudio.yml b/.github/workflows/build-miniaudio.yml
index 3a47127..4d73353 100644
--- a/.github/workflows/build-miniaudio.yml
+++ b/.github/workflows/build-miniaudio.yml
@@ -160,14 +160,13 @@ jobs:
lib_extension: ".dll"
toolchain: "Visual Studio 17 2022"
- # Disabled due to https://github.com/mackron/miniaudio/issues/1045
- #- os: windows-latest
- # rid: win-arm64
- # platform: Windows
- # arch: ARM64
- # cmake_target_arch: ARM64
- # lib_extension: ".dll"
- # toolchain: "Visual Studio 17 2022"
+ - os: windows-latest
+ rid: win-arm64
+ platform: Windows
+ arch: ARM64
+ cmake_target_arch: ARM64
+ lib_extension: ".dll"
+ toolchain: "Visual Studio 17 2022"
# Linux builds
- os: ubuntu-22.04
@@ -199,7 +198,7 @@ jobs:
cmake_target_arch: arm64
lib_extension: ".dylib"
- - os: macos-latest
+ - os: macos-15-intel
rid: osx-x64
platform: macOS
arch: x86_64
diff --git a/.github/workflows/build-portmidi.yml b/.github/workflows/build-portmidi.yml
index 46a474f..b0b057d 100644
--- a/.github/workflows/build-portmidi.yml
+++ b/.github/workflows/build-portmidi.yml
@@ -45,7 +45,7 @@ jobs:
cross_compile: true
# macOS (Clang) - Universal binaries are avoided to produce discrete arch-specific libs.
- - os: macos-latest
+ - os: macos-15-intel
rid: osx-x64
platform: macOS
arch: x86_64
diff --git a/.gitmodules b/.gitmodules
index dd2b6e2..a111a99 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,4 +1,4 @@
-[submodule "Extensions/SoundFlow.Extensions.WebRtc.Apm/Native"]
+[submodule "Native/webrtc-audio-processing"]
path = Native/webrtc-audio-processing
url = https://github.com/LSXPrime/webrtc-audio-processing
[submodule "Native/miniaudio-backend/miniaudio"]
diff --git a/CITATION.cff b/CITATION.cff
new file mode 100644
index 0000000..0c30bd9
--- /dev/null
+++ b/CITATION.cff
@@ -0,0 +1,31 @@
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+type: software
+authors:
+ - family-names: "Abdallah"
+ given-names: "Ahmed"
+ email: "lsxprime@gmail.com"
+title: "SoundFlow: A high-performance, secure audio and MIDI engine for .NET"
+version: "1.4.0"
+date-released: "2026-01-06"
+repository-code: "https://github.com/LSXPrime/SoundFlow"
+url: "https://lsxprime.github.io/soundflow-docs/"
+license: MIT
+keywords:
+ - "audio-engine"
+ - ".net"
+ - "csharp"
+ - "digital-signal-processing"
+ - "midi"
+ - "cross-platform"
+ - "audio-synthesis"
+ - "audio-encryption"
+ - "audio-watermarking"
+abstract: >-
+ A high-performance, modular audio & MIDI engine for .NET 8+.
+ A complete toolkit for the entire audio lifecycle of
+ Playback, Recording, Multi-track Editing, Pro Synthesis
+ (MPE/SF2), Real-time DSP, and Visualization. Includes a
+ unique security suite for AES-256 encryption, acoustic
+ fingerprinting, and watermarking. Featuring SIMD and
+ NativeAOT support.
\ No newline at end of file
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/FFmpegDecoder.cs b/Codecs/SoundFlow.Codecs.FFMpeg/FFmpegDecoder.cs
index 5143d00..954067c 100644
--- a/Codecs/SoundFlow.Codecs.FFMpeg/FFmpegDecoder.cs
+++ b/Codecs/SoundFlow.Codecs.FFMpeg/FFmpegDecoder.cs
@@ -40,8 +40,10 @@ public FFmpegDecoder(Stream stream, AudioFormat targetFormat)
if (result != FFmpegResult.Success)
{
+ var logMessage = $"Failed to initialize FFmpeg decoder. Result: {result}";
+ Log.Error(logMessage);
_handle.Dispose();
- throw new FFmpegException(result, $"Failed to initialize FFmpeg decoder. Result: {result}");
+ throw new FFmpegException(result, logMessage);
}
SampleFormat = targetFormat.Format = nativeFormat;
@@ -51,8 +53,10 @@ public FFmpegDecoder(Stream stream, AudioFormat targetFormat)
var lengthInFrames = FFmpeg.GetLengthInPcmFrames(_handle);
if (lengthInFrames < 0)
{
+ const string logMessage = "Failed to get stream length, the decoder handle may be invalid.";
+ Log.Error(logMessage);
_handle.Dispose();
- throw new InvalidOperationException("Failed to get stream length; the decoder handle may be invalid.");
+ throw new InvalidOperationException(logMessage);
}
Length = (int)(lengthInFrames * Channels);
}
@@ -120,7 +124,7 @@ private unsafe nuint OnRead(IntPtr pUserData, IntPtr pBuffer, nuint bytesToRead)
}
catch
{
- Log.Critical("[FFmpegDecoder] Failed to read from stream.");
+ Log.Critical("Failed to read from stream.");
// Signal error/EOF to FFmpeg by returning 0. FFmpeg will handle this gracefully as AVERROR_EOF.
return 0;
}
@@ -135,7 +139,7 @@ private long OnSeek(IntPtr pUserData, long offset, SeekWhence whence)
}
catch
{
- Log.Critical("[FFmpegDecoder] Failed to seek stream.");
+ Log.Critical("Failed to seek stream.");
return -1;
}
}
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/FFmpegEncoder.cs b/Codecs/SoundFlow.Codecs.FFMpeg/FFmpegEncoder.cs
index 250e55e..c28af74 100644
--- a/Codecs/SoundFlow.Codecs.FFMpeg/FFmpegEncoder.cs
+++ b/Codecs/SoundFlow.Codecs.FFMpeg/FFmpegEncoder.cs
@@ -39,8 +39,10 @@ public FFmpegEncoder(Stream stream, string formatId, AudioFormat sourceFormat)
if (result != FFmpegResult.Success)
{
+ var logMessage = $"Failed to initialize FFmpeg encoder for format '{formatId}'. Result: {result}";
+ Log.Error(logMessage);
_handle.Dispose();
- throw new FFmpegException(result, $"Failed to initialize FFmpeg encoder for format '{formatId}'. Result: {result}");
+ throw new FFmpegException(result, logMessage);
}
}
@@ -77,7 +79,7 @@ private unsafe nuint OnWrite(IntPtr pUserData, IntPtr pBuffer, nuint bytesToWrit
}
catch
{
- Log.Critical("[FFmpegEncoder] Failed to write to stream.");
+ Log.Critical("Failed to write to stream.");
return 0;
}
}
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/README.md b/Codecs/SoundFlow.Codecs.FFMpeg/README.md
index 2473650..42d1904 100644
--- a/Codecs/SoundFlow.Codecs.FFMpeg/README.md
+++ b/Codecs/SoundFlow.Codecs.FFMpeg/README.md
@@ -21,12 +21,12 @@ By registering this extension with the SoundFlow engine, you can seamlessly load
This extension provides a high-performance and memory-efficient bridge to FFmpeg's audio capabilities:
* **Broad Codec Support:** Adds support for dozens of popular audio formats, including:
- * **Lossy:** MP3, AAC, Ogg Vorbis, Opus, WMA, AC3
+ * **Lossy:** MP3 (encoded via LAME), AAC, Ogg Vorbis, Opus, WMA, AC3
* **Lossless:** FLAC, ALAC (Apple Lossless), APE, WavPack (WV), TTA
* **And many more container and raw formats.**
* **Seamless Integration:** Implements a high-priority `ICodecFactory`, allowing SoundFlow to automatically use FFmpeg for supported formats with no changes to your existing playback or recording code.
* **High Performance & Efficiency:** Works directly with streams using a callback-based native wrapper. This avoids loading entire audio files into memory, making it ideal for large files and network streams.
-* **Cross-Platform:** Includes pre-compiled native binaries for Windows, macOS, and Linux (x64, x86, ARM64), ensuring it works wherever SoundFlow runs.
+* **Cross-Platform:** Includes pre-compiled native binaries for Windows, macOS, Linux, Android, iOS, and FreeBSD (x64, x86, ARM64), ensuring it works wherever SoundFlow runs.
* **Automatic Format Conversion:** The native wrapper intelligently uses FFmpeg's `swresample` library to automatically convert audio from its source format to the format required by your application (e.g., 32-bit float), simplifying your audio pipeline.
## Getting Started
@@ -93,18 +93,18 @@ device.Stop();
## Technical Details
-The native library included in this package is a custom-built, lightweight version of FFmpeg. To minimize binary size, it is configured with a "disable-all, enable-specific" strategy. The build includes a curated set of audio-only components and excludes all video processing, hardware acceleration, networking protocols (except `file` and `pipe`), and other non-essential features.
+The native library included in this package is a custom-built, lightweight wrapper around FFmpeg and the LAME MP3 encoder. To minimize binary size, it is configured with a "disable-all, enable-specific" strategy. The build includes a curated set of audio-only components and excludes all video processing, hardware acceleration, networking protocols (except `file` and `pipe`), and other non-essential features.
This results in a small, focused, and highly efficient native dependency tailored specifically for SoundFlow's audio processing needs.
## Origin and Licensing
-This `SoundFlow.Codecs.FFMpeg` package consists of C# wrapper code and a custom native library that statically links against FFmpeg libraries.
+This `SoundFlow.Codecs.FFMpeg` package consists of C# wrapper code and a custom native library that statically links against FFmpeg and LAME libraries.
* The C# code within this `SoundFlow.Codecs.FFMpeg` package is licensed under the **MIT License**.
-* The included FFmpeg build is configured to be compatible with **LGPL v2.1 or later**. It is compiled with `--disable-gpl` and `--disable-nonfree` flags. Your use of this package must comply with the terms of the LGPL. This generally means that if you dynamically link to this library, you can use it in proprietary software, but if you modify the FFmpeg source code itself, you must release those changes.
+* The included native library builds upon FFmpeg and LAME, both of which are licensed under the **LGPL v2.1 or later**. The FFmpeg build is configured with `--disable-gpl` and `--disable-nonfree` flags. Your use of this package must comply with the terms of the LGPL. This generally means that if you dynamically link to this library, you can use it in proprietary software, but if you modify the FFmpeg or LAME source code itself, you must release those changes.
-**Users of this package must comply with the terms of BOTH the MIT License (for the C# wrapper) and the LGPL (for the underlying FFmpeg components).** For detailed information, please consult the official [FFmpeg Licensing Page](https://ffmpeg.org/legal.html).
+**Users of this package must comply with the terms of BOTH the MIT License (for the C# wrapper) and the LGPL (for the underlying FFmpeg and LAME components).** For detailed information, please consult the official [FFmpeg Licensing Page](https://ffmpeg.org/legal.html) and the [LAME Project Website](https://lame.sourceforge.io/).
## Contributing
@@ -112,7 +112,7 @@ Contributions to `SoundFlow.Codecs.FFMpeg` are welcome! Please open issues or su
## Acknowledgments
-This package would not be possible without the incredible work of the **FFmpeg project team and its contributors**.
+This package would not be possible without the incredible work of the **FFmpeg project team and its contributors**. Special thanks to the **LAME project** for the high-quality MP3 encoder.
## License
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/SoundFlow.Codecs.FFMpeg.csproj b/Codecs/SoundFlow.Codecs.FFMpeg/SoundFlow.Codecs.FFMpeg.csproj
index 2a52e4b..239af7f 100644
--- a/Codecs/SoundFlow.Codecs.FFMpeg/SoundFlow.Codecs.FFMpeg.csproj
+++ b/Codecs/SoundFlow.Codecs.FFMpeg/SoundFlow.Codecs.FFMpeg.csproj
@@ -5,9 +5,11 @@
enableenabletrue
+ true
+ true
SoundFlow FFmpeg CodecsSoundFlow.Codecs.FFMpeg
- 1.3.0
+ 1.4.0Provides FFmpeg-based audio codecs (MP3, AAC, OGG, Opus, etc.) for the SoundFlow audio engine.soundflow;audio;codec;ffmpeg;mp3;aac;ogg;opustrue
@@ -33,6 +35,10 @@
Alwaystrue
+
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-arm/native/libsoundflow-ffmpeg.so b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-arm/native/libsoundflow-ffmpeg.so
index ebf79a0..3c74dc4 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-arm/native/libsoundflow-ffmpeg.so and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-arm/native/libsoundflow-ffmpeg.so differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-arm64/native/libsoundflow-ffmpeg.so b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-arm64/native/libsoundflow-ffmpeg.so
index 2c3a91d..8c4d373 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-arm64/native/libsoundflow-ffmpeg.so and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-arm64/native/libsoundflow-ffmpeg.so differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-x64/native/libsoundflow-ffmpeg.so b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-x64/native/libsoundflow-ffmpeg.so
index 3166b13..1d990df 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-x64/native/libsoundflow-ffmpeg.so and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/android-x64/native/libsoundflow-ffmpeg.so differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/freebsd-arm64/native/libsoundflow-ffmpeg.so b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/freebsd-arm64/native/libsoundflow-ffmpeg.so
index 330a9a3..077aa95 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/freebsd-arm64/native/libsoundflow-ffmpeg.so and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/freebsd-arm64/native/libsoundflow-ffmpeg.so differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/freebsd-x64/native/libsoundflow-ffmpeg.so b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/freebsd-x64/native/libsoundflow-ffmpeg.so
index ba0999c..6ca79d2 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/freebsd-x64/native/libsoundflow-ffmpeg.so and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/freebsd-x64/native/libsoundflow-ffmpeg.so differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/ios-arm64/native/soundflow-ffmpeg.framework/soundflow-ffmpeg b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/ios-arm64/native/soundflow-ffmpeg.framework/soundflow-ffmpeg
index d8682f1..22a9475 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/ios-arm64/native/soundflow-ffmpeg.framework/soundflow-ffmpeg and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/ios-arm64/native/soundflow-ffmpeg.framework/soundflow-ffmpeg differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-arm/native/libsoundflow-ffmpeg.so b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-arm/native/libsoundflow-ffmpeg.so
index 767ccf2..f0e2a72 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-arm/native/libsoundflow-ffmpeg.so and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-arm/native/libsoundflow-ffmpeg.so differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-arm64/native/libsoundflow-ffmpeg.so b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-arm64/native/libsoundflow-ffmpeg.so
index 6d46a6e..c2ba2be 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-arm64/native/libsoundflow-ffmpeg.so and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-arm64/native/libsoundflow-ffmpeg.so differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-x64/native/libsoundflow-ffmpeg.so b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-x64/native/libsoundflow-ffmpeg.so
index 7daa9a9..a76c125 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-x64/native/libsoundflow-ffmpeg.so and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/linux-x64/native/libsoundflow-ffmpeg.so differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/osx-arm64/native/libsoundflow-ffmpeg.dylib b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/osx-arm64/native/libsoundflow-ffmpeg.dylib
index 191082d..27fcc94 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/osx-arm64/native/libsoundflow-ffmpeg.dylib and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/osx-arm64/native/libsoundflow-ffmpeg.dylib differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/osx-x64/native/libsoundflow-ffmpeg.dylib b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/osx-x64/native/libsoundflow-ffmpeg.dylib
index 4c1dc43..4d5ae74 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/osx-x64/native/libsoundflow-ffmpeg.dylib and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/osx-x64/native/libsoundflow-ffmpeg.dylib differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-arm64/native/soundflow-ffmpeg.dll b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-arm64/native/soundflow-ffmpeg.dll
index f130139..0494028 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-arm64/native/soundflow-ffmpeg.dll and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-arm64/native/soundflow-ffmpeg.dll differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-x64/native/soundflow-ffmpeg.dll b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-x64/native/soundflow-ffmpeg.dll
index 5472d1a..6418ec0 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-x64/native/soundflow-ffmpeg.dll and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-x64/native/soundflow-ffmpeg.dll differ
diff --git a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-x86/native/soundflow-ffmpeg.dll b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-x86/native/soundflow-ffmpeg.dll
index c42e1e7..e919c98 100644
Binary files a/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-x86/native/soundflow-ffmpeg.dll and b/Codecs/SoundFlow.Codecs.FFMpeg/runtimes/win-x86/native/soundflow-ffmpeg.dll differ
diff --git a/Extensions/SoundFlow.Extensions.WebRtc.Apm/AudioProcessingModule.cs b/Extensions/SoundFlow.Extensions.WebRtc.Apm/AudioProcessingModule.cs
index 1a7829b..20514b2 100644
--- a/Extensions/SoundFlow.Extensions.WebRtc.Apm/AudioProcessingModule.cs
+++ b/Extensions/SoundFlow.Extensions.WebRtc.Apm/AudioProcessingModule.cs
@@ -117,6 +117,7 @@ private void Dispose(bool disposing)
}
}
+ ///
~StreamConfig()
{
Dispose(false);
@@ -227,7 +228,8 @@ protected virtual void Dispose(bool disposing)
_disposedValue = true;
}
}
-
+
+ ///
~ProcessingConfig()
{
Dispose(false);
@@ -369,6 +371,7 @@ private void Dispose(bool disposing)
}
}
+ ///
~ApmConfig()
{
Dispose(false);
@@ -760,6 +763,7 @@ private void Dispose(bool disposing)
}
}
+ ///
~AudioProcessingModule()
{
Dispose(false);
diff --git a/Extensions/SoundFlow.Extensions.WebRtc.Apm/Components/NoiseSuppressor.cs b/Extensions/SoundFlow.Extensions.WebRtc.Apm/Components/NoiseSuppressor.cs
index 3c90d5a..4fed7af 100644
--- a/Extensions/SoundFlow.Extensions.WebRtc.Apm/Components/NoiseSuppressor.cs
+++ b/Extensions/SoundFlow.Extensions.WebRtc.Apm/Components/NoiseSuppressor.cs
@@ -294,6 +294,7 @@ public void Dispose()
}
}
+ ///
~NoiseSuppressor()
{
Dispose();
diff --git a/Extensions/SoundFlow.Extensions.WebRtc.Apm/Modifiers/WebRtcApmModifier.cs b/Extensions/SoundFlow.Extensions.WebRtc.Apm/Modifiers/WebRtcApmModifier.cs
index 065ab45..a1fa6b0 100644
--- a/Extensions/SoundFlow.Extensions.WebRtc.Apm/Modifiers/WebRtcApmModifier.cs
+++ b/Extensions/SoundFlow.Extensions.WebRtc.Apm/Modifiers/WebRtcApmModifier.cs
@@ -620,7 +620,7 @@ private void InitializeApmAndFeatures()
}
catch (Exception ex)
{
- Log.Error($"[WebRtcApmModifier] Init Exception: {ex.Message}");
+ Log.Error($"Init Exception: {ex.Message}");
Enabled = false;
DisposeApmNativeResources();
throw;
diff --git a/Extensions/SoundFlow.Extensions.WebRtc.Apm/SoundFlow.Extensions.WebRtc.Apm.csproj b/Extensions/SoundFlow.Extensions.WebRtc.Apm/SoundFlow.Extensions.WebRtc.Apm.csproj
index 08cb385..02de82b 100644
--- a/Extensions/SoundFlow.Extensions.WebRtc.Apm/SoundFlow.Extensions.WebRtc.Apm.csproj
+++ b/Extensions/SoundFlow.Extensions.WebRtc.Apm/SoundFlow.Extensions.WebRtc.Apm.csproj
@@ -5,6 +5,8 @@
enableenabletrue
+ true
+ trueSoundFlow WebRTC APM ExtensionWebRTC Audio Processing Module (APM) extension for SoundFlow, providing advanced audio processing capabilities like noise suppression, echo cancellation, and gain control.audio, webrtc, apm, noise-suppression, echo-cancellation, sound-processing
@@ -15,7 +17,7 @@
Githublogo.pngREADME.md
- 1.0.4
+ 1.4.0true
@@ -30,6 +32,10 @@
Alwaystrue
+
diff --git a/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/android-arm64/native/libwebrtc-apm.so b/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/android-arm64/native/libwebrtc-apm.so
index 2cc6f57..46905c8 100644
Binary files a/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/android-arm64/native/libwebrtc-apm.so and b/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/android-arm64/native/libwebrtc-apm.so differ
diff --git a/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/android-x64/native/libwebrtc-apm.so b/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/android-x64/native/libwebrtc-apm.so
index 1b5447e..9502acb 100644
Binary files a/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/android-x64/native/libwebrtc-apm.so and b/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/android-x64/native/libwebrtc-apm.so differ
diff --git a/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/ios-arm64/native/libwebrtc-apm.dylib b/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/ios-arm64/native/libwebrtc-apm.dylib
index fb2e810..8061371 100644
Binary files a/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/ios-arm64/native/libwebrtc-apm.dylib and b/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/ios-arm64/native/libwebrtc-apm.dylib differ
diff --git a/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/win-x64/native/webrtc-apm.dll b/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/win-x64/native/webrtc-apm.dll
index 250bf88..f86131d 100644
Binary files a/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/win-x64/native/webrtc-apm.dll and b/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/win-x64/native/webrtc-apm.dll differ
diff --git a/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/win-x86/native/webrtc-apm.dll b/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/win-x86/native/webrtc-apm.dll
index 51125dd..35c928d 100644
Binary files a/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/win-x86/native/webrtc-apm.dll and b/Extensions/SoundFlow.Extensions.WebRtc.Apm/runtimes/win-x86/native/webrtc-apm.dll differ
diff --git a/Midi/SoundFlow.Midi.PortMidi/Devices/PortMidiInputDevice.cs b/Midi/SoundFlow.Midi.PortMidi/Devices/PortMidiInputDevice.cs
index 74b7b45..4839302 100644
--- a/Midi/SoundFlow.Midi.PortMidi/Devices/PortMidiInputDevice.cs
+++ b/Midi/SoundFlow.Midi.PortMidi/Devices/PortMidiInputDevice.cs
@@ -1,9 +1,7 @@
-using SoundFlow.Abstracts.Devices;
-using SoundFlow.Midi.Devices;
+using SoundFlow.Midi.Devices;
using SoundFlow.Midi.PortMidi.Enums;
using SoundFlow.Midi.PortMidi.Exceptions;
using SoundFlow.Midi.Structs;
-using SoundFlow.Structs;
using SoundFlow.Utils;
namespace SoundFlow.Midi.PortMidi.Devices;
@@ -67,7 +65,7 @@ private unsafe void PollForMessages()
// A non-real-time status byte terminates any ongoing SysEx message.
if (inSysEx && (status & 0x80) != 0 && status < 0xF8 && status != 0xF7)
{
- Log.Warning("[PortMidi] SysEx message was truncated by a new status byte.");
+ Log.Warning("SysEx message was truncated by a new status byte.");
inSysEx = false;
sysexBuffer.Clear();
}
diff --git a/Midi/SoundFlow.Midi.PortMidi/SoundFlow.Midi.PortMidi.csproj b/Midi/SoundFlow.Midi.PortMidi/SoundFlow.Midi.PortMidi.csproj
index cddd6b6..2c62b85 100644
--- a/Midi/SoundFlow.Midi.PortMidi/SoundFlow.Midi.PortMidi.csproj
+++ b/Midi/SoundFlow.Midi.PortMidi/SoundFlow.Midi.PortMidi.csproj
@@ -5,6 +5,8 @@
enableenabletrue
+ true
+ trueSoundFlow.Midi.PortMidiA PortMidi backend for the SoundFlow audio engine, providing cross-platform MIDI I/O capabilities.Copyright (c) 2025 LSXPrime
@@ -15,7 +17,7 @@
logo.pngREADME.mdaudio, sound, midi, portmidi, crossplatform, c#, .net
- 1.3.0
+ 1.4.0https://github.com/LSXPrime/SoundFlow/releasestrueLSXPrime
@@ -32,6 +34,10 @@
Alwaystrue
+
diff --git a/Midi/SoundFlow.Midi.PortMidi/runtimes/freebsd-arm64/native/libportmidi.so b/Midi/SoundFlow.Midi.PortMidi/runtimes/freebsd-arm64/native/libportmidi.so
index d16d5f3..dbf9e13 100644
Binary files a/Midi/SoundFlow.Midi.PortMidi/runtimes/freebsd-arm64/native/libportmidi.so and b/Midi/SoundFlow.Midi.PortMidi/runtimes/freebsd-arm64/native/libportmidi.so differ
diff --git a/Midi/SoundFlow.Midi.PortMidi/runtimes/freebsd-x64/native/libportmidi.so b/Midi/SoundFlow.Midi.PortMidi/runtimes/freebsd-x64/native/libportmidi.so
index ddacba1..e220c89 100644
Binary files a/Midi/SoundFlow.Midi.PortMidi/runtimes/freebsd-x64/native/libportmidi.so and b/Midi/SoundFlow.Midi.PortMidi/runtimes/freebsd-x64/native/libportmidi.so differ
diff --git a/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-arm/native/libportmidi.so b/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-arm/native/libportmidi.so
index 3ea70d3..5aaa0ed 100644
Binary files a/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-arm/native/libportmidi.so and b/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-arm/native/libportmidi.so differ
diff --git a/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-arm64/native/libportmidi.so b/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-arm64/native/libportmidi.so
index cb02d20..950abdd 100644
Binary files a/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-arm64/native/libportmidi.so and b/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-arm64/native/libportmidi.so differ
diff --git a/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-x64/native/libportmidi.so b/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-x64/native/libportmidi.so
index 356a5cd..e1fc63d 100644
Binary files a/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-x64/native/libportmidi.so and b/Midi/SoundFlow.Midi.PortMidi/runtimes/linux-x64/native/libportmidi.so differ
diff --git a/Midi/SoundFlow.Midi.PortMidi/runtimes/win-arm64/native/portmidi.dll b/Midi/SoundFlow.Midi.PortMidi/runtimes/win-arm64/native/portmidi.dll
index 0db67af..bd68fac 100644
Binary files a/Midi/SoundFlow.Midi.PortMidi/runtimes/win-arm64/native/portmidi.dll and b/Midi/SoundFlow.Midi.PortMidi/runtimes/win-arm64/native/portmidi.dll differ
diff --git a/Midi/SoundFlow.Midi.PortMidi/runtimes/win-x64/native/portmidi.dll b/Midi/SoundFlow.Midi.PortMidi/runtimes/win-x64/native/portmidi.dll
index 87f5ca2..ebc3199 100644
Binary files a/Midi/SoundFlow.Midi.PortMidi/runtimes/win-x64/native/portmidi.dll and b/Midi/SoundFlow.Midi.PortMidi/runtimes/win-x64/native/portmidi.dll differ
diff --git a/Midi/SoundFlow.Midi.PortMidi/runtimes/win-x86/native/portmidi.dll b/Midi/SoundFlow.Midi.PortMidi/runtimes/win-x86/native/portmidi.dll
index be8d354..561bd9e 100644
Binary files a/Midi/SoundFlow.Midi.PortMidi/runtimes/win-x86/native/portmidi.dll and b/Midi/SoundFlow.Midi.PortMidi/runtimes/win-x86/native/portmidi.dll differ
diff --git a/Native/ffmpeg-codec/CMakeLists.txt b/Native/ffmpeg-codec/CMakeLists.txt
index 7c8d7d7..41491c2 100644
--- a/Native/ffmpeg-codec/CMakeLists.txt
+++ b/Native/ffmpeg-codec/CMakeLists.txt
@@ -3,6 +3,12 @@ project(SoundFlowFFmpeg C)
cmake_policy(SET CMP0135 NEW)
+# LAME - Set macOS Deployment Target
+if(CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND NOT CMAKE_OSX_DEPLOYMENT_TARGET)
+ set(CMAKE_OSX_DEPLOYMENT_TARGET "10.13")
+ message(STATUS "CMAKE_OSX_DEPLOYMENT_TARGET not set. Defaulting to ${CMAKE_OSX_DEPLOYMENT_TARGET}")
+endif()
+
# Helper – normalise CPU names that vary between OSes / toolchains
function(normalise_arch IN OUT)
string(TOLOWER "${IN}" _arch)
@@ -71,16 +77,27 @@ endif()
set(RUNTIME_ID "${TARGET_OS}-${TARGET_ARCH}")
set(OUTPUT_DIR "${CMAKE_BINARY_DIR}/runtimes/${RUNTIME_ID}/native")
set(FFMPEG_INSTALL_DIR "${CMAKE_BINARY_DIR}/ffmpeg-install")
+set(LAME_INSTALL_DIR "${CMAKE_BINARY_DIR}/lame-install")
-# FFmpeg – download/build parameters
+# FFmpeg & LAME download parameters
set(FFMPEG_VERSION "8.0")
set(FFMPEG_URL "https://ffmpeg.org/releases/ffmpeg-${FFMPEG_VERSION}.tar.gz")
+set(LAME_URL "https://downloads.sourceforge.net/project/lame/lame/3.100/lame-3.100.tar.gz")
if(NOT CMAKE_BUILD_PARALLEL_LEVEL)
set(CMAKE_BUILD_PARALLEL_LEVEL 1)
endif()
-# Common configure flags
+# Common LAME Configure Flags
+set(LAME_COMMON_FLAGS
+ --disable-shared
+ --enable-static
+ --with-pic
+ --disable-frontend
+ --disable-decoder
+)
+
+# Common FFmpeg Configure Flags
set(FFMPEG_COMMON_FLAGS
--disable-gpl
--disable-nonfree
@@ -90,6 +107,12 @@ set(FFMPEG_COMMON_FLAGS
--enable-static
--enable-pic
--disable-symver
+
+
+ # Library Linking (LAME Support)
+ --enable-libmp3lame
+ "--extra-cflags=-I${LAME_INSTALL_DIR}/include"
+ "--extra-ldflags=-L${LAME_INSTALL_DIR}/lib"
# General Disables
--disable-doc
@@ -288,6 +311,7 @@ set(FFMPEG_COMMON_FLAGS
--enable-encoder=eac3
--enable-encoder=flac
--enable-encoder=mp2
+ --enable-encoder=libmp3lame
--enable-encoder=nellymoser
--enable-encoder=opus
--enable-encoder=sbc
@@ -346,6 +370,9 @@ if(TARGET_OS STREQUAL "win")
set(FFMPEG_ARCH "aarch64")
endif()
+ # LAME MinGW setup
+ list(APPEND LAME_COMMON_FLAGS --host=${FFMPEG_ARCH}-w64-mingw32)
+
list(APPEND FFMPEG_COMMON_FLAGS
--target-os=mingw32
--arch=${FFMPEG_ARCH}
@@ -359,18 +386,26 @@ if(TARGET_OS STREQUAL "win")
elseif(TARGET_OS STREQUAL "osx")
if(TARGET_ARCH STREQUAL "x64")
set(FFMPEG_ARCH "x86_64")
+ set(MACOS_ARCH_CLANG "x86_64")
set(MACOS_TARGET "x86_64-apple-darwin")
else()
set(FFMPEG_ARCH "aarch64")
- set(MACOS_TARGET "arm64-apple-darwin")
+ set(MACOS_ARCH_CLANG "arm64")
+ set(MACOS_TARGET "aarch64-apple-darwin")
endif()
+ # LAME macOS setup
+ list(APPEND LAME_COMMON_FLAGS
+ --host=${MACOS_TARGET}
+ "CFLAGS=-arch ${MACOS_ARCH_CLANG} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -Wno-implicit-function-declaration"
+ "LDFLAGS=-arch ${MACOS_ARCH_CLANG} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
+
list(APPEND FFMPEG_COMMON_FLAGS
--target-os=darwin
--arch=${FFMPEG_ARCH}
--enable-cross-compile
- "--extra-cflags=-target ${MACOS_TARGET}"
- "--extra-ldflags=-target ${MACOS_TARGET}"
+ "--extra-cflags=-target ${MACOS_TARGET} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}"
+ "--extra-ldflags=-target ${MACOS_TARGET} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}"
--enable-runtime-cpudetect)
elseif(TARGET_OS STREQUAL "linux")
@@ -384,6 +419,13 @@ elseif(TARGET_OS STREQUAL "linux")
set(LINUX_CROSS_PREFIX "aarch64-linux-gnu-")
endif()
+ # LAME Linux setup
+ if(CMAKE_CROSSCOMPILING)
+ # remove the trailing dash from prefix to get the host
+ string(REGEX REPLACE "-$" "" LAME_HOST "${LINUX_CROSS_PREFIX}")
+ list(APPEND LAME_COMMON_FLAGS --host=${LAME_HOST})
+ endif()
+
list(APPEND FFMPEG_COMMON_FLAGS
--target-os=linux
--arch=${FFMPEG_ARCH}
@@ -419,6 +461,14 @@ elseif(TARGET_OS STREQUAL "android")
# Construct the full path to the correct NDK compiler wrapper script
set(ANDROID_COMPILER_WRAPPER "${NDK_TOOLCHAIN_BIN_DIR}/${ANDROID_TARGET_TRIPLE}${ANDROID_API_LEVEL}-clang")
+ # LAME Android setup
+ list(APPEND LAME_COMMON_FLAGS
+ --host=${ANDROID_TARGET_TRIPLE}
+ --with-sysroot=${CMAKE_SYSROOT}
+ "CC=${ANDROID_COMPILER_WRAPPER}"
+ "AR=${NDK_TOOLCHAIN_BIN_DIR}/llvm-ar"
+ "RANLIB=${NDK_TOOLCHAIN_BIN_DIR}/llvm-ranlib")
+
list(APPEND FFMPEG_COMMON_FLAGS
--target-os=android
--arch=${FFMPEG_ARCH}
@@ -429,6 +479,12 @@ elseif(TARGET_OS STREQUAL "android")
--strip=${NDK_TOOLCHAIN_BIN_DIR}/llvm-strip)
elseif(TARGET_OS STREQUAL "ios")
+ # LAME iOS setup
+ list(APPEND LAME_COMMON_FLAGS
+ --host=aarch64-apple-darwin
+ "CFLAGS=-arch arm64 -isysroot ${CMAKE_OSX_SYSROOT} -miphoneos-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -Wno-implicit-function-declaration"
+ "LDFLAGS=-arch arm64 -isysroot ${CMAKE_OSX_SYSROOT} -miphoneos-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET}")
+
list(APPEND FFMPEG_COMMON_FLAGS
--target-os=darwin
--arch=aarch64
@@ -457,18 +513,33 @@ endif()
# External build of FFmpeg
find_program(BASH_EXECUTABLE bash REQUIRED)
-if(CMAKE_GENERATOR STREQUAL "MinGW Makefiles")
- find_program(FFMPEG_MAKE_EXECUTABLE make HINTS "C:/msys64/usr/bin")
- if(NOT FFMPEG_MAKE_EXECUTABLE)
- message(FATAL_ERROR "Could not find MSYS 'make'. It's required to build FFmpeg with MinGW Makefiles.")
- endif()
-else()
- # For all other generators (Unix Makefiles, Ninja, etc.), CMAKE_MAKE_PROGRAM is fine.
- set(FFMPEG_MAKE_EXECUTABLE ${CMAKE_MAKE_PROGRAM})
+find_program(MAKE_EXECUTABLE NAMES gmake make HINTS "C:/msys64/usr/bin" "/usr/bin")
+if(NOT MAKE_EXECUTABLE)
+ message(FATAL_ERROR "Could not find 'make' or 'gmake'. It is required to build FFmpeg/LAME via autotools.")
endif()
+message(STATUS "Using Make program: ${MAKE_EXECUTABLE}")
include(ExternalProject)
+
+# LAME Dependency
+ExternalProject_Add(lame_dependency
+ URL ${LAME_URL}
+ PREFIX ${CMAKE_BINARY_DIR}/lame
+ INSTALL_DIR ${LAME_INSTALL_DIR}
+ BUILD_IN_SOURCE 1
+  CONFIGURE_COMMAND ${BASH_EXECUTABLE} <SOURCE_DIR>/configure
+ --prefix=${LAME_INSTALL_DIR}
+ ${LAME_COMMON_FLAGS}
+ BUILD_COMMAND ${MAKE_EXECUTABLE} -j${CMAKE_BUILD_PARALLEL_LEVEL}
+ INSTALL_COMMAND ${MAKE_EXECUTABLE} install
+ LOG_DOWNLOAD 1
+ LOG_CONFIGURE 1
+ LOG_BUILD 1
+ LOG_INSTALL 1
+)
+
ExternalProject_Add(ffmpeg_dependency
+ DEPENDS lame_dependency
URL ${FFMPEG_URL}
PREFIX ${CMAKE_BINARY_DIR}/ffmpeg
INSTALL_DIR ${FFMPEG_INSTALL_DIR}
@@ -476,8 +547,8 @@ ExternalProject_Add(ffmpeg_dependency
  CONFIGURE_COMMAND ${BASH_EXECUTABLE} <SOURCE_DIR>/configure
--prefix=${FFMPEG_INSTALL_DIR}
${FFMPEG_COMMON_FLAGS}
- BUILD_COMMAND ${FFMPEG_MAKE_EXECUTABLE} -j${CMAKE_BUILD_PARALLEL_LEVEL}
- INSTALL_COMMAND ${FFMPEG_MAKE_EXECUTABLE} install
+ BUILD_COMMAND ${MAKE_EXECUTABLE} -j${CMAKE_BUILD_PARALLEL_LEVEL}
+ INSTALL_COMMAND ${MAKE_EXECUTABLE} install
LOG_DOWNLOAD 1
LOG_CONFIGURE 1
LOG_BUILD 1
@@ -519,10 +590,13 @@ target_include_directories(soundflow-ffmpeg PRIVATE
${FFMPEG_INSTALL_DIR}/include)
target_link_directories(soundflow-ffmpeg PRIVATE
- ${FFMPEG_INSTALL_DIR}/lib)
+ ${FFMPEG_INSTALL_DIR}/lib
+ ${LAME_INSTALL_DIR}/lib)
+# Link libraries by name.
target_link_libraries(soundflow-ffmpeg PRIVATE
- avformat avcodec swresample avutil)
+ avformat avcodec swresample avutil
+ mp3lame)
# Platform system libraries
if(TARGET_OS STREQUAL "win")
@@ -544,4 +618,4 @@ elseif(TARGET_OS STREQUAL "linux" OR TARGET_OS STREQUAL "freebsd")
elseif(TARGET_OS STREQUAL "android")
target_link_libraries(soundflow-ffmpeg PRIVATE m atomic)
target_link_options(soundflow-ffmpeg PRIVATE "-Wl,-z,max-page-size=16384")
-endif()
+endif()
\ No newline at end of file
diff --git a/Native/ffmpeg-codec/soundflow-ffmpeg.c b/Native/ffmpeg-codec/soundflow-ffmpeg.c
index 95a8eb0..554a626 100644
--- a/Native/ffmpeg-codec/soundflow-ffmpeg.c
+++ b/Native/ffmpeg-codec/soundflow-ffmpeg.c
@@ -4,6 +4,8 @@
#include
#include
#include
+#include <libavutil/audio_fifo.h>
+#include <limits.h>
#include
#include
#include
@@ -35,13 +37,16 @@ struct SF_Encoder {
AVStream* stream;
AVPacket* packet;
AVFrame* frame;
+ AVFrame* temp_frame;
SwrContext* swr_ctx;
AVIOContext* avio_ctx;
uint8_t* io_buffer;
+ AVAudioFifo* fifo;
sf_write_callback onWrite;
void* pUserData;
int64_t next_pts;
SFSampleFormat input_format;
+ uint32_t input_sample_rate;
};
// Helper Functions
@@ -206,6 +211,23 @@ SF_FFMPEG_API SF_Result sf_decoder_read_pcm_frames(SF_Decoder* decoder, void* pF
int draining = 0;
while (frames_read < frameCount) {
+ // Check if the resampler has data buffered from previous calls.
+ if (swr_get_out_samples(decoder->swr_ctx, 0) > 0) {
+ // Call swr_convert with NULL input to flush/read buffered data
+ int out_samples = swr_convert(decoder->swr_ctx,
+ out_ptr,
+ (int)(frameCount - frames_read),
+ NULL, 0);
+
+ if (out_samples > 0) {
+ out_ptr[0] += out_samples * decoder->target_channels * decoder->target_bytes_per_sample;
+ frames_read += out_samples;
+
+ // If we filled the user buffer, we are done for this call.
+ if (frames_read >= frameCount) break;
+ }
+ }
+
// Try to receive a decoded frame
int ret = avcodec_receive_frame(decoder->codec_ctx, decoder->frame);
@@ -237,7 +259,7 @@ SF_FFMPEG_API SF_Result sf_decoder_read_pcm_frames(SF_Decoder* decoder, void* pF
frames_read += flushed_samples;
}
} while (flushed_samples > 0 && frames_read < frameCount);
-
+
// End of stream is not an error, break loop and return success.
break;
}
@@ -364,6 +386,7 @@ static int encode_and_write(SF_Encoder* encoder, AVFrame* frame) {
SF_FFMPEG_API SF_Result sf_encoder_init(SF_Encoder* encoder, const char* format_name, sf_write_callback onWrite, void* pUserData, SFSampleFormat sampleFormat, uint32_t channels, uint32_t sampleRate) {
if (!encoder) return SF_RESULT_ERROR_INVALID_ARGS;
+ if (channels == 0 || sampleRate == 0) return SF_RESULT_ERROR_INVALID_ARGS;
// Set FFmpeg to only log errors
av_log_set_level(AV_LOG_ERROR);
@@ -372,6 +395,7 @@ SF_FFMPEG_API SF_Result sf_encoder_init(SF_Encoder* encoder, const char* format_
encoder->pUserData = pUserData;
encoder->next_pts = 0;
encoder->input_format = sampleFormat;
+ encoder->input_sample_rate = sampleRate;
const AVOutputFormat* out_fmt = av_guess_format(format_name, NULL, NULL);
if (!out_fmt) return SF_RESULT_ENCODER_ERROR_FORMAT_NOT_FOUND;
@@ -387,9 +411,18 @@ SF_FFMPEG_API SF_Result sf_encoder_init(SF_Encoder* encoder, const char* format_
encoder->codec_ctx = avcodec_alloc_context3(codec);
if (!encoder->codec_ctx) return SF_RESULT_ENCODER_ERROR_CODEC_CONTEXT_ALLOC;
+ // Enable experimental codecs (like native Opus) if necessary
+ encoder->codec_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
+
AVChannelLayout ch_layout;
av_channel_layout_default(&ch_layout, channels);
- av_channel_layout_copy(&encoder->codec_ctx->ch_layout, &ch_layout);
+ // Copy the layout to the context.
+ if (av_channel_layout_copy(&encoder->codec_ctx->ch_layout, &ch_layout) < 0) {
+ av_channel_layout_uninit(&ch_layout);
+ return SF_RESULT_ENCODER_ERROR_CODEC_CONTEXT_ALLOC;
+ }
+ av_channel_layout_uninit(&ch_layout);
+
encoder->codec_ctx->sample_rate = sampleRate;
encoder->codec_ctx->time_base = (AVRational){1, sampleRate};
@@ -429,39 +462,117 @@ SF_FFMPEG_API SF_Result sf_encoder_init(SF_Encoder* encoder, const char* format_
encoder->packet = av_packet_alloc();
encoder->frame = av_frame_alloc();
- if (!encoder->packet || !encoder->frame) return SF_RESULT_ENCODER_ERROR_PACKET_FRAME_ALLOC;
+ encoder->temp_frame = av_frame_alloc(); // Allocate reusable temp frame
+
+ encoder->fifo = av_audio_fifo_alloc(encoder->codec_ctx->sample_fmt, encoder->codec_ctx->ch_layout.nb_channels, 1024);
+
+ if (!encoder->packet || !encoder->frame || !encoder->temp_frame || !encoder->fifo) return SF_RESULT_ENCODER_ERROR_PACKET_FRAME_ALLOC;
return SF_RESULT_SUCCESS;
}
SF_FFMPEG_API SF_Result sf_encoder_write_pcm_frames(SF_Encoder* encoder, void* pFramesIn, int64_t frameCount, int64_t* out_frames_written) {
- if (!encoder || !pFramesIn || !out_frames_written || frameCount <= 0) return SF_RESULT_ERROR_INVALID_ARGS;
+ if (!encoder || !pFramesIn || !out_frames_written) return SF_RESULT_ERROR_INVALID_ARGS;
+ if (frameCount <= 0) {
+ *out_frames_written = 0;
+ return SF_RESULT_SUCCESS;
+ }
*out_frames_written = 0;
- AVFrame* resampled_frame = av_frame_alloc();
- resampled_frame->format = encoder->codec_ctx->sample_fmt;
- av_channel_layout_copy(&resampled_frame->ch_layout, &encoder->codec_ctx->ch_layout);
- resampled_frame->sample_rate = encoder->codec_ctx->sample_rate;
- resampled_frame->nb_samples = (int)frameCount;
- if (av_frame_get_buffer(resampled_frame, 0) < 0) {
- av_frame_free(&resampled_frame);
- return SF_RESULT_ERROR_ALLOCATION_FAILED;
+
+ // 1. Resample Input Data
+ // Use input_sample_rate to calculate correct delay and output count logic
+ int64_t delay = swr_get_delay(encoder->swr_ctx, encoder->input_sample_rate);
+ int64_t max_out_samples_64 = av_rescale_rnd(delay + frameCount,
+ encoder->codec_ctx->sample_rate,
+ encoder->input_sample_rate,
+ AV_ROUND_UP);
+
+ if (max_out_samples_64 > INT_MAX || max_out_samples_64 <= 0) {
+ return SF_RESULT_ERROR_INVALID_ARGS; // Too many samples
+ }
+ int max_out_samples = (int)max_out_samples_64;
+
+ // Reset and prepare temp_frame
+ av_frame_unref(encoder->temp_frame);
+
+ encoder->temp_frame->format = encoder->codec_ctx->sample_fmt;
+ // Copy layout from codec context (deep copy)
+ if (av_channel_layout_copy(&encoder->temp_frame->ch_layout, &encoder->codec_ctx->ch_layout) < 0) {
+ return SF_RESULT_ENCODER_ERROR_ENCODING_FAILED;
+ }
+
+ encoder->temp_frame->sample_rate = encoder->codec_ctx->sample_rate;
+ encoder->temp_frame->nb_samples = max_out_samples;
+
+ int ret = av_frame_get_buffer(encoder->temp_frame, 0);
+ if (ret < 0) {
+ if (ret == AVERROR(ENOMEM)) return SF_RESULT_ERROR_ALLOCATION_FAILED;
+ return SF_RESULT_ENCODER_ERROR_RESAMPLER_INIT_FAILED;
}
const uint8_t* pIn[] = { (const uint8_t*)pFramesIn };
- swr_convert(encoder->swr_ctx, resampled_frame->data, resampled_frame->nb_samples, pIn, (int)frameCount);
+ int converted_samples = swr_convert(encoder->swr_ctx, encoder->temp_frame->data, encoder->temp_frame->nb_samples, pIn, (int)frameCount);
+
+ if (converted_samples < 0) {
+ return SF_RESULT_ENCODER_ERROR_RESAMPLER_INIT_FAILED;
+ }
- resampled_frame->pts = encoder->next_pts;
- encoder->next_pts += resampled_frame->nb_samples;
+ // 2. Add resampled data to FIFO
+ if (av_audio_fifo_realloc(encoder->fifo, av_audio_fifo_size(encoder->fifo) + converted_samples) < 0) {
+ return SF_RESULT_ERROR_ALLOCATION_FAILED;
+ }
+
+ if (av_audio_fifo_write(encoder->fifo, (void**)encoder->temp_frame->data, converted_samples) < converted_samples) {
+ return SF_RESULT_ENCODER_ERROR_ENCODING_FAILED;
+ }
- int ret = encode_and_write(encoder, resampled_frame);
- av_frame_free(&resampled_frame);
+ // We can unref temp_frame here to free the large buffer used for resampling
+ av_frame_unref(encoder->temp_frame);
- if (ret < 0) {
- if (ret == AVERROR(EIO)) {
- return SF_RESULT_ENCODER_ERROR_WRITE_FAILED;
+ // 3. Encode data from FIFO in fixed-size chunks
+ int frame_size = encoder->codec_ctx->frame_size;
+
+ // If frame_size is 0 (e.g. PCM), the encoder accepts variable sizes, so we process everything in FIFO.
+ // If frame_size is > 0 (e.g. MP3, AAC), we must feed exactly frame_size samples.
+
+ while (av_audio_fifo_size(encoder->fifo) >= frame_size || (frame_size == 0 && av_audio_fifo_size(encoder->fifo) > 0)) {
+ // Determine how many samples to read
+ int to_read = (frame_size > 0) ? frame_size : av_audio_fifo_size(encoder->fifo);
+ if (to_read <= 0) break; // Safety check
+
+ // Prepare frame for encoder
+ av_frame_unref(encoder->frame);
+
+ encoder->frame->format = encoder->codec_ctx->sample_fmt;
+ if (av_channel_layout_copy(&encoder->frame->ch_layout, &encoder->codec_ctx->ch_layout) < 0) {
+ return SF_RESULT_ENCODER_ERROR_ENCODING_FAILED;
+ }
+ encoder->frame->sample_rate = encoder->codec_ctx->sample_rate;
+ encoder->frame->nb_samples = to_read;
+
+ ret = av_frame_get_buffer(encoder->frame, 0);
+ if (ret < 0) {
+ if (ret == AVERROR(ENOMEM)) return SF_RESULT_ERROR_ALLOCATION_FAILED;
+ return SF_RESULT_ENCODER_ERROR_PACKET_FRAME_ALLOC;
+ }
+
+ // Read from FIFO
+ if (av_audio_fifo_read(encoder->fifo, (void**)encoder->frame->data, to_read) < to_read) {
+ return SF_RESULT_ENCODER_ERROR_ENCODING_FAILED;
+ }
+
+ // Set PTS
+ encoder->frame->pts = encoder->next_pts;
+ encoder->next_pts += to_read;
+
+ ret = encode_and_write(encoder, encoder->frame);
+ if (ret < 0) {
+ if (ret == AVERROR(EIO)) {
+ return SF_RESULT_ENCODER_ERROR_WRITE_FAILED;
+ }
+ return SF_RESULT_ENCODER_ERROR_ENCODING_FAILED;
}
- return SF_RESULT_ENCODER_ERROR_ENCODING_FAILED;
}
*out_frames_written = frameCount;
@@ -471,12 +582,39 @@ SF_FFMPEG_API SF_Result sf_encoder_write_pcm_frames(SF_Encoder* encoder, void* p
SF_FFMPEG_API void sf_encoder_free(SF_Encoder* encoder) {
if (!encoder) return;
- // Flush the encoder by sending a NULL frame
- encode_and_write(encoder, NULL);
+ // Flush any remaining samples in FIFO
+ if (encoder->fifo) {
+ int remaining_samples = av_audio_fifo_size(encoder->fifo);
+ if (remaining_samples > 0) {
+ av_frame_unref(encoder->frame);
+ encoder->frame->format = encoder->codec_ctx->sample_fmt;
+ av_channel_layout_copy(&encoder->frame->ch_layout, &encoder->codec_ctx->ch_layout);
+ encoder->frame->sample_rate = encoder->codec_ctx->sample_rate;
+ encoder->frame->nb_samples = remaining_samples;
+
+ if (av_frame_get_buffer(encoder->frame, 0) >= 0) {
+ if (av_audio_fifo_read(encoder->fifo, (void**)encoder->frame->data, remaining_samples) == remaining_samples) {
+ encoder->frame->pts = encoder->next_pts;
+ encoder->next_pts += remaining_samples;
+ encode_and_write(encoder, encoder->frame);
+ }
+ }
+ }
+ av_audio_fifo_free(encoder->fifo);
+
+ // Flush the encoder by sending a NULL frame
+ encode_and_write(encoder, NULL);
- av_write_trailer(encoder->format_ctx);
+ // Write the trailer (only valid if header was successfully written, implied by fifo existence)
+ av_write_trailer(encoder->format_ctx);
+ }
+
+ // Free resources.
+
+ if (encoder->codec_ctx) {
+ avcodec_free_context(&encoder->codec_ctx);
+ }
- avcodec_free_context(&encoder->codec_ctx);
if (encoder->format_ctx) {
if (encoder->format_ctx->pb) {
// Flush any buffered data before freeing.
@@ -487,9 +625,10 @@ SF_FFMPEG_API void sf_encoder_free(SF_Encoder* encoder) {
avformat_free_context(encoder->format_ctx);
}
- av_packet_free(&encoder->packet);
- av_frame_free(&encoder->frame);
- swr_free(&encoder->swr_ctx);
+ if (encoder->packet) av_packet_free(&encoder->packet);
+ if (encoder->frame) av_frame_free(&encoder->frame);
+ if (encoder->temp_frame) av_frame_free(&encoder->temp_frame);
+ if (encoder->swr_ctx) swr_free(&encoder->swr_ctx);
free(encoder);
}
diff --git a/Native/ffmpeg-codec/soundflow-ffmpeg.h b/Native/ffmpeg-codec/soundflow-ffmpeg.h
index 1aedc3e..734309d 100644
--- a/Native/ffmpeg-codec/soundflow-ffmpeg.h
+++ b/Native/ffmpeg-codec/soundflow-ffmpeg.h
@@ -1,5 +1,5 @@
-#ifndef SOUNDFLOW-FFMPEG_H
-#define SOUNDFLOW-FFMPEG_H
+#ifndef SOUNDFLOW_FFMPEG_H
+#define SOUNDFLOW_FFMPEG_H
#include
#include
diff --git a/README.md b/README.md
index c6c5b01..06e33cc 100644
--- a/README.md
+++ b/README.md
@@ -1,18 +1,23 @@
-
+
+> ⚠️ **Project Status:** The maintainer is on hiatus from Jan 2026 to Feb 2027. Support and updates will be limited.
+>
+> [**Read the full announcement for details.**](https://github.com/LSXPrime/SoundFlow/discussions/102)
+
+[](https://github.com/LSXPrime/SoundFlow)
# SoundFlow
-**A Powerful and Extensible .NET Audio Engine for Enterprise Applications**
+**The Complete .NET Audio Framework: From High-Fidelity Synthesis to Secure Distribution**
-[](https://github.com/LSXPrime/SoundFlow/actions/workflows/build.yml) [](https://opensource.org/licenses/MIT) [](https://www.nuget.org/packages/SoundFlow) [](https://dotnet.microsoft.com/download/dotnet/8.0)
+[](https://github.com/LSXPrime/SoundFlow/actions/workflows/release.yml) [](https://opensource.org/licenses/MIT) [](https://www.nuget.org/packages/SoundFlow) [](https://dotnet.microsoft.com/download/dotnet/8.0)
[](https://thebsd.github.io/StandWithPalestine)
This project stands in solidarity with the people of Palestine and condemns the ongoing violence and ethnic cleansing by Israel. We believe developers have a responsibility to be aware of such injustices. Read our full statement on the catastrophic situation in Palestine and the surrounding region.
@@ -20,40 +25,57 @@
SoundFlow is a robust and versatile .NET audio engine designed for seamless cross-platform audio processing. It provides a comprehensive set of features for audio playback, recording, processing, analysis, and visualization, all within a well-structured and extensible framework. SoundFlow empowers developers to build sophisticated audio applications, from real-time communication systems to advanced non-linear audio editors.
-**Key Features:**
+## Key Features
+SoundFlow provides a comprehensive suite of tools organized into a powerful, extensible architecture.
+
+### Core Architecture & Design
* **Cross-Platform Compatibility:** Runs seamlessly on Windows, macOS, Linux, Android, iOS, and FreeBSD, ensuring broad deployment options.
+* **High Performance:** Optimized for real-time audio processing with SIMD support and efficient memory management.
+* **Modular Component Architecture:** Build custom audio pipelines by connecting sources, modifiers, mixers, and analyzers.
+* **Extensibility:** Easily add custom audio components, effects, and visualizers to tailor the engine to your specific needs.
+* **Plug & Play Integrations:** Extend SoundFlow's capabilities with official integration packages, such as the WebRTC Audio Processing Module.
+* **Backend Agnostic:** Supports the `MiniAudio` backend out of the box, with the ability to add others.
+
+### Advanced Audio I/O & Device Management
* **Multi-Device Management:** Initialize and manage multiple independent audio playback and capture devices simultaneously, each with its own audio graph.
* **Advanced Device Control:** Fine-tune latency, sharing modes, and platform-specific settings (WASAPI, CoreAudio, ALSA, etc.) for professional-grade control.
* **On-the-fly Device Switching:** Seamlessly switch between audio devices during runtime without interrupting the audio graph.
-* **Modular Component Architecture:** Build custom audio pipelines by connecting sources, modifiers, mixers, and analyzers.
-* **Plug & Play Integrations:** Extend SoundFlow's capabilities with official integration packages, such as the WebRTC Audio Processing Module for advanced noise suppression, echo cancellation, and automatic gain control.
-* **Extensibility:** Easily add custom audio components, effects, and visualizers to tailor the engine to your specific needs.
-* **Pluggable Codec System:** Extend format support dynamically via `ICodecFactory`. Includes built-in support for WAV, MP3, and FLAC (via MiniAudio), with extensive format support available via extensions.
-* **Robust Metadata Handling:** Read and write metadata tags (ID3v1, ID3v2, Vorbis Comments, MP4 Atoms) and embedded Cue Sheets for a wide range of formats (MP3, FLAC, OGG, M4A, WAV, AIFF).
-* **High Performance:** Optimized for real-time audio processing with SIMD support and efficient memory management.
+
+### Core Audio Processing & Playback
* **Playback:** Play audio from various sources, including files, streams, and in-memory assets.
* **Recording:** Capture audio input and save it to different encoding formats.
* **Mixing:** Combine multiple audio streams with precise control over volume and panning.
* **Effects:** Apply a wide range of audio effects, including reverb, chorus, delay, equalization, and more.
+
+### Analysis, Formats & Streaming
+* **Pluggable Codec System:** Extend format support dynamically via `ICodecFactory`. Includes built-in support for WAV, MP3, and FLAC (via MiniAudio), with extensive format support available via extensions.
+* **Robust Metadata Handling:** Read and write metadata tags (ID3v1, ID3v2, Vorbis Comments, MP4 Atoms) and embedded Cue Sheets for a wide range of formats (MP3, FLAC, OGG, M4A, WAV, AIFF).
* **Visualization & Analysis:** Create engaging visual representations with FFT-based spectrum analysis, voice activity detection, and level metering.
* **Surround Sound:** Supports advanced surround sound configurations with customizable speaker positions, delays, and panning methods.
* **HLS Streaming Support:** Integrate internet radio and online audio via HTTP Live Streaming.
-* **Backend Agnostic:** Supports the `MiniAudio` backend out of the box, with the ability to add others.
-* **Synthesis Engine:**
- * **Polyphonic Synthesizer:** A robust synthesis engine supporting unison, filtering, and modulation envelopes.
- * **SoundFont Support:** Native loading and playback of SoundFont 2 (.sf2) banks.
- * **MPE Support:** Full support for MIDI Polyphonic Expression for per-note control of pitch, timbre, and pressure.
-* **MIDI Ecosystem:**
- * **Cross-Platform I/O:** Send and receive MIDI messages from hardware devices via the PortMidi backend.
- * **Routing & Effects:** Graph-based MIDI routing with a suite of modifiers including Arpeggiators, Harmonizers, Randomizers, and Velocity curves.
- * **Parameter Mapping:** Real-time MIDI mapping system allows controlling any engine parameter (Volume, Filter Cutoff, etc.) via external hardware controllers.
-* **Non-Destructive Audio & MIDI Editing:**
- * **Compositions & Tracks:** Organize projects into multi-track compositions supporting both Audio and MIDI tracks.
- * **Hybrid Timeline:** Mix audio clips and MIDI segments on the same timeline.
- * **Sequencing:** Sample-accurate MIDI sequencing with quantization, swing, and tempo map support.
- * **Project Persistence:** Save/Load full projects including audio assets, MIDI sequences, tempo maps, and routing configurations.
+### Synthesis Engine
+* **Polyphonic Synthesizer:** A robust synthesis engine supporting unison, filtering, and modulation envelopes.
+* **SoundFont Support:** Native loading and playback of SoundFont 2 (.sf2) banks.
+* **MPE Support:** Full support for MIDI Polyphonic Expression for per-note control of pitch, timbre, and pressure.
+
+### MIDI Ecosystem
+* **Cross-Platform I/O:** Send and receive MIDI messages from hardware devices via the PortMidi backend.
+* **Routing & Effects:** Graph-based MIDI routing with a suite of modifiers including Arpeggiators, Harmonizers, Randomizers, and Velocity curves.
+* **Parameter Mapping:** Real-time MIDI mapping system allows controlling any engine parameter (Volume, Filter Cutoff, etc.) via external hardware controllers.
+
+### Non-Destructive Audio & MIDI Editing
+* **Compositions & Tracks:** Organize projects into multi-track compositions supporting both Audio and MIDI tracks.
+* **Hybrid Timeline:** Mix audio clips and MIDI segments on the same timeline.
+* **Sequencing:** Sample-accurate MIDI sequencing with quantization, swing, and tempo map support.
+* **Project Persistence:** Save/Load full projects including audio assets, MIDI sequences, tempo maps, and routing configurations, with optional digital signing for integrity.
+
+### Comprehensive Security Suite
+* **Audio Encryption:** High-performance, seekable stream encryption using AES-256-CTR, packaged in a secure container format.
+* **Digital Signatures:** Ensure file integrity and authenticity for projects and audio containers using ECDSA digital signatures.
+* **Audio Watermarking:** Embed robust, inaudible ownership data (DSSS) or fragile integrity verification markers (LSB) directly into audio signals.
+* **Acoustic Fingerprinting:** Identify audio content by generating and matching robust acoustic fingerprints against a database.
## Getting Started
To begin using SoundFlow, the easiest way is to install the NuGet package:
@@ -71,26 +93,32 @@ You can also find a wide variety of practical applications, complex audio graphs
SoundFlow's architecture supports adding specialized audio processing capabilities via dedicated NuGet packages. These extensions integrate external libraries, making their features available within the SoundFlow ecosystem.
### SoundFlow.Codecs.FFMpeg
-
This package integrates the massive **FFmpeg** library into SoundFlow. While the core engine handles common formats, this extension unlocks decoding and encoding for virtually any audio format in existence.
-* **Decoders/Encoders:** Adds support for MP3, AAC, OGG Vorbis, Opus, ALAC, AC3, PCM variations, and many more.
+* **Decoders/Encoders:** Adds support for MP3 (encoder by LAME), AAC, OGG Vorbis, Opus, ALAC, AC3, PCM variations, and many more.
* **Container Support:** Handles complex containers like M4A, MKA, and others.
* **Automatic Registration:** simply registering the factory enables the engine to auto-detect and play these formats transparently.
-### SoundFlow.Midi.PortMidi
+To install this extension:
+```bash
+dotnet add package SoundFlow.Codecs.FFMpeg
+```
+### SoundFlow.Midi.PortMidi
This package provides the backend implementation for MIDI hardware I/O using **PortMidi**.
* **Hardware Access:** Enumerates and connects to physical MIDI keyboards, synthesizers, and controllers on Windows, macOS, and Linux.
* **Synchronization:** Provides high-precision clock synchronization, allowing SoundFlow to act as a MIDI Clock Master or Slave.
-### SoundFlow.Extensions.WebRtc.Apm
+To install this extension:
+```bash
+dotnet add package SoundFlow.Midi.PortMidi
+```
+### SoundFlow.Extensions.WebRtc.Apm
This package provides an integration with a native library based on the **WebRTC Audio Processing Module (APM)**. The WebRTC APM is a high-quality suite of algorithms commonly used in voice communication applications to improve audio quality.
Features included in this extension:
-
* **Acoustic Echo Cancellation (AEC):** Reduces echoes caused by playback audio being picked up by the microphone.
* **Noise Suppression (NS):** Reduces steady-state background noise.
* **Automatic Gain Control (AGC):** Automatically adjusts the audio signal level to a desired target.
@@ -99,6 +127,11 @@ Features included in this extension:
**Note:** The WebRTC APM native library has specific requirements, notably supporting only certain sample rates (8000, 16000, 32000, or 48000 Hz). Ensure your audio devices are initialized with one of these rates when using this extension.
+To install this extension:
+```bash
+dotnet add package SoundFlow.Extensions.WebRtc.Apm
+```
+
## API Reference
Comprehensive API documentation will be available on the **[SoundFlow Documentation](https://lsxprime.github.io/soundflow-docs/)**.
@@ -114,6 +147,7 @@ The **[Documentation](https://lsxprime.github.io/soundflow-docs/)** provides a w
* **Analysis:** Getting RMS level, analyzing frequency spectrum.
* **Visualization:** Creating level meters, waveform displays, and spectrum analyzers.
* **Composition:** Managing audio projects, including creating, editing, and saving multi-track compositions.
+* **Security:** Encrypting audio, signing files, and embedding robust ownership watermarks.
**(Note:** You can also find extensive example code in the `Samples` folder of the repository.)
@@ -129,83 +163,47 @@ We sincerely appreciate the foundational work provided by the following projects
* **[miniaudio](https://github.com/mackron/miniaudio)** - Provides a lightweight and efficient audio I/O backend.
* **[FFmpeg](https://ffmpeg.org/)** - The leading multimedia framework, powering our codec extension.
+* **[LAME Project](https://lame.sourceforge.io/)** - For the high-quality MP3 encoder used in the FFMpeg extension.
* **[PortMidi](https://github.com/PortMidi/portmidi)** - Enables cross-platform MIDI I/O.
* **[WebRTC Audio Processing Module (APM)](https://gitlab.freedesktop.org/pulseaudio/webrtc-audio-processing)** - Offers advanced audio processing (AEC, AGC, Noise Suppression).
## Support This Project
-SoundFlow is an open-source project driven by passion and community needs. Maintaining and developing a project of this scale, especially with thorough audio testing, requires significant time and resources.
-
-Currently, development and testing are primarily done using built-in computer speakers. **Your support will directly help improve the quality of SoundFlow by enabling the purchase of dedicated headphones and audio equipment for more accurate and comprehensive testing across different audio setups.**
-
-Beyond equipment, your contributions, no matter the size, help to:
-
-* **Dedicate more time to development:** Allowing for faster feature implementation, bug fixes, and improvements.
-* **Enhance project quality:** Enabling better testing, documentation, and overall project stability (including better audio testing with proper equipment!).
-* **Sustain long-term maintenance:** Ensuring SoundFlow remains actively maintained and relevant for the community.
-
-You can directly support SoundFlow and help me get essential headphones through:
+SoundFlow is an open-source project driven by passion and community needs. Maintaining and developing a project of this scale requires significant time and resources.
-* **AirTM:** For simple one-time donations with various payment options like Direct Bank Transfer (ACH), Debit / Credit Card via Moonpay, Stablecoins, and more than 500 banks and e-wallets.
+Your support is crucial for the continued development and maintenance of SoundFlow. Contributions help dedicate more time to the project, improve documentation, and acquire necessary hardware for robust testing. For instance, funds will directly help purchase dedicated audio equipment for more accurate testing, moving beyond basic built-in speakers to ensure high-quality output for everyone.
- [Donate using AirTM](https://airtm.me/lsxprime)
+If you find this project useful, please consider one of the following ways to support it:
-* **USDT (Tron/TRC20):** Supporting directly by sending to the following USDT wallet address.
+* **[❤️ Sponsor on Ko-fi](https://ko-fi.com/lsxprime)** - For simple one-time or recurring donations.
+* **[💸 Donate via PayPal](https://paypal.me/LSXPrime)** - For quick and easy one-time contributions.
+* **[🌐 Donate using AirTM](https://airtm.me/lsxprime)** - Offers various payment options like Bank Transfer, Debit/Credit Card, and more.
+* **[💎 USDT (Tron/TRC20)](https://github.com/LSXPrime/SoundFlow#)** - Send to the following wallet address: `TKZzeB71XacY3Av5rnnQVrz2kQqgzrkjFn`
+ * **Important:** Please ensure you are sending USDT via the **TRC20 (Tron)** network. Sending funds on any other network may result in their permanent loss.
- `TKZzeB71XacY3Av5rnnQVrz2kQqgzrkjFn`
-
- **Important:** Please ensure you are sending USDT via the **TRC20 (Tron)** network. Sending funds on any other network may result in their permanent loss.
-
-**By becoming a sponsor or making a donation, you directly contribute to the future of SoundFlow and help ensure it sounds great for everyone. Thank you for your generosity!**
+**Thank you for your generosity and for helping ensure SoundFlow sounds great for everyone!**
## License
SoundFlow is released under the [MIT License](LICENSE.md).
-## An Ethical Stance
-
-**While building powerful tools to help make human life better is commendable, we must also acknowledge the horrific injustices taking place in Palestine and across the region.** Israel’s actions since October 7th, 2023 have escalated into a brutal campaign of ethnic cleansing disguised as “defense.” The consequences are devastating, particularly in Gaza, but extend to Lebanon, Syria, and Iran.
-
-This is not a conflict between equal sides; it's a systematic assault by an occupying power on a stateless population struggling for basic human rights.
-
-**The situation in Gaza is catastrophic:**
-
-* **Massacres of Civilians:** Over 61,200 Palestinians are dead, including tens of thousands of women and children. Israel indiscriminately bombs densely populated areas, targeting civilian shelters as well as hospitals like the Al-Shifa Hospital, which provided crucial healthcare to over a million people in Gaza, obliterating their lifeline and leaving them with no access to essential medical care.
-* **Starvation as Warfare:** Israel's relentless siege blocks vital supplies of food, water, medicine, fuel, and even building materials for repairs, pushing the population towards starvation. Children are dying before aid can reach them. The UN has condemned this as a collective punishment that violates international humanitarian law.
-* **Forced Displacement & Land Confiscation:** Nearly 90% of Gaza’s 2.1 million inhabitants have been displaced multiple times, trapped in overcrowded camps with no end in sight to the siege and relentless bombings. This systematic displacement aims to erase Palestinian identity from the land they rightfully call home, forcing them onto crowded, contaminated, and dangerous territory while Israel continues to confiscate land for its own settlements.
-* **Destruction of Infrastructure:** Israel systematically targets essential infrastructure - hospitals, schools, power stations, water treatment plants – crippling Gaza's ability to function and leaving people without necessities like clean water and electricity.
-
-**Syria:**
+## Citing SoundFlow
-* **Israel's Brutal Attacks:** Israel has intensified its brutal attacks on Syrian territory, striking government buildings in Damascus with impunity, escalating tensions with the already fractured nation and claiming territory under a false pretense of defense for the Druze minority facing internal conflict. The world remains silent as they violate Syria's sovereignty.
-* **Tensions between Druze and Bedouin:** Druze factions in Southern Syria have engaged in intense violence against Bedouin tribes, involving killings, forced displacement, and humiliation. Israel explicitly supported these Druze interests by conducting airstrikes in Damascus on July 16, 2025. These strikes were claimed as a "warning" in defense of the Druze amidst their clashes, showcasing Israel's direct military backing in the conflict.
+If you use SoundFlow in your research or project, you can cite it using the following format:
-**Lebanon:** Israel’s violation of Lebanon’s airspace and frequent incursions have heightened fears of another devastating war. Targeted killings and bombings are displacing Lebanese civilians, further destabilizing a nation struggling to recover from past conflicts.
-
-**Iran:**
-
-* **Bombing of Civilians:** Israeli strikes on Iran that began on June 13, 2025, have resulted in significant civilian casualties. According to a US-based human rights group, at least 950 people have been killed, including 380 identified civilians, with over 3,450 others wounded. Iran's Health Ministry has reported that over 90% of the injuries occurred among civilians, including women and children.
-* **A "Crybaby" Reaction to Retaliation:** While inflicting heavy civilian casualties in Iran, Israel has shown a different face when faced with retaliation. After Iran launched missiles and drones in response, killing 29 people and wounding over 3,000 in Israel, Israeli Prime Minister Benjamin Netanyahu called the Iranian regime "weak" and appealed for international support. This reaction highlights a pattern of aggression without accepting the consequences when an opponent can strike back with force.
-* **Begging for US Intervention and Failure to Achieve Goals:** Israel's military campaign, dubbed "Operation Rising Lion," aimed to destroy Iran's nuclear program. However, some of Iran's nuclear facilities are buried deep underground, reportedly beyond the reach of Israeli weapons. This led to Israel seeking and receiving direct military intervention from the United States, which used "bunker buster" bombs on Iranian nuclear sites. Despite the joint effort, assessments indicate that while Iran's nuclear program was set back, it was not eliminated. Reports suggest that some sites are repairable and that Iran retains the materials and knowledge to rebuild, potentially being more motivated to develop nuclear weapons to deter future attacks.
-
-**The situation in Egypt is also not that good**
-
-* **Tensions between Egypt and Israel:** Tensions between Egypt and Israel have reached their highest level since the war began, primarily due to Israel's policies aimed at forcibly displacing Palestinians toward Egypt's borders. Egypt views this as a direct threat to its national security and rejects any plan for mass displacement into Sinai. Egypt has expressed strong objections to Israel's continued military control over the Philadelphi Corridor and its refusal to reopen border crossings.
-* **Pressure on Borders:** Israel pressures Egypt to accept the flow of Palestinian refugees fleeing the carnage in Gaza, threatening regional stability and exacerbating existing tensions within Sinai. This pressure plays on long-standing historical grievances between Palestinians and Egyptian authorities who see this influx as a destabilizing force under Israeli manipulation.
-* **Threatening Peace Treaties:** By flagrantly violating Egypt's territorial sovereignty through incursions and air strikes, Israel threatens the fragile peace agreement signed in 1979.
-
-**This is not simply a regional conflict; it’s a systematic pattern of violence driven by illegal occupation, apartheid policies, and blatant disregard for international law.**
-
-The international community remains largely complicit, failing to hold Israel accountable for its crimes against humanity and enabling the expansionism that fuels this ongoing catastrophe.
-
-**We urge you as developers to consider:**
-
-* **Boycotting all products and services that directly benefit the Israeli military machine.** Your money funds their brutal weapons and technology used in Gaza, Syria, Lebanon, and Iran – investing in businesses that profit from human suffering is ethically untenable.
-* **Using your platform and influence to raise awareness about this injustice.** Silence empowers aggressors. Speak out against Israeli policies and war crimes. Demand accountability for every Palestinian life lost under their relentless assault.
-* **Supporting Palestinian-led organizations working on the ground in Palestine, Syria, Lebanon, and Egypt:** They provide vital aid, resist oppression, document human rights abuses, and fight for justice.
-
-**Your skills are powerful tools.** Use them responsibly and ethically. The reality is brutal: a child's body shattered by bombs in Gaza, families torn apart in Lebanon while their homes are reduced to rubble, land stolen while its rightful owners cower under the threat of death.
-
-**Don’t be complicit.** History is not written by the victors alone – stand up for justice against this violent expansionism before it engulfs the entire region.
+### APA
+```
+Abdallah, A. (2026). SoundFlow: A high-performance, secure audio and MIDI engine for .NET (Version 1.4.0) [Computer software]. https://github.com/LSXPrime/SoundFlow
+```
-Learn more about the reality on the ground from reliable sources, not colonizer-backed websites.
\ No newline at end of file
+### BibTeX
+```
+@software{abdallah_soundflow_2026,
+ author = {Abdallah, Ahmed},
+ title = {{SoundFlow: A high-performance, secure audio and MIDI engine for .NET}},
+ url = {https://github.com/LSXPrime/SoundFlow},
+ version = {1.4.0},
+ year = {2026},
+ note = {Cross-platform audio processing, synthesis, and content protection framework}
+}
+```
\ No newline at end of file
diff --git a/SOUNDFLOW-THIRD-PARTY-NOTICES.txt b/SOUNDFLOW-THIRD-PARTY-NOTICES.txt
new file mode 100644
index 0000000..8614f82
--- /dev/null
+++ b/SOUNDFLOW-THIRD-PARTY-NOTICES.txt
@@ -0,0 +1,124 @@
+SoundFlow Third-Party Notices
+=============================
+
+This file contains the license notices for the SoundFlow library and the
+third-party native libraries it wraps or utilizes.
+
+--------------------------------------------------------------------------------
+1. SoundFlow (and sub-libraries)
+License: MIT
+Copyright (c) 2026 Ahmed Abdallah (LSXPrime)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--------------------------------------------------------------------------------
+2. MiniAudio
+License: MIT or Unlicense (Dual Licensed)
+Author: David Reid
+
+(Notice used under MIT terms)
+Copyright (c) David Reid
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+--------------------------------------------------------------------------------
+3. PortMidi
+License: MIT-style
+Copyright (c) 1999-2000 Ross Bencina and Phil Burk
+Copyright (c) 2001-2009 Roger B. Dannenberg
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Non-binding requests from the PortMusic community:
+- Any person wishing to distribute modifications to the Software is requested to
+ send the modifications to the original developer so that they can be
+ incorporated into the canonical version.
+- It is also requested that these non-binding requests be included along with
+ the license above.
+
+--------------------------------------------------------------------------------
+4. WebRTC Audio Processing Module (APM)
+License: BSD-3-Clause
+Copyright (c) 2011, The WebRTC project authors. All rights reserved.
+
+The SoundFlow WebRTC extension utilizes a standalone version of the WebRTC APM,
+maintained by the PulseAudio project.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of Google nor the names of its contributors may
+ be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+--------------------------------------------------------------------------------
+5. FFmpeg
+License: LGPL v2.1 or later
+Copyright (c) the FFmpeg developers
+
+SoundFlow.Codecs.FFMpeg utilizes FFmpeg libraries for audio decoding and encoding.
+The FFmpeg binaries used are licensed under the GNU Lesser General Public
+License (LGPL) version 2.1 or later.
+
+FFmpeg is a trademark of Fabrice Bellard, originator of the FFmpeg project.
+
+Compliance Disclosure:
+- The native wrapper (soundflow-ffmpeg) is built using static linking to
+ FFmpeg libraries with the following configuration:
+ --disable-gpl --disable-nonfree --enable-pic --disable-shared --enable-static
+- This configuration ensures the binary is LGPL-compliant and does not contain
+ GPL or non-free code.
+- As per LGPL requirements for static linking, the source code for the
+ SoundFlow FFmpeg wrapper (soundflow-ffmpeg.c/.h) is provided in this
+ distribution/repository under the MIT license.
+- This allows users to modify and re-link the native wrapper against different
+ versions of the FFmpeg libraries.
+- The source code for FFmpeg can be found at: https://ffmpeg.org/download.html
+--------------------------------------------------------------------------------
\ No newline at end of file
diff --git a/STATEMENT.md b/STATEMENT.md
new file mode 100644
index 0000000..c12a85b
--- /dev/null
+++ b/STATEMENT.md
@@ -0,0 +1,47 @@
+# Our Ethical Stance
+
+**While building powerful tools to help make human life better is commendable, we must also acknowledge the horrific injustices taking place in Palestine and across the region.** Israel’s actions since October 7th, 2023 have escalated into a brutal campaign of ethnic cleansing disguised as “defense.” The consequences are devastating, particularly in Gaza, but extend to Lebanon, Syria, and Iran.
+
+This is not a conflict between equal sides; it's a systematic assault by an occupying power on a stateless population struggling for basic human rights.
+
+**The situation in Gaza is catastrophic:**
+
+* **Massacres of Civilians:** Over 61,200 Palestinians are dead, including tens of thousands of women and children. Israel indiscriminately bombs densely populated areas, targeting civilian shelters as well as hospitals like the Al-Shifa Hospital, which provided crucial healthcare to over a million people in Gaza, obliterating their lifeline and leaving them with no access to essential medical care.
+* **Starvation as Warfare:** Israel's relentless siege blocks vital supplies of food, water, medicine, fuel, and even building materials for repairs, pushing the population towards starvation. Children are dying before aid can reach them. The UN has condemned this as a collective punishment that violates international humanitarian law.
+* **Forced Displacement & Land Confiscation:** Nearly 90% of Gaza’s 2.1 million inhabitants have been displaced multiple times, trapped in overcrowded camps with no end in sight to the siege and relentless bombings. This systematic displacement aims to erase Palestinian identity from the land they rightfully call home, forcing them onto crowded, contaminated, and dangerous territory while Israel continues to confiscate land for its own settlements.
+* **Destruction of Infrastructure:** Israel systematically targets essential infrastructure - hospitals, schools, power stations, water treatment plants – crippling Gaza's ability to function and leaving people without necessities like clean water and electricity.
+
+**Syria:**
+
+* **Israel's Brutal Attacks:** Israel has intensified its brutal attacks on Syrian territory, striking government buildings in Damascus with impunity, escalating tensions with the already fractured nation and claiming territory under a false pretense of defense for the Druze minority facing internal conflict. The world remains silent as they violate Syria's sovereignty.
+* **Tensions between Druze and Bedouin:** Druze factions in Southern Syria have engaged in intense violence against Bedouin tribes, involving killings, forced displacement, and humiliation. Israel explicitly supported these Druze interests by conducting airstrikes in Damascus on July 16, 2025. These strikes were claimed as a "warning" in defense of the Druze amidst their clashes, showcasing Israel's direct military backing in the conflict.
+
+**Lebanon:** Israel’s violation of Lebanon’s airspace and frequent incursions have heightened fears of another devastating war. Targeted killings and bombings are displacing Lebanese civilians, further destabilizing a nation struggling to recover from past conflicts.
+
+**Iran:**
+
+* **Bombing of Civilians:** Israeli strikes on Iran that began on June 13, 2025, have resulted in significant civilian casualties. According to a US-based human rights group, at least 950 people have been killed, including 380 identified civilians, with over 3,450 others wounded. Iran's Health Ministry has reported that over 90% of the injuries occurred among civilians, including women and children.
+* **A "Crybaby" Reaction to Retaliation:** While inflicting heavy civilian casualties in Iran, Israel has shown a different face when faced with retaliation. After Iran launched missiles and drones in response, killing 29 people and wounding over 3,000 in Israel, Israeli Prime Minister Benjamin Netanyahu called the Iranian regime "weak" and appealed for international support. This reaction highlights a pattern of aggression without accepting the consequences when an opponent can strike back with force.
+* **Begging for US Intervention and Failure to Achieve Goals:** Israel's military campaign, dubbed "Operation Rising Lion," aimed to destroy Iran's nuclear program. However, some of Iran's nuclear facilities are buried deep underground, reportedly beyond the reach of Israeli weapons. This led to Israel seeking and receiving direct military intervention from the United States, which used "bunker buster" bombs on Iranian nuclear sites. Despite the joint effort, assessments indicate that while Iran's nuclear program was set back, it was not eliminated. Reports suggest that some sites are repairable and that Iran retains the materials and knowledge to rebuild, potentially being more motivated to develop nuclear weapons to deter future attacks.
+
+**Egypt:**
+
+* **Tensions between Egypt and Israel:** Tensions between Egypt and Israel have reached their highest level since the war began, primarily due to Israel's policies aimed at forcibly displacing Palestinians toward Egypt's borders. Egypt views this as a direct threat to its national security and rejects any plan for mass displacement into Sinai. Egypt has expressed strong objections to Israel's continued military control over the Philadelphi Corridor and its refusal to reopen border crossings.
+* **Pressure on Borders:** Israel pressures Egypt to accept the flow of Palestinian refugees fleeing the carnage in Gaza, threatening regional stability and exacerbating existing tensions within Sinai. This pressure plays on long-standing historical grievances between Palestinians and Egyptian authorities who see this influx as a destabilizing force under Israeli manipulation.
+* **Threatening Peace Treaties:** By flagrantly violating Egypt's territorial sovereignty through incursions and air strikes, Israel threatens the fragile peace agreement signed in 1979.
+
+**This is not simply a regional conflict; it’s a systematic pattern of violence driven by illegal occupation, apartheid policies, and blatant disregard for international law.**
+
+The international community remains largely complicit, failing to hold Israel accountable for its crimes against humanity and enabling the expansionism that fuels this ongoing catastrophe.
+
+**We urge you as developers to consider:**
+
+* **Boycotting all products and services that directly benefit the Israeli military machine.** Your money funds their brutal weapons and technology used in Gaza, Syria, Lebanon, and Iran – investing in businesses that profit from human suffering is ethically untenable.
+* **Using your platform and influence to raise awareness about this injustice.** Silence empowers aggressors. Speak out against Israeli policies and war crimes. Demand accountability for every Palestinian life lost under their relentless assault.
+* **Supporting Palestinian-led organizations working on the ground in Palestine, Syria, Lebanon, and Egypt:** They provide vital aid, resist oppression, document human rights abuses, and fight for justice.
+
+**Your skills are powerful tools.** Use them responsibly and ethically. The reality is brutal: a child's body shattered by bombs in Gaza, families torn apart in Lebanon while their homes are reduced to rubble, land stolen while its rightful owners cower under the threat of death.
+
+**Don’t be complicit.** History is not written by the victors alone – stand up for justice against this violent expansionism before it engulfs the entire region.
+
+Learn more about the reality on the ground from reliable sources, not colonizer-backed websites.
diff --git a/Samples/SoundFlow.Samples.EditingMixer/PersistenceExamples.cs b/Samples/SoundFlow.Samples.EditingMixer/PersistenceExamples.cs
index 5ade0f7..bff7348 100644
--- a/Samples/SoundFlow.Samples.EditingMixer/PersistenceExamples.cs
+++ b/Samples/SoundFlow.Samples.EditingMixer/PersistenceExamples.cs
@@ -7,6 +7,8 @@
using SoundFlow.Editing.Persistence;
using SoundFlow.Enums;
using SoundFlow.Providers;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Utils;
using SoundFlow.Structs;
namespace SoundFlow.Samples.EditingMixer;
@@ -24,7 +26,7 @@ public static class PersistenceExamples
public static void Run()
{
- Console.WriteLine("\nSoundFlow Editing - Persistence Examples");
+ Console.WriteLine("\nSoundFlow Editing — Persistence Examples");
Console.WriteLine("========================================");
Directory.CreateDirectory(ProjectSaveDirectory);
@@ -39,6 +41,8 @@ public static void Run()
Console.WriteLine(" 2. Create, Save, and Load a Project (Embed Small Media, No Consolidate)");
Console.WriteLine(" 3. Load Project with Missing Media and Relink");
Console.WriteLine(" 4. Demonstrate Dirty Flag");
+ Console.WriteLine(" 5. Create, Sign, and Verify a Secure Project");
+ Console.WriteLine(" 6. Attempt to Verify a Tampered Project");
Console.WriteLine(" 0. Back to Main Menu / Exit");
Console.Write("Enter your choice: ");
@@ -50,6 +54,8 @@ public static void Run()
case 2: RunPersistenceExample(SaveAndLoadSimpleProject_Embed, "Save/Load Simple (Embed)"); break;
case 3: RunPersistenceExample(LoadWithMissingMediaAndRelink, "Load Missing & Relink"); break;
case 4: RunPersistenceExample(DemonstrateDirtyFlag, "Demonstrate Dirty Flag"); break;
+ case 5: RunPersistenceExample(CreateSignAndVerifySecureProject, "Sign & Verify Secure Project"); break;
+ case 6: RunPersistenceExample(VerifyTamperedProject, "Verify Tampered Project"); break;
case 0: running = false; break;
default: Console.WriteLine("Invalid choice. Please try again."); break;
}
@@ -134,7 +140,7 @@ private static void PlayComposition(Composition composition, string message = "P
private static async Task SaveAndLoadSimpleProject_Consolidate()
{
- var projectName = "SimpleProject_Consolidated";
+ const string projectName = "SimpleProject_Consolidated";
var projectFilePath = Path.Combine(ProjectSaveDirectory, $"{projectName}.sfproj");
Console.WriteLine($"Creating composition: {projectName}");
@@ -393,4 +399,89 @@ private static Task DemonstrateDirtyFlag()
composition.Dispose();
return Task.CompletedTask;
}
+
+ private static async Task CreateSignAndVerifySecureProject()
+ {
+ var projectName = "SecureProject_Signed";
+ var projectFilePath = Path.Combine(ProjectSaveDirectory, $"{projectName}.sfproj");
+
+ Console.WriteLine("Generating ECDSA key pair for project signing...");
+ var keys = SignatureKeyGenerator.Generate();
+
+ Console.WriteLine($"Creating composition: {projectName}");
+ var composition = new Composition(AudioEngine, Format, projectName);
+ var track = new Track("Secure Track");
+ composition.Editor.AddTrack(track);
+
+ // Add dummy segment
+ var beepDuration = TimeSpan.FromSeconds(1.0);
+ var beepProvider = DemoAudio.GenerateShortBeep(beepDuration);
+ track.AddSegment(new AudioSegment(Format, beepProvider, TimeSpan.Zero, beepDuration, TimeSpan.Zero, "Beep", ownsDataProvider: true));
+
+ // Save with signing
+ Console.WriteLine($"Saving and signing project to: {projectFilePath}");
+ await CompositionProjectManager.SaveProjectAsync(AudioEngine, composition, projectFilePath, new ProjectSaveOptions
+ {
+ SigningConfiguration = new SignatureConfiguration { PrivateKeyPem = keys.PrivateKeyPem }
+ });
+ Console.WriteLine("Project saved and signed (.sig file created).");
+ composition.Dispose();
+
+ // Verify before loading
+ Console.WriteLine("Verifying project integrity...");
+ var verifyResult = await CompositionProjectManager.VerifyProjectAsync(projectFilePath, null, new SignatureConfiguration { PublicKeyPem = keys.PublicKeyPem });
+
+ if (verifyResult is { IsSuccess: true, Value: true })
+ {
+ Console.WriteLine("SUCCESS: Project verification passed. The file is authentic.");
+
+ // Proceed to load
+ var (loadedComposition, _) = await CompositionProjectManager.LoadProjectAsync(AudioEngine, Format, projectFilePath);
+ Console.WriteLine("Project loaded successfully.");
+ loadedComposition.Dispose();
+ }
+ else
+ {
+ Console.WriteLine($"FAILURE: Project verification failed! {verifyResult.Error?.Message}");
+ }
+ }
+
+ private static async Task VerifyTamperedProject()
+ {
+ var projectName = "TamperedProject";
+ var projectFilePath = Path.Combine(ProjectSaveDirectory, $"{projectName}.sfproj");
+
+ Console.WriteLine($"Generating ECDSA key pair...");
+ var keys = SignatureKeyGenerator.Generate();
+
+ Console.WriteLine($"Creating and saving valid project: {projectName}");
+ var composition = new Composition(AudioEngine, Format, projectName);
+ await CompositionProjectManager.SaveProjectAsync(AudioEngine, composition, projectFilePath, new ProjectSaveOptions
+ {
+ SigningConfiguration = new SignatureConfiguration { PrivateKeyPem = keys.PrivateKeyPem }
+ });
+ composition.Dispose();
+
+ // Simulating tampering
+ Console.WriteLine("Simulating tampering: Modifying project file content...");
+ var originalContent = await File.ReadAllTextAsync(projectFilePath);
+ // Replace something benign but structurally valid to ensure it's not just a JSON parse error, but a signature mismatch.
+ var tamperedContent = originalContent.Replace(projectName, "HackedProjectName");
+ await File.WriteAllTextAsync(projectFilePath, tamperedContent);
+
+ // Verify
+ Console.WriteLine("Attempting to verify tampered project...");
+ var verifyResult = await CompositionProjectManager.VerifyProjectAsync(projectFilePath, null, new SignatureConfiguration { PublicKeyPem = keys.PublicKeyPem });
+
+ if (verifyResult is { IsSuccess: true, Value: true })
+ {
+ Console.WriteLine("FAILURE: Tampered project was accepted as valid! (This is bad)");
+ }
+ else
+ {
+ Console.WriteLine("SUCCESS: Tampered project was correctly rejected.");
+ if (!verifyResult.Value) Console.WriteLine("Reason: Signature mismatch (bool check).");
+ if (verifyResult.IsFailure) Console.WriteLine($"Reason: {verifyResult.Error?.Message}");
+ }
+ }
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Recording/DeviceService.cs b/Samples/SoundFlow.Samples.Recording/DeviceService.cs
new file mode 100644
index 0000000..f5144a1
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Recording/DeviceService.cs
@@ -0,0 +1,54 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.Recording;
+
+/// <summary>
+/// Helper service to list and select audio capture devices.
+/// </summary>
+public static class DeviceService
+{
+    /// <summary>
+    /// Lists available capture devices and prompts the user to select one.
+    /// </summary>
+    /// <param name="engine">The audio engine instance.</param>
+    /// <returns>The selected device info, or null if cancelled/invalid.</returns>
+ public static DeviceInfo? SelectInputDevice(AudioEngine engine)
+ {
+ var devices = engine.CaptureDevices;
+
+ if (devices.Length == 0)
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine("No audio capture devices found.");
+ Console.ResetColor();
+ return null;
+ }
+
+ Console.WriteLine("\nAvailable Input Devices:");
+ for (var i = 0; i < devices.Length; i++)
+ {
+ var dev = devices[i];
+ var defaultMarker = dev.IsDefault ? " [Default]" : "";
+ Console.WriteLine($" {i + 1}. {dev.Name}{defaultMarker}");
+ }
+
+ while (true)
+ {
+ Console.Write("\nSelect device number (or '0' for System Default): ");
+ var input = Console.ReadLine()?.Trim();
+
+ if (input == "0")
+ {
+ return devices.FirstOrDefault(d => d.IsDefault);
+ }
+
+ if (int.TryParse(input, out var index) && index > 0 && index <= devices.Length)
+ {
+ return devices[index - 1];
+ }
+
+ Console.WriteLine("Invalid selection. Please try again.");
+ }
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Recording/Program.cs b/Samples/SoundFlow.Samples.Recording/Program.cs
new file mode 100644
index 0000000..f73d707
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Recording/Program.cs
@@ -0,0 +1,82 @@
+using SoundFlow.Backends.MiniAudio;
+using SoundFlow.Metadata.Models;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Utils;
+using SoundFlow.Utils;
+
+namespace SoundFlow.Samples.Recording;
+
+public static class Program
+{
+ public static async Task Main()
+ {
+ Console.WriteLine("=== SoundFlow Recorder Sample ===");
+ Console.WriteLine("Capabilities: Audio Capture, Metadata Embedding, Authenticated Save.\n");
+
+ Log.OnLog += entry => Console.WriteLine(entry);
+
+ using var engine = new MiniAudioEngine();
+
+ try
+ {
+ // 1. Device Selection
+ var device = DeviceService.SelectInputDevice(engine);
+ if (device == null) return;
+
+ // 2. Configuration: Digital Signing
+ SignatureConfiguration? signConfig = null;
+ string? publicKey = null; // Kept for verification step
+
+ Console.Write("\nEnable Authenticated Save (Digital Signature)? (y/N): ");
+ if (IsYes())
+ {
+ Console.WriteLine("Generating ephemeral ECDSA-P384 keys...");
+ var keys = SignatureKeyGenerator.Generate();
+ signConfig = new SignatureConfiguration { PrivateKeyPem = keys.PrivateKeyPem };
+ publicKey = keys.PublicKeyPem;
+ Console.WriteLine("Keys generated. Private key loaded into recorder.");
+ }
+
+ // 3. Configuration: Metadata
+ SoundTags? tags = null;
+ Console.Write("Add Metadata Tags? (y/N): ");
+ if (IsYes())
+ {
+ tags = new SoundTags();
+ Console.Write(" Title: ");
+ tags.Title = Console.ReadLine() ?? "Recorded Audio";
+ Console.Write(" Artist: ");
+ tags.Artist = Console.ReadLine() ?? Environment.UserName;
+ tags.Year = (uint)DateTime.Now.Year;
+ }
+
+ // 4. File Path
+ var fileName = $"Rec_{DateTime.Now:yyyyMMdd_HHmmss}.wav";
+ var outputPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, fileName);
+
+ // 5. Run Recording
+ await RecordingService.RecordAsync(engine, device.Value, outputPath, signConfig, tags);
+
+ // 6. Post-Recording Verification (if applicable)
+ if (publicKey != null)
+ {
+ await VerificationService.VerifyRecordingAsync(outputPath, publicKey);
+ }
+
+ Console.WriteLine($"\nFile location: {outputPath}");
+ }
+ catch (Exception ex)
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine($"\nCritical Error: {ex.Message}");
+ Console.ResetColor();
+ }
+
+ Console.WriteLine("\nSample finished.");
+ }
+
+ private static bool IsYes()
+ {
+ return Console.ReadLine()?.Trim().StartsWith("y", StringComparison.OrdinalIgnoreCase) ?? false;
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Recording/RecordingService.cs b/Samples/SoundFlow.Samples.Recording/RecordingService.cs
new file mode 100644
index 0000000..aa5e955
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Recording/RecordingService.cs
@@ -0,0 +1,92 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Components;
+using SoundFlow.Enums;
+using SoundFlow.Metadata.Models;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.Recording;
+
+/// <summary>
+/// Manages the recording session, applying configuration and handling state.
+/// </summary>
+public static class RecordingService
+{
+    /// <summary>
+    /// Starts a recording session.
+    /// </summary>
+    /// <param name="engine">The audio engine.</param>
+    /// <param name="deviceInfo">The selected input device.</param>
+    /// <param name="outputPath">The file path to record to.</param>
+    /// <param name="signingConfig">Optional configuration for digital signing.</param>
+    /// <param name="tags">Optional metadata tags.</param>
+ public static async Task RecordAsync(
+ AudioEngine engine,
+ DeviceInfo deviceInfo,
+ string outputPath,
+ SignatureConfiguration? signingConfig,
+ SoundTags? tags)
+ {
+        // 1. Initialize Capture Device (48 kHz, mono, 32-bit float)
+ var format = new AudioFormat
+ {
+ SampleRate = 48000,
+ Channels = 1,
+ Format = SampleFormat.F32,
+ Layout = ChannelLayout.Mono
+ };
+
+ using var captureDevice = engine.InitializeCaptureDevice(deviceInfo, format);
+
+ // 2. Setup Recorder
+ // Using "wav" format. Could be "mp3" or "flac" if codecs are registered.
+ using var recorder = new Recorder(captureDevice, outputPath, "wav");
+
+ // Apply signing configuration if provided
+ recorder.SigningConfiguration = signingConfig;
+
+ // 3. Start Recording
+ Console.WriteLine($"\nInitializing recording to '{outputPath}'...");
+ captureDevice.Start();
+
+ var result = recorder.StartRecording(tags);
+ if (!result.IsSuccess)
+ {
+ Console.WriteLine($"Failed to start recording: {result.Error}");
+ return;
+ }
+
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine(" >> RECORDING STARTED << ");
+ Console.ResetColor();
+ Console.WriteLine("Press any key to stop...");
+
+ // 4. Monitoring Loop
+ // In a real app, you might attach an AudioAnalyzer (e.g., LevelMeter) here
+ // to show VU meters in the console.
+ while (!Console.KeyAvailable)
+ {
+ await Task.Delay(100);
+ }
+ Console.ReadKey(true); // Consume the key press
+
+ // 5. Stop Recording
+ // This triggers:
+ // a) Encoder finalization
+ // b) Metadata writing (SoundTags)
+ // c) Digital Signing (if configured)
+ Console.WriteLine("\nStopping recording...");
+ result = await recorder.StopRecordingAsync();
+ if (!result.IsSuccess)
+ {
+ Console.WriteLine($"Failed to stop recording: {result.Error}");
+ return;
+ }
+
+ captureDevice.Stop();
+
+ Console.ForegroundColor = ConsoleColor.Green;
+ Console.WriteLine("Recording saved.");
+ Console.ResetColor();
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Recording/SoundFlow.Samples.Recording.csproj b/Samples/SoundFlow.Samples.Recording/SoundFlow.Samples.Recording.csproj
new file mode 100644
index 0000000..20abf27
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Recording/SoundFlow.Samples.Recording.csproj
@@ -0,0 +1,14 @@
+
+
+
+ Exe
+ net8.0
+ enable
+ enable
+
+
+
+
+
+
+
diff --git a/Samples/SoundFlow.Samples.Recording/VerificationService.cs b/Samples/SoundFlow.Samples.Recording/VerificationService.cs
new file mode 100644
index 0000000..e53f95a
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Recording/VerificationService.cs
@@ -0,0 +1,56 @@
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
+
+namespace SoundFlow.Samples.Recording;
+
+/// <summary>
+/// Helper to verify the authenticity of a signed recording.
+/// </summary>
+public static class VerificationService
+{
+ public static async Task VerifyRecordingAsync(string filePath, string publicKeyPem)
+ {
+ Console.WriteLine("\n--- Authenticated Save Verification ---");
+ var sigPath = filePath + ".sig";
+
+ if (!File.Exists(sigPath))
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine("Error: Signature file not found. Verification impossible.");
+ Console.ResetColor();
+ return;
+ }
+
+ Console.WriteLine("Signature file found.");
+ Console.WriteLine("Verifying integrity and authenticity...");
+
+ try
+ {
+ var signature = await File.ReadAllTextAsync(sigPath);
+ var config = new SignatureConfiguration { PublicKeyPem = publicKeyPem };
+
+ var result = await FileAuthenticator.VerifyFileAsync(filePath, signature, config);
+
+ if (result.IsSuccess && result.Value)
+ {
+ Console.ForegroundColor = ConsoleColor.Green;
+ Console.WriteLine("SUCCESS: The recording is authentic and has not been tampered with.");
+ }
+ else
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine("FAILURE: Verification failed! The file may have been modified.");
+ if (result.IsFailure) Console.WriteLine($"Details: {result.Error?.Message}");
+ }
+ }
+ catch (Exception ex)
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine($"Error during verification: {ex.Message}");
+ }
+ finally
+ {
+ Console.ResetColor();
+ }
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Authentication/FilePreparationService.cs b/Samples/SoundFlow.Samples.Security.Authentication/FilePreparationService.cs
new file mode 100644
index 0000000..bf7bc75
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Authentication/FilePreparationService.cs
@@ -0,0 +1,44 @@
+namespace SoundFlow.Samples.Security.Authentication;
+
+/// <summary>
+/// Handles the selection and creation of the target file for signing and verification.
+/// </summary>
+public static class FilePreparationService
+{
+    /// <summary>
+    /// Prompts the user to select an input file, or creates a dummy file if no input is given.
+    /// </summary>
+    /// <returns>The path to the selected or created file, or null if the given path does not exist.</returns>
+    public static async Task<string?> GetTargetFileAsync()
+ {
+ Console.WriteLine("\n--- Phase 2: File Selection ---");
+ Console.Write("Enter path to input audio file (leave empty to create a dummy file): ");
+ var inputPath = Console.ReadLine()?.Trim().Replace("\"", "");
+
+ if (string.IsNullOrEmpty(inputPath))
+ {
+ inputPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "dummy_audio.wav");
+ await CreateDummyFileAsync(inputPath);
+ Console.WriteLine($"Created dummy file at: {inputPath}");
+ }
+ else if (!File.Exists(inputPath))
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine($"Error: File not found at {inputPath}");
+ Console.ResetColor();
+ return null;
+ }
+
+ return inputPath;
+ }
+
+    /// <summary>
+    /// Creates a 1MB file with random data.
+    /// </summary>
+ private static async Task CreateDummyFileAsync(string path)
+ {
+ var buffer = new byte[1024 * 1024]; // 1MB
+ Random.Shared.NextBytes(buffer);
+ await File.WriteAllBytesAsync(path, buffer);
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Authentication/KeyManagementService.cs b/Samples/SoundFlow.Samples.Security.Authentication/KeyManagementService.cs
new file mode 100644
index 0000000..ff6ba76
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Authentication/KeyManagementService.cs
@@ -0,0 +1,53 @@
+using SoundFlow.Security.Utils;
+
+namespace SoundFlow.Samples.Security.Authentication;
+
+/// <summary>
+/// Manages the creation, storage, and retrieval of cryptographic keys for the sample.
+/// </summary>
+public static class KeyManagementService
+{
+    /// <summary>
+    /// Ensures that private and public key files exist, prompting the user to generate them if they don't,
+    /// or if the user requests regeneration.
+    /// </summary>
+    /// <param name="privateKeyPath">The path to the private key file.</param>
+    /// <param name="publicKeyPath">The path to the public key file.</param>
+ public static void EnsureKeysExist(string privateKeyPath, string publicKeyPath)
+ {
+ var keysDir = Path.GetDirectoryName(privateKeyPath);
+ Directory.CreateDirectory(keysDir!);
+
+ Console.WriteLine("--- Phase 1: Key Management ---");
+ if (File.Exists(privateKeyPath) && File.Exists(publicKeyPath))
+ {
+ Console.WriteLine($"Found existing keys in {keysDir}");
+ Console.Write("Do you want to generate NEW keys? This will invalidate existing signatures. (y/N): ");
+ var response = Console.ReadKey();
+ Console.WriteLine();
+ if (response.Key == ConsoleKey.Y)
+ {
+ GenerateAndSaveKeys(privateKeyPath, publicKeyPath);
+ }
+ }
+ else
+ {
+ Console.WriteLine("No keys found.");
+ GenerateAndSaveKeys(privateKeyPath, publicKeyPath);
+ }
+ }
+
+    /// <summary>
+    /// Generates a new ECDSA-P384 key pair and saves it to the specified files.
+    /// </summary>
+ private static void GenerateAndSaveKeys(string privPath, string pubPath)
+ {
+ Console.WriteLine("Generating new ECDSA-P384 Key Pair...");
+ var keys = SignatureKeyGenerator.Generate();
+
+ File.WriteAllText(privPath, keys.PrivateKeyPem);
+ File.WriteAllText(pubPath, keys.PublicKeyPem);
+
+ Console.WriteLine($"Keys saved to {Path.GetDirectoryName(privPath)}");
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Authentication/Program.cs b/Samples/SoundFlow.Samples.Security.Authentication/Program.cs
new file mode 100644
index 0000000..20367d1
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Authentication/Program.cs
@@ -0,0 +1,50 @@
+namespace SoundFlow.Samples.Security.Authentication;
+
+/// <summary>
+/// This sample program demonstrates ECDSA-P384 file signing and verification.
+/// </summary>
+public static class Program
+{
+ public static async Task Main()
+ {
+ Console.WriteLine("--- SoundFlow Digital Signature Sample ---");
+ Console.WriteLine("This tool demonstrates ECDSA-P384 file signing and verification.\n");
+
+ var keysDir = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "Keys");
+ var privateKeyPath = Path.Combine(keysDir, "private.pem");
+ var publicKeyPath = Path.Combine(keysDir, "public.pem");
+
+ try
+ {
+ // Phase 1: Key Management
+ KeyManagementService.EnsureKeysExist(privateKeyPath, publicKeyPath);
+ var privateKey = await File.ReadAllTextAsync(privateKeyPath);
+ var publicKey = await File.ReadAllTextAsync(publicKeyPath);
+
+ // Phase 2: File Selection
+ var inputPath = await FilePreparationService.GetTargetFileAsync();
+ if (inputPath is null) return;
+
+ // Phase 3: Signing
+ var signedSuccessfully = await SigningService.SignFileAndSaveSignatureAsync(inputPath, privateKey);
+ if (!signedSuccessfully) return;
+
+ // Phase 4: Verification (Positive)
+ Console.WriteLine("\n--- Phase 4: Verification (Clean State) ---");
+ await VerificationService.VerifyFileAsync(inputPath, publicKey);
+
+ // Phase 5: Verification (Tampered)
+ await TamperingService.TamperAndVerifyAsync(inputPath,
+ () => VerificationService.VerifyFileAsync(inputPath, publicKey));
+
+ }
+ catch (Exception ex)
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine($"\nA critical, unhandled error occurred: {ex.Message}");
+ Console.ResetColor();
+ }
+
+ Console.WriteLine("\n=== End of Sample ===");
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Authentication/SigningService.cs b/Samples/SoundFlow.Samples.Security.Authentication/SigningService.cs
new file mode 100644
index 0000000..346a2dd
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Authentication/SigningService.cs
@@ -0,0 +1,40 @@
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
+
+namespace SoundFlow.Samples.Security.Authentication;
+
+/// <summary>
+/// Encapsulates the logic for digitally signing a file.
+/// </summary>
+public static class SigningService
+{
+    /// <summary>
+    /// Signs a file using the provided private key and saves the signature to a corresponding .sig file.
+    /// </summary>
+    /// <param name="inputPath">The path to the file to sign.</param>
+    /// <param name="privateKey">The private key in PEM format.</param>
+    /// <returns>True if signing was successful, otherwise false.</returns>
+    public static async Task<bool> SignFileAndSaveSignatureAsync(string inputPath, string privateKey)
+ {
+ Console.WriteLine("\n--- Phase 3: Signing ---");
+ var sigPath = inputPath + ".sig";
+ var signConfig = new SignatureConfiguration { PrivateKeyPem = privateKey };
+
+ Console.Write($"Signing {Path.GetFileName(inputPath)}...");
+
+ var signResult = await FileAuthenticator.SignFileAsync(inputPath, signConfig);
+ if (signResult.IsSuccess)
+ {
+ await File.WriteAllTextAsync(sigPath, signResult.Value);
+ Console.WriteLine(" Done!");
+ Console.WriteLine($"Signature saved to: {sigPath}");
+ Console.WriteLine($"Signature (truncated): {signResult.Value?[..30]}...");
+ return true;
+ }
+
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine($" Failed: {signResult.Error?.Message}");
+ Console.ResetColor();
+ return false;
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Authentication/SoundFlow.Samples.Security.Authentication.csproj b/Samples/SoundFlow.Samples.Security.Authentication/SoundFlow.Samples.Security.Authentication.csproj
new file mode 100644
index 0000000..20abf27
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Authentication/SoundFlow.Samples.Security.Authentication.csproj
@@ -0,0 +1,14 @@
+
+
+
+ Exe
+ net8.0
+ enable
+ enable
+
+
+
+
+
+
+
diff --git a/Samples/SoundFlow.Samples.Security.Authentication/TamperingService.cs b/Samples/SoundFlow.Samples.Security.Authentication/TamperingService.cs
new file mode 100644
index 0000000..a48d9f1
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Authentication/TamperingService.cs
@@ -0,0 +1,45 @@
+namespace SoundFlow.Samples.Security.Authentication;
+
+/// <summary>
+/// Provides methods to simulate file tampering for verification testing.
+/// </summary>
+public static class TamperingService
+{
+    /// <summary>
+    /// Creates a backup of a file, tampers with the original, executes a verification action,
+    /// and then restores the original file, ensuring cleanup.
+    /// </summary>
+    /// <param name="filePath">The path to the file to tamper with.</param>
+    /// <param name="verificationAction">An async function to execute on the tampered file.</param>
+    public static async Task TamperAndVerifyAsync(string filePath, Func<Task> verificationAction)
+ {
+ Console.WriteLine("\n--- Phase 5: Verification (Tampered State) ---");
+ Console.WriteLine("Simulating tampering by modifying one byte in the file...");
+
+ var backupPath = filePath + ".bak";
+ File.Copy(filePath, backupPath, true);
+
+ try
+ {
+ // Tamper with the file by flipping the bits of the first byte.
+ await using (var fs = new FileStream(filePath, FileMode.Open, FileAccess.ReadWrite))
+ {
+ var firstByte = fs.ReadByte();
+ if (firstByte != -1)
+ {
+ fs.Seek(0, SeekOrigin.Begin);
+ fs.WriteByte((byte)(firstByte ^ 0xFF));
+ }
+ }
+
+ // Execute the provided verification logic on the now-tampered file.
+ await verificationAction();
+ }
+ finally
+ {
+ // Ensure the original file is always restored.
+ File.Move(backupPath, filePath, true);
+ Console.WriteLine("Restored original file.");
+ }
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Authentication/VerificationService.cs b/Samples/SoundFlow.Samples.Security.Authentication/VerificationService.cs
new file mode 100644
index 0000000..463e89e
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Authentication/VerificationService.cs
@@ -0,0 +1,54 @@
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
+
+namespace SoundFlow.Samples.Security.Authentication;
+
+/// <summary>
+/// Encapsulates the logic for verifying the digital signature of a file.
+/// </summary>
+public static class VerificationService
+{
+    /// <summary>
+    /// Verifies a file's signature against its public key and prints the result to the console.
+    /// </summary>
+    /// <param name="filePath">The path to the data file.</param>
+    /// <param name="publicKeyPem">The public key in PEM format.</param>
+ public static async Task VerifyFileAsync(string filePath, string publicKeyPem)
+ {
+ Console.Write($"Verifying {Path.GetFileName(filePath)}... ");
+
+ var sigPath = filePath + ".sig";
+ var verifyConfig = new SignatureConfiguration { PublicKeyPem = publicKeyPem };
+
+ if (!File.Exists(sigPath))
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine("Signature file missing!");
+ Console.ResetColor();
+ return;
+ }
+
+ var signature = await File.ReadAllTextAsync(sigPath);
+ var result = await FileAuthenticator.VerifyFileAsync(filePath, signature, verifyConfig);
+
+ if (result is { IsSuccess: true, Value: true })
+ {
+ Console.ForegroundColor = ConsoleColor.Green;
+ Console.WriteLine("VALID / AUTHENTIC");
+ }
+ else
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine("INVALID / TAMPERED");
+ if (result.IsSuccess)
+ {
+ Console.ForegroundColor = ConsoleColor.Green;
+ Console.WriteLine(" -> Signature verification failed, well, successfully.");
+ }
+
+ if (result.IsFailure) Console.WriteLine($" -> Error Details: {result.Error?.Message}");
+ }
+
+ Console.ResetColor();
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Encryption/DecryptionService.cs b/Samples/SoundFlow.Samples.Security.Encryption/DecryptionService.cs
new file mode 100644
index 0000000..5788c0f
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Encryption/DecryptionService.cs
@@ -0,0 +1,102 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Enums;
+using SoundFlow.Interfaces;
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.Security.Encryption;
+
+/// <summary>
+/// Encapsulates the logic for decrypting a SoundFlow container file.
+/// </summary>
+public static class DecryptionService
+{
+    /// <summary>
+    /// Decrypts a SoundFlow container file and saves the raw audio to a WAV file.
+    /// Optionally verifies the file authenticity if a public key is provided.
+    /// </summary>
+    /// <param name="engine">The audio engine for encoding.</param>
+    /// <param name="inputFile">The path to the encrypted container file.</param>
+    /// <param name="outputFile">The path to save the decrypted WAV file.</param>
+    /// <param name="secretKey">The 32-byte secret key used for encryption.</param>
+    /// <param name="publicKey">The PEM-encoded public key. If provided, the file signature (embedded or detached) will be verified.</param>
+ public static async Task DecryptAsync(AudioEngine engine, string inputFile, string outputFile, byte[] secretKey,
+ string? publicKey = null)
+ {
+ Console.WriteLine($"Loading encrypted container '{inputFile}'...");
+
+ Result providerResult;
+
+ // The provider returned by Decrypt/VerifyAndDecrypt takes ownership of this stream.
+ var fileStream = new FileStream(inputFile, FileMode.Open, FileAccess.Read);
+
+ if (!string.IsNullOrEmpty(publicKey))
+ {
+ Console.WriteLine("Verifying digital signature before decryption...");
+
+ // Check for detached signature first
+ string? signature = null;
+ var sigPath = inputFile + ".sig";
+ if (File.Exists(sigPath))
+ {
+ Console.WriteLine("Found detached signature file.");
+ signature = await File.ReadAllTextAsync(sigPath);
+ }
+ else
+ {
+ Console.WriteLine("No detached signature found. Checking for embedded signature...");
+ }
+
+ var signConfig = new SignatureConfiguration
+ {
+ PublicKeyPem = publicKey
+ };
+
+ // This reads the whole file to verify (handling embedded or detached), then rewinds for decryption.
+ providerResult = await AudioEncryptor.VerifyAndDecryptAsync(fileStream, secretKey, signConfig, signature);
+ }
+ else
+ {
+ // Standard decryption without verification
+ providerResult = AudioEncryptor.Decrypt(fileStream, secretKey);
+ }
+
+ if (providerResult.IsFailure)
+ {
+ // If provider creation fails, we must manually dispose the stream.
+ await fileStream.DisposeAsync();
+ throw new InvalidOperationException($"Failed to decrypt/verify file: {providerResult.Error?.Message}");
+ }
+
+ // The provider now owns the fileStream and will dispose it when the provider is disposed.
+ using var provider = providerResult.Value!;
+
+ Console.WriteLine($"Saving decrypted audio to '{outputFile}'...");
+
+ // Save the decrypted stream to a WAV file
+ await using var outStream = new FileStream(outputFile, FileMode.Create);
+ var wavFormat = new AudioFormat
+ {
+ SampleRate = provider.SampleRate,
+ Channels = provider.FormatInfo?.ChannelCount ?? 2,
+ Format = SampleFormat.F32,
+ Layout = AudioFormat.GetLayoutFromChannels(provider.FormatInfo?.ChannelCount ?? 2)
+ };
+
+ using var encoder = engine.CreateEncoder(outStream, "wav", wavFormat);
+
+ // Stream copy loop: read from the decrypting provider and write to the WAV encoder.
+ var buffer = new float[4096];
+ while (true)
+ {
+ var read = provider.ReadBytes(buffer);
+ if (read == 0) break;
+ encoder.Encode(buffer.AsSpan(0, read));
+ }
+
+ Console.ForegroundColor = ConsoleColor.Green;
+ Console.WriteLine("Decryption complete.");
+ Console.ResetColor();
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Encryption/EncryptionService.cs b/Samples/SoundFlow.Samples.Security.Encryption/EncryptionService.cs
new file mode 100644
index 0000000..a5d741a
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Encryption/EncryptionService.cs
@@ -0,0 +1,74 @@
+using System.Security.Cryptography;
+using SoundFlow.Abstracts;
+using SoundFlow.Providers;
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
+
+namespace SoundFlow.Samples.Security.Encryption;
+
+/// <summary>
+/// Encapsulates the logic for encrypting an audio file.
+/// </summary>
+public static class EncryptionService
+{
+    /// <summary>
+    /// Encrypts a source audio file into a secure SoundFlow container.
+    /// Optionally signs the output file if a private key is provided.
+    /// </summary>
+    /// <param name="engine">The audio engine for decoding.</param>
+    /// <param name="sourceFile">The path to the original audio file.</param>
+    /// <param name="outputFile">The path to save the encrypted container file.</param>
+    /// <param name="secretKey">The 32-byte secret key for encryption.</param>
+    /// <param name="privateKey">The PEM-encoded private key. If provided, the output will be digitally signed.</param>
+    /// <param name="embedSignature">If true and <paramref name="privateKey"/> is provided, the signature will be embedded in the file header.</param>
+ public static async Task EncryptAsync(AudioEngine engine, string sourceFile, string outputFile, byte[] secretKey, string? privateKey = null, bool embedSignature = false)
+ {
+ Console.WriteLine($"Loading '{sourceFile}' for encryption...");
+ await using var sourceStream = new FileStream(sourceFile, FileMode.Open, FileAccess.Read);
+ using var provider = new AssetDataProvider(engine, sourceStream);
+
+ // Generate a random 12-byte IV (Nonce). This MUST be unique for each encryption operation
+ // with the same key to ensure security.
+ var iv = RandomNumberGenerator.GetBytes(12);
+
+ var config = new EncryptionConfiguration
+ {
+ Key = secretKey,
+ Iv = iv
+ };
+
+ // If a private key is provided, enable digital signing.
+ SignatureConfiguration? signConfig = null;
+ if (!string.IsNullOrEmpty(privateKey))
+ {
+ Console.WriteLine(embedSignature ? "Digital Signing enabled (Embedded)." : "Digital Signing enabled (Detached).");
+ signConfig = new SignatureConfiguration { PrivateKeyPem = privateKey };
+ }
+
+ Console.WriteLine($"Streaming encrypted data to container '{outputFile}'...");
+
+ // Critical: When signing is enabled, the destination stream must be Readable and Seekable
+ // because the signer needs to read the file hash after encryption.
+ // FileAccess.ReadWrite allows this.
+ await using var destinationStream = new FileStream(outputFile, FileMode.Create, FileAccess.ReadWrite);
+
+ // Pass embedSignature flag to AudioEncryptor
+ var signature = await AudioEncryptor.EncryptAsync(provider, destinationStream, config, signConfig, embedSignature);
+
+ // If a detached signature was generated (signature is not null), save it to a sidecar file.
+ if (signature != null)
+ {
+ var sigPath = outputFile + ".sig";
+ await File.WriteAllTextAsync(sigPath, signature);
+ Console.WriteLine($"Detached signature saved to '{sigPath}'.");
+ }
+ else if (signConfig != null && embedSignature)
+ {
+ Console.WriteLine("Signature embedded successfully.");
+ }
+
+ Console.ForegroundColor = ConsoleColor.Green;
+ Console.WriteLine("Encryption complete.");
+ Console.ResetColor();
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Encryption/PlaybackService.cs b/Samples/SoundFlow.Samples.Security.Encryption/PlaybackService.cs
new file mode 100644
index 0000000..6f63a9f
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Encryption/PlaybackService.cs
@@ -0,0 +1,98 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Components;
+using SoundFlow.Enums;
+using SoundFlow.Interfaces;
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.Security.Encryption;
+
+/// <summary>
+/// Encapsulates the logic for real-time playback of an encrypted audio stream.
+/// </summary>
+public static class PlaybackService
+{
+    /// <summary>
+    /// Plays an encrypted SoundFlow container file in real-time.
+    /// Optionally verifies the digital signature before playback starts.
+    /// </summary>
+    /// <param name="engine">The audio engine for playback.</param>
+    /// <param name="encryptedFile">The path to the encrypted container file.</param>
+    /// <param name="secretKey">The 32-byte secret key used for encryption.</param>
+    /// <param name="publicKey">The PEM-encoded public key. If provided, signature verification is enforced.</param>
+ public static async Task PlayEncryptedStreamAsync(AudioEngine engine, string encryptedFile, byte[] secretKey, string? publicKey = null)
+ {
+ Console.WriteLine("Preparing for real-time encrypted playback...");
+
+ Result providerResult;
+ var fileStream = new FileStream(encryptedFile, FileMode.Open, FileAccess.Read);
+
+ if (!string.IsNullOrEmpty(publicKey))
+ {
+ string? signature = null;
+ var sigPath = encryptedFile + ".sig";
+
+ // Check for detached signature
+ if (File.Exists(sigPath))
+ {
+ Console.WriteLine("Found detached signature file.");
+ signature = await File.ReadAllTextAsync(sigPath);
+ }
+ else
+ {
+ Console.WriteLine("No detached signature found. Checking for embedded signature...");
+ }
+
+ Console.WriteLine("Verifying signature...");
+ var signConfig = new SignatureConfiguration { PublicKeyPem = publicKey };
+
+ // This handles verification for both embedded (signature=null) and detached cases.
+ providerResult = await AudioEncryptor.VerifyAndDecryptAsync(fileStream, secretKey, signConfig, signature);
+ }
+ else
+ {
+ providerResult = AudioEncryptor.Decrypt(fileStream, secretKey);
+ }
+
+ if (providerResult.IsFailure)
+ {
+ await fileStream.DisposeAsync();
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine($"Error reading/verifying container: {providerResult.Error?.Message}");
+ Console.ResetColor();
+ return;
+ }
+
+ // The provider now owns the fileStream and will dispose it when the provider is disposed.
+ using var provider = providerResult.Value!;
+
+ var format = new AudioFormat
+ {
+ SampleRate = provider.SampleRate,
+ Channels = provider.FormatInfo?.ChannelCount ?? 2,
+ Format = SampleFormat.F32,
+ Layout = AudioFormat.GetLayoutFromChannels(provider.FormatInfo?.ChannelCount ?? 2)
+ };
+
+ using var device = engine.InitializePlaybackDevice(null, format);
+ var player = new SoundPlayer(engine, format, provider);
+ device.MasterMixer.AddComponent(player);
+
+ Console.WriteLine("Starting playback...");
+ device.Start();
+ player.Play();
+
+ Console.WriteLine("Press any key to stop playback...");
+ while (player.State != PlaybackState.Stopped && !Console.KeyAvailable)
+ {
+ await Task.Delay(100);
+ }
+
+ if (Console.KeyAvailable) Console.ReadKey(true);
+
+ player.Stop();
+ device.Stop();
+ Console.WriteLine("Playback stopped.");
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Encryption/Program.cs b/Samples/SoundFlow.Samples.Security.Encryption/Program.cs
new file mode 100644
index 0000000..ec30ca4
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Encryption/Program.cs
@@ -0,0 +1,141 @@
+using SoundFlow.Backends.MiniAudio;
+using SoundFlow.Security.Utils;
+
+namespace SoundFlow.Samples.Security.Encryption;
+
/// <summary>
/// This sample program demonstrates the Content Encryption feature.
/// 1. It encrypts an original audio file into a custom container file (.sfa).
/// 2. It decrypts the container file back into raw audio.
/// 3. It saves the decrypted audio to a new WAV file.
/// 4. It verifies that the original and decrypted files are bit-for-bit identical.
/// 5. It plays the encrypted container in real-time.
/// </summary>
public static class Program
{
    // For a real application, this key should be managed securely (e.g., from a key vault or secure store).
    // It must be 32 bytes (256 bits).
    private static readonly byte[] SecretKey = "MySuperSecure32ByteEncryptionKey"u8.ToArray();

    public static async Task Main()
    {
        Console.WriteLine("--- SoundFlow Content Encryption & Authentication Test ---");
        Console.WriteLine();

        var originalFile = GetExistingFilePath("Enter path to the source audio file (e.g., original.wav):");

        Console.Write("Enable Authenticated Encryption (Digital Signatures)? (y/N): ");
        var enableAuth = Console.ReadLine()?.Trim().Equals("y", StringComparison.OrdinalIgnoreCase) ?? false;

        string? privateKey = null;
        string? publicKey = null;
        var embedSignature = false;

        if (enableAuth)
        {
            Console.Write("Embed Signature in File Header? (y/N): ");
            embedSignature = Console.ReadLine()?.Trim().Equals("y", StringComparison.OrdinalIgnoreCase) ?? false;
            Console.WriteLine("Generating ephemeral ECDSA-P384 key pair for this session...");
            var keys = SignatureKeyGenerator.Generate();
            privateKey = keys.PrivateKeyPem;
            publicKey = keys.PublicKeyPem;
            Console.WriteLine("Keys generated.");
        }

        // Work in an isolated, freshly-created temp directory so repeated runs start clean.
        var tempDir = Path.Combine(Path.GetTempPath(), "SoundFlowSample-Encryption");
        if (Directory.Exists(tempDir)) Directory.Delete(tempDir, true);
        Directory.CreateDirectory(tempDir);
        var encryptedFile = Path.Combine(tempDir, "encrypted.sfa");
        var decryptedFile = Path.Combine(tempDir, "decrypted.wav");

        using var engine = new MiniAudioEngine();
        var success = true;

        try
        {
            // Phase 1: Encrypt
            Console.WriteLine("\n--- Phase 1: Encrypting File ---");
            await EncryptionService.EncryptAsync(engine, originalFile, encryptedFile, SecretKey, privateKey, embedSignature);

            // Phase 2: Decrypt
            Console.WriteLine("\n--- Phase 2: Decrypting File ---");
            await DecryptionService.DecryptAsync(engine, encryptedFile, decryptedFile, SecretKey, publicKey);

            // Phase 3: Verify
            Console.WriteLine("\n--- Phase 3: Verifying Audio Integrity ---");
            var areIdentical = await VerificationService.AreFilesIdenticalAsync(engine, originalFile, decryptedFile);
            if (areIdentical)
            {
                Console.ForegroundColor = ConsoleColor.Green;
                Console.WriteLine("SUCCESS: Original and decrypted files are identical.");
            }
            else
            {
                success = false;
                Console.ForegroundColor = ConsoleColor.Red;
                Console.WriteLine("FAILURE: Original and decrypted files DO NOT match.");
            }
            Console.ResetColor();

            // Phase 4: Real-time Playback (only meaningful if the round-trip succeeded)
            if (success)
            {
                Console.WriteLine("\n--- Phase 4: Real-time Encrypted Playback ---");
                await PlaybackService.PlayEncryptedStreamAsync(engine, encryptedFile, SecretKey, publicKey);
            }
        }
        catch (Exception ex)
        {
            success = false;
            Console.ForegroundColor = ConsoleColor.Red;
            Console.WriteLine($"\nAn unexpected error occurred: {ex.Message}");
            Console.ResetColor();
        }
        finally
        {
            Console.WriteLine("\n--- Final Result ---");
            Console.WriteLine(success ? "Encryption tests passed." : "Encryption tests failed.");

            Console.WriteLine("\nCleaning up temporary files...");
            if (File.Exists(encryptedFile)) File.Delete(encryptedFile);
            if (File.Exists(encryptedFile + ".sig")) File.Delete(encryptedFile + ".sig");
            if (File.Exists(decryptedFile)) File.Delete(decryptedFile);
            // Delete recursively: a non-recursive Directory.Delete throws IOException if any
            // unexpected file (e.g. a leftover artifact from a failed run) is still present.
            if (Directory.Exists(tempDir)) Directory.Delete(tempDir, true);
        }

        Console.WriteLine("Encryption sample finished.");
    }

    /// <summary>
    /// Prompts the user for a file path and loops until a valid, existing file is provided.
    /// </summary>
    /// <param name="promptMessage">The message to display to the user.</param>
    /// <returns>A validated, existing file path.</returns>
    private static string GetExistingFilePath(string promptMessage)
    {
        Console.WriteLine(promptMessage);
        while (true)
        {
            Console.Write("> ");
            // Strip surrounding quotes that shells/file explorers add when pasting paths.
            var filePath = Console.ReadLine()?.Replace("\"", "");

            if (string.IsNullOrWhiteSpace(filePath))
            {
                Console.ForegroundColor = ConsoleColor.Yellow;
                Console.WriteLine("File path cannot be empty. Please try again.");
                Console.ResetColor();
                continue;
            }

            if (!File.Exists(filePath))
            {
                Console.ForegroundColor = ConsoleColor.Red;
                Console.WriteLine($"Error: The file '{filePath}' was not found. Please check the path and try again.");
                Console.ResetColor();
                continue;
            }

            return filePath;
        }
    }
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Encryption/SoundFlow.Samples.Security.Encryption.csproj b/Samples/SoundFlow.Samples.Security.Encryption/SoundFlow.Samples.Security.Encryption.csproj
new file mode 100644
index 0000000..20abf27
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Encryption/SoundFlow.Samples.Security.Encryption.csproj
@@ -0,0 +1,14 @@
+
+
+
+        <OutputType>Exe</OutputType>
+        <TargetFramework>net8.0</TargetFramework>
+        <ImplicitUsings>enable</ImplicitUsings>
+        <Nullable>enable</Nullable>
+
+
+
+
+
+
+
diff --git a/Samples/SoundFlow.Samples.Security.Encryption/VerificationService.cs b/Samples/SoundFlow.Samples.Security.Encryption/VerificationService.cs
new file mode 100644
index 0000000..9ab6b82
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Encryption/VerificationService.cs
@@ -0,0 +1,66 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Providers;
+
+namespace SoundFlow.Samples.Security.Encryption;
+
/// <summary>
/// Encapsulates the logic for verifying the integrity of audio files.
/// </summary>
public static class VerificationService
{
    /// <summary>
    /// Compares two audio files sample by sample to determine if they are identical.
    /// </summary>
    /// <param name="engine">The audio engine for decoding.</param>
    /// <param name="originalFile">The path to the first audio file.</param>
    /// <param name="decryptedFile">The path to the second audio file.</param>
    /// <returns>True if the files are identical, otherwise false.</returns>
    public static async Task<bool> AreFilesIdenticalAsync(AudioEngine engine, string originalFile, string decryptedFile)
    {
        // Fix: the return type must be Task<bool> — the method returns booleans and
        // Program.Main consumes the awaited value; the bare Task declaration cannot compile.
        Console.WriteLine($"Comparing Audio Samples of '{originalFile}' and '{decryptedFile}'...");

        await using var stream1 = new FileStream(originalFile, FileMode.Open);
        using var provider1 = new StreamDataProvider(engine, stream1);

        await using var stream2 = new FileStream(decryptedFile, FileMode.Open);
        using var provider2 = new StreamDataProvider(engine, stream2);

        // Check metadata first as a quick failure point.
        if (provider1.Length != provider2.Length)
        {
            Console.WriteLine($" -> Sample count mismatch: Original={provider1.Length}, Decrypted={provider2.Length}.");
            return false;
        }

        // Compare samples in chunks for efficiency.
        var buf1 = new float[8192];
        var buf2 = new float[8192];
        long totalRead = 0;

        while (true)
        {
            var read1 = provider1.ReadBytes(buf1);
            var read2 = provider2.ReadBytes(buf2);

            if (read1 != read2)
            {
                Console.WriteLine($" -> Read length mismatch at sample offset {totalRead}.");
                return false;
            }
            if (read1 == 0) break;

            for (var i = 0; i < read1; i++)
            {
                // Use a small tolerance for floating point comparisons.
                if (Math.Abs(buf1[i] - buf2[i]) > 1e-4f)
                {
                    Console.WriteLine($" -> Mismatch at sample {totalRead + i}: Original={buf1[i]}, Decrypted={buf2[i]}");
                    return false;
                }
            }
            totalRead += read1;
        }

        return true;
    }
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Fingerprinting/IdentificationService.cs b/Samples/SoundFlow.Samples.Security.Fingerprinting/IdentificationService.cs
new file mode 100644
index 0000000..a5dc354
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Fingerprinting/IdentificationService.cs
@@ -0,0 +1,51 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Providers;
+using SoundFlow.Security;
+using SoundFlow.Security.Models;
+using SoundFlow.Security.Stores;
+
+namespace SoundFlow.Samples.Security.Fingerprinting;
+
/// <summary>
/// Encapsulates the logic for identifying audio clips.
/// </summary>
public static class IdentificationService
{
    /// <summary>
    /// Loads a query clip, runs identification against the store, and prints the result.
    /// </summary>
    /// <param name="filePath">The path to the audio clip to identify.</param>
    /// <param name="engine">The audio engine for decoding.</param>
    /// <param name="store">The fingerprint store to query against.</param>
    /// <returns>The identification match on success, or null when no match was found.</returns>
    public static async Task<IdentificationResult?> IdentifyClipAsync(string filePath, AudioEngine engine, IFingerprintStore store)
    {
        Console.WriteLine($"--- Identifying Clip: {filePath} ---");

        using var provider = new StreamDataProvider(engine, new FileStream(filePath, FileMode.Open, FileAccess.Read));

        Console.WriteLine("Identifying clip against the store...");
        var result = await AudioIdentifier.IdentifyAsync(provider, store);

        Console.WriteLine("\n--- Identification Result ---");
        if (result is { IsSuccess: true, Value: not null })
        {
            Console.ForegroundColor = ConsoleColor.Cyan;
            Console.WriteLine("Match Found!");
            Console.WriteLine($" -> Track ID: {result.Value.TrackId}");
            Console.WriteLine($" -> Confidence: {result.Value.Confidence} aligned hashes");
            Console.WriteLine($" -> Time Offset: The clip starts at approximately {result.Value.MatchTimeSeconds:F2} seconds into the original track.");
            Console.WriteLine($" -> Processing Time: {result.Value.ProcessingTime.TotalMilliseconds:F2} ms");
        }
        else
        {
            Console.ForegroundColor = ConsoleColor.Yellow;
            Console.WriteLine("No match found in the database.");
        }

        Console.ResetColor();
        Console.WriteLine("-------------------------");

        // Fix: propagate the actual match so callers (Program.Main) can inspect TrackId.
        // Previously this method always returned null, which made the caller's verification
        // report FAILURE even when a match was found and printed above.
        // NOTE(review): the element type (IdentificationResult from SoundFlow.Security.Models)
        // is inferred from usage — confirm against AudioIdentifier.IdentifyAsync's signature.
        return result is { IsSuccess: true } ? result.Value : null;
    }
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Fingerprinting/IndexingService.cs b/Samples/SoundFlow.Samples.Security.Fingerprinting/IndexingService.cs
new file mode 100644
index 0000000..2884c8c
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Fingerprinting/IndexingService.cs
@@ -0,0 +1,43 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Providers;
+using SoundFlow.Security;
+using SoundFlow.Security.Stores;
+
+namespace SoundFlow.Samples.Security.Fingerprinting;
+
/// <summary>
/// Encapsulates the logic for indexing audio tracks.
/// </summary>
public static class IndexingService
{
    /// <summary>
    /// Loads an audio file, generates its fingerprint, and inserts it into the store.
    /// </summary>
    /// <param name="filePath">The path to the audio file to index.</param>
    /// <param name="engine">The audio engine for decoding.</param>
    /// <param name="store">The fingerprint store for persistence.</param>
    /// <returns>The generated Track ID for the indexed file.</returns>
    public static async Task<string> IndexTrackAsync(string filePath, AudioEngine engine, IFingerprintStore store)
    {
        // Fix: the return type must be Task<string> — the method returns the track id
        // and Program.Main consumes the awaited value; the bare Task declaration cannot compile.
        Console.WriteLine($"--- Indexing Track: {filePath} ---");

        using var provider = new StreamDataProvider(engine, new FileStream(filePath, FileMode.Open, FileAccess.Read));

        Console.WriteLine("Generating fingerprint...");
        var fingerprint = AudioIdentifier.GenerateFingerprint(provider);

        // Use the filename as a human-readable TrackId for this example.
        fingerprint.TrackId = Path.GetFileName(filePath);

        Console.WriteLine($"Generated {fingerprint.Hashes.Count} hashes for track '{fingerprint.TrackId}'.");

        Console.WriteLine("Inserting fingerprint into the store...");
        await store.InsertAsync(fingerprint);

        Console.ForegroundColor = ConsoleColor.Green;
        Console.WriteLine("Indexing complete.");
        Console.ResetColor();

        return fingerprint.TrackId;
    }
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Fingerprinting/Program.cs b/Samples/SoundFlow.Samples.Security.Fingerprinting/Program.cs
new file mode 100644
index 0000000..6152766
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Fingerprinting/Program.cs
@@ -0,0 +1,108 @@
+using SoundFlow.Backends.MiniAudio;
+using SoundFlow.Security.Stores;
+
+// This sample program demonstrates the core workflow of the Audio Fingerprinting system.
+// 1. It indexes a full-length audio track provided by the user.
+// 2. It attempts to identify a short, re-encoded clip provided by the user against the index.
+// 3. It reports the result, including the matched Track ID, confidence, and time offset.
+
+namespace SoundFlow.Samples.Security.Fingerprinting;
+
/// <summary>
/// Demonstrates the core Audio Fingerprinting workflow: index a full track,
/// identify a short clip against that index, and report the outcome.
/// </summary>
public static class Program
{
    public static async Task Main()
    {
        Console.WriteLine("--- SoundFlow Audio Fingerprinting Test ---");
        Console.WriteLine();

        // Setup: collect the two audio files the demo needs.
        var fullTrackPath = GetExistingFilePath("Enter the path to the full audio track you want to index (e.g., original.wav):");
        Console.WriteLine();
        var clipPath = GetExistingFilePath("Enter the path to the short audio clip you want to identify (e.g., clip.wav):");
        Console.WriteLine();

        // The AudioEngine is required for decoding audio files.
        using var engine = new MiniAudioEngine();

        // An in-memory store keeps the sample self-contained; production code would
        // implement IFingerprintStore against a persistent database instead.
        var store = new InMemoryFingerprintStore();

        // Phase 1: Indexing
        try
        {
            var trackId = await IndexingService.IndexTrackAsync(fullTrackPath, engine, store);
            Console.WriteLine($"Indexed track '{trackId}' successfully.");
        }
        catch (Exception ex)
        {
            ReportError($"An error occurred during indexing: {ex.Message}");
            return;
        }

        Console.WriteLine();

        // Phase 2: Identification
        try
        {
            var result = await IdentificationService.IdentifyClipAsync(clipPath, engine, store);

            // Phase 3: Verification — the clip should resolve to the file we just indexed.
            Console.WriteLine("--- Verification ---");
            var matched = result is not null && result.TrackId == Path.GetFileName(fullTrackPath);
            Console.ForegroundColor = matched ? ConsoleColor.Green : ConsoleColor.Red;
            Console.WriteLine(matched
                ? "SUCCESS: The clip was correctly identified."
                : "FAILURE: The clip was not identified or matched the wrong track.");
            Console.ResetColor();
        }
        catch (Exception ex)
        {
            ReportError($"An error occurred during identification: {ex.Message}");
        }

        Console.WriteLine("\nFingerprinting sample finished.");
    }

    /// <summary>
    /// Writes an error message to the console in red, then restores the color.
    /// </summary>
    private static void ReportError(string message)
    {
        Console.ForegroundColor = ConsoleColor.Red;
        Console.WriteLine(message);
        Console.ResetColor();
    }

    /// <summary>
    /// Prompts the user for a file path and loops until a valid, existing file is provided.
    /// </summary>
    /// <param name="promptMessage">The message to display to the user.</param>
    /// <returns>A validated, existing file path.</returns>
    private static string GetExistingFilePath(string promptMessage)
    {
        Console.WriteLine(promptMessage);
        for (;;)
        {
            Console.Write("> ");
            // Strip surrounding quotes that shells/file explorers add when pasting paths.
            var candidate = Console.ReadLine()?.Replace("\"", "");

            if (string.IsNullOrWhiteSpace(candidate))
            {
                Console.ForegroundColor = ConsoleColor.Yellow;
                Console.WriteLine("File path cannot be empty. Please try again.");
                Console.ResetColor();
            }
            else if (!File.Exists(candidate))
            {
                Console.ForegroundColor = ConsoleColor.Red;
                Console.WriteLine($"Error: The file '{candidate}' was not found. Please check the path and try again.");
                Console.ResetColor();
            }
            else
            {
                return candidate;
            }
        }
    }
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.Fingerprinting/SoundFlow.Samples.Security.Fingerprinting.csproj b/Samples/SoundFlow.Samples.Security.Fingerprinting/SoundFlow.Samples.Security.Fingerprinting.csproj
new file mode 100644
index 0000000..20abf27
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.Fingerprinting/SoundFlow.Samples.Security.Fingerprinting.csproj
@@ -0,0 +1,14 @@
+
+
+
+        <OutputType>Exe</OutputType>
+        <TargetFramework>net8.0</TargetFramework>
+        <ImplicitUsings>enable</ImplicitUsings>
+        <Nullable>enable</Nullable>
+
+
+
+
+
+
+
diff --git a/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/IntegrityEmbeddingService.cs b/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/IntegrityEmbeddingService.cs
new file mode 100644
index 0000000..cffce33
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/IntegrityEmbeddingService.cs
@@ -0,0 +1,62 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Enums;
+using SoundFlow.Metadata.Models;
+using SoundFlow.Providers;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Modifiers;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.Security.IntegrityWatermarking;
+
/// <summary>
/// Encapsulates the logic for embedding a fragile integrity watermark.
/// </summary>
public static class IntegrityEmbeddingService
{
    /// <summary>
    /// Embeds an integrity watermark into a source audio file and saves the result.
    /// </summary>
    /// <param name="engine">The audio engine for processing.</param>
    /// <param name="sourceFile">The path to the original audio file.</param>
    /// <param name="outputFile">The path to save the watermarked file.</param>
    /// <param name="config">The watermark configuration to use.</param>
    public static async Task EmbedAsync(AudioEngine engine, string sourceFile, string outputFile, WatermarkConfiguration config)
    {
        await using var sourceStream = new FileStream(sourceFile, FileMode.Open);
        using var dataProvider = new AssetDataProvider(engine, sourceStream);
        Console.WriteLine($"Embedding integrity watermark into '{sourceFile}'...");

        var processingFormat = new AudioFormat
        {
            SampleRate = dataProvider.SampleRate,
            Channels = dataProvider.FormatInfo?.ChannelCount ?? 2
        };
        var embedder = new IntegrityWatermarkEmbedModifier(config);

        // Pull the full decoded stream into memory, watermark it in place, then re-encode.
        var sampleBuffer = new float[dataProvider.Length];
        dataProvider.ReadBytes(sampleBuffer);

        embedder.Process(sampleBuffer, processingFormat.Channels);

        await SaveWavAsync(engine, outputFile, sampleBuffer, dataProvider.FormatInfo);
        Console.WriteLine($"Saved watermarked file to '{outputFile}'.");
    }

    /// <summary>
    /// Saves a float array of audio samples to a WAV file.
    /// </summary>
    private static async Task SaveWavAsync(AudioEngine engine, string filePath, float[] samples, SoundFormatInfo? formatInfo)
    {
        var channelCount = formatInfo?.ChannelCount ?? 2;
        var outputFormat = new AudioFormat
        {
            SampleRate = formatInfo?.SampleRate ?? 48000,
            Channels = channelCount,
            Format = SampleFormat.F32,
            Layout = AudioFormat.GetLayoutFromChannels(channelCount)
        };

        await using var outputStream = new FileStream(filePath, FileMode.Create);
        using var encoder = engine.CreateEncoder(outputStream, "wav", outputFormat);
        encoder.Encode(samples);
    }
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/IntegrityVerificationService.cs b/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/IntegrityVerificationService.cs
new file mode 100644
index 0000000..672e8cf
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/IntegrityVerificationService.cs
@@ -0,0 +1,52 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Providers;
+using SoundFlow.Security.Analyzers;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.Security.IntegrityWatermarking;
+
/// <summary>
/// Encapsulates the logic for verifying a fragile integrity watermark.
/// </summary>
public static class IntegrityVerificationService
{
    /// <summary>
    /// Verifies the integrity of a watermarked audio file.
    /// </summary>
    /// <param name="engine">The audio engine for processing.</param>
    /// <param name="filePath">The path to the file to verify.</param>
    /// <param name="config">The watermark configuration used for embedding.</param>
    /// <returns>True if the file is intact; false if an integrity violation is detected.</returns>
    public static bool VerifyFileIntegrity(AudioEngine engine, string filePath, WatermarkConfiguration config)
    {
        Console.WriteLine($"Verifying integrity of '{filePath}'...");
        using var stream = new FileStream(filePath, FileMode.Open);
        using var provider = new AssetDataProvider(engine, stream);
        var format = new AudioFormat
        {
            SampleRate = provider.SampleRate,
            Channels = provider.FormatInfo?.ChannelCount ?? 2
        };

        var verifier = new IntegrityWatermarkVerifyAnalyzer(format, config);
        var isCompromised = false;

        verifier.IntegrityViolationDetected += blockIndex =>
        {
            isCompromised = true;
            Console.WriteLine($" -> Integrity violation detected at block: {blockIndex}");
        };

        // 16384-float scratch buffer; chunked reads keep memory usage flat.
        var chunk = new float[16384];
        for (var read = provider.ReadBytes(chunk); read > 0; read = provider.ReadBytes(chunk))
        {
            // The verifier must only see the portion of the buffer actually filled.
            verifier.Process(chunk.AsSpan(0, read), format.Channels);

            // One violation is enough to fail the file; stop scanning immediately.
            if (isCompromised) break;
        }

        return !isCompromised;
    }
}
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/Program.cs b/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/Program.cs
new file mode 100644
index 0000000..ed69dd9
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/Program.cs
@@ -0,0 +1,124 @@
+using SoundFlow.Backends.MiniAudio;
+using SoundFlow.Security.Configuration;
+
+namespace SoundFlow.Samples.Security.IntegrityWatermarking;
+
/// <summary>
/// This sample program demonstrates the fragile Integrity Watermarking feature.
/// 1. It embeds an integrity watermark into a source WAV file.
/// 2. It verifies that the clean, watermarked file passes the integrity check.
/// 3. It creates a tampered version of the file by zeroing out a small section of audio.
/// 4. It verifies that the tampered file FAILS the integrity check.
/// </summary>
public static class Program
{
    public static async Task Main()
    {
        Console.WriteLine("--- SoundFlow Integrity Watermarking Test (Fragile) ---");
        Console.WriteLine();

        var originalFile = GetExistingFilePath("Enter the path to the source audio file (e.g., original.wav):");

        var tempDir = Path.Combine(Path.GetTempPath(), "SoundFlowSample-Integrity");
        Directory.CreateDirectory(tempDir);
        var watermarkedFile = Path.Combine(tempDir, "integrity-watermarked.wav");
        var tamperedFile = Path.Combine(tempDir, "integrity-tampered.wav");

        using var engine = new MiniAudioEngine();
        var config = new WatermarkConfiguration { IntegrityBlockSize = 8192 };
        var success = true;

        try
        {
            Console.WriteLine("\n--- Phase 1: Embedding Watermark ---");
            await IntegrityEmbeddingService.EmbedAsync(engine, originalFile, watermarkedFile, config);

            Console.WriteLine("\n--- Phase 2: Verifying Clean File ---");
            var cleanResult = IntegrityVerificationService.VerifyFileIntegrity(engine, watermarkedFile, config);
            if (cleanResult)
            {
                Console.ForegroundColor = ConsoleColor.Green;
                Console.WriteLine("SUCCESS: Clean file passed integrity check as expected.");
            }
            else
            {
                success = false;
                Console.ForegroundColor = ConsoleColor.Red;
                Console.WriteLine("FAILURE: Clean file FAILED integrity check unexpectedly.");
            }

            Console.ResetColor();

            Console.WriteLine("\n--- Phase 3: Creating Tampered File ---");
            TamperingService.TamperFileByZeroingData(watermarkedFile, tamperedFile);

            Console.WriteLine("\n--- Phase 4: Verifying Tampered File ---");
            var tamperedResult = IntegrityVerificationService.VerifyFileIntegrity(engine, tamperedFile, config);
            if (!tamperedResult)
            {
                Console.ForegroundColor = ConsoleColor.Green;
                Console.WriteLine("SUCCESS: Tampered file FAILED integrity check as expected.");
            }
            else
            {
                success = false;
                Console.ForegroundColor = ConsoleColor.Red;
                Console.WriteLine("FAILURE: Tampered file PASSED integrity check unexpectedly.");
            }

            Console.ResetColor();
        }
        catch (Exception ex)
        {
            success = false;
            Console.ForegroundColor = ConsoleColor.Red;
            Console.WriteLine($"\nAn unexpected error occurred: {ex.Message}");
            Console.ResetColor();
        }
        finally
        {
            Console.WriteLine("\n--- Final Result ---");
            Console.WriteLine(success ? "All integrity tests passed." : "One or more integrity tests failed.");

            Console.WriteLine("\nCleaning up temporary files...");
            if (File.Exists(watermarkedFile)) File.Delete(watermarkedFile);
            if (File.Exists(tamperedFile)) File.Delete(tamperedFile);
            // Delete recursively: a non-recursive Directory.Delete throws IOException if any
            // unexpected file (e.g. a leftover artifact from a failed run) is still present.
            if (Directory.Exists(tempDir)) Directory.Delete(tempDir, true);
        }

        Console.WriteLine("Integrity watermarking sample finished.");
    }

    /// <summary>
    /// Prompts the user for a file path and loops until a valid, existing file is provided.
    /// </summary>
    /// <param name="promptMessage">The message to display to the user.</param>
    /// <returns>A validated, existing file path.</returns>
    private static string GetExistingFilePath(string promptMessage)
    {
        Console.WriteLine(promptMessage);
        while (true)
        {
            Console.Write("> ");
            // Strip surrounding quotes that shells/file explorers add when pasting paths.
            var filePath = Console.ReadLine()?.Replace("\"", "");

            if (string.IsNullOrWhiteSpace(filePath))
            {
                Console.ForegroundColor = ConsoleColor.Yellow;
                Console.WriteLine("File path cannot be empty. Please try again.");
                Console.ResetColor();
                continue;
            }

            if (!File.Exists(filePath))
            {
                Console.ForegroundColor = ConsoleColor.Red;
                Console.WriteLine($"Error: The file '{filePath}' was not found. Please check the path and try again.");
                Console.ResetColor();
                continue;
            }

            return filePath;
        }
    }
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/SoundFlow.Samples.Security.IntegrityWatermarking.csproj b/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/SoundFlow.Samples.Security.IntegrityWatermarking.csproj
new file mode 100644
index 0000000..20abf27
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/SoundFlow.Samples.Security.IntegrityWatermarking.csproj
@@ -0,0 +1,14 @@
+
+
+
+        <OutputType>Exe</OutputType>
+        <TargetFramework>net8.0</TargetFramework>
+        <ImplicitUsings>enable</ImplicitUsings>
+        <Nullable>enable</Nullable>
+
+
+
+
+
+
+
diff --git a/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/TamperingService.cs b/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/TamperingService.cs
new file mode 100644
index 0000000..4fc96b2
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.IntegrityWatermarking/TamperingService.cs
@@ -0,0 +1,49 @@
+namespace SoundFlow.Samples.Security.IntegrityWatermarking;
+
/// <summary>
/// Provides methods to simulate tampering with audio files.
/// </summary>
public static class TamperingService
{
    /// <summary>
    /// Creates a tampered version of an audio file by zeroing out a block of its binary data.
    /// This method operates at the byte level to simulate corruption or malicious editing.
    /// </summary>
    /// <param name="inputFile">The path to the clean, watermarked file.</param>
    /// <param name="outputFile">The path where the tampered file will be saved.</param>
    public static void TamperFileByZeroingData(string inputFile, string outputFile)
    {
        Console.WriteLine($"Creating tampered file '{outputFile}' by zeroing out a data block...");
        var bytes = File.ReadAllBytes(inputFile);

        // Skip past the standard 44-byte WAV header so the container format itself stays valid.
        const int wavHeaderSize = 44;
        if (bytes.Length <= wavHeaderSize)
        {
            // Nothing safely tamperable beyond the header; pass the file through unchanged.
            File.Copy(inputFile, outputFile, true);
            Console.ForegroundColor = ConsoleColor.Magenta;
            Console.WriteLine("Warning: File is too small to tamper with. Copied as-is.");
            Console.ResetColor();
            return;
        }

        // Zero out 1 KB starting roughly in the middle of the data chunk,
        // clamped so the cleared span never runs past the end of the file.
        const int tamperLength = 1024;
        var tamperPoint = wavHeaderSize + (bytes.Length - wavHeaderSize) / 2;
        var zeroCount = Math.Min(tamperLength, bytes.Length - tamperPoint);
        Array.Clear(bytes, tamperPoint, zeroCount);

        File.WriteAllBytes(outputFile, bytes);
        Console.ForegroundColor = ConsoleColor.Yellow;
        Console.WriteLine("Tampering complete.");
        Console.ResetColor();
    }
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/AttackSimulationService.cs b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/AttackSimulationService.cs
new file mode 100644
index 0000000..9a33733
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/AttackSimulationService.cs
@@ -0,0 +1,61 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Enums;
+using SoundFlow.Metadata.Models;
+using SoundFlow.Providers;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.Security.OwnershipWatermarking;
+
/// <summary>
/// Encapsulates the logic for simulating an attack on a watermarked audio file.
/// </summary>
public static class AttackSimulationService
{
    /// <summary>
    /// Loads a watermarked audio file, modifies its volume, and saves it to a new file.
    /// </summary>
    /// <param name="engine">The audio engine for processing.</param>
    /// <param name="inputFile">The path to the watermarked audio file.</param>
    /// <param name="outputFile">The path to save the modified audio file.</param>
    /// <param name="volumeMultiplier">The factor by which to multiply the audio samples' amplitude.</param>
    public static async Task SimulateVolumeChangeAsync(AudioEngine engine, string inputFile, string outputFile, float volumeMultiplier)
    {
        Console.WriteLine($"Loading '{inputFile}', adjusting volume by {(volumeMultiplier - 1.0f) * 100:F0}%, and saving to '{outputFile}'.");
        await using var inputStream = new FileStream(inputFile, FileMode.Open);
        using var dataProvider = new AssetDataProvider(engine, inputStream);

        // Decode the entire file into memory.
        var sampleBuffer = new float[dataProvider.Length];
        dataProvider.ReadBytes(sampleBuffer);

        // Scale every sample's amplitude in place by the requested factor.
        foreach (ref var sample in sampleBuffer.AsSpan())
        {
            sample *= volumeMultiplier;
        }

        await SaveWavAsync(engine, outputFile, sampleBuffer, dataProvider.FormatInfo);

        Console.ForegroundColor = ConsoleColor.Yellow;
        Console.WriteLine("Attack simulation complete.");
        Console.ResetColor();
    }

    /// <summary>
    /// Saves a float array of audio samples to a WAV file.
    /// </summary>
    private static async Task SaveWavAsync(AudioEngine engine, string filePath, float[] samples, SoundFormatInfo? formatInfo)
    {
        var channelCount = formatInfo?.ChannelCount ?? 2;
        var outputFormat = new AudioFormat
        {
            SampleRate = formatInfo?.SampleRate ?? 48000,
            Channels = channelCount,
            Format = SampleFormat.F32,
            Layout = AudioFormat.GetLayoutFromChannels(channelCount)
        };

        await using var outputStream = new FileStream(filePath, FileMode.Create);
        using var encoder = engine.CreateEncoder(outputStream, "wav", outputFormat);
        encoder.Encode(samples);
    }
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/Program.cs b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/Program.cs
new file mode 100644
index 0000000..942a3ca
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/Program.cs
@@ -0,0 +1,116 @@
+using SoundFlow.Backends.MiniAudio;
+
+namespace SoundFlow.Samples.Security.OwnershipWatermarking;
+
+///
+/// This sample program demonstrates the robust Ownership Watermarking feature.
+/// 1. It auto-tunes watermark settings for a source WAV file.
+/// 2. It embeds a secret text message into the file.
+/// 3. It saves the watermarked audio.
+/// 4. It simulates a "distribution attack" by altering the volume of the watermarked file.
+/// 5. It then attempts to extract the secret message from the modified file.
+/// 6. It verifies if the extracted message matches the original secret.
+///
+public static class Program
+{
+ private const string SecretMessage = "SoundFlow Ownership Test - Property of LSXPrime";
+ private const string SecretKey = "MySuperSecretKey123!";
+
+ public static async Task Main()
+ {
+ Console.WriteLine("--- SoundFlow Ownership Watermarking Test (Robust) ---");
+ Console.WriteLine();
+
+ var originalFile = GetExistingFilePath("Enter the path to the source audio file you want to watermark (e.g., original.wav):");
+
+ // Define temporary file paths in a temporary directory
+ var tempDir = Path.Combine(Path.GetTempPath(), "SoundFlowSample");
+ Directory.CreateDirectory(tempDir);
+ var watermarkedFile = Path.Combine(tempDir, "watermarked.wav");
+ var modifiedFile = Path.Combine(tempDir, "watermarked-modified.wav");
+
+ using var engine = new MiniAudioEngine();
+
+ try
+ {
+ // Phase 0: Auto-Tuning
+ Console.WriteLine("\n--- Phase 0: Auto-Tuning Configuration ---");
+ var config = await WatermarkTuningService.TuneAsync(engine, originalFile, SecretMessage, SecretKey);
+
+ // Phase 1: Embedding
+ Console.WriteLine("\n--- Phase 1: Embedding Watermark ---");
+ await WatermarkEmbeddingService.EmbedAsync(engine, originalFile, watermarkedFile, SecretMessage, config);
+
+ // Phase 2: Simulate Attack
+ Console.WriteLine("\n--- Phase 2: Simulating Modification Attack (Volume Change) ---");
+ await AttackSimulationService.SimulateVolumeChangeAsync(engine, watermarkedFile, modifiedFile, 0.75f);
+
+ // Phase 3: Extraction & Verification
+ Console.WriteLine("\n--- Phase 3: Extracting Watermark from Modified File ---");
+ var result = await WatermarkExtractionService.ExtractAsync(engine, modifiedFile, config);
+
+ Console.WriteLine("\n--- Verification ---");
+ if (result is { IsSuccess: true, Value: SecretMessage })
+ {
+ Console.ForegroundColor = ConsoleColor.Green;
+ Console.WriteLine("SUCCESS: The secret message was correctly extracted from the modified file.");
+ }
+ else
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine("FAILURE: The extracted message did not match the original secret.");
+ Console.WriteLine($" -> Expected: '{SecretMessage}'");
+ Console.WriteLine($" -> Got: '{(result.IsSuccess ? result.Value : result.Error?.Message)}'");
+ }
+ Console.ResetColor();
+ }
+ catch (Exception ex)
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine($"\nAn unexpected error occurred: {ex.Message}");
+ Console.ResetColor();
+ }
+ finally
+ {
+ // Cleanup generated files
+ Console.WriteLine("\nCleaning up temporary files...");
+ if (File.Exists(watermarkedFile)) File.Delete(watermarkedFile);
+ if (File.Exists(modifiedFile)) File.Delete(modifiedFile);
+ if (Directory.Exists(tempDir)) Directory.Delete(tempDir);
+ Console.WriteLine("Watermarking sample finished.");
+ }
+ }
+
+ ///
+ /// Prompts the user for a file path and loops until a valid, existing file is provided.
+ ///
+ /// The message to display to the user.
+ /// A validated, existing file path.
+ private static string GetExistingFilePath(string promptMessage)
+ {
+ Console.WriteLine(promptMessage);
+ while (true)
+ {
+ Console.Write("> ");
+ var filePath = Console.ReadLine()?.Replace("\"", "");
+
+ if (string.IsNullOrWhiteSpace(filePath))
+ {
+ Console.ForegroundColor = ConsoleColor.Yellow;
+ Console.WriteLine("File path cannot be empty. Please try again.");
+ Console.ResetColor();
+ continue;
+ }
+
+ if (!File.Exists(filePath))
+ {
+ Console.ForegroundColor = ConsoleColor.Red;
+ Console.WriteLine($"Error: The file '{filePath}' was not found. Please check the path and try again.");
+ Console.ResetColor();
+ continue;
+ }
+
+ return filePath;
+ }
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/SoundFlow.Samples.Security.OwnershipWatermarking.csproj b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/SoundFlow.Samples.Security.OwnershipWatermarking.csproj
new file mode 100644
index 0000000..20abf27
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/SoundFlow.Samples.Security.OwnershipWatermarking.csproj
@@ -0,0 +1,14 @@
+
+
+
+ Exe
+ net8.0
+ enable
+ enable
+
+
+
+
+
+
+
diff --git a/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/WatermarkEmbeddingService.cs b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/WatermarkEmbeddingService.cs
new file mode 100644
index 0000000..d69ccc1
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/WatermarkEmbeddingService.cs
@@ -0,0 +1,36 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Providers;
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
+
+namespace SoundFlow.Samples.Security.OwnershipWatermarking;
+
+///
+/// Encapsulates the logic for embedding an ownership watermark into an audio file.
+///
+public static class WatermarkEmbeddingService
+{
+ ///
+ /// Embeds a secret message into a source audio file and saves the result.
+ ///
+ /// The audio engine for processing.
+ /// The path to the original audio file.
+ /// The path to save the watermarked audio file.
+ /// The secret message to embed.
+ /// The watermark configuration to use.
+ public static async Task EmbedAsync(AudioEngine engine, string sourceFile, string outputFile, string secretMessage, WatermarkConfiguration config)
+ {
+ await using var stream = new FileStream(sourceFile, FileMode.Open);
+ using var provider = new AssetDataProvider(engine, stream);
+ Console.WriteLine($"Embedding '{secretMessage}' into '{sourceFile}'...");
+
+ Console.WriteLine($"Saving watermarked audio to '{outputFile}'...");
+
+ await using var watermarkedStream = new FileStream(outputFile, FileMode.Create);
+ AudioWatermarker.EmbedOwnershipWatermark(provider, watermarkedStream, secretMessage, config);
+
+ Console.ForegroundColor = ConsoleColor.Green;
+ Console.WriteLine("Embedding complete.");
+ Console.ResetColor();
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/WatermarkExtractionService.cs b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/WatermarkExtractionService.cs
new file mode 100644
index 0000000..48794ee
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/WatermarkExtractionService.cs
@@ -0,0 +1,42 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Providers;
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.Security.OwnershipWatermarking;
+
+///
+/// Encapsulates the logic for extracting an ownership watermark from an audio file.
+///
+public static class WatermarkExtractionService
+{
+ ///
+ /// Attempts to extract a secret message from an audio file.
+ ///
+ /// The audio engine for processing.
+ /// The path to the (potentially modified) watermarked audio file.
+ /// The watermark configuration used for embedding.
+ /// A result object containing the extracted message or an error.
+ public static async Task> ExtractAsync(AudioEngine engine, string inputFile, WatermarkConfiguration config)
+ {
+ Console.WriteLine($"Attempting to extract watermark from '{inputFile}'...");
+ await using var stream = new FileStream(inputFile, FileMode.Open);
+ using var provider = new AssetDataProvider(engine, stream);
+
+ var result = await AudioWatermarker.ExtractOwnershipWatermarkAsync(provider, config);
+
+ if (result.IsSuccess)
+ {
+ Console.WriteLine("Extraction successful.");
+ Console.WriteLine($" -> Extracted Payload: '{result.Value}'");
+ }
+ else
+ {
+ Console.WriteLine("Extraction failed.");
+ Console.WriteLine($" -> Reason: {result.Error?.Message}");
+ }
+
+ return result;
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/WatermarkTuningService.cs b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/WatermarkTuningService.cs
new file mode 100644
index 0000000..45dcf06
--- /dev/null
+++ b/Samples/SoundFlow.Samples.Security.OwnershipWatermarking/WatermarkTuningService.cs
@@ -0,0 +1,40 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Providers;
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
+
+namespace SoundFlow.Samples.Security.OwnershipWatermarking;
+
+///
+/// Encapsulates the logic for auto-tuning watermark configuration.
+///
+public static class WatermarkTuningService
+{
+ ///
+ /// Analyzes a source audio file to determine the optimal watermark configuration.
+ ///
+ /// The audio engine for processing.
+ /// The path to the original audio file.
+ /// The exact secret message that will be embedded.
+ /// The secret key for the watermark.
+ /// An auto-tuned object.
+ public static async Task TuneAsync(AudioEngine engine, string sourceFile, string secretMessage, string key)
+ {
+ Console.WriteLine($"Analyzing '{sourceFile}' to find optimal watermark settings...");
+
+ await using var tuneStream = new FileStream(sourceFile, FileMode.Open);
+ using var tuneProvider = new AssetDataProvider(engine, tuneStream);
+
+ // Pass the actual secret message so the tuner knows exactly how much data needs to fit in the test slice duration.
+ // Alternatively, a shorter proxy message can be used to obtain a more stable starting configuration.
+ var config = await WatermarkTuner.TuneConfigurationAsync(tuneProvider, secretMessage, key);
+
+ Console.ForegroundColor = ConsoleColor.Cyan;
+ Console.WriteLine("Auto-tuning complete.");
+ Console.WriteLine($" -> Optimal Strength: {config.Strength}");
+ Console.WriteLine($" -> Optimal Spread Factor: {config.SpreadFactor}");
+ Console.ResetColor();
+
+ return config;
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.SimplePlayer/ComponentTests.cs b/Samples/SoundFlow.Samples.SimplePlayer/ComponentTests.cs
index ee92cf2..b33ea09 100644
--- a/Samples/SoundFlow.Samples.SimplePlayer/ComponentTests.cs
+++ b/Samples/SoundFlow.Samples.SimplePlayer/ComponentTests.cs
@@ -1,6 +1,7 @@
using SoundFlow.Abstracts;
using SoundFlow.Backends.MiniAudio;
using SoundFlow.Components;
+using SoundFlow.Enums;
using SoundFlow.Interfaces;
using SoundFlow.Modifiers;
using SoundFlow.Providers;
@@ -20,8 +21,7 @@ public static void Run()
{
Console.WriteLine("SoundFlow Component and Modifier Examples");
Console.WriteLine($"Using Audio Backend: {Engine.GetType().Name}");
-
- /*
+
// Component Examples:
Console.WriteLine("\n--- Component Examples ---");
TestOscillator();
@@ -58,9 +58,9 @@ public static void Run()
TestStreamDataProvider();
TestRawDataProvider();
TestQueueDataProvider();
- */
+
TestNetworkDataProvider().GetAwaiter().GetResult();
- // TestMicrophoneDataProvider();
+ TestMicrophoneDataProvider();
Console.WriteLine("\nAll Tests Finished. Press any key to exit.");
Console.ReadKey();
@@ -76,8 +76,10 @@ public static void Run()
private static void TestOscillator()
{
Console.WriteLine("\n- Testing Oscillator Component -");
- using var oscillator = new Oscillator(Engine, Format)
- { Frequency = 440f, Amplitude = 0.5f, Type = Oscillator.WaveformType.Sine };
+ using var oscillator = new Oscillator(Engine, Format);
+ oscillator.Frequency = 440f;
+ oscillator.Amplitude = 0.5f;
+ oscillator.Type = Oscillator.WaveformType.Sine;
PlayComponentForDuration(oscillator, 3);
}
@@ -134,7 +136,7 @@ private static void TestFilter()
{ Frequency = 440f, Amplitude = 0.5f, Type = Oscillator.WaveformType.Square };
var filter = new Filter(Format)
{
- Type = Filter.FilterType.LowPass,
+ Type = FilterType.LowPass,
CutoffFrequency = 1000f,
Resonance = 0.8f
};
diff --git a/Samples/SoundFlow.Samples.SimplePlayer/DeviceService.cs b/Samples/SoundFlow.Samples.SimplePlayer/DeviceService.cs
new file mode 100644
index 0000000..9f38d73
--- /dev/null
+++ b/Samples/SoundFlow.Samples.SimplePlayer/DeviceService.cs
@@ -0,0 +1,45 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Enums;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.SimplePlayer;
+
+///
+/// Provides helper methods for audio device selection and management.
+///
+public static class DeviceService
+{
+ ///
+ /// Prompts the user to select a single device from a list of available devices.
+ ///
+ /// The audio engine instance to query for devices.
+ /// The type of device to select (Playback or Capture).
+ /// The selected device information, or null if no device is found or selected.
+ public static DeviceInfo? SelectDevice(AudioEngine engine, DeviceType type)
+ {
+ engine.UpdateAudioDevicesInfo();
+ var devices = type == DeviceType.Playback ? engine.PlaybackDevices : engine.CaptureDevices;
+
+ if (devices.Length == 0)
+ {
+ Console.WriteLine($"No {type.ToString().ToLower()} devices found.");
+ return null;
+ }
+
+ Console.WriteLine($"\nPlease select a {type.ToString().ToLower()} device:");
+ for (var i = 0; i < devices.Length; i++)
+ {
+ Console.WriteLine($" {i}: {devices[i].Name} {(devices[i].IsDefault ? "(Default)" : "")}");
+ }
+
+ while (true)
+ {
+ Console.Write("Enter device index: ");
+ if (int.TryParse(Console.ReadLine(), out var index) && index >= 0 && index < devices.Length)
+ {
+ return devices[index];
+ }
+ Console.WriteLine("Invalid index. Please try again.");
+ }
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.SimplePlayer/EqualizerPresets.cs b/Samples/SoundFlow.Samples.SimplePlayer/EqualizerPresets.cs
index 0d8a162..89289cc 100644
--- a/Samples/SoundFlow.Samples.SimplePlayer/EqualizerPresets.cs
+++ b/Samples/SoundFlow.Samples.SimplePlayer/EqualizerPresets.cs
@@ -1,4 +1,5 @@
+using SoundFlow.Enums;
using SoundFlow.Modifiers;
namespace SoundFlow.Samples.SimplePlayer;
diff --git a/Samples/SoundFlow.Samples.SimplePlayer/PassthroughService.cs b/Samples/SoundFlow.Samples.SimplePlayer/PassthroughService.cs
new file mode 100644
index 0000000..29ee815
--- /dev/null
+++ b/Samples/SoundFlow.Samples.SimplePlayer/PassthroughService.cs
@@ -0,0 +1,48 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Abstracts.Devices;
+using SoundFlow.Components;
+using SoundFlow.Enums;
+using SoundFlow.Providers;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.SimplePlayer;
+
+///
+/// Encapsulates logic for live microphone audio passthrough.
+///
+public static class PassthroughService
+{
+ ///
+ /// Initializes and runs a full-duplex audio stream, piping microphone input to the output.
+ ///
+ public static void Run(AudioEngine engine, AudioFormat format, DeviceConfig deviceConfig)
+ {
+ var captureDeviceInfo = DeviceService.SelectDevice(engine, DeviceType.Capture);
+ if (!captureDeviceInfo.HasValue) return;
+
+ var playbackDeviceInfo = DeviceService.SelectDevice(engine, DeviceType.Playback);
+ if (!playbackDeviceInfo.HasValue) return;
+
+ using var duplexDevice = engine.InitializeFullDuplexDevice(playbackDeviceInfo.Value, captureDeviceInfo.Value, format, deviceConfig);
+
+ duplexDevice.Start();
+
+ using var microphoneProvider = new MicrophoneDataProvider(duplexDevice);
+ using var soundPlayer = new SoundPlayer(engine, format, microphoneProvider);
+
+ duplexDevice.MasterMixer.AddComponent(soundPlayer);
+
+ microphoneProvider.StartCapture();
+ soundPlayer.Play();
+
+ Console.WriteLine("\nLive microphone passthrough is active. Press any key to stop.");
+ Console.ReadKey();
+
+ microphoneProvider.StopCapture();
+ soundPlayer.Stop();
+
+ duplexDevice.MasterMixer.RemoveComponent(soundPlayer);
+
+ duplexDevice.Stop();
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.SimplePlayer/PlaybackService.cs b/Samples/SoundFlow.Samples.SimplePlayer/PlaybackService.cs
new file mode 100644
index 0000000..17062ba
--- /dev/null
+++ b/Samples/SoundFlow.Samples.SimplePlayer/PlaybackService.cs
@@ -0,0 +1,63 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Abstracts.Devices;
+using SoundFlow.Components;
+using SoundFlow.Enums;
+using SoundFlow.Interfaces;
+using SoundFlow.Providers;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.SimplePlayer;
+
+///
+/// Encapsulates all logic related to audio playback.
+///
+public static class PlaybackService
+{
+ ///
+ /// Prompts the user for a file path (local or URL) and initiates playback.
+ ///
+ public static void PlayFromUserInput(AudioEngine engine, AudioFormat format, DeviceConfig deviceConfig)
+ {
+ Console.Write("Enter audio file path or URL: ");
+ var filePath = Console.ReadLine()?.Trim().Replace("\"", "") ?? string.Empty;
+
+ var isNetworked = Uri.TryCreate(filePath, UriKind.Absolute, out var uriResult)
+ && (uriResult.Scheme == Uri.UriSchemeHttp || uriResult.Scheme == Uri.UriSchemeHttps);
+
+ if (!isNetworked && !File.Exists(filePath))
+ {
+ Console.WriteLine("File not found at the specified path.");
+ return;
+ }
+
+ PlayFile(engine, format, deviceConfig, filePath, isNetworked);
+ }
+
+ ///
+ /// Plays a specified audio file or URL.
+ ///
+ public static void PlayFile(AudioEngine engine, AudioFormat format, DeviceConfig deviceConfig, string path, bool isNetworked = false)
+ {
+ Console.WriteLine(!isNetworked ? "Input is a file path. Opening file stream..." : "Input is a URL. Initializing network stream...");
+
+ var deviceInfo = DeviceService.SelectDevice(engine, DeviceType.Playback);
+ if (!deviceInfo.HasValue) return;
+
+ using var playbackDevice = engine.InitializePlaybackDevice(deviceInfo.Value, format, deviceConfig);
+ playbackDevice.Start();
+
+ using ISoundDataProvider dataProvider = isNetworked
+ ? new NetworkDataProvider(engine, format, path)
+ : new StreamDataProvider(engine, format, new FileStream(path, FileMode.Open, FileAccess.Read));
+
+ using var soundPlayer = new SoundPlayer(engine, format, dataProvider);
+
+ playbackDevice.MasterMixer.AddComponent(soundPlayer);
+ soundPlayer.Play();
+
+ UserInterfaceService.DisplayPlaybackControls(soundPlayer);
+
+ playbackDevice.MasterMixer.RemoveComponent(soundPlayer);
+ playbackDevice.Stop();
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.SimplePlayer/Program.cs b/Samples/SoundFlow.Samples.SimplePlayer/Program.cs
index 8e0d630..73c0d98 100644
--- a/Samples/SoundFlow.Samples.SimplePlayer/Program.cs
+++ b/Samples/SoundFlow.Samples.SimplePlayer/Program.cs
@@ -3,16 +3,12 @@
using SoundFlow.Backends.MiniAudio;
using SoundFlow.Backends.MiniAudio.Devices;
using SoundFlow.Backends.MiniAudio.Enums;
-using SoundFlow.Components;
-using SoundFlow.Enums;
-using SoundFlow.Interfaces;
-using SoundFlow.Providers;
using SoundFlow.Structs;
namespace SoundFlow.Samples.SimplePlayer;
///
-/// Example program to play audio, record, and apply effects using the refactored SoundFlow library.
+/// A menu-driven example program to demonstrate core SoundFlow features.
///
internal static class Program
{
@@ -20,10 +16,10 @@ internal static class Program
private static readonly AudioEngine Engine = new MiniAudioEngine();
private static readonly AudioFormat Format = AudioFormat.DvdHq;
- // Represents detailed configuration for a MiniAudio device, allowing fine-grained control over general and backend-specific settings, Not essential though.
+ // Represents detailed configuration for a MiniAudio device, allowing fine-grained control over general and backend-specific settings.
private static readonly DeviceConfig DeviceConfig = new MiniAudioDeviceConfig
{
- PeriodSizeInFrames = 960, // 10ms at 48kHz = 480 frames @ 2 channels = 960 frames
+ PeriodSizeInFrames = 9600, // 200ms at 48kHz (9600 frames) — NOTE(review): previous value was 960 ("10ms" per the old comment); confirm this 10x increase is intentional and not a leftover debug change
Playback = new DeviceSubConfig
{
ShareMode = ShareMode.Shared // Use shared mode for better compatibility with other applications
@@ -58,13 +54,13 @@ private static void Main()
switch (choice)
{
case '1':
- PlayAudioFromFile();
+ PlaybackService.PlayFromUserInput(Engine, Format, DeviceConfig);
break;
case '2':
- RecordAndPlaybackAudio();
+ RecordingService.RecordAndPlayback(Engine, Format, DeviceConfig, RecordedFilePath);
break;
case '3':
- LiveMicrophonePassthrough();
+ PassthroughService.Run(Engine, Format, DeviceConfig);
break;
case '4':
ComponentTests.Run();
@@ -91,232 +87,4 @@ private static void Main()
Engine.Dispose();
}
}
-
- #region Device Selection Helpers
-
- ///
- /// Prompts the user to select a single device from a list.
- ///
- private static DeviceInfo? SelectDevice(DeviceType type)
- {
- Engine.UpdateAudioDevicesInfo();
- var devices = type == DeviceType.Playback ? Engine.PlaybackDevices : Engine.CaptureDevices;
-
- if (devices.Length == 0)
- {
- Console.WriteLine($"No {type.ToString().ToLower()} devices found.");
- return null;
- }
-
- Console.WriteLine($"\nPlease select a {type.ToString().ToLower()} device:");
- for (var i = 0; i < devices.Length; i++)
- {
- Console.WriteLine($" {i}: {devices[i].Name} {(devices[i].IsDefault ? "(Default)" : "")}");
- }
-
- while (true)
- {
- Console.Write("Enter device index: ");
- if (int.TryParse(Console.ReadLine(), out var index) && index >= 0 && index < devices.Length)
- {
- return devices[index];
- }
- Console.WriteLine("Invalid index. Please try again.");
- }
- }
-
- #endregion
-
- #region Menu Options
-
- private static void PlayAudioFromFile()
- {
- Console.Write("Enter audio file path: ");
- var filePath = Console.ReadLine()?.Replace("\"", "") ?? string.Empty;
- var isNetworked = Uri.TryCreate(filePath, UriKind.Absolute, out var uriResult)
- && (uriResult.Scheme == Uri.UriSchemeHttp || uriResult.Scheme == Uri.UriSchemeHttps);
-
- if (!isNetworked && !File.Exists(filePath))
- {
- Console.WriteLine("File not found at the specified path.");
- return;
- }
-
- Console.WriteLine(!isNetworked ? "Input is a file path. Opening file stream..." : "Input is a URL. Initializing network stream...");
-
- var deviceInfo = SelectDevice(DeviceType.Playback);
- if (!deviceInfo.HasValue) return;
-
- var playbackDevice = Engine.InitializePlaybackDevice(deviceInfo.Value, Format, DeviceConfig);
- playbackDevice.Start();
-
- using ISoundDataProvider dataProvider = isNetworked ? new NetworkDataProvider(Engine, Format, filePath) : new StreamDataProvider(Engine, Format, new FileStream(filePath, FileMode.Open, FileAccess.Read));
- using var soundPlayer = new SoundPlayer(Engine, Format, dataProvider);
-
- playbackDevice.MasterMixer.AddComponent(soundPlayer);
- soundPlayer.Play();
-
- PlaybackControls(soundPlayer);
-
- playbackDevice.MasterMixer.RemoveComponent(soundPlayer);
- playbackDevice.Stop();
- playbackDevice.Dispose();
- }
-
- private static void LiveMicrophonePassthrough()
- {
- var captureDeviceInfo = SelectDevice(DeviceType.Capture);
- if (!captureDeviceInfo.HasValue) return;
-
- var playbackDeviceInfo = SelectDevice(DeviceType.Playback);
- if (!playbackDeviceInfo.HasValue) return;
-
- using var duplexDevice = Engine.InitializeFullDuplexDevice(playbackDeviceInfo.Value, captureDeviceInfo.Value, Format, DeviceConfig);
-
- duplexDevice.Start();
-
- using var microphoneProvider = new MicrophoneDataProvider(duplexDevice);
- using var soundPlayer = new SoundPlayer(Engine, Format, microphoneProvider);
-
- duplexDevice.MasterMixer.AddComponent(soundPlayer);
-
- microphoneProvider.StartCapture();
- soundPlayer.Play();
-
- Console.WriteLine("\nLive microphone passthrough is active. Press any key to stop.");
- Console.ReadKey();
-
- microphoneProvider.StopCapture();
- soundPlayer.Stop();
-
- duplexDevice.MasterMixer.RemoveComponent(soundPlayer);
-
- duplexDevice.Stop();
- }
-
- private static void RecordAndPlaybackAudio()
- {
- var captureDeviceInfo = SelectDevice(DeviceType.Capture);
- if (!captureDeviceInfo.HasValue) return;
-
- using var captureDevice = Engine.InitializeCaptureDevice(captureDeviceInfo.Value, Format, DeviceConfig);
- captureDevice.Start();
-
- var stream = new FileStream(RecordedFilePath, FileMode.Create, FileAccess.Write, FileShare.Read);
- using (var recorder = new Recorder(captureDevice, stream))
- {
- Console.WriteLine("Recording started. Press 's' to stop, 'p' to pause/resume.");
- recorder.StartRecording();
-
- while (recorder.State != PlaybackState.Stopped)
- {
- var key = Console.ReadKey(true).Key;
- switch (key)
- {
- case ConsoleKey.S:
- recorder.StopRecording();
- break;
- case ConsoleKey.P:
- if (recorder.State == PlaybackState.Paused)
- {
- recorder.ResumeRecording();
- Console.WriteLine("Recording resumed.");
- }
- else
- {
- recorder.PauseRecording();
- Console.WriteLine("Recording paused.");
- }
- break;
- }
- }
- }
-
- stream.Dispose();
- captureDevice.Stop();
-
- Console.WriteLine($"\nRecording finished. File saved to: {RecordedFilePath}");
- Console.WriteLine("Press 'p' to play back or any other key to skip.");
- if (Console.ReadKey(true).Key != ConsoleKey.P) return;
-
- // Playback
- var playbackDeviceInfo = SelectDevice(DeviceType.Playback);
- if (!playbackDeviceInfo.HasValue) return;
-
- using var playbackDevice = Engine.InitializePlaybackDevice(playbackDeviceInfo.Value, Format, DeviceConfig);
- playbackDevice.Start();
-
- using var dataProvider = new StreamDataProvider(Engine, Format, new FileStream(RecordedFilePath, FileMode.Open, FileAccess.Read));
- using var soundPlayer = new SoundPlayer(Engine, Format, dataProvider);
-
- playbackDevice.MasterMixer.AddComponent(soundPlayer);
- soundPlayer.Play();
-
- PlaybackControls(soundPlayer);
-
- playbackDevice.MasterMixer.RemoveComponent(soundPlayer);
- playbackDevice.Stop();
- }
-
- #endregion
-
- #region Playback Controls UI
-
- private static void PlaybackControls(ISoundPlayer player)
- {
- Console.WriteLine("\n--- Playback Controls ---");
- Console.WriteLine("'P': Play/Pause | 'S': Seek | 'V': Volume | '+/-': Speed | 'R': Reset Speed | Any other: Stop");
-
- using var timer = new System.Timers.Timer(500);
- timer.AutoReset = true;
- timer.Elapsed += (_, _) =>
- {
- if (player.State != PlaybackState.Stopped)
- {
- Console.Write($"\rTime: {TimeSpan.FromSeconds(player.Time):mm\\:ss\\.ff} / {TimeSpan.FromSeconds(player.Duration):mm\\:ss\\.ff} | Speed: {player.PlaybackSpeed:F1}x | Vol: {player.Volume:F1} ");
- }
- };
- timer.Start();
-
- while (player.State is PlaybackState.Playing or PlaybackState.Paused)
- {
- var keyInfo = Console.ReadKey(true);
- switch (keyInfo.Key)
- {
- case ConsoleKey.P:
- if (player.State == PlaybackState.Playing) player.Pause();
- else player.Play();
- break;
- case ConsoleKey.S:
- Console.Write("\nEnter seek time in seconds (e.g., 5.0): ");
- if (float.TryParse(Console.ReadLine(), out var seekTime)) player.Seek(TimeSpan.FromSeconds(seekTime));
- else Console.WriteLine("Invalid seek time.");
- break;
- case ConsoleKey.OemPlus or ConsoleKey.Add:
- player.PlaybackSpeed = Math.Min(player.PlaybackSpeed + 0.1f, 4.0f);
- break;
- case ConsoleKey.OemMinus or ConsoleKey.Subtract:
- player.PlaybackSpeed = Math.Max(0.1f, player.PlaybackSpeed - 0.1f);
- break;
- case ConsoleKey.R:
- player.PlaybackSpeed = 1.0f;
- break;
- case ConsoleKey.V:
- Console.Write("\nEnter volume (0.0 to 2.0): ");
- if (float.TryParse(Console.ReadLine(), out var volume))
- player.Volume = Math.Clamp(volume, 0.0f, 2.0f);
- else
- Console.WriteLine("Invalid volume.");
- break;
- default:
- player.Stop();
- break;
- }
- }
-
- timer.Stop();
- Console.WriteLine("\nPlayback stopped. ");
- }
-
- #endregion
}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.SimplePlayer/RecordingService.cs b/Samples/SoundFlow.Samples.SimplePlayer/RecordingService.cs
new file mode 100644
index 0000000..7efd3c2
--- /dev/null
+++ b/Samples/SoundFlow.Samples.SimplePlayer/RecordingService.cs
@@ -0,0 +1,69 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Abstracts.Devices;
+using SoundFlow.Components;
+using SoundFlow.Enums;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Samples.SimplePlayer;
+
+///
+/// Encapsulates all logic related to audio recording.
+///
+public static class RecordingService
+{
+ ///
+ /// Manages a complete recording session, including device selection, recording controls,
+ /// and an option to play back the recorded file.
+ ///
+ public static void RecordAndPlayback(AudioEngine engine, AudioFormat format, DeviceConfig deviceConfig, string outputFilePath)
+ {
+ var captureDeviceInfo = DeviceService.SelectDevice(engine, DeviceType.Capture);
+ if (!captureDeviceInfo.HasValue) return;
+
+ using var captureDevice = engine.InitializeCaptureDevice(captureDeviceInfo.Value, format, deviceConfig);
+ captureDevice.Start();
+
+ // The stream must be disposed manually after the recorder is done with it.
+ var stream = new FileStream(outputFilePath, FileMode.Create, FileAccess.Write, FileShare.Read);
+ try
+ {
+ using var recorder = new Recorder(captureDevice, stream);
+ Console.WriteLine("Recording started. Press 's' to stop, 'p' to pause/resume.");
+ recorder.StartRecording();
+
+ while (recorder.State != PlaybackState.Stopped)
+ {
+ var key = Console.ReadKey(true).Key;
+ switch (key)
+ {
+ case ConsoleKey.S:
+ recorder.StopRecording();
+ break;
+ case ConsoleKey.P:
+ if (recorder.State == PlaybackState.Paused)
+ {
+ recorder.ResumeRecording();
+ Console.WriteLine("Recording resumed.");
+ }
+ else
+ {
+ recorder.PauseRecording();
+ Console.WriteLine("Recording paused.");
+ }
+ break;
+ }
+ }
+ }
+ finally
+ {
+ stream.Dispose();
+ captureDevice.Stop();
+ }
+
+ Console.WriteLine($"\nRecording finished. File saved to: {outputFilePath}");
+ Console.WriteLine("Press 'p' to play back or any other key to skip.");
+ if (Console.ReadKey(true).Key != ConsoleKey.P) return;
+
+ PlaybackService.PlayFile(engine, format, deviceConfig, outputFilePath);
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.SimplePlayer/UserInterfaceService.cs b/Samples/SoundFlow.Samples.SimplePlayer/UserInterfaceService.cs
new file mode 100644
index 0000000..95045ce
--- /dev/null
+++ b/Samples/SoundFlow.Samples.SimplePlayer/UserInterfaceService.cs
@@ -0,0 +1,90 @@
+using SoundFlow.Components;
+using SoundFlow.Enums;
+
+namespace SoundFlow.Samples.SimplePlayer;
+
+///
+/// Provides reusable UI components for the console application.
+///
+public static class UserInterfaceService
+{
+ ///
+ /// Displays an interactive set of playback controls for an ISoundPlayer instance.
+ ///
+ /// The player to control.
+ public static void DisplayPlaybackControls(SoundPlayer player)
+ {
+ Console.WriteLine("\n--- Playback Controls ---");
+ Console.WriteLine("'P': Play/Pause | 'S': Seek | 'V': Volume | '+/-': Speed | 'T': Switch Time Stretch Quality | 'R': Reset Speed | Any other: Stop");
+
+ using var timer = new System.Timers.Timer(500);
+ timer.AutoReset = true;
+ timer.Elapsed += (_, _) =>
+ {
+ if (player.State != PlaybackState.Stopped)
+ {
+ // Use Console.SetCursorPosition to prevent flickering/scrolling on some terminals
+ var originalLeft = Console.CursorLeft;
+ var originalTop = Console.CursorTop;
+ Console.SetCursorPosition(0, Console.CursorTop);
+ Console.Write(new string(' ', Console.WindowWidth - 1)); // Clear the line
+ Console.SetCursorPosition(0, Console.CursorTop);
+ Console.Write($"\rTime: {TimeSpan.FromSeconds(player.Time):mm\\:ss\\.ff} / {TimeSpan.FromSeconds(player.Duration):mm\\:ss\\.ff} | Speed: {player.PlaybackSpeed:F1}x | Vol: {player.Volume:F1} ");
+ if (originalLeft < Console.WindowWidth)
+ {
+ Console.SetCursorPosition(originalLeft, originalTop);
+ }
+ }
+ };
+ timer.Start();
+
+ var defaultQuality = WsolaPerformancePreset.Balanced;
+ var currentQuality = defaultQuality;
+
+ while (player.State is PlaybackState.Playing or PlaybackState.Paused)
+ {
+ var keyInfo = Console.ReadKey(true);
+ switch (keyInfo.Key)
+ {
+ case ConsoleKey.P:
+ if (player.State == PlaybackState.Playing) player.Pause();
+ else player.Play();
+ break;
+ case ConsoleKey.S:
+ Console.Write("\nEnter seek time in seconds (e.g., 5.0): ");
+ if (double.TryParse(Console.ReadLine(), out var seekTime)) player.Seek(TimeSpan.FromSeconds(seekTime));
+ else Console.WriteLine("Invalid seek time.");
+ break;
+ case ConsoleKey.OemPlus or ConsoleKey.Add:
+ player.PlaybackSpeed = Math.Min(player.PlaybackSpeed + 0.1f, 4.0f);
+ break;
+ case ConsoleKey.OemMinus or ConsoleKey.Subtract:
+ player.PlaybackSpeed = Math.Max(0.1f, player.PlaybackSpeed - 0.1f);
+ break;
+ case ConsoleKey.T:
+ currentQuality = (WsolaPerformancePreset)(((int)currentQuality + 1) % 4);
+ player.SetTimeStretchQuality(currentQuality);
+ Console.WriteLine($"\nTime Stretch Quality set to: {currentQuality}");
+ break;
+ case ConsoleKey.R:
+ player.PlaybackSpeed = 1.0f;
+ break;
+ case ConsoleKey.V:
+ Console.Write("\nEnter volume (0.0 to 2.0): ");
+ if (float.TryParse(Console.ReadLine(), out var volume))
+ player.Volume = Math.Clamp(volume, 0.0f, 2.0f);
+ else
+ Console.WriteLine("Invalid volume.");
+ break;
+ default:
+ player.Stop();
+ break;
+ }
+ }
+
+ timer.Stop();
+ // Clear the status line after stopping
+ Console.Write(new string(' ', Console.WindowWidth - 1) + "\r");
+ Console.WriteLine("\nPlayback stopped.");
+ }
+}
\ No newline at end of file
diff --git a/Samples/SoundFlow.Samples.SwitchDevices/Program.cs b/Samples/SoundFlow.Samples.SwitchDevices/Program.cs
index e643ab3..547d343 100644
--- a/Samples/SoundFlow.Samples.SwitchDevices/Program.cs
+++ b/Samples/SoundFlow.Samples.SwitchDevices/Program.cs
@@ -260,7 +260,7 @@ private static void LoopbackSwitchExample()
if (key == ConsoleKey.S)
{
- // NOTE: To switch a loopback source, we present the user with a list of *playback* devices.
+ // NOTE: To switch a loopback source, we present the user with a list of playback devices.
// The selected playback device will become the new source for loopback capture.
var newDeviceInfo = SelectDevice(DeviceType.Playback);
if (newDeviceInfo.HasValue)
diff --git a/SoundFlow.sln b/SoundFlow.sln
index 34e91b9..823c251 100644
--- a/SoundFlow.sln
+++ b/SoundFlow.sln
@@ -34,6 +34,18 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SoundFlow.Samples.Midi.Basi
EndProject
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SoundFlow.Samples.Midi.SynthesisModifiers", "Samples\SoundFlow.Samples.Midi.SynthesisModifiers\SoundFlow.Samples.Midi.SynthesisModifiers.csproj", "{5BE23721-8208-4323-8C7C-7CA9F7EF54D1}"
EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SoundFlow.Samples.Security.Fingerprinting", "Samples\SoundFlow.Samples.Security.Fingerprinting\SoundFlow.Samples.Security.Fingerprinting.csproj", "{E2199245-1DDE-444C-9A32-299A6DF91ABB}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SoundFlow.Samples.Security.OwnershipWatermarking", "Samples\SoundFlow.Samples.Security.OwnershipWatermarking\SoundFlow.Samples.Security.OwnershipWatermarking.csproj", "{BF025223-ABE1-4BFC-BCEF-0C5F873FAC79}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SoundFlow.Samples.Security.IntegrityWatermarking", "Samples\SoundFlow.Samples.Security.IntegrityWatermarking\SoundFlow.Samples.Security.IntegrityWatermarking.csproj", "{40824ED4-AD3F-4292-9671-38B3CFDEA17C}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SoundFlow.Samples.Security.Encryption", "Samples\SoundFlow.Samples.Security.Encryption\SoundFlow.Samples.Security.Encryption.csproj", "{76CA50F8-24C9-4D0C-B42B-EE839994FEAE}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SoundFlow.Samples.Security.Authentication", "Samples\SoundFlow.Samples.Security.Authentication\SoundFlow.Samples.Security.Authentication.csproj", "{DD934AB4-CD4F-427C-9367-3A48C1156783}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SoundFlow.Samples.Recording", "Samples\SoundFlow.Samples.Recording\SoundFlow.Samples.Recording.csproj", "{C51F35EF-3C8F-4865-A51A-99AF616191C3}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
@@ -88,6 +100,30 @@ Global
{5BE23721-8208-4323-8C7C-7CA9F7EF54D1}.Debug|Any CPU.Build.0 = Debug|Any CPU
{5BE23721-8208-4323-8C7C-7CA9F7EF54D1}.Release|Any CPU.ActiveCfg = Release|Any CPU
{5BE23721-8208-4323-8C7C-7CA9F7EF54D1}.Release|Any CPU.Build.0 = Release|Any CPU
+ {E2199245-1DDE-444C-9A32-299A6DF91ABB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {E2199245-1DDE-444C-9A32-299A6DF91ABB}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {E2199245-1DDE-444C-9A32-299A6DF91ABB}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {E2199245-1DDE-444C-9A32-299A6DF91ABB}.Release|Any CPU.Build.0 = Release|Any CPU
+ {BF025223-ABE1-4BFC-BCEF-0C5F873FAC79}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {BF025223-ABE1-4BFC-BCEF-0C5F873FAC79}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {BF025223-ABE1-4BFC-BCEF-0C5F873FAC79}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {BF025223-ABE1-4BFC-BCEF-0C5F873FAC79}.Release|Any CPU.Build.0 = Release|Any CPU
+ {40824ED4-AD3F-4292-9671-38B3CFDEA17C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {40824ED4-AD3F-4292-9671-38B3CFDEA17C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {40824ED4-AD3F-4292-9671-38B3CFDEA17C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {40824ED4-AD3F-4292-9671-38B3CFDEA17C}.Release|Any CPU.Build.0 = Release|Any CPU
+ {76CA50F8-24C9-4D0C-B42B-EE839994FEAE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {76CA50F8-24C9-4D0C-B42B-EE839994FEAE}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {76CA50F8-24C9-4D0C-B42B-EE839994FEAE}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {76CA50F8-24C9-4D0C-B42B-EE839994FEAE}.Release|Any CPU.Build.0 = Release|Any CPU
+ {DD934AB4-CD4F-427C-9367-3A48C1156783}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {DD934AB4-CD4F-427C-9367-3A48C1156783}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {DD934AB4-CD4F-427C-9367-3A48C1156783}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {DD934AB4-CD4F-427C-9367-3A48C1156783}.Release|Any CPU.Build.0 = Release|Any CPU
+ {C51F35EF-3C8F-4865-A51A-99AF616191C3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {C51F35EF-3C8F-4865-A51A-99AF616191C3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {C51F35EF-3C8F-4865-A51A-99AF616191C3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {C51F35EF-3C8F-4865-A51A-99AF616191C3}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(NestedProjects) = preSolution
{D4326C2E-CDB3-47D4-8116-BB3872E81323} = {7841F9AA-861F-4684-8DEC-2828FB3AE048}
@@ -102,5 +138,11 @@ Global
{24565722-D172-4D70-AE63-D3E6EA0F033E} = {BE06375D-0A93-4916-A551-9875089314CC}
{5B70C53E-EB4A-4328-BC7C-43FCFEF3FCF5} = {BE06375D-0A93-4916-A551-9875089314CC}
{5BE23721-8208-4323-8C7C-7CA9F7EF54D1} = {BE06375D-0A93-4916-A551-9875089314CC}
+ {E2199245-1DDE-444C-9A32-299A6DF91ABB} = {BE06375D-0A93-4916-A551-9875089314CC}
+ {BF025223-ABE1-4BFC-BCEF-0C5F873FAC79} = {BE06375D-0A93-4916-A551-9875089314CC}
+ {40824ED4-AD3F-4292-9671-38B3CFDEA17C} = {BE06375D-0A93-4916-A551-9875089314CC}
+ {76CA50F8-24C9-4D0C-B42B-EE839994FEAE} = {BE06375D-0A93-4916-A551-9875089314CC}
+ {DD934AB4-CD4F-427C-9367-3A48C1156783} = {BE06375D-0A93-4916-A551-9875089314CC}
+ {C51F35EF-3C8F-4865-A51A-99AF616191C3} = {BE06375D-0A93-4916-A551-9875089314CC}
EndGlobalSection
EndGlobal
diff --git a/Src/Abstracts/AudioAnalyzer.cs b/Src/Abstracts/AudioAnalyzer.cs
index 6807656..81e626b 100644
--- a/Src/Abstracts/AudioAnalyzer.cs
+++ b/Src/Abstracts/AudioAnalyzer.cs
@@ -1,3 +1,4 @@
+using System.Diagnostics.CodeAnalysis;
using SoundFlow.Interfaces;
using SoundFlow.Structs;
@@ -6,6 +7,7 @@ namespace SoundFlow.Abstracts;
///
/// Base class for audio analyzer components that extract data for visualizers.
///
+[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicMethods)]
public abstract class AudioAnalyzer : IMidiMappable
{
///
@@ -44,7 +46,7 @@ protected AudioAnalyzer(AudioFormat format, IVisualizer? visualizer = null)
///
/// Processes the audio data and sends it to the visualizer.
///
- public void Process(Span buffer, int channels)
+ public void Process(ReadOnlySpan buffer, int channels)
{
if (!Enabled) return;
@@ -60,5 +62,5 @@ public void Process(Span buffer, int channels)
///
/// The audio buffer.
/// The number of channels in the buffer.
- protected abstract void Analyze(Span buffer, int channels);
+ protected abstract void Analyze(ReadOnlySpan buffer, int channels);
}
diff --git a/Src/Abstracts/SoundComponent.cs b/Src/Abstracts/SoundComponent.cs
index e92f764..b3cb18e 100644
--- a/Src/Abstracts/SoundComponent.cs
+++ b/Src/Abstracts/SoundComponent.cs
@@ -1,4 +1,5 @@
using System.Buffers;
+using System.Diagnostics.CodeAnalysis;
using System.Numerics;
using System.Runtime.CompilerServices;
using SoundFlow.Components;
@@ -10,6 +11,7 @@ namespace SoundFlow.Abstracts;
///
/// Base class for audio processing components.
///
+[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicMethods)]
public abstract class SoundComponent : IDisposable, IMidiMappable
{
private static readonly ArrayPool BufferPool = ArrayPool.Shared;
diff --git a/Src/Abstracts/SoundModifier.cs b/Src/Abstracts/SoundModifier.cs
index 3239522..cfe4001 100644
--- a/Src/Abstracts/SoundModifier.cs
+++ b/Src/Abstracts/SoundModifier.cs
@@ -1,7 +1,7 @@
+using System.Diagnostics.CodeAnalysis;
using SoundFlow.Interfaces;
using SoundFlow.Midi.Interfaces;
using SoundFlow.Midi.Structs;
-using SoundFlow.Structs;
namespace SoundFlow.Abstracts;
@@ -9,6 +9,7 @@ namespace SoundFlow.Abstracts;
/// An abstract representation of a sound modifier.
/// Implementations of this class alter audio data to apply various effects.
///
+[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicMethods)]
public abstract class SoundModifier : IMidiMappable, IMidiControllable
{
///
diff --git a/Src/Abstracts/SoundPlayerBase.cs b/Src/Abstracts/SoundPlayerBase.cs
index 0df768f..0d26d3c 100644
--- a/Src/Abstracts/SoundPlayerBase.cs
+++ b/Src/Abstracts/SoundPlayerBase.cs
@@ -10,7 +10,7 @@ namespace SoundFlow.Abstracts;
///
public abstract class SoundPlayerBase : SoundComponent, ISoundPlayer
{
- private protected int RawSamplePosition;
+ private int _rawSamplePosition;
private readonly ISoundDataProvider _dataProvider;
private float _currentFractionalFrame;
@@ -20,9 +20,10 @@ public abstract class SoundPlayerBase : SoundComponent, ISoundPlayer
private int _loopStartSamples;
private int _loopEndSamples = -1;
private readonly WsolaTimeStretcher _timeStretcher;
- private readonly float[] _timeStretcherInputBuffer;
+ private float[] _timeStretcherInputBuffer;
private int _timeStretcherInputBufferValidSamples;
private int _timeStretcherInputBufferReadOffset;
+ private WsolaConfig _timeStretchConfig;
///
public float PlaybackSpeed
@@ -40,6 +41,34 @@ public float PlaybackSpeed
}
}
+ ///
+ /// Gets or sets the detailed configuration for the WSOLA time stretcher.
+ /// Allows fine-tuning of window size, hop size, and search radius.
+ ///
+ public WsolaConfig TimeStretchConfig
+ {
+ get => _timeStretchConfig;
+ set
+ {
+ ArgumentNullException.ThrowIfNull(value);
+ if (_timeStretchConfig != value)
+ {
+ _timeStretchConfig = value;
+ _timeStretcher.Configure(value);
+ EnsureTimeStretcherBufferSize();
+ }
+ }
+ }
+
+ ///
+ /// Sets the time stretcher configuration based on a performance preset.
+ ///
+ /// The desired balance between quality and performance.
+ public void SetTimeStretchQuality(WsolaPerformancePreset preset)
+ {
+ TimeStretchConfig = WsolaConfig.FromPreset(preset);
+ }
+
///
public PlaybackState State { get; internal set; }
@@ -53,7 +82,7 @@ public float PlaybackSpeed
public float Time =>
_dataProvider.Length == 0 || Format.Channels == 0 || Format.SampleRate == 0
? 0
- : (float)RawSamplePosition / Format.Channels / Format.SampleRate;
+ : (float)_rawSamplePosition / Format.Channels / Format.SampleRate;
///
public float Duration =>
@@ -80,7 +109,7 @@ public float PlaybackSpeed
///
- /// Constructor for BaseSoundPlayer.
+ /// Constructor for SoundPlayerBase.
///
/// The audio engine instance.
/// The audio device instance.
@@ -93,7 +122,10 @@ protected SoundPlayerBase(AudioEngine engine, AudioFormat format, ISoundDataProv
var initialSampleRate = format.SampleRate > 0 ? format.SampleRate : 44100;
var resampleBufferFrames = Math.Max(256, initialSampleRate / 10);
_resampleBuffer = new float[resampleBufferFrames * initialChannels];
- _timeStretcher = new WsolaTimeStretcher(initialChannels, _playbackSpeed);
+
+ // Initialize with default fast config
+ _timeStretchConfig = WsolaConfig.FromPreset(WsolaPerformancePreset.Fast);
+ _timeStretcher = new WsolaTimeStretcher(initialChannels, _playbackSpeed, _timeStretchConfig);
_timeStretcherInputBuffer = new float[Math.Max(_timeStretcher.MinInputSamplesToProcess * 2, 8192 * initialChannels)];
}
@@ -110,8 +142,8 @@ protected override void GenerateAudio(Span output, int channels)
// Proactively check for looping before generating audio. This handles loops where a specific end point is set.
if (IsLooping && _loopEndSamples != -1)
{
- // Ensure loop is valid and we've reached or passed the end point.
- if (_loopStartSamples < _loopEndSamples && RawSamplePosition >= _loopEndSamples)
+ // Ensure loop is valid, and we've reached or passed the end point.
+ if (_loopStartSamples < _loopEndSamples && _rawSamplePosition >= _loopEndSamples)
Seek(_loopStartSamples, channels);
}
@@ -133,7 +165,7 @@ protected override void GenerateAudio(Span output, int channels)
break; // Exit the read loop
}
- RawSamplePosition += samplesReadThisCall;
+ _rawSamplePosition += samplesReadThisCall;
outputSlice = outputSlice.Slice(samplesReadThisCall);
}
return;
@@ -162,8 +194,8 @@ protected override void GenerateAudio(Span output, int channels)
// If still not enough data after filling and the provider is truly exhausted and can't provide more data, end of stream.
if (_resampleBufferValidSamples < samplesRequiredInBufferForInterpolation)
{
- RawSamplePosition += totalSourceSamplesAdvancedThisCall;
- RawSamplePosition = Math.Min(RawSamplePosition, _dataProvider.Length);
+ _rawSamplePosition += totalSourceSamplesAdvancedThisCall;
+ _rawSamplePosition = Math.Min(_rawSamplePosition, _dataProvider.Length);
HandleEndOfStream(output[outputBufferOffset..], channels);
return;
}
@@ -212,8 +244,8 @@ protected override void GenerateAudio(Span output, int channels)
}
// Update raw sample position based on actual source samples advanced.
- RawSamplePosition += totalSourceSamplesAdvancedThisCall;
- RawSamplePosition = Math.Min(RawSamplePosition, _dataProvider.Length);
+ _rawSamplePosition += totalSourceSamplesAdvancedThisCall;
+ _rawSamplePosition = Math.Min(_rawSamplePosition, _dataProvider.Length);
}
///
@@ -234,7 +266,7 @@ private int FillResampleBuffer(int minSamplesRequiredInOutputBuffer, int channel
}
// When playback speed is close to 1.0, use simpler interpolation.
- if (Math.Abs(_playbackSpeed - 1.0f) < 0.1f)
+ if (Math.Abs(_playbackSpeed - 1.0f) < 0.001f)
{
// Implement a persistent read loop instead of a single read call.
var totalDirectRead = 0;
@@ -363,6 +395,39 @@ private int FillResampleBuffer(int minSamplesRequiredInOutputBuffer, int channel
return totalSourceSamplesRepresented;
}
+ private void EnsureTimeStretcherBufferSize()
+ {
+ var minSize = _timeStretcher.MinInputSamplesToProcess;
+ // Keep the logic similar to constructor: * 2 or min 8192*channels
+ var targetSize = Math.Max(minSize * 2, 8192 * Format.Channels);
+
+ if (_timeStretcherInputBuffer.Length < targetSize)
+ {
+ // Compact existing data to start before resizing
+ if (_timeStretcherInputBufferValidSamples > 0)
+ {
+ if (_timeStretcherInputBufferReadOffset > 0)
+ {
+ var remaining = _timeStretcherInputBufferValidSamples - _timeStretcherInputBufferReadOffset;
+ if (remaining > 0)
+ {
+ Buffer.BlockCopy(_timeStretcherInputBuffer, _timeStretcherInputBufferReadOffset * sizeof(float),
+ _timeStretcherInputBuffer, 0, remaining * sizeof(float));
+ }
+ _timeStretcherInputBufferValidSamples = remaining;
+ _timeStretcherInputBufferReadOffset = 0;
+ }
+ }
+ else
+ {
+ _timeStretcherInputBufferValidSamples = 0;
+ _timeStretcherInputBufferReadOffset = 0;
+ }
+
+ Array.Resize(ref _timeStretcherInputBuffer, targetSize);
+ }
+ }
+
///
/// Handles the end-of-stream condition, including looping and stopping.
/// This is called when the data provider is fully exhausted (ReadBytes returns 0).
@@ -383,8 +448,8 @@ protected virtual void HandleEndOfStream(Span remainingOutputBuffer, int
if (currentlyValidInResample < spaceToFill)
{
var sourceSamplesFromFinalFill = FillResampleBuffer(Math.Max(currentlyValidInResample, spaceToFill), channels);
- RawSamplePosition += sourceSamplesFromFinalFill;
- RawSamplePosition = Math.Min(RawSamplePosition, _dataProvider.Length);
+ _rawSamplePosition += sourceSamplesFromFinalFill;
+ _rawSamplePosition = Math.Min(_rawSamplePosition, _dataProvider.Length);
}
var toCopy = Math.Min(spaceToFill, _resampleBufferValidSamples);
@@ -548,7 +613,7 @@ private bool Seek(int sampleOffset, int channels)
sampleOffset = (sampleOffset / channels) * channels;
sampleOffset = Math.Clamp(sampleOffset, 0, maxSeekableSample);
_dataProvider.Seek(sampleOffset);
- RawSamplePosition = sampleOffset;
+ _rawSamplePosition = sampleOffset;
_currentFractionalFrame = 0f;
_resampleBufferValidSamples = 0;
_timeStretcher.Reset();
diff --git a/Src/Backends/MiniAudio/MiniAudioDecoder.cs b/Src/Backends/MiniAudio/MiniAudioDecoder.cs
index 318168d..92177a9 100644
--- a/Src/Backends/MiniAudio/MiniAudioDecoder.cs
+++ b/Src/Backends/MiniAudio/MiniAudioDecoder.cs
@@ -159,6 +159,7 @@ private void ConvertToFloat(Span samples, ulong framesRead, Span na
case SampleFormat.S24:
for (var i = 0; i < sampleCount; i++)
{
+ // Read 3 bytes as a 24-bit integer
var sample24 = (nativeBuffer[i * 3] << 0) | (nativeBuffer[i * 3 + 1] << 8) | (nativeBuffer[i * 3 + 2] << 16);
if ((sample24 & 0x800000) != 0) // Sign extension for negative values
sample24 |= unchecked((int)0xFF000000);
@@ -225,7 +226,12 @@ private MiniAudioResult ReadCallback(nint pDecoder, nint pBufferOut, ulong bytes
}
var size = (int)bytesToRead;
-
+ if (size <= 0)
+ {
+ pBytesRead = 0;
+ return MiniAudioResult.Success;
+ }
+
// Use ArrayPool to avoid allocating a new buffer on every read
if (_rentedReadBuffer == null || _rentedReadBuffer.Length < size)
{
@@ -253,7 +259,7 @@ private MiniAudioResult ReadCallback(nint pDecoder, nint pBufferOut, ulong bytes
{
// Swallow exception to prevent runtime crash, signal I/O error to miniaudio
pBytesRead = 0;
- Log.Critical("[MiniAudioDecoder] Failed to read PCM frames from decoder.");
+ Log.Critical("Failed to read PCM frames from decoder.");
return MiniAudioResult.IoError;
}
}
@@ -276,7 +282,7 @@ private MiniAudioResult SeekCallback(nint _, long byteOffset, SeekPoint point)
catch (NotSupportedException)
{
// Some streams claim CanSeek but throw on Length or Position
- Log.Critical("[MiniAudioDecoder] Stream does not support seeking.");
+ Log.Critical("Stream does not support seeking.");
return MiniAudioResult.InvalidOperation;
}
@@ -285,7 +291,7 @@ private MiniAudioResult SeekCallback(nint _, long byteOffset, SeekPoint point)
}
catch (Exception)
{
- Log.Critical("[MiniAudioDecoder] Failed to seek stream.");
+ Log.Critical("Failed to seek stream.");
return MiniAudioResult.IoError;
}
}
diff --git a/Src/Backends/MiniAudio/MiniAudioEncoder.cs b/Src/Backends/MiniAudio/MiniAudioEncoder.cs
index 61f2a16..75508de 100644
--- a/Src/Backends/MiniAudio/MiniAudioEncoder.cs
+++ b/Src/Backends/MiniAudio/MiniAudioEncoder.cs
@@ -120,7 +120,7 @@ private MiniAudioResult WriteCallback(nint pEncoder, nint pBufferIn, ulong bytes
catch (Exception)
{
pBytesWritten = 0;
- Log.Critical("[MiniAudioEncoder] Failed to write PCM frames to encoder.");
+ Log.Critical("Failed to write PCM frames to encoder.");
return MiniAudioResult.IoError;
}
}
@@ -145,7 +145,7 @@ private MiniAudioResult SeekCallback(nint pEncoder, long byteOffset, SeekPoint p
}
catch (Exception)
{
- Log.Critical("[MiniAudioEncoder] Failed to seek stream.");
+ Log.Critical("Failed to seek stream.");
return MiniAudioResult.IoError;
}
}
diff --git a/Src/Backends/MiniAudio/runtimes/android-arm/native/libminiaudio.so b/Src/Backends/MiniAudio/runtimes/android-arm/native/libminiaudio.so
index 168ea2d..7a68a97 100644
Binary files a/Src/Backends/MiniAudio/runtimes/android-arm/native/libminiaudio.so and b/Src/Backends/MiniAudio/runtimes/android-arm/native/libminiaudio.so differ
diff --git a/Src/Backends/MiniAudio/runtimes/android-arm64/native/libminiaudio.so b/Src/Backends/MiniAudio/runtimes/android-arm64/native/libminiaudio.so
index 17dfefc..3356e30 100644
Binary files a/Src/Backends/MiniAudio/runtimes/android-arm64/native/libminiaudio.so and b/Src/Backends/MiniAudio/runtimes/android-arm64/native/libminiaudio.so differ
diff --git a/Src/Backends/MiniAudio/runtimes/android-x64/native/libminiaudio.so b/Src/Backends/MiniAudio/runtimes/android-x64/native/libminiaudio.so
index 3e98cbb..74a1c61 100644
Binary files a/Src/Backends/MiniAudio/runtimes/android-x64/native/libminiaudio.so and b/Src/Backends/MiniAudio/runtimes/android-x64/native/libminiaudio.so differ
diff --git a/Src/Backends/MiniAudio/runtimes/iOS-arm64/native/miniaudio.framework/Info.plist b/Src/Backends/MiniAudio/runtimes/iOS-arm64/native/miniaudio.framework/Info.plist
index 4f32831..c7eeb59 100644
Binary files a/Src/Backends/MiniAudio/runtimes/iOS-arm64/native/miniaudio.framework/Info.plist and b/Src/Backends/MiniAudio/runtimes/iOS-arm64/native/miniaudio.framework/Info.plist differ
diff --git a/Src/Backends/MiniAudio/runtimes/win-arm64/native/miniaudio.dll b/Src/Backends/MiniAudio/runtimes/win-arm64/native/miniaudio.dll
new file mode 100644
index 0000000..b3744c8
Binary files /dev/null and b/Src/Backends/MiniAudio/runtimes/win-arm64/native/miniaudio.dll differ
diff --git a/Src/Backends/MiniAudio/runtimes/win-x64/native/miniaudio.dll b/Src/Backends/MiniAudio/runtimes/win-x64/native/miniaudio.dll
index 449134d..ba7911a 100644
Binary files a/Src/Backends/MiniAudio/runtimes/win-x64/native/miniaudio.dll and b/Src/Backends/MiniAudio/runtimes/win-x64/native/miniaudio.dll differ
diff --git a/Src/Backends/MiniAudio/runtimes/win-x86/native/miniaudio.dll b/Src/Backends/MiniAudio/runtimes/win-x86/native/miniaudio.dll
index 047d854..b14e18e 100644
Binary files a/Src/Backends/MiniAudio/runtimes/win-x86/native/miniaudio.dll and b/Src/Backends/MiniAudio/runtimes/win-x86/native/miniaudio.dll differ
diff --git a/Src/Components/Oscillator.cs b/Src/Components/Oscillator.cs
index a2240d3..61e9a0c 100644
--- a/Src/Components/Oscillator.cs
+++ b/Src/Components/Oscillator.cs
@@ -29,7 +29,8 @@ public enum WaveformType
Sawtooth,
///
- /// A triangle wave, containing only odd harmonics. Note: This implementation is not band-limited, but aliasing is less pronounced than with square or saw waves.
+ /// A triangle wave, containing only odd harmonics.
+ /// Note: This implementation is not band-limited, but aliasing is less pronounced than with square or saw waves.
///
Triangle,
diff --git a/Src/Components/Recorder.cs b/Src/Components/Recorder.cs
index 2ca096d..1e48e26 100644
--- a/Src/Components/Recorder.cs
+++ b/Src/Components/Recorder.cs
@@ -1,13 +1,13 @@
using SoundFlow.Abstracts;
+using SoundFlow.Abstracts.Devices;
using SoundFlow.Enums;
using SoundFlow.Interfaces;
-using SoundFlow.Exceptions;
-using System.Collections.ObjectModel;
-using SoundFlow.Abstracts.Devices;
-using SoundFlow.Backends.MiniAudio;
-using SoundFlow.Structs;
using SoundFlow.Metadata;
using SoundFlow.Metadata.Models;
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Structs;
+using System.Collections.ObjectModel;
namespace SoundFlow.Components;
@@ -61,6 +61,13 @@ public class Recorder : IDisposable
///
public AudioProcessCallback? ProcessCallback;
+ ///
+ /// Gets or sets the configuration for digitally signing the recorded file.
+ /// If set, a detached signature file (.sig) will be generated upon stopping the recording.
+ /// Only applies when recording to a file.
+ ///
+ public SignatureConfiguration? SigningConfiguration { get; set; }
+
private readonly AudioCaptureDevice _captureDevice;
private ISoundEncoder? _encoder;
private readonly List _modifiers = [];
@@ -69,7 +76,7 @@ public class Recorder : IDisposable
private readonly AudioFormat _format;
private SoundTags? _tagsToWrite;
-
+
///
/// Initializes a new instance of the class to record audio to a file.
///
@@ -81,15 +88,20 @@ public class Recorder : IDisposable
FilePath = filePath;
}
-
///
/// Initializes a new instance of the class to record audio to a stream.
///
/// The capture device to record from.
- /// The stream to write encoded recorded audio to.
+ /// The stream to write encoded recorded audio to, disposed when recording stops.
/// The string identifier for the desired encoding format (e.g., "wav", "flac"). Defaults to "wav".
+ /// Thrown if the provided stream is not writable.
public Recorder(AudioCaptureDevice captureDevice, Stream stream, string formatId = "wav")
{
+ if (!stream.CanWrite)
+ {
+ throw new ArgumentException("The provided stream is not writable.", nameof(stream));
+ }
+
_captureDevice = captureDevice;
_engine = captureDevice.Engine;
SampleFormat = captureDevice.Format.Format;
@@ -114,8 +126,9 @@ public Recorder(AudioCaptureDevice captureDevice, AudioProcessCallback callback)
SampleRate = captureDevice.Format.SampleRate;
Channels = captureDevice.Format.Channels;
FormatId = string.Empty; // No encoding format needed for callback mode.
+ _format = captureDevice.Format;
}
-
+
///
/// Gets a read-only list of components applied to the recorder.
///
@@ -131,34 +144,44 @@ public Recorder(AudioCaptureDevice captureDevice, AudioProcessCallback callback)
/// If recording to a file or stream, it initializes an audio encoder.
///
/// Optional metadata tags to write to the file upon completion of the recording.
- /// Thrown if an invalid stream or callback is provided.
- public void StartRecording(SoundTags? tags = null)
+ /// A indicating success or failure.
+ public Result StartRecording(SoundTags? tags = null)
{
if ((Stream == Stream.Null || !Stream.CanWrite) && ProcessCallback == null)
- throw new ArgumentException("A valid writable stream or callback must be provided.");
+ return new ValidationError("A valid writable stream or callback must be provided.");
- if (State == PlaybackState.Playing) return;
+ if (State == PlaybackState.Playing) return Result.Fail(new DuplicateRequestError("Starting recording"));
_tagsToWrite = tags;
if (!string.IsNullOrEmpty(FormatId))
{
- _encoder = _engine.CreateEncoder(Stream, FormatId, _format);
+ try
+ {
+ _encoder = _engine.CreateEncoder(Stream, FormatId, _format);
+ }
+ catch (Exception ex)
+ {
+ return new InvalidOperationError($"Failed to create audio encoder for format '{FormatId}'.", ex);
+ }
}
_captureDevice.OnAudioProcessed += OnAudioProcessed;
State = PlaybackState.Playing;
+ return Result.Ok();
}
///
/// Resumes recording from a paused state.
/// Has no effect if the recorder is not in the state.
///
- public void ResumeRecording()
+ /// A indicating success.
+ public Result ResumeRecording()
{
if (State != PlaybackState.Paused)
- return;
+ return Result.Fail(new DuplicateRequestError("Resuming recording"));
State = PlaybackState.Playing;
+ return Result.Ok();
}
///
@@ -166,44 +189,97 @@ public void ResumeRecording()
/// Audio data is no longer processed or encoded until recording is resumed.
/// Has no effect if the recorder is not in the state.
///
- public void PauseRecording()
+ /// A indicating success.
+ public Result PauseRecording()
{
if (State != PlaybackState.Playing)
- return;
+ return Result.Fail(new DuplicateRequestError("Pausing recording"));
State = PlaybackState.Paused;
+ return Result.Ok();
}
///
/// Stops the recording process and releases resources.
- /// If recording to a file, it finalizes the encoding process and writes metadata tags if provided.
+ /// If recording to a file, it finalizes the encoding process, writes metadata tags if provided,
+ /// and generates a digital signature if configured.
///
- public async Task StopRecordingAsync()
+ /// A indicating success or failure.
+ public async Task StopRecordingAsync()
{
if (State == PlaybackState.Stopped)
- return;
+ return Result.Fail(new DuplicateRequestError("Stopping recording"));
_captureDevice.OnAudioProcessed -= OnAudioProcessed;
_encoder?.Dispose();
_encoder = null;
State = PlaybackState.Stopped;
+
+ try
+ {
+ await Stream.DisposeAsync();
+ }
+ catch (Exception ex)
+ {
+ return new IoError("Disposing the underlying stream", ex);
+ }
try
{
- if (!string.IsNullOrEmpty(FilePath))
- if (_tagsToWrite != null) await SoundMetadataWriter.WriteTagsAsync(FilePath, _tagsToWrite);
+ if (!string.IsNullOrEmpty(FilePath) && File.Exists(FilePath))
+ {
+ // 1. Write Tags
+ if (_tagsToWrite != null)
+ {
+ try
+ {
+ await SoundMetadataWriter.WriteTagsAsync(FilePath, _tagsToWrite);
+ }
+ catch (Exception ex)
+ {
+ return new IoError($"writing metadata tags to '{FilePath}'", ex);
+ }
+ }
+
+ // 2. Sign File (Authentic Recording)
+ if (SigningConfiguration != null)
+ {
+ var signResult = await FileAuthenticator.SignFileAsync(FilePath, SigningConfiguration);
+ if (signResult is { IsFailure: true, Error: not null })
+ {
+ return Result.Fail(signResult.Error);
+ }
+
+ var sigPath = FilePath + ".sig";
+ try
+ {
+ await File.WriteAllTextAsync(sigPath, signResult.Value);
+ }
+ catch (UnauthorizedAccessException ex)
+ {
+ return new AccessDeniedError(sigPath, ex);
+ }
+ catch (IOException ex)
+ {
+ return new IoError($"writing signature file to '{sigPath}'", ex);
+ }
+ }
+ }
}
finally
{
_tagsToWrite = null;
}
+
+ return Result.Ok();
}
///
/// Synchronously stops the recording process.
///
- public void StopRecording() => StopRecordingAsync().GetAwaiter().GetResult();
+ /// A indicating success or failure.
+ public Result StopRecording() => StopRecordingAsync().GetAwaiter().GetResult();
///
/// Adds a to the recording pipeline.
diff --git a/Src/Components/VoiceActivityDetector.cs b/Src/Components/VoiceActivityDetector.cs
index 644948b..8148568 100644
--- a/Src/Components/VoiceActivityDetector.cs
+++ b/Src/Components/VoiceActivityDetector.cs
@@ -110,7 +110,7 @@ public VoiceActivityDetector(AudioFormat format, int fftSize = 1024, float energ
///
/// Analyzes audio buffer for voice activity.
///
- protected override void Analyze(Span buffer, int channels)
+ protected override void Analyze(ReadOnlySpan buffer, int channels)
{
AddSamplesToBuffer(buffer, channels);
@@ -160,7 +160,7 @@ protected override void Analyze(Span buffer, int channels)
}
}
- private void AddSamplesToBuffer(Span buffer, int channels)
+ private void AddSamplesToBuffer(ReadOnlySpan buffer, int channels)
{
if (channels == 1)
{
diff --git a/Src/Components/WsolaTimeStretcher.cs b/Src/Components/WsolaTimeStretcher.cs
index 808e46d..f3f80fc 100644
--- a/Src/Components/WsolaTimeStretcher.cs
+++ b/Src/Components/WsolaTimeStretcher.cs
@@ -1,28 +1,130 @@
namespace SoundFlow.Components;
+///
+/// Defines performance presets for the WSOLA time stretcher.
+/// These presets balance between CPU usage, latency, and audio quality.
+///
+public enum WsolaPerformancePreset
+{
+ ///
+ /// Optimized for low latency and low CPU usage.
+ /// Suitable for speech or when performance is critical.
+ /// Window: 1024, Hop: 512, Search: 128.
+ ///
+ Fast,
+
+ ///
+ /// The standard configuration offering a good trade-off between quality and performance.
+ /// Suitable for general music playback.
+ /// Window: 2048, Hop: 1024, Search: 256.
+ ///
+ Balanced,
+
+ ///
+ /// Optimized for smoother audio stretching with fewer artifacts, at the cost of higher latency and CPU usage.
+ /// Window: 4096, Hop: 2048, Search: 512.
+ ///
+ HighQuality,
+
+ ///
+ /// Maximum quality configuration for complex polyphonic material.
+ /// Window: 8192, Hop: 4096, Search: 1024.
+ ///
+ Audiophile
+}
+
+///
+/// Configuration container for WSOLA parameters.
+///
+public class WsolaConfig
+{
+ ///
+ /// The length of the analysis window in frames.
+ /// Should be even (typically power of 2).
+ ///
+ public int WindowSizeFrames { get; }
+
+ ///
+ /// The hop size in frames used by the synthesis stage (output hop).
+ /// This value remains fixed across speeds. The analysis hop is derived from this value and .
+ /// Typically, 1/2 of for stable overlap behavior.
+ ///
+ public int AnalysisHopFrames { get; }
+
+ ///
+ /// The range of frames to search for the best overlap match.
+ ///
+ public int SearchRadiusFrames { get; }
+
+ ///
+ /// Creates a custom WSOLA configuration.
+ ///
+ /// Length of the analysis window in frames. Should be even (typically power of 2).
+ /// Synthesis hop size in frames (output hop). Must be positive and less than window size.
+ /// The range of frames to search for the best overlap match.
+ /// Thrown if values are invalid.
+ public WsolaConfig(int windowSizeFrames, int analysisHopFrames, int searchRadiusFrames)
+ {
+ if (windowSizeFrames < 128)
+ throw new ArgumentOutOfRangeException(nameof(windowSizeFrames), "Window size must be at least 128 frames.");
+ if (windowSizeFrames % 2 != 0)
+ throw new ArgumentOutOfRangeException(nameof(windowSizeFrames), "Window size must be even.");
+ if (analysisHopFrames <= 0 || analysisHopFrames >= windowSizeFrames)
+ throw new ArgumentOutOfRangeException(nameof(analysisHopFrames), "Hop size must be positive and less than window size.");
+ if (searchRadiusFrames < 0)
+ throw new ArgumentOutOfRangeException(nameof(searchRadiusFrames), "Search radius cannot be negative.");
+
+ WindowSizeFrames = windowSizeFrames;
+ AnalysisHopFrames = analysisHopFrames;
+ SearchRadiusFrames = searchRadiusFrames;
+ }
+
+ ///
+ /// Creates a configuration based on a predefined preset.
+ ///
+ /// The performance preset.
+ /// A configured WsolaConfig instance.
+ public static WsolaConfig FromPreset(WsolaPerformancePreset preset)
+ {
+ return preset switch
+ {
+ WsolaPerformancePreset.Fast => new WsolaConfig(1024, 512, 128),
+ WsolaPerformancePreset.Balanced => new WsolaConfig(2048, 1024, 256),
+ WsolaPerformancePreset.HighQuality => new WsolaConfig(4096, 2048, 512),
+ WsolaPerformancePreset.Audiophile => new WsolaConfig(8192, 4096, 1024),
+ _ => new WsolaConfig(1024, 512, 128) // Default to Fast
+ };
+ }
+}
+
///
/// Implements the WSOLA (Waveform Similarity Overlap-Add) algorithm for real-time time stretching
/// and pitch preservation of audio. It allows changing playback speed without altering pitch.
+/// Optimized using unsafe pointer arithmetic in the hot search and overlap-add loops.
///
public class WsolaTimeStretcher
{
private int _channels;
private float _speed = 1.0f;
- internal const int DefaultWindowSizeFrames = 1024;
- private const int NominalAnalysisHopFrames = DefaultWindowSizeFrames / 4;
- private const int SearchRadiusFrames = (NominalAnalysisHopFrames * 3) / 8;
+ // Configurable Parameters
+ private int _windowSizeFrames;
+ private int _synthesisHopFrames;
+ private int _searchRadiusFrames;
+
+ // Speed-Dependent Parameters
+ private int _analysisHopFrames;
private int _windowSizeSamples;
private float[] _inputBufferInternal = [];
private int _inputBufferValidSamples;
private int _inputBufferReadPos;
- private float[] _analysisWindow = [];
+ private int _nominalInputPos;
+
private float[] _prevOutputTail = [];
private int _actualPrevTailLength;
private float[] _currentAnalysisFrame = [];
private float[] _outputOverlapBuffer = [];
- private int _nominalHopSynthesisFrames;
private bool _isFirstFrame = true;
private bool _isFlushing;
@@ -31,13 +133,63 @@ public class WsolaTimeStretcher
///
/// The initial number of audio channels. Defaults to 2 if not positive.
/// The initial playback speed. Defaults to 1.0f.
- public WsolaTimeStretcher(int initialChannels = 2, float initialSpeed = 1.0f)
+ /// Optional configuration object. Defaults to Fast preset.
+ public WsolaTimeStretcher(int initialChannels = 2, float initialSpeed = 1.0f, WsolaConfig? config = null)
{
+ ApplyConfig(config ?? WsolaConfig.FromPreset(WsolaPerformancePreset.Fast));
+
initialChannels = initialChannels <= 0 ? 2 : initialChannels;
SetChannels(initialChannels);
SetSpeed(initialSpeed);
}
+    /// <summary>
+    /// Updates the internal configuration parameters.
+    /// This is a heavy operation that will clear buffers and reset the processing state.
+    /// </summary>
+    /// <param name="config">The new configuration to apply.</param>
+ public void Configure(WsolaConfig config)
+ {
+ ArgumentNullException.ThrowIfNull(config);
+
+ // If config hasn't effectively changed, do nothing
+ if (_windowSizeFrames == config.WindowSizeFrames &&
+ _synthesisHopFrames == config.AnalysisHopFrames &&
+ _searchRadiusFrames == config.SearchRadiusFrames)
+ {
+ return;
+ }
+
+ ApplyConfig(config);
+
+ // Re-initialize buffers with new sizes (Force re-allocation)
+ var currentChannels = _channels;
+ _channels = -1;
+ SetChannels(currentChannels);
+
+ // Recalculate speed-dependent analysis hop and ensure buffer sizing
+ SetSpeed(_speed);
+ }
+
+    /// <summary>
+    /// Updates the internal configuration parameters based on a preset.
+    /// </summary>
+    /// <param name="preset">The performance preset to apply.</param>
+ public void Configure(WsolaPerformancePreset preset)
+ {
+ Configure(WsolaConfig.FromPreset(preset));
+ }
+
+ private void ApplyConfig(WsolaConfig config)
+ {
+ _windowSizeFrames = config.WindowSizeFrames;
+ _synthesisHopFrames = config.AnalysisHopFrames;
+ _searchRadiusFrames = config.SearchRadiusFrames;
+
+ // Ensure derived hop is valid even before SetSpeed runs.
+ _analysisHopFrames = Math.Max(1, _synthesisHopFrames);
+ }
+
///
/// Sets the number of audio channels for the time stretcher. Reinitializes internal buffers if channels change.
///
@@ -47,23 +199,20 @@ public void SetChannels(int channels)
{
if (channels <= 0) throw new ArgumentOutOfRangeException(nameof(channels), "Channels must be positive.");
if (_channels == channels) return;
+
_channels = channels;
- _windowSizeSamples = DefaultWindowSizeFrames * _channels;
+ _windowSizeSamples = _windowSizeFrames * _channels;
+
+ // Buffer A for correlation / tail storage
_prevOutputTail = new float[Math.Max(_channels, _windowSizeSamples - _channels)];
- const int maxInputReachFrames = NominalAnalysisHopFrames + SearchRadiusFrames + DefaultWindowSizeFrames;
- _inputBufferInternal = new float[maxInputReachFrames * _channels * 2];
- // Initialize Hann window for smooth fading.
- _analysisWindow = new float[DefaultWindowSizeFrames];
- for (var i = 0; i < DefaultWindowSizeFrames; i++)
- {
- _analysisWindow[i] = 0.5f * (1 - (float)Math.Cos(2 * Math.PI * i / (DefaultWindowSizeFrames - 1)));
- }
+
+ // _currentAnalysisFrame holds the raw data for the current window
_currentAnalysisFrame = new float[_windowSizeSamples];
- // Ensure the overlap buffer is large enough for the current speed's hop size
- var requiredBufferSize = Math.Max(_windowSizeSamples, _nominalHopSynthesisFrames * _channels);
- _outputOverlapBuffer = new float[requiredBufferSize];
-
+ // Overlap buffer holds the full reconstructed window
+ _outputOverlapBuffer = new float[_windowSizeSamples];
+
+ EnsureInternalInputBufferCapacity();
ResetState();
}
@@ -76,17 +225,27 @@ public void SetSpeed(float speed)
{
if (speed <= 0) throw new ArgumentOutOfRangeException(nameof(speed), "Speed must be positive.");
_speed = speed;
- // Calculate nominal synthesis hop frames based on the inverse of the speed.
- _nominalHopSynthesisFrames = (int)Math.Max(1, Math.Round(NominalAnalysisHopFrames / _speed));
- // Resize buffer if the new hop size exceeds the window size.
- if (_channels > 0)
+ // Keep synthesis hop fixed. Derive analysis hop from speed.
+ _analysisHopFrames = (int)Math.Max(1, Math.Round(_synthesisHopFrames * _speed));
+
+ EnsureInternalInputBufferCapacity();
+ }
+
+    /// <summary>
+    /// Ensures the internal input buffer is large enough for the current configuration and speed.
+    /// This accounts for analysis hop, search radius, and one full window of lookahead.
+    /// </summary>
+ private void EnsureInternalInputBufferCapacity()
+ {
+ if (_channels <= 0) return;
+
+ var maxInputReachFrames = _analysisHopFrames + _searchRadiusFrames + _windowSizeFrames;
+ var requiredSamples = maxInputReachFrames * _channels * 3; // *3 for safety/overlap margin
+
+ if (_inputBufferInternal.Length < requiredSamples)
{
- var requiredBufferSize = Math.Max(_windowSizeSamples, _nominalHopSynthesisFrames * _channels);
- if (_outputOverlapBuffer.Length < requiredBufferSize)
- {
- _outputOverlapBuffer = new float[requiredBufferSize];
- }
+ _inputBufferInternal = new float[requiredSamples];
}
}
@@ -94,7 +253,7 @@ public void SetSpeed(float speed)
/// Gets the minimum number of input samples required in the internal buffer to perform a processing step.
///
public int MinInputSamplesToProcess =>
- (NominalAnalysisHopFrames + SearchRadiusFrames) * _channels + _windowSizeSamples;
+ (_analysisHopFrames + _searchRadiusFrames) * _channels + _windowSizeSamples;
///
/// Resets the internal state of the time stretcher, clearing all buffers and flags.
@@ -104,6 +263,7 @@ private void ResetState()
{
_inputBufferValidSamples = 0;
_inputBufferReadPos = 0;
+ _nominalInputPos = 0;
Array.Clear(_prevOutputTail, 0, _prevOutputTail.Length);
_actualPrevTailLength = 0;
_isFirstFrame = true;
@@ -121,6 +281,18 @@ private void ResetState()
/// The current playback speed.
public float GetTargetSpeed() => _speed;
+    /// <summary>
+    /// Calculates a raised-cosine fade value for crossfading.
+    /// </summary>
+    /// <param name="i">The current frame index.</param>
+    /// <param name="n">The total number of frames in the fade.</param>
+    /// <returns>A value between 0.0 and 1.0 (approximating an S-curve).</returns>
+ private static float Fade(int i, int n)
+ {
+ if (n <= 1) return 1f;
+ return 0.5f - 0.5f * MathF.Cos(MathF.PI * i / (n - 1));
+ }
+
///
/// Processes a segment of audio data for time stretching.
///
@@ -129,7 +301,7 @@ private void ResetState()
/// Output parameter: The number of samples consumed from the input span.
/// Output parameter: The number of *original* source samples that the generated output represents.
/// The number of samples written to the output span.
-    public int Process(ReadOnlySpan<float> input, Span<float> output,
+    public unsafe int Process(ReadOnlySpan<float> input, Span<float> output,
out int samplesConsumedFromInputBuffer,
out int sourceSamplesRepresentedByOutput)
{
@@ -137,14 +309,20 @@ public int Process(ReadOnlySpan input, Span output,
sourceSamplesRepresentedByOutput = 0;
if (_channels == 0 || output.IsEmpty) return 0;
- // Copy incoming input data into the internal buffer, shifting existing data if necessary.
+ // 1. Manage Input Buffer
if (!input.IsEmpty)
{
+ // If the buffer is full, or we have significant discarded data, shift left.
if (_inputBufferReadPos > 0 && _inputBufferValidSamples > _inputBufferReadPos)
+ {
Buffer.BlockCopy(_inputBufferInternal, _inputBufferReadPos * sizeof(float), _inputBufferInternal, 0,
(_inputBufferValidSamples - _inputBufferReadPos) * sizeof(float));
- _inputBufferValidSamples -= _inputBufferReadPos;
- _inputBufferReadPos = 0;
+
+ _nominalInputPos -= _inputBufferReadPos;
+ _inputBufferValidSamples -= _inputBufferReadPos;
+ _inputBufferReadPos = 0;
+ }
+
var spaceInInputBuffer = _inputBufferInternal.Length - _inputBufferValidSamples;
var toCopy = Math.Min(spaceInInputBuffer, input.Length);
if (toCopy > 0)
@@ -158,265 +336,310 @@ public int Process(ReadOnlySpan input, Span output,
var samplesWrittenToOutput = 0;
var totalSourceSamplesForThisCall = 0;
- // Loop to generate as much output as possible given available input and output buffer space.
- while (samplesWrittenToOutput < output.Length)
+ var currentWindowSizeFrames = _windowSizeFrames;
+ var currentSearchRadiusFrames = _searchRadiusFrames;
+ var currentChannels = _channels;
+ var currentWindowSizeSamples = _windowSizeSamples;
+
+ var analysisHopSamples = _analysisHopFrames * currentChannels;
+ var searchRadiusSamples = currentSearchRadiusFrames * currentChannels;
+
+ // Fixed synthesis parameters
+ var hopSynFrames = _synthesisHopFrames;
+ var overlapFrames = currentWindowSizeFrames - hopSynFrames;
+ if (overlapFrames < 0) overlapFrames = 0;
+
+ var hopSynSamples = hopSynFrames * currentChannels;
+ var overlapSamples = overlapFrames * currentChannels;
+
+ // Pin arrays once to avoid pinning/unpinning in the tight loop
+ fixed (float* pInputBase = _inputBufferInternal)
+ fixed (float* pPrevTailBase = _prevOutputTail)
+ fixed (float* pOutputOverlap = _outputOverlapBuffer)
+ fixed (float* pCurrentAnalysis = _currentAnalysisFrame)
{
- // Check if enough input samples are available to process a full window + search area.
- if (_inputBufferValidSamples - _inputBufferReadPos < MinInputSamplesToProcess)
+ while (samplesWrittenToOutput < output.Length)
{
- // If not flushing and not enough data, shift remaining data and return.
- if (!_isFlushing || (_inputBufferValidSamples - _inputBufferReadPos < _windowSizeSamples))
- {
- if (_inputBufferReadPos > 0 && _inputBufferValidSamples > _inputBufferReadPos)
- Buffer.BlockCopy(_inputBufferInternal, _inputBufferReadPos * sizeof(float),
- _inputBufferInternal, 0,
- (_inputBufferValidSamples - _inputBufferReadPos) * sizeof(float));
- _inputBufferValidSamples -= _inputBufferReadPos;
- _inputBufferReadPos = 0;
- sourceSamplesRepresentedByOutput = totalSourceSamplesForThisCall;
- return samplesWrittenToOutput;
- }
- }
+ // Determine the base (nominal) position in the input buffer for this frame's processing.
+ var basePosInInput = _nominalInputPos;
- var bestOffsetFromNominalFrames = 0;
+ // Calculate required valid samples in the buffer for the search and a full window read.
+ var requiredSamples = basePosInInput + searchRadiusSamples + currentWindowSizeSamples;
- // If not the first frame and a previous output tail exists, perform WSOLA's search for best overlap.
- if (!_isFirstFrame && _actualPrevTailLength > 0)
- {
- var synthesisHopSamples = _nominalHopSynthesisFrames * _channels;
- // Length of the overlap region to compare.
- var compareLengthSamples =
- Math.Min(_actualPrevTailLength, _windowSizeSamples - synthesisHopSamples);
- compareLengthSamples = Math.Max(0, compareLengthSamples);
- var compareLengthFrames = compareLengthSamples / _channels;
- const int minValidOverlapForSearch = SearchRadiusFrames / 4;
- float prevTailEnergy = 0;
- if (compareLengthFrames > 0)
+ // Check Data Availability
+ if (_inputBufferValidSamples < requiredSamples)
{
- for (var iS = 0; iS < compareLengthSamples; ++iS)
- prevTailEnergy += _prevOutputTail[iS] * _prevOutputTail[iS];
+ if (!_isFlushing || (_inputBufferValidSamples < basePosInInput + currentWindowSizeSamples))
+ {
+ // Shift if we have dead space and need to read more
+ if (_inputBufferReadPos > 0)
+ {
+ Buffer.BlockCopy(_inputBufferInternal, _inputBufferReadPos * sizeof(float),
+ _inputBufferInternal, 0,
+ (_inputBufferValidSamples - _inputBufferReadPos) * sizeof(float));
+
+ _nominalInputPos -= _inputBufferReadPos;
+ _inputBufferValidSamples -= _inputBufferReadPos;
+ _inputBufferReadPos = 0;
+ }
+
+ sourceSamplesRepresentedByOutput = totalSourceSamplesForThisCall;
+ return samplesWrittenToOutput;
+ }
}
- var silenceThreshold = 1e-7f * compareLengthSamples; // Threshold to avoid correlation on silence.
+ var bestOffsetSamples = 0;
- // Only perform search if previous tail has significant energy and enough length for meaningful correlation.
- if (prevTailEnergy > silenceThreshold && compareLengthFrames > minValidOverlapForSearch &&
- compareLengthSamples > 0)
+ // WSOLA Search Phase
+ if (!_isFirstFrame && _actualPrevTailLength > 0 && overlapFrames > 0)
{
- var maxNcc = -2.0;
- // Calculate mean and sum of squared deviations for the previous output tail (A).
- double sumA = 0;
- for (var iS = 0; iS < compareLengthSamples; ++iS) sumA += _prevOutputTail[iS];
- var meanA = sumA / compareLengthSamples;
- double sumADevSq = 0;
- for (var iS = 0; iS < compareLengthSamples; ++iS)
- {
- var d = _prevOutputTail[iS] - meanA;
- sumADevSq += d * d;
- }
+ // Compare only within the overlap region to reduce artifacts.
+ var availablePrevTailFrames = _actualPrevTailLength / currentChannels;
+ var compareFrames = Math.Min(overlapFrames, availablePrevTailFrames);
+ var compareSamples = compareFrames * currentChannels;
+
+ const int minValidOverlapDivider = 4;
+ var minValidOverlapForSearch = currentSearchRadiusFrames / minValidOverlapDivider;
- // Pre-calculate NCC for delta 0 (nominal hop) as a baseline.
- var candidateStartAtDelta0 = _inputBufferReadPos + NominalAnalysisHopFrames * _channels;
- if (NominalAnalysisHopFrames > 0 &&
- (candidateStartAtDelta0 + compareLengthSamples <= _inputBufferValidSamples))
+ // Fast energy check using Left channel only to avoid stereo cancellation issues.
+ float prevTailEnergy = 0;
+ if (compareFrames > 0)
{
- double sumBd0 = 0;
- for (var iS = 0; iS < compareLengthSamples; ++iS)
- sumBd0 += _inputBufferInternal[candidateStartAtDelta0 + iS];
- var meanBd0 = sumBd0 / compareLengthSamples;
- double sumBDevSqD0 = 0, dotProductDevD0 = 0;
- for (var iS = 0; iS < compareLengthSamples; ++iS)
+ for (var iF = 0; iF < compareFrames; ++iF)
{
- var dA = _prevOutputTail[iS] - meanA;
- var dB = _inputBufferInternal[candidateStartAtDelta0 + iS] - meanBd0;
- dotProductDevD0 += dA * dB;
- sumBDevSqD0 += dB * dB;
+ var idx = iF * currentChannels; // Left channel at idx
+ var v = pPrevTailBase[idx];
+ prevTailEnergy += v * v;
}
-
- var denominatorD0 = Math.Sqrt(sumADevSq * sumBDevSqD0);
- if (denominatorD0 < 1e-9)
- maxNcc = (sumADevSq < 1e-9 && sumBDevSqD0 < 1e-9) ? 1.0 : 0.0;
- else maxNcc = dotProductDevD0 / denominatorD0;
}
- // Iterate through search radius to find the best overlap.
- for (var currentDeltaFrames = -SearchRadiusFrames;
- currentDeltaFrames <= SearchRadiusFrames;
- currentDeltaFrames++)
+ var silenceThreshold = 1e-7f * compareFrames;
+
+ if (prevTailEnergy > silenceThreshold &&
+ compareFrames > minValidOverlapForSearch &&
+ compareFrames > 0)
{
- if (currentDeltaFrames == 0) continue;
- var trialAnalysisHopFrames = NominalAnalysisHopFrames + currentDeltaFrames;
- if (trialAnalysisHopFrames <= 0) continue;
- var candidateSegmentStartSample = _inputBufferReadPos + trialAnalysisHopFrames * _channels;
- // Check if candidate segment is within valid input data.
- if (candidateSegmentStartSample + compareLengthSamples > _inputBufferValidSamples)
- {
- if (currentDeltaFrames > 0) break;
- continue;
- }
+ var maxNcc = -2.0;
- // Calculate mean and sum of squared deviations for the current candidate segment (B).
- double sumB = 0;
- for (var iS = 0; iS < compareLengthSamples; ++iS)
- sumB += _inputBufferInternal[candidateSegmentStartSample + iS];
- var meanB = sumB / compareLengthSamples;
- double sumBDevSq = 0, dotProductDev = 0;
- for (var iS = 0; iS < compareLengthSamples; ++iS)
+ // Pre-Calculate Buffer A (Previous Output Tail) Stats - Left channel only
+ double sumA = 0;
+ for (var i = 0; i < compareFrames; ++i)
{
- var dA = _prevOutputTail[iS] - meanA;
- var dB = _inputBufferInternal[candidateSegmentStartSample + iS] - meanB;
- dotProductDev += dA * dB;
- sumBDevSq += dB * dB;
+ sumA += pPrevTailBase[i * currentChannels];
}
+ var meanA = (float)(sumA / compareFrames);
- // Calculate Normalized Cross-Correlation (NCC).
- double currentNcc;
- var denominator = Math.Sqrt(sumADevSq * sumBDevSq);
- if (denominator < 1e-9) currentNcc = (sumADevSq < 1e-9 && sumBDevSq < 1e-9) ? 1.0 : 0.0;
- else currentNcc = dotProductDev / denominator;
- const float nccQualityThreshold = 0.02f;
- if (currentNcc > maxNcc + nccQualityThreshold)
+ double sumADevSq = 0;
+ for (var i = 0; i < compareFrames; ++i)
{
- maxNcc = currentNcc;
- bestOffsetFromNominalFrames = currentDeltaFrames;
+ var val = pPrevTailBase[i * currentChannels];
+ var d = val - meanA;
+ sumADevSq += d * d;
}
- else if (currentNcc > maxNcc - nccQualityThreshold)
+
+ // Search Loop
+ for (var currentDeltaFrames = -currentSearchRadiusFrames;
+ currentDeltaFrames <= currentSearchRadiusFrames;
+ currentDeltaFrames++)
{
- if (Math.Abs(currentDeltaFrames) < Math.Abs(bestOffsetFromNominalFrames))
+ var currentDeltaSamples = currentDeltaFrames * currentChannels;
+ var candidateSegmentStartSample = basePosInInput + currentDeltaSamples;
+
+ // Bounds Check
+ if (candidateSegmentStartSample < 0 ||
+ candidateSegmentStartSample + compareSamples > _inputBufferValidSamples)
+ {
+ continue;
+ }
+
+ var pB = pInputBase + candidateSegmentStartSample;
+
+ // Calculate Mean B - Left channel only
+ double sumB = 0;
+ for (var i = 0; i < compareFrames; ++i)
+ {
+ sumB += pB[i * currentChannels];
+ }
+ var meanB = (float)(sumB / compareFrames);
+
+ // Calculate Cross-Correlation and SumBDevSq
+ double sumBDevSq = 0;
+ double dotProductDev = 0;
+
+ for (var i = 0; i < compareFrames; ++i)
+ {
+ var a = pPrevTailBase[i * currentChannels];
+ var b = pB[i * currentChannels];
+
+ var dA = a - meanA;
+ var dB = b - meanB;
+
+ dotProductDev += dA * dB;
+ sumBDevSq += dB * dB;
+ }
+
+ // NCC Calculation
+ double currentNcc;
+ var denominator = Math.Sqrt(sumADevSq * sumBDevSq);
+ if (denominator < 1e-9)
+ currentNcc = (sumADevSq < 1e-9 && sumBDevSq < 1e-9) ? 1.0 : 0.0;
+ else
+ currentNcc = dotProductDev / denominator;
+
+ const float nccQualityThreshold = 0.02f;
+
+ // Early exit if we find a near-perfect match.
+ if (currentNcc > 0.995)
{
maxNcc = currentNcc;
- bestOffsetFromNominalFrames = currentDeltaFrames;
+ bestOffsetSamples = currentDeltaSamples;
+ break;
+ }
+
+ if (currentNcc > maxNcc + nccQualityThreshold)
+ {
+ maxNcc = currentNcc;
+ bestOffsetSamples = currentDeltaSamples;
+ }
+ else if (currentNcc > maxNcc - nccQualityThreshold)
+ {
+ if (Math.Abs(currentDeltaSamples) < Math.Abs(bestOffsetSamples))
+ {
+ maxNcc = currentNcc;
+ bestOffsetSamples = currentDeltaSamples;
+ }
}
}
}
}
- }
- // Determine the actual analysis hop based on the best overlap found.
- var actualAnalysisHopFrames = NominalAnalysisHopFrames + bestOffsetFromNominalFrames;
- if (actualAnalysisHopFrames <= 0) actualAnalysisHopFrames = 1;
- var actualAnalysisHopSamples =
- actualAnalysisHopFrames * _channels;
+ // Analysis & Synthesis Phase
+ var chosenSegmentStartSampleInInput = basePosInInput + bestOffsetSamples;
- // Calculate the starting position of the chosen analysis segment in the input buffer.
- var chosenSegmentStartSampleInInput = _inputBufferReadPos +
- (NominalAnalysisHopFrames + bestOffsetFromNominalFrames) * _channels;
+ // If flush handling forces us to read past valid data, clamp
+ if (chosenSegmentStartSampleInInput + currentWindowSizeSamples > _inputBufferValidSamples)
+ {
+ if (_isFlushing)
+ {
+ chosenSegmentStartSampleInInput = _inputBufferValidSamples - currentWindowSizeSamples;
+ if (chosenSegmentStartSampleInInput < 0) chosenSegmentStartSampleInInput = 0;
+ }
+ else
+ {
+ break; // Should have been caught by Availability Check, but safe fallback
+ }
+ }
- // Check if the chosen segment is within valid input data. If not, handle end of input.
- if (chosenSegmentStartSampleInInput + _windowSizeSamples > _inputBufferValidSamples)
- {
- if (_isFlushing)
+ // Extract Raw Frame
+ Buffer.MemoryCopy(
+ pInputBase + chosenSegmentStartSampleInInput,
+ pCurrentAnalysis,
+ currentWindowSizeSamples * sizeof(float),
+ currentWindowSizeSamples * sizeof(float));
+
+ // Overlap-Add with Crossfade
+ if (!_isFirstFrame && _actualPrevTailLength > 0 && overlapFrames > 0)
{
- // If flushing, try to use the last possible full window.
- chosenSegmentStartSampleInInput = _inputBufferValidSamples - _windowSizeSamples;
- if (chosenSegmentStartSampleInInput < _inputBufferReadPos)
+ var framesToFade = Math.Min(overlapFrames, _actualPrevTailLength / currentChannels);
+
+ for (var f = 0; f < framesToFade; f++)
{
- // If even the last full window is beyond current read position, shift buffer and return.
- if (_inputBufferReadPos > 0 && _inputBufferValidSamples > _inputBufferReadPos)
+ var w = Fade(f, framesToFade);
+ var inv = 1f - w;
+ var baseIdx = f * currentChannels;
+
+ for (var ch = 0; ch < currentChannels; ch++)
{
- Buffer.BlockCopy(_inputBufferInternal, _inputBufferReadPos * sizeof(float),
- _inputBufferInternal, 0,
- (_inputBufferValidSamples - _inputBufferReadPos) * sizeof(float));
- _inputBufferValidSamples -= _inputBufferReadPos;
- _inputBufferReadPos = 0;
+ pOutputOverlap[baseIdx + ch] =
+ pPrevTailBase[baseIdx + ch] * inv +
+ pCurrentAnalysis[baseIdx + ch] * w;
}
- sourceSamplesRepresentedByOutput = totalSourceSamplesForThisCall;
- return samplesWrittenToOutput;
}
- }
- else
- {
- // If not flushing and not enough data, shift buffer and return.
- if (_inputBufferReadPos > 0 && _inputBufferValidSamples > _inputBufferReadPos)
+
+ // Remainder of the window (after overlap) is just the current frame
+ var startSamples = framesToFade * currentChannels;
+ if (startSamples < currentWindowSizeSamples)
{
- Buffer.BlockCopy(_inputBufferInternal, _inputBufferReadPos * sizeof(float),
- _inputBufferInternal, 0,
- (_inputBufferValidSamples - _inputBufferReadPos) * sizeof(float));
- _inputBufferValidSamples -= _inputBufferReadPos;
- _inputBufferReadPos = 0;
+ Buffer.MemoryCopy(
+ pCurrentAnalysis + startSamples,
+ pOutputOverlap + startSamples,
+ (currentWindowSizeSamples - startSamples) * sizeof(float),
+ (currentWindowSizeSamples - startSamples) * sizeof(float));
}
- sourceSamplesRepresentedByOutput = totalSourceSamplesForThisCall;
- return samplesWrittenToOutput;
}
- }
-
- // Apply the analysis window to the chosen input segment.
- for (var f = 0; f < DefaultWindowSizeFrames; f++)
- {
- for (var ch = 0; ch < _channels; ch++)
+ else
{
- var readIdx = chosenSegmentStartSampleInInput + f * _channels + ch;
- _currentAnalysisFrame[f * _channels + ch] = _inputBufferInternal[readIdx] * _analysisWindow[f];
+ // First frame: just copy current frame
+ Buffer.MemoryCopy(
+ pCurrentAnalysis,
+ pOutputOverlap,
+ currentWindowSizeSamples * sizeof(float),
+ currentWindowSizeSamples * sizeof(float));
}
- }
- // Perform overlap-add synthesis.
- var currentFrameSynthesisHopSamples = _nominalHopSynthesisFrames * _channels;
- var currentFrameSynthesisOverlapSamples = Math.Max(0, _windowSizeSamples - currentFrameSynthesisHopSamples);
- Array.Clear(_outputOverlapBuffer, 0, _outputOverlapBuffer.Length);
+ // Output
+ var availableInOutputSpan = output.Length - samplesWrittenToOutput;
+ var actualCopyToOutput = Math.Min(hopSynSamples, availableInOutputSpan);
- // Add previous output tail (overlap part) to the output overlap buffer.
- if (!_isFirstFrame && _actualPrevTailLength > 0)
- {
- var overlapToUseFromPrev = Math.Min(_actualPrevTailLength, currentFrameSynthesisOverlapSamples);
- if (overlapToUseFromPrev > 0)
- _prevOutputTail.AsSpan(0, overlapToUseFromPrev)
- .CopyTo(_outputOverlapBuffer.AsSpan(0, overlapToUseFromPrev));
- }
+ if (actualCopyToOutput > 0)
+ {
+                    new Span<float>(pOutputOverlap, actualCopyToOutput).CopyTo(output.Slice(samplesWrittenToOutput));
+ samplesWrittenToOutput += actualCopyToOutput;
- // Add the current analysis frame (windowed input segment) to the output overlap buffer.
- for (var i = 0; i < _windowSizeSamples; ++i)
- {
- if (!_isFirstFrame && _actualPrevTailLength > 0 &&
- i < Math.Min(_actualPrevTailLength, currentFrameSynthesisOverlapSamples))
- _outputOverlapBuffer[i] += _currentAnalysisFrame[i];
- else _outputOverlapBuffer[i] = _currentAnalysisFrame[i];
- }
+ // Track source samples represented by this output segment using fixed ratio speed = analysisHop / synthesisHop.
+ totalSourceSamplesForThisCall += _synthesisHopFrames > 0
+ ? (int)Math.Round((double)actualCopyToOutput / (_synthesisHopFrames * currentChannels) * analysisHopSamples)
+ : analysisHopSamples;
+ }
- // Copy the synthesized output segment (non-overlapping part) to the external output span.
- var availableInOutputSpan = output.Length - samplesWrittenToOutput;
- var actualCopyToOutput = Math.Min(currentFrameSynthesisHopSamples, availableInOutputSpan);
+ // Save Tail
+ if (overlapSamples > 0)
+ {
+ if (_prevOutputTail.Length >= overlapSamples)
+ {
+ Buffer.MemoryCopy(
+ pOutputOverlap + hopSynSamples,
+ pPrevTailBase,
+ _prevOutputTail.Length * sizeof(float),
+ overlapSamples * sizeof(float));
+ }
+ else
+ {
+                        new Span<float>(pPrevTailBase, _prevOutputTail.Length).Clear();
+ }
+ }
- if (actualCopyToOutput > 0)
- {
- _outputOverlapBuffer.AsSpan(0, actualCopyToOutput).CopyTo(output.Slice(samplesWrittenToOutput));
- samplesWrittenToOutput += actualCopyToOutput;
+ _actualPrevTailLength = overlapSamples;
+ _isFirstFrame = false;
- // Estimate source samples represented by the output. This is proportional to the hop sizes.
- totalSourceSamplesForThisCall += _nominalHopSynthesisFrames > 0
- ? (int)Math.Round((double)actualCopyToOutput / (_nominalHopSynthesisFrames * _channels) * actualAnalysisHopSamples)
- : actualAnalysisHopSamples;
- }
+ // Advance the nominal position by the speed-dependent analysis hop.
+ _nominalInputPos += analysisHopSamples;
- // Store the tail of the current synthesis frame for the next overlap-add step.
- switch (currentFrameSynthesisOverlapSamples)
- {
- case > 0 when
- _prevOutputTail.Length >= currentFrameSynthesisOverlapSamples:
- Array.Copy(_outputOverlapBuffer, currentFrameSynthesisHopSamples, _prevOutputTail, 0,
- currentFrameSynthesisOverlapSamples);
- break;
- case > 0:
- // If _prevOutputTail is too small, clear it to avoid issues (something went off).
- Array.Clear(_prevOutputTail, 0, _prevOutputTail.Length);
- break;
+ // Update discard pointer (keep enough history for negative search offsets).
+ var safeDiscardPoint = _nominalInputPos - searchRadiusSamples;
+ if (safeDiscardPoint > _inputBufferReadPos)
+ {
+ _inputBufferReadPos = safeDiscardPoint;
+ }
}
-
- _actualPrevTailLength = currentFrameSynthesisOverlapSamples;
- _isFirstFrame = false;
- _inputBufferReadPos += actualAnalysisHopSamples;
-
- if (samplesWrittenToOutput >= output.Length) break;
- if (_isFlushing && (_inputBufferValidSamples - _inputBufferReadPos < _channels)) break;
}
- // Shift any remaining data in the internal input buffer to the beginning.
var remainingInternalInput = _inputBufferValidSamples - _inputBufferReadPos;
if (_inputBufferReadPos > 0 && remainingInternalInput > 0)
+ {
Buffer.BlockCopy(_inputBufferInternal, _inputBufferReadPos * sizeof(float), _inputBufferInternal, 0,
remainingInternalInput * sizeof(float));
- _inputBufferValidSamples = remainingInternalInput;
- _inputBufferReadPos = 0;
+
+ _nominalInputPos -= _inputBufferReadPos;
+ _inputBufferValidSamples = remainingInternalInput;
+ _inputBufferReadPos = 0;
+ }
+ else if (remainingInternalInput <= 0)
+ {
+ _inputBufferValidSamples = 0;
+ _inputBufferReadPos = 0;
+ _nominalInputPos = 0;
+ }
sourceSamplesRepresentedByOutput = totalSourceSamplesForThisCall;
return samplesWrittenToOutput;
@@ -435,7 +658,7 @@ public int Flush(Span output)
// Continue processing until output buffer is full or internal buffer can no longer yield a full window.
while (totalFlushed < output.Length &&
- (_inputBufferValidSamples - _inputBufferReadPos >= _windowSizeSamples))
+ (_inputBufferValidSamples >= _windowSizeSamples))
{
        var flushedThisCall = Process(ReadOnlySpan<float>.Empty, output.Slice(totalFlushed), out _,
out _);
@@ -446,4 +669,4 @@ public int Flush(Span output)
_isFlushing = false;
return totalFlushed;
}
-}
+}
\ No newline at end of file
diff --git a/Src/Editing/AudioSegment.cs b/Src/Editing/AudioSegment.cs
index 2b47e21..0aa251f 100644
--- a/Src/Editing/AudioSegment.cs
+++ b/Src/Editing/AudioSegment.cs
@@ -23,6 +23,8 @@ public class AudioSegment : IDisposable
private AudioSegmentSettings _settings;
private Track? _parentTrack;
+ // WSOLA State
+ private WsolaConfig _timeStretchConfig;
private WsolaTimeStretcher? _segmentWsolaStretcher;
private float[] _wsolaFeedBuffer = [];
private int _wsolaFeedBufferValidSamples;
@@ -131,6 +133,35 @@ public AudioSegmentSettings Settings
}
}
+    /// <summary>
+    /// Gets or sets the configuration for the time-stretching algorithm.
+    /// Changing this resets the internal processing state.
+    /// </summary>
+ public WsolaConfig TimeStretchConfig
+ {
+ get => _timeStretchConfig;
+ set
+ {
+ if (value == null) throw new ArgumentNullException(nameof(value));
+ // Check for value equality to avoid unnecessary resets
+ if (_timeStretchConfig.WindowSizeFrames == value.WindowSizeFrames &&
+ _timeStretchConfig.AnalysisHopFrames == value.AnalysisHopFrames &&
+ _timeStretchConfig.SearchRadiusFrames == value.SearchRadiusFrames)
+ {
+ return;
+ }
+
+ _timeStretchConfig = value;
+
+ // Re-initialize buffers to accommodate potentially larger window sizes
+ InitializeWsolaBuffers();
+
+ // Reconfigure or recreate the stretcher
+ FullResetState();
+ MarkDirty();
+ }
+ }
+
///
/// Gets or sets the parent track to which this segment is added.
///
@@ -178,6 +209,9 @@ public AudioSegment(
_ownsDataProvider = ownsDataProvider;
_settings = settings ?? new AudioSegmentSettings();
_settings.ParentSegment = this;
+
+ // Default to Fast preset
+ _timeStretchConfig = WsolaConfig.FromPreset(WsolaPerformancePreset.Fast);
// Validation for initial construction
if (_sourceStartTime < TimeSpan.Zero) throw new ArgumentOutOfRangeException(nameof(sourceStartTime));
@@ -194,9 +228,20 @@ public AudioSegment(
private void InitializeWsolaBuffers()
{
var channels = Format.Channels > 0 ? Format.Channels : 2;
- const int baseBufferSizeFrames = WsolaTimeStretcher.DefaultWindowSizeFrames * 8;
- _wsolaFeedBuffer = new float[baseBufferSizeFrames * channels];
- _wsolaOutputBuffer = new float[baseBufferSizeFrames * channels * 3];
+ // Size buffers based on the current configuration to ensure they are large enough.
+ // x8 factor provides a safe margin for processing multiple hops.
+ var baseBufferSizeFrames = _timeStretchConfig.WindowSizeFrames * 8;
+
+ var requiredSize = baseBufferSizeFrames * channels;
+ if (_wsolaFeedBuffer.Length < requiredSize || _wsolaFeedBuffer.Length > requiredSize * 2)
+ {
+ _wsolaFeedBuffer = new float[requiredSize];
+ }
+
+ if (_wsolaOutputBuffer.Length < requiredSize * 3 || _wsolaOutputBuffer.Length > requiredSize * 6)
+ {
+ _wsolaOutputBuffer = new float[requiredSize * 3];
+ }
}
@@ -230,11 +275,18 @@ internal void FullResetState()
var effectiveStretchFactor = Settings.TimeStretchFactor;
if (Math.Abs(effectiveStretchFactor - 1.0f) > float.Epsilon && SourceDuration > TimeSpan.Zero)
{
- // Create or reconfigure WSOLA if time stretching is enabled.
- _segmentWsolaStretcher ??= new WsolaTimeStretcher(channels, 1.0f / effectiveStretchFactor);
- _segmentWsolaStretcher.SetChannels(channels);
- _segmentWsolaStretcher.SetSpeed(1.0f / effectiveStretchFactor);
- _segmentWsolaStretcher.Reset();
+ if (_segmentWsolaStretcher == null)
+ {
+ _segmentWsolaStretcher = new WsolaTimeStretcher(channels, 1.0f / effectiveStretchFactor, _timeStretchConfig);
+ }
+ else
+ {
+ // Update configuration and reset
+ _segmentWsolaStretcher.SetChannels(channels);
+ _segmentWsolaStretcher.Configure(_timeStretchConfig);
+ _segmentWsolaStretcher.SetSpeed(1.0f / effectiveStretchFactor);
+ _segmentWsolaStretcher.Reset();
+ }
}
else
{
@@ -327,9 +379,12 @@ public TimeSpan GetTotalLoopedDurationOnTimeline()
/// A new object with copied properties.
public AudioSegment Clone(TimeSpan? newTimelineStartTime = null)
{
- return new AudioSegment(Format, SourceDataProvider,
+ var clone = new AudioSegment(Format, SourceDataProvider,
SourceStartTime, SourceDuration, newTimelineStartTime ?? TimelineStartTime, $"{Name} (Clone)",
Settings.Clone());
+
+ clone.TimeStretchConfig = this.TimeStretchConfig;
+ return clone;
}
///
@@ -677,7 +732,7 @@ private bool EnsureMoreWsolaOutputGenerated(int sampleRate, int channels)
var initialWsolaOutputCountThisCall = _wsolaOutputBufferValidSamples;
// Loop until enough output is generated or source runs out.
- while (_wsolaOutputBufferValidSamples - _wsolaOutputBufferReadOffset < WsolaTimeStretcher.DefaultWindowSizeFrames * channels)
+ while (_wsolaOutputBufferValidSamples - _wsolaOutputBufferReadOffset < _timeStretchConfig.WindowSizeFrames * channels)
{
var currentSourcePassExhaustedForWsolaFeed = _sourceSamplesFedToWsolaThisSourcePass >= sourceSamplesInOneSourcePass;
@@ -763,7 +818,7 @@ private bool EnsureMoreWsolaOutputGenerated(int sampleRate, int channels)
// Break if no progress is made or enough output is generated.
if (samplesWrittenToWsolaOut == 0 && samplesConsumedFromFeed == 0) break;
- if (_wsolaOutputBufferValidSamples - _wsolaOutputBufferReadOffset >= WsolaTimeStretcher.DefaultWindowSizeFrames * channels) break;
+ if (_wsolaOutputBufferValidSamples - _wsolaOutputBufferReadOffset >= _timeStretchConfig.WindowSizeFrames * channels) break;
}
// Return true if the output buffer grew during this call.
diff --git a/Src/Editing/Composition.cs b/Src/Editing/Composition.cs
index e88ff55..2c8ea53 100644
--- a/Src/Editing/Composition.cs
+++ b/Src/Editing/Composition.cs
@@ -1,3 +1,4 @@
+using System.Diagnostics.CodeAnalysis;
using SoundFlow.Abstracts;
using SoundFlow.Editing.Mapping;
using SoundFlow.Interfaces;
@@ -12,6 +13,7 @@ namespace SoundFlow.Editing;
/// Represents a complete audio composition, acting as the top-level container for multiple tracks.
/// It serves as a façade, providing access to its data model and specialized services for rendering, editing, and recording.
///
+[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicMethods)]
public sealed class Composition : ISequencerContext, IDisposable, IMidiMappable
{
private readonly Dictionary _objectRegistry = new();
diff --git a/Src/Editing/Mapping/MidiMappingManager.cs b/Src/Editing/Mapping/MidiMappingManager.cs
index b5c7208..89bf4a7 100644
--- a/Src/Editing/Mapping/MidiMappingManager.cs
+++ b/Src/Editing/Mapping/MidiMappingManager.cs
@@ -1,10 +1,8 @@
using System.Reflection;
-using SoundFlow.Abstracts.Devices;
using SoundFlow.Interfaces;
using SoundFlow.Midi.Devices;
using SoundFlow.Midi.Enums;
using SoundFlow.Midi.Structs;
-using SoundFlow.Structs;
using SoundFlow.Utils;
namespace SoundFlow.Editing.Mapping;
@@ -42,7 +40,7 @@ internal MidiMappingManager(Composition composition)
public void AddInputDevice(MidiInputDevice device)
{
if (_subscribedDevices.Contains(device)) return;
-
+
device.OnMessageReceived += OnMidiMessageReceived;
_subscribedDevices.Add(device);
_highResCcParsers[device] = new HighResCcParser(this);
@@ -80,7 +78,7 @@ public bool RemoveMapping(Guid mappingId)
{
var mapping = _mappings.FirstOrDefault(m => m.Id == mappingId);
if (mapping == null) return false;
-
+
_mappings.Remove(mapping);
_memberCache.Remove(mapping.Id);
_composition.MarkDirty();
@@ -96,25 +94,29 @@ private void OnMidiMessageReceived(MidiMessage message, MidiDeviceInfo deviceInf
if (parser.ProcessMessage(message))
return; // The message was part of a high-resolution sequence and has been handled.
}
-
+
// If not handled by the high-res parser, process as a standard 7-bit message.
foreach (var mapping in _mappings)
{
if (!mapping.IsResolved) continue;
var source = mapping.Source;
- var match = source.DeviceName == deviceInfo.Name && (source.Channel == 0 || source.Channel == message.Channel);
+ var match = source.DeviceName == deviceInfo.Name &&
+ (source.Channel == 0 || source.Channel == message.Channel);
if (!match) continue;
match = source.MessageType switch
{
- MidiMappingSourceType.ControlChange => message.Command == MidiCommand.ControlChange && message.ControllerNumber == source.MessageParameter,
- MidiMappingSourceType.NoteOn => message.Command == MidiCommand.NoteOn && message.NoteNumber == source.MessageParameter,
- MidiMappingSourceType.NoteOff => message.Command == MidiCommand.NoteOff && message.NoteNumber == source.MessageParameter,
+ MidiMappingSourceType.ControlChange => message.Command == MidiCommand.ControlChange &&
+ message.ControllerNumber == source.MessageParameter,
+ MidiMappingSourceType.NoteOn => message.Command == MidiCommand.NoteOn &&
+ message.NoteNumber == source.MessageParameter,
+ MidiMappingSourceType.NoteOff => message.Command == MidiCommand.NoteOff &&
+ message.NoteNumber == source.MessageParameter,
MidiMappingSourceType.PitchBend => message.Command == MidiCommand.PitchBend,
_ => false
};
-
+
if (!match) continue;
var inputValue = source.MessageType switch
@@ -125,13 +127,13 @@ private void OnMidiMessageReceived(MidiMessage message, MidiDeviceInfo deviceInf
MidiMappingSourceType.PitchBend => message.PitchBendValue,
_ => -1
};
-
+
if (inputValue == -1) continue;
-
+
ApplyMapping(mapping, inputValue);
}
}
-
+
private void ApplyHighResMapping(int channel, int parameter, int value, MidiDeviceInfo deviceInfo)
{
foreach (var mapping in _mappings)
@@ -153,7 +155,8 @@ private void ApplyHighResMapping(int channel, int parameter, int value, MidiDevi
private void ApplyMapping(MidiMapping mapping, int inputValue)
{
- if (!_composition.TryGetMappableObject(mapping.Target.TargetObjectId, out var targetObject) || targetObject == null)
+ if (!_composition.TryGetMappableObject(mapping.Target.TargetObjectId, out var targetObject) ||
+ targetObject == null)
{
mapping.IsResolved = false; // Mark as unresolved if target is missing
return;
@@ -161,12 +164,17 @@ private void ApplyMapping(MidiMapping mapping, int inputValue)
if (!_memberCache.TryGetValue(mapping.Id, out var memberInfo))
{
- memberInfo = targetObject.GetType().GetMember(mapping.Target.TargetMemberName, BindingFlags.Public | BindingFlags.Instance).FirstOrDefault();
+#pragma warning disable IL2072
+ memberInfo = targetObject.GetType()
+ .GetMember(mapping.Target.TargetMemberName, BindingFlags.Public | BindingFlags.Instance)
+ .FirstOrDefault();
+#pragma warning restore IL2072
if (memberInfo == null)
{
mapping.IsResolved = false; // Mark as unresolved if member is missing
return;
}
+
_memberCache[mapping.Id] = memberInfo;
}
@@ -190,23 +198,24 @@ private void ApplyMapping(MidiMapping mapping, int inputValue)
}
catch (Exception ex)
{
- Log.Error($"[MIDI Mapping] Error applying mapping for '{mapping.Target.TargetMemberName}': {ex.Message}");
+ Log.Error($"Error applying mapping for '{mapping.Target.TargetMemberName}': {ex.Message}");
_memberCache.Remove(mapping.Id);
}
}
- private static void HandleAbsolute(MemberInfo memberInfo, IMidiMappable targetObject, int inputValue, ValueTransformer transformer)
+ private static void HandleAbsolute(MemberInfo memberInfo, IMidiMappable targetObject, int inputValue,
+ ValueTransformer transformer)
{
if (memberInfo is not PropertyInfo propInfo) return;
-
+
        var attribute = propInfo.GetCustomAttribute<ControllableParameterAttribute>();
if (attribute == null) return;
-
+
var transformedValue = TransformValue(inputValue, transformer, attribute);
var convertedValue = Convert.ChangeType(transformedValue, propInfo.PropertyType);
propInfo.SetValue(targetObject, convertedValue);
}
-
+
private static void HandleToggle(MemberInfo memberInfo, IMidiMappable targetObject, int inputValue, int threshold)
{
if (memberInfo is not PropertyInfo propInfo || propInfo.PropertyType != typeof(bool)) return;
@@ -216,7 +225,8 @@ private static void HandleToggle(MemberInfo memberInfo, IMidiMappable targetObje
propInfo.SetValue(targetObject, !currentValue);
}
- private static void HandleTrigger(MemberInfo memberInfo, IMidiMappable targetObject, int inputValue, MidiMapping mapping)
+ private static void HandleTrigger(MemberInfo memberInfo, IMidiMappable targetObject, int inputValue,
+ MidiMapping mapping)
{
if (memberInfo is not MethodInfo methodInfo) return;
if (inputValue < mapping.ActivationThreshold) return;
@@ -226,20 +236,21 @@ private static void HandleTrigger(MemberInfo memberInfo, IMidiMappable targetObj
if (methodParams.Length != mappingArgs.Count)
{
- Log.Warning($"[MIDI Mapping] Method '{methodInfo.Name}' signature does not match mapping argument count.");
+ Log.Warning($"Method '{methodInfo.Name}' signature does not match mapping argument count.");
return;
}
-
+
var invokeArgs = new object?[methodParams.Length];
for (var i = 0; i < methodParams.Length; i++)
{
var argDef = mappingArgs[i];
var paramInfo = methodParams[i];
-
+
            var attribute = paramInfo.GetCustomAttribute<ControllableParameterAttribute>();
if (attribute == null)
{
- Log.Warning($"[MIDI Mapping] Method parameter '{paramInfo.Name}' is missing [ControllableParameter] attribute.");
+ Log.Warning(
+ $"Method parameter '{paramInfo.Name}' is missing [ControllableParameter] attribute.");
return;
}
@@ -252,11 +263,12 @@ private static void HandleTrigger(MemberInfo memberInfo, IMidiMappable targetObj
// Convert to the method parameter's actual type
invokeArgs[i] = Convert.ChangeType(value, paramInfo.ParameterType);
}
-
+
methodInfo.Invoke(targetObject, invokeArgs);
}
-
- private static void HandleRelative(MemberInfo memberInfo, IMidiMappable targetObject, int inputValue, ValueTransformer transformer)
+
+ private static void HandleRelative(MemberInfo memberInfo, IMidiMappable targetObject, int inputValue,
+ ValueTransformer transformer)
{
if (memberInfo is not PropertyInfo propInfo || !IsNumericType(propInfo.PropertyType)) return;
@@ -264,20 +276,21 @@ private static void HandleRelative(MemberInfo memberInfo, IMidiMappable targetOb
if (attribute == null) return;
var currentValue = Convert.ToSingle(propInfo.GetValue(targetObject));
-
+
// Standard relative encoder behavior: 64 is center, <64 is down, >64 is up
var delta = inputValue - 64;
-
+
// Scale the delta based on the target range to define the step size
var step = (float)((attribute.MaxValue - attribute.MinValue) / (transformer.SourceMax - transformer.SourceMin));
var newValue = currentValue + (delta * step);
newValue = Math.Clamp(newValue, (float)attribute.MinValue, (float)attribute.MaxValue);
-
+
var convertedValue = Convert.ChangeType(newValue, propInfo.PropertyType);
propInfo.SetValue(targetObject, convertedValue);
}
- private static float TransformValue(int inputValue, ValueTransformer transformer, ControllableParameterAttribute attribute)
+ private static float TransformValue(int inputValue, ValueTransformer transformer,
+ ControllableParameterAttribute attribute)
{
// 1. Normalize MIDI input to [0, 1] based on transformer's source range
var normalizedMidi = (inputValue - transformer.SourceMin) / (transformer.SourceMax - transformer.SourceMin);
@@ -290,7 +303,7 @@ private static float TransformValue(int inputValue, ValueTransformer transformer
MidiMappingCurveType.Logarithmic => MathF.Sqrt(normalizedMidi),
_ => normalizedMidi
};
-
+
// 3. Denormalize from the transformer's target range (e.g. 0-1) to create the final normalized value
var finalNormalized = transformer.TargetMin + normalizedMidi * (transformer.TargetMax - transformer.TargetMin);
finalNormalized = Math.Clamp(finalNormalized, 0.0f, 1.0f);
@@ -302,7 +315,7 @@ private static float TransformValue(int inputValue, ValueTransformer transformer
var maxLog = Math.Log(attribute.MaxValue);
return (float)Math.Exp(minLog + (maxLog - minLog) * finalNormalized);
}
-
+
// Linear scale
return (float)(attribute.MinValue + (attribute.MaxValue - attribute.MinValue) * finalNormalized);
}
@@ -319,6 +332,7 @@ public void Dispose()
{
device.OnMessageReceived -= OnMidiMessageReceived;
}
+
_subscribedDevices.Clear();
_highResCcParsers.Clear();
}
@@ -352,14 +366,28 @@ public bool ProcessMessage(MidiMessage message)
switch (message.ControllerNumber)
{
// NRPN
- case 99: state.NrpnMsb = message.ControllerValue; state.RpnMsb = -1; state.RpnLsb = -1; return true;
- case 98: state.NrpnLsb = message.ControllerValue; return true;
+ case 99:
+ state.NrpnMsb = message.ControllerValue;
+ state.RpnMsb = -1;
+ state.RpnLsb = -1;
+ return true;
+ case 98:
+ state.NrpnLsb = message.ControllerValue;
+ return true;
// RPN
- case 101: state.RpnMsb = message.ControllerValue; state.NrpnMsb = -1; state.NrpnLsb = -1; return true;
- case 100: state.RpnLsb = message.ControllerValue; return true;
-
+ case 101:
+ state.RpnMsb = message.ControllerValue;
+ state.NrpnMsb = -1;
+ state.NrpnLsb = -1;
+ return true;
+ case 100:
+ state.RpnLsb = message.ControllerValue;
+ return true;
+
// Data Entry
- case 6: state.DataMsb = message.ControllerValue; return true;
+ case 6:
+ state.DataMsb = message.ControllerValue;
+ return true;
case 38:
if (state.DataMsb != -1)
{
@@ -376,14 +404,18 @@ public bool ProcessMessage(MidiMessage message)
if (parameter != -1)
{
var value = (state.DataMsb << 7) | message.ControllerValue;
- var device = manager._subscribedDevices.FirstOrDefault(d => manager._highResCcParsers[d] == this);
- if(device != null) manager.ApplyHighResMapping(message.Channel, parameter, value, device.Info);
-
+ var device =
+ manager._subscribedDevices.FirstOrDefault(d => manager._highResCcParsers[d] == this);
+ if (device != null)
+ manager.ApplyHighResMapping(message.Channel, parameter, value, device.Info);
+
// Reset data entry state after use
- state.DataMsb = -1;
+ state.DataMsb = -1;
}
+
return true;
}
+
break;
}
diff --git a/Src/Editing/Persistence/CompositionProjectManager.cs b/Src/Editing/Persistence/CompositionProjectManager.cs
index 3eba963..2e31e92 100644
--- a/Src/Editing/Persistence/CompositionProjectManager.cs
+++ b/Src/Editing/Persistence/CompositionProjectManager.cs
@@ -1,16 +1,19 @@
-using System.Text.Json;
-using SoundFlow.Interfaces;
-using SoundFlow.Providers;
-using SoundFlow.Enums;
-using SoundFlow.Abstracts;
using System.Buffers;
using System.Reflection;
+using System.Text.Json;
using System.Text.Json.Nodes;
+using System.Text.Json.Serialization.Metadata;
+using SoundFlow.Abstracts;
using SoundFlow.Editing.Mapping;
+using SoundFlow.Enums;
+using SoundFlow.Interfaces;
using SoundFlow.Metadata.Midi;
using SoundFlow.Midi.Abstracts;
using SoundFlow.Midi.Interfaces;
using SoundFlow.Midi.Routing.Nodes;
+using SoundFlow.Providers;
+using SoundFlow.Security;
+using SoundFlow.Security.Configuration;
using SoundFlow.Structs;
using SoundFlow.Utils;
@@ -23,13 +26,7 @@ public static class CompositionProjectManager
{
// The native file version this version of the library is designed to write and read.
// Used for compatibility checks during loading.
- private const string DefaultProjectFileVersion = "1.3.0";
-
- private static readonly JsonSerializerOptions SerializerOptions = new()
- {
- WriteIndented = true,
- PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
- };
+ private const string DefaultProjectFileVersion = "1.4.0";
#region Saving
@@ -40,17 +37,32 @@ public static class CompositionProjectManager
/// The composition to save.
/// The full path where the project file will be saved.
/// Configuration options for the save operation. If null, default options will be used.
+ ///
+ /// An optional JSON type resolver for user-defined types.
+ /// Required if your project contains custom Modifiers or Analyzers in a NativeAOT environment.
+ ///
public static async Task SaveProjectAsync(
AudioEngine engine,
Composition composition,
string projectFilePath,
- ProjectSaveOptions? options = null)
+ ProjectSaveOptions? options = null,
+ IJsonTypeInfoResolver? customTypeResolver = null)
{
ArgumentNullException.ThrowIfNull(composition);
ArgumentException.ThrowIfNullOrEmpty(projectFilePath);
// If no options are provided, create a default instance to avoid null checks everywhere.
options ??= new ProjectSaveOptions();
+
+ // Combine internal context with optional user context
+ var jsonOptions = new JsonSerializerOptions
+ {
+ WriteIndented = true,
+ PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
+ TypeInfoResolver = customTypeResolver != null
+ ? JsonTypeInfoResolver.Combine(SoundFlowJsonContext.Default, customTypeResolver)
+ : SoundFlowJsonContext.Default
+ };
var projectData = new ProjectData
{
@@ -62,9 +74,9 @@ public static async Task SaveProjectAsync(
TicksPerQuarterNote = composition.TicksPerQuarterNote,
TempoTrack = composition.TempoTrack.Select(m => new ProjectTempoMarker
{ Time = m.Time, BeatsPerMinute = m.BeatsPerMinute }).ToList(),
- Modifiers = SerializeEffects(composition.Modifiers),
- Analyzers = SerializeEffects(composition.Analyzers),
- MidiTargets = SerializeEffects(composition.MidiTargets.OfType().Select(n => n.Target)),
+ Modifiers = SerializeEffects(composition.Modifiers, jsonOptions),
+ Analyzers = SerializeEffects(composition.Analyzers, jsonOptions),
+ MidiTargets = SerializeEffects(composition.MidiTargets.OfType<MidiTargetNode>().Select(n => n.Target), jsonOptions),
MidiMappings = SerializeMappings(composition.MappingManager.Mappings)
};
@@ -89,8 +101,8 @@ public static async Task SaveProjectAsync(
IsSoloed = track.Settings.IsSoloed,
Volume = track.Settings.Volume,
Pan = track.Settings.Pan,
- Modifiers = SerializeEffects(track.Settings.Modifiers),
- Analyzers = SerializeEffects(track.Settings.Analyzers)
+ Modifiers = SerializeEffects(track.Settings.Modifiers, jsonOptions),
+ Analyzers = SerializeEffects(track.Settings.Analyzers, jsonOptions)
}
};
@@ -138,8 +150,8 @@ public static async Task SaveProjectAsync(
FadeOutCurve = segment.Settings.FadeOutCurve,
TimeStretchFactor = segment.Settings.TimeStretchFactor,
TargetStretchDuration = segment.Settings.TargetStretchDuration,
- Modifiers = SerializeEffects(segment.Settings.Modifiers),
- Analyzers = SerializeEffects(segment.Settings.Analyzers)
+ Modifiers = SerializeEffects(segment.Settings.Modifiers, jsonOptions),
+ Analyzers = SerializeEffects(segment.Settings.Analyzers, jsonOptions)
}
});
}
@@ -158,7 +170,7 @@ public static async Task SaveProjectAsync(
IsEnabled = midiTrack.Settings.IsEnabled,
IsMuted = midiTrack.Settings.IsMuted,
IsSoloed = midiTrack.Settings.IsSoloed,
- MidiModifiers = SerializeEffects(midiTrack.Settings.MidiModifiers)
+ MidiModifiers = SerializeEffects(midiTrack.Settings.MidiModifiers, jsonOptions)
}
};
@@ -183,12 +195,56 @@ public static async Task SaveProjectAsync(
projectData.MidiTracks.Add(projectMidiTrack);
}
- var json = JsonSerializer.Serialize(projectData, SerializerOptions);
+ var typeInfo = (JsonTypeInfo<ProjectData>)jsonOptions.GetTypeInfo(typeof(ProjectData));
+ var json = JsonSerializer.Serialize(projectData, typeInfo);
await File.WriteAllTextAsync(projectFilePath, json);
+ // Signing Integration
+ if (options.SigningConfiguration != null)
+ {
+ var sigResult = await FileAuthenticator.SignFileAsync(projectFilePath, options.SigningConfiguration);
+ if (sigResult.IsSuccess)
+ {
+ var sigPath = projectFilePath + ".sig";
+ await File.WriteAllTextAsync(sigPath, sigResult.Value);
+ }
+ else
+ {
+ Log.Warning($"Failed to sign project file: {sigResult.Error?.Message}");
+ }
+ }
+
composition.ClearDirtyFlag();
}
+ ///
+ /// Verifies the integrity and authenticity of a project file using a detached digital signature.
+ /// This method checks if the project file has been modified since it was signed.
+ ///
+ /// The path to the project file (.sfproj).
+ /// The path to the signature file (.sig). If null, defaults to projectFilePath + ".sig".
+ /// The signature configuration containing the Public Key.
+ /// A result containing true if the project is valid and authentic; otherwise, false or an error.
+ public static async Task<Result<bool>> VerifyProjectAsync(string projectFilePath, string? signatureFilePath, SignatureConfiguration config)
+ {
+ if (!File.Exists(projectFilePath))
+ return new NotFoundError("File", $"Project file not found: {projectFilePath}");
+
+ var sigPath = signatureFilePath ?? projectFilePath + ".sig";
+ if (!File.Exists(sigPath))
+ return new NotFoundError("File", $"Signature file not found: {sigPath}");
+
+ try
+ {
+ var signature = await File.ReadAllTextAsync(sigPath);
+ return await FileAuthenticator.VerifyFileAsync(projectFilePath, signature, config);
+ }
+ catch (Exception ex)
+ {
+ return new Error("An error occurred while verifying the project file.", ex);
+ }
+ }
+
private static async Task CreateSourceReferenceAsync(
ISoundDataProvider provider,
AudioEngine engine,
@@ -372,15 +428,33 @@ private static byte[] CreateEmptyWavHeader(int sampleRate, int channels, SampleF
/// The audio engine instance for context.
/// The audio format of the composition. Cannot be null.
/// The full path of the project file to load.
+ ///
+ /// An optional JSON type resolver for user-defined types.
+ /// Required if your project contains custom Modifiers or Analyzers in a NativeAOT environment.
+ ///
/// A tuple containing the loaded Composition and a list of missing/unresolved source references.
public static async Task<(Composition Composition, List<ProjectSourceReference> UnresolvedSources)>
- LoadProjectAsync(AudioEngine engine, AudioFormat format, string projectFilePath)
+ LoadProjectAsync(
+ AudioEngine engine,
+ AudioFormat format,
+ string projectFilePath,
+ IJsonTypeInfoResolver? customTypeResolver = null)
{
if (!File.Exists(projectFilePath))
throw new FileNotFoundException("Project file not found.", projectFilePath);
+ // Combine internal context with optional user context
+ var jsonOptions = new JsonSerializerOptions
+ {
+ PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
+ TypeInfoResolver = customTypeResolver != null
+ ? JsonTypeInfoResolver.Combine(SoundFlowJsonContext.Default, customTypeResolver)
+ : SoundFlowJsonContext.Default
+ };
+
var json = await File.ReadAllTextAsync(projectFilePath);
- var projectData = JsonSerializer.Deserialize(json, SerializerOptions)
+ var typeInfo = (JsonTypeInfo<ProjectData>)jsonOptions.GetTypeInfo(typeof(ProjectData));
+ var projectData = JsonSerializer.Deserialize(json, typeInfo)
?? throw new JsonException("Failed to deserialize project data.");
if (Version.TryParse(projectData.ProjectFileVersion, out var fileVersion) &&
@@ -400,10 +474,10 @@ private static byte[] CreateEmptyWavHeader(int sampleRate, int channels, SampleF
};
composition.TempoTrack.Clear();
composition.TempoTrack.AddRange(projectData.TempoTrack.Select(m => new TempoMarker(m.Time, m.BeatsPerMinute)));
- composition.Modifiers.AddRange(DeserializeEffects(format, projectData.Modifiers, composition));
- composition.Analyzers.AddRange(DeserializeEffects(format, projectData.Analyzers, composition));
+ composition.Modifiers.AddRange(DeserializeEffects(format, projectData.Modifiers, composition, jsonOptions));
+ composition.Analyzers.AddRange(DeserializeEffects(format, projectData.Analyzers, composition, jsonOptions));
- var deserializedMidiControllables = DeserializeEffects(format, projectData.MidiTargets, composition);
+ var deserializedMidiControllables = DeserializeEffects(format, projectData.MidiTargets, composition, jsonOptions);
composition.MidiTargets.AddRange(deserializedMidiControllables.Select(c => new MidiTargetNode(c)));
@@ -466,9 +540,9 @@ private static byte[] CreateEmptyWavHeader(int sampleRate, int channels, SampleF
Pan = projectTrack.Settings.Pan,
};
trackSettings.Modifiers.AddRange(
- DeserializeEffects(format, projectTrack.Settings.Modifiers, composition));
+ DeserializeEffects(format, projectTrack.Settings.Modifiers, composition, jsonOptions));
trackSettings.Analyzers.AddRange(
- DeserializeEffects(format, projectTrack.Settings.Analyzers, composition));
+ DeserializeEffects(format, projectTrack.Settings.Analyzers, composition, jsonOptions));
var track = new Track(projectTrack.Name, trackSettings)
{
@@ -505,9 +579,9 @@ private static byte[] CreateEmptyWavHeader(int sampleRate, int channels, SampleF
FadeOutCurve = projectSegment.Settings.FadeOutCurve,
};
segmentSettings.Modifiers.AddRange(
- DeserializeEffects(format, projectSegment.Settings.Modifiers, composition));
+ DeserializeEffects(format, projectSegment.Settings.Modifiers, composition, jsonOptions));
segmentSettings.Analyzers.AddRange(
- DeserializeEffects(format, projectSegment.Settings.Analyzers, composition));
+ DeserializeEffects(format, projectSegment.Settings.Analyzers, composition, jsonOptions));
var segment = new AudioSegment(
format,
@@ -542,7 +616,7 @@ private static byte[] CreateEmptyWavHeader(int sampleRate, int channels, SampleF
IsSoloed = projectMidiTrack.Settings.IsSoloed
};
trackSettings.MidiModifiers.AddRange(DeserializeEffects(default,
- projectMidiTrack.Settings.MidiModifiers, composition));
+ projectMidiTrack.Settings.MidiModifiers, composition, jsonOptions));
var midiTrack = new MidiTrack(projectMidiTrack.Name, settings: trackSettings)
{
@@ -621,7 +695,7 @@ private static byte[] CreateEmptyWavHeader(int sampleRate, int channels, SampleF
}
catch (Exception ex)
{
- Log.Error($"[CompositionProjectManager] Error decoding embedded data for source ID {sourceRef.Id}: {ex.Message}");
+ Log.Error($"Error decoding embedded data for source ID {sourceRef.Id}: {ex.Message}");
return null;
}
}
@@ -690,7 +764,7 @@ private static byte[] CreateEmptyWavHeader(int sampleRate, int channels, SampleF
}
catch (Exception ex)
{
- Log.Error($"[CompositionProjectManager] Error loading MIDI data for source ID {sourceRef.Id}: {ex.Message}");
+ Log.Error($"Error loading MIDI data for source ID {sourceRef.Id}: {ex.Message}");
return null;
}
}
@@ -750,7 +824,7 @@ public static bool RelinkMissingMedia(
#endregion
// Helper method to serialize modifiers/analyzers
- private static List SerializeEffects(IEnumerable effects) where T : class
+ private static List<ProjectEffectData> SerializeEffects<T>(IEnumerable<T> effects, JsonSerializerOptions jsonOptions) where T : class
{
var effectDataList = new List();
foreach (var effect in effects)
@@ -777,19 +851,21 @@ private static List SerializeEffects(IEnumerable effec
{
var value = prop.GetValue(effect);
if (value != null)
- parameters[prop.Name] =
- JsonValue.Create(JsonSerializer.SerializeToElement(value, SerializerOptions));
+ {
+ var propTypeInfo = jsonOptions.GetTypeInfo(prop.PropertyType);
+ parameters[prop.Name] = JsonValue.Create(JsonSerializer.SerializeToElement(value, propTypeInfo));
+ }
}
catch (Exception ex)
{
Log.Warning(
- $"[CompositionProjectManager] Could not serialize property '{prop.Name}' for effect type '{effectType.Name}': {ex.Message}");
+ $"Could not serialize property '{prop.Name}' for effect type '{effectType.Name}': {ex.Message}");
}
}
effectDataList.Add(new ProjectEffectData
{
- TypeName = effectType.AssemblyQualifiedName ?? effectType.FullName ?? string.Empty,
+ TypeName = effectType.FullName ?? string.Empty,
IsEnabled = effect switch
{
SoundModifier sm => sm.Enabled,
@@ -797,7 +873,7 @@ private static List SerializeEffects(IEnumerable effec
MidiModifier mm => mm.IsEnabled,
_ => false
},
- Parameters = JsonDocument.Parse(parameters.ToJsonString(SerializerOptions))
+ Parameters = JsonDocument.Parse(parameters.ToJsonString(jsonOptions))
});
}
@@ -807,7 +883,7 @@ private static List SerializeEffects(IEnumerable effec
// Helper method to deserialize modifiers/analyzers
private static List<T> DeserializeEffects<T>(AudioFormat format, List<ProjectEffectData> effectDataList,
- Composition composition) where T : class
+ Composition composition, JsonSerializerOptions jsonOptions) where T : class
{
var targetEffectList = new List();
foreach (var effectData in effectDataList)
@@ -818,10 +894,10 @@ private static List DeserializeEffects(AudioFormat format, List DeserializeEffects(AudioFormat format, List DeserializeEffects(AudioFormat format, List
/// Provides a set of configurable options for saving a composition project.
@@ -8,9 +10,9 @@ public class ProjectSaveOptions
///
/// The project file version to write into the saved file.
/// It is recommended to use the library's default version unless you have a specific need for version management.
- /// The current default is "1.3.0".
+ /// The current default is "1.4.0".
///
- public string ProjectFileVersion { get; set; } = "1.3.0";
+ public string ProjectFileVersion { get; set; } = "1.4.0";
///
/// The name of the subfolder where consolidated media files will be stored, relative to the project file.
@@ -38,4 +40,11 @@ public class ProjectSaveOptions
/// Default is true.
///
public bool EmbedSmallMedia { get; set; } = true;
+
+ ///
+ /// Gets or sets the configuration for digitally signing the project file.
+ /// If set, a detached signature file (.sig) will be generated alongside the project file.
+ /// This ensures the integrity and authenticity of the project structure and settings.
+ ///
+ public SignatureConfiguration? SigningConfiguration { get; set; }
}
\ No newline at end of file
diff --git a/Src/Editing/Persistence/SoundFlowJsonContext.cs b/Src/Editing/Persistence/SoundFlowJsonContext.cs
new file mode 100644
index 0000000..68fd437
--- /dev/null
+++ b/Src/Editing/Persistence/SoundFlowJsonContext.cs
@@ -0,0 +1,133 @@
+using System.Text.Json.Serialization;
+using SoundFlow.Components;
+using SoundFlow.Editing.Mapping;
+using SoundFlow.Enums;
+using SoundFlow.Midi.Modifier;
+using SoundFlow.Modifiers;
+using SoundFlow.Security.Analyzers;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Models;
+using SoundFlow.Security.Modifiers;
+using SoundFlow.Security.Payloads;
+using SoundFlow.Visualization;
+
+namespace SoundFlow.Editing.Persistence;
+
+///
+/// Source-generated JSON context for SoundFlow project persistence.
+/// Includes all DTOs, Primitives, and Built-in Effect types to ensure AOT compatibility.
+///
+[JsonSourceGenerationOptions(
+ WriteIndented = true,
+ PropertyNamingPolicy = JsonKnownNamingPolicy.CamelCase,
+ NumberHandling = JsonNumberHandling.AllowReadingFromString
+)]
+
+// Core Project DTOs
+[JsonSerializable(typeof(ProjectData))]
+[JsonSerializable(typeof(ProjectTrack))]
+[JsonSerializable(typeof(ProjectMidiTrack))]
+[JsonSerializable(typeof(ProjectSegment))]
+[JsonSerializable(typeof(ProjectMidiSegment))]
+[JsonSerializable(typeof(ProjectTrackSettings))]
+[JsonSerializable(typeof(ProjectAudioSegmentSettings))]
+[JsonSerializable(typeof(ProjectSourceReference))]
+[JsonSerializable(typeof(ProjectTempoMarker))]
+[JsonSerializable(typeof(ProjectEffectData))]
+[JsonSerializable(typeof(ProjectMidiMapping))]
+
+// Mapping & Routing Types
+[JsonSerializable(typeof(ValueTransformer))]
+[JsonSerializable(typeof(MidiInputSource))]
+[JsonSerializable(typeof(MidiMappingTarget))]
+[JsonSerializable(typeof(MethodArgument))]
+[JsonSerializable(typeof(MidiMappingSourceType))]
+[JsonSerializable(typeof(MidiMappingBehavior))]
+[JsonSerializable(typeof(MidiMappingTargetType))]
+[JsonSerializable(typeof(MidiMappingArgumentSource))]
+[JsonSerializable(typeof(MidiMappingCurveType))]
+
+// Primitives & Common Enums
+[JsonSerializable(typeof(sbyte))]
+[JsonSerializable(typeof(short))]
+[JsonSerializable(typeof(int))]
+[JsonSerializable(typeof(long))]
+[JsonSerializable(typeof(byte))]
+[JsonSerializable(typeof(ushort))]
+[JsonSerializable(typeof(uint))]
+[JsonSerializable(typeof(ulong))]
+[JsonSerializable(typeof(float))]
+[JsonSerializable(typeof(double))]
+[JsonSerializable(typeof(decimal))]
+[JsonSerializable(typeof(char))]
+[JsonSerializable(typeof(bool))]
+[JsonSerializable(typeof(string))]
+[JsonSerializable(typeof(nint))]
+[JsonSerializable(typeof(nuint))]
+[JsonSerializable(typeof(Guid))]
+[JsonSerializable(typeof(DateTime))]
+[JsonSerializable(typeof(DateTimeOffset))]
+[JsonSerializable(typeof(TimeSpan))]
+[JsonSerializable(typeof(FadeCurveType))]
+[JsonSerializable(typeof(LoopSettings))]
+[JsonSerializable(typeof(FadeCurveType))]
+[JsonSerializable(typeof(LoopSettings))]
+
+// Sound Modifiers
+[JsonSerializable(typeof(AlgorithmicReverbModifier))]
+[JsonSerializable(typeof(BassBoosterModifier))]
+[JsonSerializable(typeof(ChorusModifier))]
+[JsonSerializable(typeof(CompressorModifier))]
+[JsonSerializable(typeof(DelayModifier))]
+[JsonSerializable(typeof(Filter))]
+[JsonSerializable(typeof(FrequencyBandModifier))]
+[JsonSerializable(typeof(HighPassModifier))]
+[JsonSerializable(typeof(LowPassModifier))]
+[JsonSerializable(typeof(MultiChannelChorusModifier))]
+[JsonSerializable(typeof(ParametricEqualizer))]
+[JsonSerializable(typeof(ResamplerModifier))]
+[JsonSerializable(typeof(TrebleBoosterModifier))]
+[JsonSerializable(typeof(VocalExtractorModifier))]
+
+// Security Modifiers
+[JsonSerializable(typeof(OwnershipWatermarkEmbedModifier))]
+[JsonSerializable(typeof(IntegrityWatermarkEmbedModifier))]
+[JsonSerializable(typeof(StreamEncryptionModifier))]
+
+// Modifier Specific Sub-Types
+[JsonSerializable(typeof(EqualizerBand))]
+[JsonSerializable(typeof(List))]
+[JsonSerializable(typeof(FilterType))]
+
+// Audio Analyzers
+[JsonSerializable(typeof(LevelMeterAnalyzer))]
+[JsonSerializable(typeof(SpectrumAnalyzer))]
+[JsonSerializable(typeof(VoiceActivityDetector))]
+[JsonSerializable(typeof(ContentFingerprintAnalyzer))]
+
+// Security Analyzers
+[JsonSerializable(typeof(OwnershipWatermarkExtractAnalyzer))]
+[JsonSerializable(typeof(IntegrityWatermarkVerifyAnalyzer))]
+
+// Security Types
+[JsonSerializable(typeof(FingerprintConfiguration))]
+[JsonSerializable(typeof(AudioFingerprint))]
+[JsonSerializable(typeof(FingerprintHash))]
+[JsonSerializable(typeof(List))]
+[JsonSerializable(typeof(WatermarkConfiguration))]
+[JsonSerializable(typeof(TextPayload))]
+[JsonSerializable(typeof(EncryptionConfiguration))]
+[JsonSerializable(typeof(SignatureConfiguration))]
+
+// MIDI Modifiers
+[JsonSerializable(typeof(ArpeggiatorModifier))]
+[JsonSerializable(typeof(ChannelFilterModifier))]
+[JsonSerializable(typeof(HarmonizerModifier))]
+[JsonSerializable(typeof(RandomizerModifier))]
+[JsonSerializable(typeof(TransposeModifier))]
+[JsonSerializable(typeof(VelocityModifier))]
+
+// MIDI Modifier Sub-Types
+[JsonSerializable(typeof(ArpMode))]
+[JsonSerializable(typeof(int[]))] // For Harmonizer intervals
+internal partial class SoundFlowJsonContext : JsonSerializerContext;
\ No newline at end of file
diff --git a/Src/Editing/Track.cs b/Src/Editing/Track.cs
index b31c682..6ea2357 100644
--- a/Src/Editing/Track.cs
+++ b/Src/Editing/Track.cs
@@ -1,4 +1,5 @@
using System.Buffers;
+using System.Diagnostics.CodeAnalysis;
using SoundFlow.Interfaces;
namespace SoundFlow.Editing;
@@ -7,6 +8,7 @@ namespace SoundFlow.Editing;
/// Represents a single audio track within a composition, containing a collection of audio segments
/// and applying track-level settings like volume, pan, mute, and solo.
///
+[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicMethods)]
public class Track : IMidiMappable
{
private string _name;
diff --git a/Src/Editing/TrackSettings.cs b/Src/Editing/TrackSettings.cs
index ab0c428..5573073 100644
--- a/Src/Editing/TrackSettings.cs
+++ b/Src/Editing/TrackSettings.cs
@@ -1,3 +1,4 @@
+using System.Diagnostics.CodeAnalysis;
using SoundFlow.Abstracts;
using SoundFlow.Interfaces;
using SoundFlow.Midi.Abstracts;
@@ -8,6 +9,7 @@ namespace SoundFlow.Editing;
/// Represents the configurable settings for a or ,
/// controlling its overall playback characteristics such as volume, pan, and mute/solo states.
///
+[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicMethods)]
public class TrackSettings : IMidiMappable
{
private float _volume = 1.0f;
diff --git a/Src/Enums/FilterType.cs b/Src/Enums/FilterType.cs
new file mode 100644
index 0000000..65828fa
--- /dev/null
+++ b/Src/Enums/FilterType.cs
@@ -0,0 +1,47 @@
+namespace SoundFlow.Enums;
+
+///
+/// Types of filters supported by the Parametric Equalizer.
+///
+public enum FilterType
+{
+ ///
+ /// A peaking equalizer boosts or cuts a specific frequency range.
+ ///
+ Peaking,
+
+ ///
+ /// A low-shelf equalizer boosts or cuts all frequencies below a specific frequency.
+ ///
+ LowShelf,
+
+ ///
+ /// A high-shelf equalizer boosts or cuts all frequencies above a specific frequency.
+ ///
+ HighShelf,
+
+ ///
+ /// A low-pass filter removes high frequencies from the audio signal.
+ ///
+ LowPass,
+
+ ///
+ /// A high-pass filter removes low frequencies from the audio signal.
+ ///
+ HighPass,
+
+ ///
+ /// A band-pass filter removes all frequencies outside a specific frequency range.
+ ///
+ BandPass,
+
+ ///
+ /// A notch filter removes a specific frequency range from the audio signal.
+ ///
+ Notch,
+
+ ///
+ /// An all-pass filter changes the phase of the audio signal without affecting its frequency response.
+ ///
+ AllPass
+}
\ No newline at end of file
diff --git a/Src/Interfaces/IVisualizer.cs b/Src/Interfaces/IVisualizer.cs
index b20881b..71aeabf 100644
--- a/Src/Interfaces/IVisualizer.cs
+++ b/Src/Interfaces/IVisualizer.cs
@@ -14,7 +14,7 @@ public interface IVisualizer : IDisposable
/// Processes the audio data. This method is called by an analyzer component.
///
/// The audio data to process.
- void ProcessOnAudioData(Span audioData);
+ void ProcessOnAudioData(ReadOnlySpan audioData);
///
/// Updates the visualization. This method should be called periodically to render the visualization.
diff --git a/Src/Metadata/Midi/MidiFileParser.cs b/Src/Metadata/Midi/MidiFileParser.cs
index baef081..1b61267 100644
--- a/Src/Metadata/Midi/MidiFileParser.cs
+++ b/Src/Metadata/Midi/MidiFileParser.cs
@@ -62,7 +62,7 @@ public static MidiFile Parse(Stream stream)
else
{
// This is an unknown or non-track chunk. Read its length and skip it.
- Log.Error($"[MIDI Parser] Skipping unknown chunk type '{chunkId}' of length {chunkLength}.");
+ Log.Error($"Skipping unknown chunk type '{chunkId}' of length {chunkLength}.");
reader.BaseStream.Seek(chunkLength, SeekOrigin.Current);
}
}
diff --git a/Src/Metadata/Models/SoundFormatInfo.cs b/Src/Metadata/Models/SoundFormatInfo.cs
index 9e83ced..75796c1 100644
--- a/Src/Metadata/Models/SoundFormatInfo.cs
+++ b/Src/Metadata/Models/SoundFormatInfo.cs
@@ -29,7 +29,7 @@ public enum BitrateMode
///
/// Holds the format, tag, and cue information for an audio file.
///
-public class SoundFormatInfo
+public record SoundFormatInfo
{
///
/// The common name of the audio format (e.g., "WAV", "MP3").
diff --git a/Src/Metadata/Models/SoundTags.cs b/Src/Metadata/Models/SoundTags.cs
index 9032966..d073065 100644
--- a/Src/Metadata/Models/SoundTags.cs
+++ b/Src/Metadata/Models/SoundTags.cs
@@ -8,42 +8,42 @@ public sealed class SoundTags
///
/// Gets or sets the title of the audio file.
///
- public string Title { get; internal set; } = string.Empty;
+ public string Title { get; set; } = string.Empty;
///
/// Gets or sets the artist of the audio file.
///
- public string Artist { get; internal set; } = string.Empty;
+ public string Artist { get; set; } = string.Empty;
///
/// Gets or sets the album of the audio file.
///
- public string Album { get; internal set; } = string.Empty;
+ public string Album { get; set; } = string.Empty;
///
/// Gets or sets the genre of the audio file.
///
- public string Genre { get; internal set; } = string.Empty;
+ public string Genre { get; set; } = string.Empty;
///
/// Gets or sets the year of the audio file, if available.
///
- public uint? Year { get; internal set; }
+ public uint? Year { get; set; }
///
/// Gets or sets the track number of the audio file, if available.
///
- public uint? TrackNumber { get; internal set; }
+ public uint? TrackNumber { get; set; }
///
/// Gets or sets the embedded album art of the audio file, if available.
///
- public byte[]? AlbumArt { get; internal set; }
+ public byte[]? AlbumArt { get; set; }
///
/// Gets the embedded, unsynchronized lyrics. Null if not present.
///
- public string? Lyrics { get; internal set; }
+ public string? Lyrics { get; set; }
///
diff --git a/Src/Metadata/Readers/Format/Mp3Reader.cs b/Src/Metadata/Readers/Format/Mp3Reader.cs
index c85adcf..7e7e3e5 100644
--- a/Src/Metadata/Readers/Format/Mp3Reader.cs
+++ b/Src/Metadata/Readers/Format/Mp3Reader.cs
@@ -54,19 +54,20 @@ public override async Task> ReadAsync(Stream stream, Rea
FormatIdentifier = "mp3",
IsLossless = false
};
- long audioDataStart = 0;
+
var streamLength = stream.Length;
- // Read ID3v2 tag if requested
- if (options.ReadTags)
+ // Check for an ID3v2 tag to determine the correct audio data starting offset.
+ var (isTagPresent, tagSize) = await Id3V2Reader.TryGetHeaderInfoAsync(stream);
+ var audioDataStart = isTagPresent ? tagSize : 0;
+
+ // If the user wants to read tags and a tag is present, fully parse it.
+ if (options.ReadTags && isTagPresent)
{
var id3Reader = new Id3V2Reader();
- var id3Result = await id3Reader.ReadAsync(stream, options);
+ var id3Result = await id3Reader.ReadAsync(stream, options); // This will parse the full tag.
if (id3Result.IsFailure) return Result.Fail(id3Result.Error!);
- var (tag, tagSize) = id3Result.Value;
-
- info.Tags = tag;
- audioDataStart = tagSize;
+ info.Tags = id3Result.Value.Item1;
}
// If no ID3v2 tags were found, try to read ID3v1 tags from the end of the file.
@@ -89,19 +90,17 @@ public override async Task> ReadAsync(Stream stream, Rea
var parseResult = ParseFrameHeader(headerBuffer, info);
if(parseResult.IsFailure) return Result.Fail(parseResult.Error!);
- if (options.DurationAccuracy == DurationAccuracy.AccurateScan)
+ // Try to read VBR header regardless of accuracy setting.
+ try
{
- try
- {
- await TryReadVbrHeaderAsync(stream, headerBuffer, info);
- }
- catch (EndOfStreamException ex)
- {
- return new CorruptFrameError("MP3 Xing/VBRI", "File is truncated or VBR header is malformed.", ex);
- }
+ await TryReadVbrHeaderAsync(stream, headerBuffer, info);
+ }
+ catch (EndOfStreamException ex)
+ {
+ return new CorruptFrameError("MP3 Xing/VBRI", "File is truncated or VBR header is malformed.", ex);
}
- // Estimate duration if not accurately determined
+ // Fallback estimation if no VBR header was found
if (info.Duration == TimeSpan.Zero && info.Bitrate > 0)
{
var audioDataLength = streamLength - audioDataStart;
diff --git a/Src/Metadata/Readers/Tags/Id3V2Reader.cs b/Src/Metadata/Readers/Tags/Id3V2Reader.cs
index b30ec24..60b7858 100644
--- a/Src/Metadata/Readers/Tags/Id3V2Reader.cs
+++ b/Src/Metadata/Readers/Tags/Id3V2Reader.cs
@@ -6,6 +6,39 @@ namespace SoundFlow.Metadata.Readers.Tags;
internal class Id3V2Reader
{
+ ///
+ /// A non-destructive utility method to quickly check for an ID3v2 tag and get its total size.
+ ///
+ /// The stream to check. Must be seekable.
+ /// A tuple containing a boolean indicating if a tag was found, and the total size of the tag in bytes.
+ public static async Task<(bool Found, long Size)> TryGetHeaderInfoAsync(Stream stream)
+ {
+ if (stream.Length < 10) return (false, 0);
+
+ var originalPosition = stream.Position;
+ try
+ {
+ stream.Position = 0;
+ var header = new byte[10];
+ var bytesRead = await stream.ReadAsync(header.AsMemory(0, 10));
+
+ if (bytesRead < 10 || Encoding.ASCII.GetString(header, 0, 3) != "ID3")
+ {
+ return (false, 0);
+ }
+
+ // Synchsafe integer conversion for tag body size
+ var tagBodySize = (header[6] << 21) | (header[7] << 14) | (header[8] << 7) | header[9];
+ var totalTagSize = 10 + tagBodySize;
+
+ return (true, totalTagSize);
+ }
+ finally
+ {
+ stream.Position = originalPosition;
+ }
+ }
+
public async Task> ReadAsync(Stream stream, ReadOptions options)
{
var startPosition = stream.Position;
@@ -18,6 +51,7 @@ internal class Id3V2Reader
return Result<(SoundTags?, long)>.Ok((null, 0));
}
+ var majorVersion = header[3];
// Synchsafe integer conversion
var tagSize = (header[6] << 21) | (header[7] << 14) | (header[8] << 7) | header[9];
long tagEndPosition = 10 + tagSize;
@@ -33,10 +67,21 @@ internal class Id3V2Reader
var frameId = Encoding.ASCII.GetString(frameHeader, 0, 4);
if (frameId.All(c => c == '\0')) break; // Padding
- var frameSize = (frameHeader[4] << 24) | (frameHeader[5] << 16) | (frameHeader[6] << 8) |
- frameHeader[7];
+ int frameSize;
+
+ // ID3v2.4 uses Synchsafe integers for frame sizes.
+ // ID3v2.3 uses standard integers.
+ if (majorVersion == 4)
+ {
+ frameSize = (frameHeader[4] << 21) | (frameHeader[5] << 14) | (frameHeader[6] << 7) | frameHeader[7];
+ }
+ else
+ {
+ frameSize = (frameHeader[4] << 24) | (frameHeader[5] << 16) | (frameHeader[6] << 8) | frameHeader[7];
+ }
+
if (frameSize <= 0 || stream.Position + frameSize > tagEndPosition)
- return new CorruptFrameError("ID3v2", "Invalid frame size or frame exceeds tag boundaries.");
+ return new CorruptFrameError($"ID3v2.{majorVersion}", "Invalid frame size or frame exceeds tag boundaries.");
var nextFramePos = stream.Position + frameSize;
@@ -152,11 +197,7 @@ private string GetString(byte[] data)
var length = end - start;
if (length <= 0) return string.Empty;
-
- // **[REFINED FIX]**
- // For multi-byte encodings like UTF-16, a malformed tag can have a data length
- // that is not a multiple of the character size. To prevent a decoding error
- // (which produces '�'), we must truncate the length to the last valid character boundary.
+
if (terminatorSize > 1 && length % terminatorSize != 0)
{
length -= length % terminatorSize; // e.g., for UTF-16, if length is 13, it becomes 12.
diff --git a/Src/Metadata/SoundMetadataWriter.cs b/Src/Metadata/SoundMetadataWriter.cs
index 4df9016..9834ebc 100644
--- a/Src/Metadata/SoundMetadataWriter.cs
+++ b/Src/Metadata/SoundMetadataWriter.cs
@@ -52,7 +52,7 @@ public static async Task WriteTagsAsync(string filePath, SoundTags? tags
catch (Exception ex)
{
// Catch unexpected system-level exceptions during file operations.
- return new IOError("An unexpected error occurred during the file write operation.", ex);
+ return new IoError("File write operation.", ex);
}
finally
{
@@ -96,7 +96,7 @@ public static async Task RemoveTagsAsync(string filePath)
}
catch(Exception ex)
{
- return new IOError("An unexpected error occurred during the file write operation.", ex);
+ return new IoError("File write operation.", ex);
}
finally
{
@@ -149,7 +149,7 @@ private static Result GetWriter(string filePath)
}
catch (IOException ex)
{
- return new IOError("An I/O error occurred while identifying the file format.", ex);
+ return new IoError("Identifying the file format.", ex);
}
}
}
\ No newline at end of file
diff --git a/Src/Midi/Abstracts/MidiModifier.cs b/Src/Midi/Abstracts/MidiModifier.cs
index dc4c210..db28fe2 100644
--- a/Src/Midi/Abstracts/MidiModifier.cs
+++ b/Src/Midi/Abstracts/MidiModifier.cs
@@ -1,6 +1,6 @@
-using SoundFlow.Interfaces;
+using System.Diagnostics.CodeAnalysis;
+using SoundFlow.Interfaces;
using SoundFlow.Midi.Structs;
-using SoundFlow.Structs;
namespace SoundFlow.Midi.Abstracts;
@@ -8,6 +8,7 @@ namespace SoundFlow.Midi.Abstracts;
/// Base class for real-time MIDI processing components (MIDI effects).
/// Implementations of this class can filter, transform, or generate MIDI messages.
///
+[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicMethods)]
public abstract class MidiModifier : IMidiMappable
{
///
diff --git a/Src/Midi/Routing/MidiRoute.cs b/Src/Midi/Routing/MidiRoute.cs
index cd74e6c..f3ddc39 100644
--- a/Src/Midi/Routing/MidiRoute.cs
+++ b/Src/Midi/Routing/MidiRoute.cs
@@ -164,7 +164,7 @@ private void ProcessRoute(MidiMessage message)
if (result.IsFailure)
{
IsFaulted = true;
- Log.Error($"[MIDI Route Fault] Route from '{Source.Name}' to '{Destination.Name}' failed: {result.Error?.Message}");
+ Log.Error($"Route from '{Source.Name}' to '{Destination.Name}' failed: {result.Error?.Message}");
OnError?.Invoke(this, result.Error);
}
}
@@ -181,7 +181,7 @@ private void ProcessSysExRoute(byte[] data)
if (result.IsFailure)
{
IsFaulted = true;
- Log.Error($"[MIDI Route Fault] SysEx route from '{Source.Name}' to '{Destination.Name}' failed: {result.Error?.Message}");
+ Log.Error($"SysEx route from '{Source.Name}' to '{Destination.Name}' failed: {result.Error?.Message}");
OnError?.Invoke(this, result.Error);
}
}
diff --git a/Src/Modifiers/Filter.cs b/Src/Modifiers/Filter.cs
index a6d2f53..9cef9b7 100644
--- a/Src/Modifiers/Filter.cs
+++ b/Src/Modifiers/Filter.cs
@@ -4,6 +4,7 @@
using SoundFlow.Midi.Enums;
using SoundFlow.Midi.Structs;
using SoundFlow.Structs;
+using SoundFlow.Utils;
namespace SoundFlow.Modifiers;
@@ -12,36 +13,32 @@ namespace SoundFlow.Modifiers;
///
public class Filter : SoundModifier
{
+ // One filter instance per channel to maintain independent state
+ private readonly BiquadFilter[] _filters;
+ private readonly AudioFormat _format;
+
+ // Parameters
+ private FilterType _type = FilterType.LowPass;
+ private float _cutoffFrequency = 1000f;
+ private float _resonance = 0.7f;
+
///
- /// Defines the different types of filters available.
+ /// Initializes a new instance of the class with default settings.
///
- public enum FilterType
+ /// The audio format containing channels and sample rate and sample format
+ public Filter(AudioFormat format)
{
- ///
- /// Allows frequencies below the cutoff frequency to pass, attenuating frequencies above it.
- ///
- LowPass,
- ///
- /// Allows frequencies above the cutoff frequency to pass, attenuating frequencies below it.
- ///
- HighPass,
- ///
- /// Allows frequencies around the cutoff frequency to pass, attenuating frequencies further away.
- ///
- BandPass,
- ///
- /// Attenuates frequencies around the cutoff frequency, allowing frequencies further away to pass.
- ///
- Notch
+ _format = format;
+ _filters = new BiquadFilter[format.Channels];
+ for (var i = 0; i < format.Channels; i++)
+ {
+ _filters[i] = new BiquadFilter();
+ }
+ UpdateCoefficients();
}
- // Parameters
- private FilterType _type = FilterType.LowPass;
-
- ///
- /// Gets or sets the audio format.
- ///
- public AudioFormat Format { get; set; }
+ ///
+ public override string Name { get; set; } = "Filter";
///
/// Gets or sets the type of filter.
@@ -54,12 +51,10 @@ public FilterType Type
set
{
_type = value;
- CalculateCoefficients();
+ UpdateCoefficients();
}
}
- private float _cutoffFrequency = 1000f;
-
///
/// Gets or sets the cutoff frequency of the filter in Hertz.
/// This frequency determines the point at which the filter starts to attenuate the signal.
@@ -72,16 +67,13 @@ public float CutoffFrequency
set
{
_cutoffFrequency = value;
- CalculateCoefficients();
+ UpdateCoefficients();
}
}
- private float _resonance = 0.7f;
-
///
/// Gets or sets the resonance of the filter, a value between 0 and 1.
- /// Higher resonance values emphasize frequencies around the cutoff frequency, potentially leading to self-oscillation in some filter types.
- /// Changing the resonance recalculates the filter coefficients. Resonance is clamped between 0.01 and 0.99 to prevent instability.
+ /// Higher resonance values emphasize frequencies around the cutoff frequency.
///
[ControllableParameter("Resonance", 0.0, 1.0)]
public float Resonance
@@ -90,27 +82,10 @@ public float Resonance
set
{
_resonance = value;
- CalculateCoefficients();
+ UpdateCoefficients();
}
}
- // Internal state for the biquad filter
- private float _x1, _x2, _y1, _y2; // Delay elements for input (x) and output (y) samples
- private float _a0, _a1, _a2, _b1, _b2; // Filter coefficients for the biquad filter structure
-
- ///
- /// Initializes a new instance of the class with default settings and calculates initial filter coefficients.
- ///
- /// The audio format containing channels and sample rate and sample format
- public Filter(AudioFormat format)
- {
- Format = format;
- CalculateCoefficients();
- }
-
- ///
- public override string Name { get; set; } = "Filter";
-
///
public override void ProcessMidiMessage(MidiMessage message)
{
@@ -138,66 +113,22 @@ public override void ProcessMidiMessage(MidiMessage message)
///
public override float ProcessSample(float sample, int channel)
{
- var output = _a0 * sample + _a1 * _x1 + _a2 * _x2 - _b1 * _y1 - _b2 * _y2;
-
- // Update delay elements for the next sample
- _x2 = _x1;
- _x1 = sample;
- _y2 = _y1;
- _y1 = output;
-
- return output;
+ // Use the filter instance dedicated to this channel
+ if (channel < _filters.Length)
+ {
+ return _filters[channel].Process(sample);
+ }
+ return sample;
}
///
- /// Calculates the biquad filter coefficients based on the current , , and parameters.
- /// This method uses standard formulas for digital biquad filter coefficient calculation and normalizes the coefficients.
+ /// Calculates and updates the biquad filter coefficients for all channels.
///
- private void CalculateCoefficients()
+ private void UpdateCoefficients()
{
- float sampleRate = Format.SampleRate;
- _resonance = Math.Clamp(_resonance, 0.01f, 0.99f);
- var omega = 2.0f * MathF.PI * CutoffFrequency / sampleRate; // Angular frequency
- var sinOmega = MathF.Sin(omega);
- var cosOmega = MathF.Cos(omega);
- var alpha = sinOmega / (2 * Resonance);
-
- // Calculate coefficients based on the selected filter type
- switch (Type)
+ foreach (var filter in _filters)
{
- case FilterType.LowPass:
- _a0 = (1 - cosOmega) / 2;
- _a1 = 1 - cosOmega;
- _a2 = (1 - cosOmega) / 2;
- break;
- case FilterType.HighPass:
- _a0 = (1 + cosOmega) / 2;
- _a1 = -(1 + cosOmega);
- _a2 = (1 + cosOmega) / 2;
- break;
- case FilterType.BandPass:
- _a0 = alpha;
- _a1 = 0;
- _a2 = -alpha;
- break;
- case FilterType.Notch:
- _a0 = 1;
- _a1 = -2 * cosOmega;
- _a2 = 1;
- break;
- default:
- throw new ArgumentOutOfRangeException();
+ filter.Update(_type, _format.SampleRate, _cutoffFrequency, _resonance);
}
-
- _b1 = -2 * cosOmega;
- _b2 = 1 - alpha;
-
- // Normalize coefficients by dividing by a0 (which is actually 'a0' in biquad formulas, and in our case it's (1+alpha) after calculations)
- var a0Inv = 1 / (1 + alpha);
- _a0 *= a0Inv;
- _a1 *= a0Inv;
- _a2 *= a0Inv;
- _b1 *= a0Inv;
- _b2 *= a0Inv;
}
}
\ No newline at end of file
diff --git a/Src/Modifiers/ParametricEqualizer.cs b/Src/Modifiers/ParametricEqualizer.cs
index 1c9c27e..7f157f0 100644
--- a/Src/Modifiers/ParametricEqualizer.cs
+++ b/Src/Modifiers/ParametricEqualizer.cs
@@ -1,5 +1,7 @@
using SoundFlow.Abstracts;
+using SoundFlow.Enums;
using SoundFlow.Structs;
+using SoundFlow.Utils;
namespace SoundFlow.Modifiers;
@@ -16,6 +18,7 @@ public sealed class ParametricEqualizer : SoundModifier
///
public List Bands { get; private set; } = [];
+ // Dictionary mapping Channel Index -> List of BiquadFilters
private readonly Dictionary> _filtersPerChannel = [];
private readonly AudioFormat _format;
@@ -25,7 +28,7 @@ public sealed class ParametricEqualizer : SoundModifier
/// The audio format to process.
public ParametricEqualizer(AudioFormat format)
{
- _format = format; // Store the format
+ _format = format;
}
///
@@ -36,11 +39,11 @@ private void InitializeFilters()
_filtersPerChannel.Clear();
for (var channel = 0; channel < _format.Channels; channel++)
{
- List filters = [];
+ var filters = new List();
foreach (var band in Bands)
{
var filter = new BiquadFilter();
- filter.UpdateCoefficients(band, _format.SampleRate);
+ filter.Update(band.Type, _format.SampleRate, band.Frequency, band.Q, band.GainDb, band.S);
filters.Add(filter);
}
@@ -61,25 +64,25 @@ public override void Process(Span buffer, int channels)
///
public override float ProcessSample(float sample, int channel)
{
- if (!_filtersPerChannel.TryGetValue(channel, out var value))
+ if (!_filtersPerChannel.TryGetValue(channel, out var channelFilters))
{
- // Initialize filters for this channel if not already done
+ // Initialize filters for this channel if not already done (lazy init)
var filters = new List();
foreach (var band in Bands)
{
var filter = new BiquadFilter();
- filter.UpdateCoefficients(band, _format.SampleRate);
+ filter.Update(band.Type, _format.SampleRate, band.Frequency, band.Q, band.GainDb, band.S);
filters.Add(filter);
}
- value = filters;
- _filtersPerChannel[channel] = value;
+ channelFilters = filters;
+ _filtersPerChannel[channel] = channelFilters;
}
var processedSample = sample;
- foreach (var filter in value)
+ foreach (var filter in channelFilters)
{
- processedSample = filter.ProcessSample(processedSample);
+ processedSample = filter.Process(processedSample);
}
return processedSample;
@@ -116,52 +119,6 @@ public void RemoveBand(EqualizerBand band)
}
}
-///
-/// Types of filters supported by the Parametric Equalizer.
-///
-public enum FilterType
-{
- ///
- /// A peaking equalizer boosts or cuts a specific frequency range.
- ///
- Peaking,
-
- ///
- /// A low-shelf equalizer boosts or cuts all frequencies below a specific frequency.
- ///
- LowShelf,
-
- ///
- /// A high-shelf equalizer boosts or cuts all frequencies above a specific frequency.
- ///
- HighShelf,
-
- ///
- /// A low-pass filter removes high frequencies from the audio signal.
- ///
- LowPass,
-
- ///
- /// A high-pass filter removes low frequencies from the audio signal.
- ///
- HighPass,
-
- ///
- /// A band-pass filter removes all frequencies outside a specific frequency range.
- ///
- BandPass,
-
- ///
- /// A notch filter removes a specific frequency range from the audio signal.
- ///
- Notch,
-
- ///
- /// An all-pass filter changes the phase of the audio signal without affecting its frequency response.
- ///
- AllPass
-}
-
///
/// Represents an EQ band with specific parameters.
///
@@ -196,143 +153,4 @@ public class EqualizerBand(FilterType type, float frequency, float gainDb, float
/// The type of filter to apply.
///
public FilterType Type { get; set; } = type;
-}
-
-///
-/// A biquad filter used to process audio samples.
-///
-public class BiquadFilter
-{
- private float _a0, _a1, _a2, _b0, _b1, _b2;
- private float _x1, _x2, _y1, _y2;
-
- ///
- /// Updates the filter coefficients based on the specified EQ band parameters.
- ///
- /// The EQ band containing filter parameters.
- /// The sample rate of the audio data.
- public void UpdateCoefficients(EqualizerBand band, float sampleRate)
- {
- float a;
- var omega = 2 * (float)Math.PI * band.Frequency / sampleRate;
- var sinOmega = (float)Math.Sin(omega);
- var cosOmega = (float)Math.Cos(omega);
- float alpha;
-
- switch (band.Type)
- {
- case FilterType.Peaking:
- a = (float)Math.Pow(10, band.GainDb / 40);
- alpha = sinOmega / (2 * band.Q);
-
- _b0 = 1 + alpha * a;
- _b1 = -2 * cosOmega;
- _b2 = 1 - alpha * a;
- _a0 = 1 + alpha / a;
- _a1 = -2 * cosOmega;
- _a2 = 1 - alpha / a;
- break;
- case FilterType.LowShelf:
- a = (float)Math.Pow(10, band.GainDb / 40);
- var sqrtA = (float)Math.Sqrt(a);
- alpha = sinOmega / 2 * (float)Math.Sqrt((a + 1 / a) * (1 / band.S - 1) + 2);
-
- _b0 = a * ((a + 1) - (a - 1) * cosOmega + 2 * sqrtA * alpha);
- _b1 = 2 * a * ((a - 1) - (a + 1) * cosOmega);
- _b2 = a * ((a + 1) - (a - 1) * cosOmega - 2 * sqrtA * alpha);
- _a0 = (a + 1) + (a - 1) * cosOmega + 2 * sqrtA * alpha;
- _a1 = -2 * ((a - 1) + (a + 1) * cosOmega);
- _a2 = (a + 1) + (a - 1) * cosOmega - 2 * sqrtA * alpha;
- break;
- case FilterType.HighShelf:
- a = (float)Math.Pow(10, band.GainDb / 40);
- sqrtA = (float)Math.Sqrt(a);
- alpha = sinOmega / 2 * (float)Math.Sqrt((a + 1 / a) * (1 / band.S - 1) + 2);
-
- _b0 = a * ((a + 1) + (a - 1) * cosOmega + 2 * sqrtA * alpha);
- _b1 = -2 * a * ((a - 1) + (a + 1) * cosOmega);
- _b2 = a * ((a + 1) + (a - 1) * cosOmega - 2 * sqrtA * alpha);
- _a0 = (a + 1) - (a - 1) * cosOmega + 2 * sqrtA * alpha;
- _a1 = 2 * ((a - 1) - (a + 1) * cosOmega);
- _a2 = (a + 1) - (a - 1) * cosOmega - 2 * sqrtA * alpha;
- break;
- case FilterType.LowPass:
- alpha = sinOmega / (2 * band.Q);
-
- _b0 = (1 - cosOmega) / 2;
- _b1 = 1 - cosOmega;
- _b2 = (1 - cosOmega) / 2;
- _a0 = 1 + alpha;
- _a1 = -2 * cosOmega;
- _a2 = 1 - alpha;
- break;
- case FilterType.HighPass:
- alpha = sinOmega / (2 * band.Q);
-
- _b0 = (1 + cosOmega) / 2;
- _b1 = -(1 + cosOmega);
- _b2 = (1 + cosOmega) / 2;
- _a0 = 1 + alpha;
- _a1 = -2 * cosOmega;
- _a2 = 1 - alpha;
- break;
- case FilterType.BandPass:
- alpha = sinOmega / (2 * band.Q);
-
- _b0 = alpha;
- _b1 = 0;
- _b2 = -alpha;
- _a0 = 1 + alpha;
- _a1 = -2 * cosOmega;
- _a2 = 1 - alpha;
- break;
- case FilterType.Notch:
- alpha = sinOmega / (2 * band.Q);
-
- _b0 = 1;
- _b1 = -2 * cosOmega;
- _b2 = 1;
- _a0 = 1 + alpha;
- _a1 = -2 * cosOmega;
- _a2 = 1 - alpha;
- break;
- case FilterType.AllPass:
- alpha = sinOmega / (2 * band.Q);
-
- _b0 = 1 - alpha;
- _b1 = -2 * cosOmega;
- _b2 = 1 + alpha;
- _a0 = 1 + alpha;
- _a1 = -2 * cosOmega;
- _a2 = 1 - alpha;
- break;
- default:
- throw new NotSupportedException("Filter type not supported or implemented");
- }
-
- // Normalize the coefficients
- _b0 /= _a0;
- _b1 /= _a0;
- _b2 /= _a0;
- _a1 /= _a0;
- _a2 /= _a0;
- }
-
- ///
- /// Processes a single audio sample through the biquad filter.
- ///
- /// The input sample.
- /// The filtered output sample.
- public float ProcessSample(float x)
- {
- var y = _b0 * x + _b1 * _x1 + _b2 * _x2 - _a1 * _y1 - _a2 * _y2;
-
- // Shift the data
- _x2 = _x1;
- _x1 = x;
- _y2 = _y1;
- _y1 = y;
-
- return y;
- }
}
\ No newline at end of file
diff --git a/Src/Providers/AssetDataProvider.cs b/Src/Providers/AssetDataProvider.cs
index c5fa614..0f97600 100644
--- a/Src/Providers/AssetDataProvider.cs
+++ b/Src/Providers/AssetDataProvider.cs
@@ -1,3 +1,6 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
using SoundFlow.Abstracts;
using SoundFlow.Enums;
using SoundFlow.Interfaces;
@@ -13,9 +16,22 @@ namespace SoundFlow.Providers;
/// Loads full audio directly to memory.
public sealed class AssetDataProvider : ISoundDataProvider
{
- private readonly float[] _data;
+ private float[]? _data;
private int _samplePosition;
+ ///
+ /// Initializes a new instance of the class by reading from a file path.
+ /// This method handles the stream lifecycle internally, ensuring the file handle is closed immediately after reading.
+ ///
+ /// The audio engine instance.
+ /// The absolute or relative path to the audio file.
+ /// Optional configuration for metadata reading.
+ public AssetDataProvider(AudioEngine engine, string filePath, ReadOptions? options = null)
+ {
+ using var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read);
+ Initialize(engine, stream, options ?? new ReadOptions(), null);
+ }
+
///
/// Initializes a new instance of the class by reading from a stream and detecting its format.
/// If metadata reading fails, it will attempt to probe the stream with registered codecs.
@@ -25,44 +41,7 @@ public sealed class AssetDataProvider : ISoundDataProvider
/// Optional configuration for metadata reading.
public AssetDataProvider(AudioEngine engine, Stream stream, ReadOptions? options = null)
{
- options ??= new ReadOptions();
-
- var formatInfoResult = SoundMetadataReader.Read(stream, options);
- ISoundDecoder decoder;
-
- if (formatInfoResult is { IsSuccess: true, Value: not null })
- {
- FormatInfo = formatInfoResult.Value;
- var discoveredFormat = new AudioFormat
- {
- Format = SampleFormat.F32,
- Channels = FormatInfo.ChannelCount,
- Layout = AudioFormat.GetLayoutFromChannels(FormatInfo.ChannelCount),
- SampleRate = FormatInfo.SampleRate
- };
- stream.Position = 0;
- decoder = engine.CreateDecoder(stream, FormatInfo.FormatIdentifier, discoveredFormat);
- }
- else
- {
- stream.Position = 0;
- decoder = engine.CreateDecoder(stream, out var detectedFormat);
- FormatInfo = new SoundFormatInfo
- {
- FormatName = "Unknown (Probed)",
- FormatIdentifier = "unknown",
- ChannelCount = detectedFormat.Channels,
- SampleRate = detectedFormat.SampleRate,
- Duration = decoder.Length > 0 && detectedFormat.SampleRate > 0
- ? TimeSpan.FromSeconds((double)decoder.Length / (detectedFormat.SampleRate * detectedFormat.Channels))
- : TimeSpan.Zero
- };
- }
-
- _data = Decode(decoder);
- decoder.Dispose();
- SampleRate = FormatInfo.SampleRate;
- Length = _data.Length;
+ Initialize(engine, stream, options ?? new ReadOptions(), null);
}
///
@@ -74,90 +53,128 @@ public AssetDataProvider(AudioEngine engine, Stream stream, ReadOptions? options
/// The stream to read audio data from.
public AssetDataProvider(AudioEngine engine, AudioFormat format, Stream stream)
{
- var formatInfoResult = SoundMetadataReader.Read(stream, new ReadOptions
+ var options = new ReadOptions
{
- ReadTags = false,
- ReadAlbumArt = false,
+ ReadTags = false,
+ ReadAlbumArt = false,
DurationAccuracy = DurationAccuracy.FastEstimate
- });
-
+ };
+ Initialize(engine, stream, options, format);
+ }
+
+ ///
+ /// Initializes a new instance of the class from a byte array.
+ ///
+ /// The audio engine instance.
+ /// The byte array containing the audio file data.
+ /// Optional configuration for metadata reading.
+ public AssetDataProvider(AudioEngine engine, byte[] data, ReadOptions? options = null)
+ : this(engine, new MemoryStream(data), options)
+ {
+ }
+
+ private void Initialize(AudioEngine engine, Stream stream, ReadOptions options, AudioFormat? explicitFormat)
+ {
+ var formatInfoResult = SoundMetadataReader.Read(stream, options);
ISoundDecoder decoder;
+
+ // Reset stream position before decoding attempts
+ stream.Position = 0;
+
if (formatInfoResult is { IsSuccess: true, Value: not null })
{
FormatInfo = formatInfoResult.Value;
- stream.Position = 0;
- decoder = engine.CreateDecoder(stream, FormatInfo.FormatIdentifier, format);
+
+ // If explicit format is provided, use it; otherwise, derive from metadata
+ var targetFormat = explicitFormat ?? new AudioFormat
+ {
+ Format = SampleFormat.F32,
+ Channels = FormatInfo.ChannelCount,
+ Layout = AudioFormat.GetLayoutFromChannels(FormatInfo.ChannelCount),
+ SampleRate = FormatInfo.SampleRate
+ };
+
+ decoder = engine.CreateDecoder(stream, FormatInfo.FormatIdentifier, targetFormat);
}
else
{
- stream.Position = 0;
- decoder = engine.CreateDecoder(stream, out var detectedFormat, format);
+ // Fallback to probing
+ decoder = explicitFormat.HasValue ? engine.CreateDecoder(stream, out _, explicitFormat.Value) : engine.CreateDecoder(stream, out _);
+
FormatInfo = new SoundFormatInfo
{
FormatName = "Unknown (Probed)",
FormatIdentifier = "unknown",
- ChannelCount = detectedFormat.Channels,
- SampleRate = detectedFormat.SampleRate,
- Duration = decoder.Length > 0 && detectedFormat.SampleRate > 0
- ? TimeSpan.FromSeconds((double)decoder.Length / (detectedFormat.SampleRate * detectedFormat.Channels))
- : TimeSpan.Zero
+ ChannelCount = explicitFormat?.Channels ?? 0, // Fallback if available, or 0 (decoder usually provides valid info)
+ SampleRate = explicitFormat?.SampleRate ?? 0,
+ Duration = TimeSpan.Zero
};
+
+ // Refine FormatInfo based on actual decoder properties if probe succeeded
+ if (decoder is { Channels: > 0, SampleRate: > 0 })
+ {
+ FormatInfo = FormatInfo with
+ {
+ ChannelCount = decoder.Channels,
+ SampleRate = decoder.SampleRate,
+ Duration = decoder.Length > 0
+ ? TimeSpan.FromSeconds((double)decoder.Length / (decoder.SampleRate * decoder.Channels))
+ : TimeSpan.Zero
+ };
+ }
}
- _data = Decode(decoder);
- decoder.Dispose();
- SampleRate = format.SampleRate;
- Length = _data.Length;
- }
-
- ///
- /// Initializes a new instance of the class from a byte array.
- ///
- /// The audio engine instance.
- /// The byte array containing the audio file data.
- /// Optional configuration for metadata reading.
- public AssetDataProvider(AudioEngine engine, byte[] data, ReadOptions? options = null)
- : this(engine, new MemoryStream(data), options)
- {
+ try
+ {
+ _data = Decode(decoder);
+ SampleRate = explicitFormat?.SampleRate ?? FormatInfo.SampleRate;
+ Length = _data.Length;
+ }
+ finally
+ {
+ decoder.Dispose();
+ }
}
///
public int Position => _samplePosition;
///
- public int Length { get; } // Length in samples
+ public int Length { get; private set; } // Length in samples
///
public bool CanSeek => true;
///
public SampleFormat SampleFormat { get; private set; }
-
+
///
- public int SampleRate { get; }
+ public int SampleRate { get; private set; }
///
public bool IsDisposed { get; private set; }
-
+
///
- public SoundFormatInfo? FormatInfo { get; }
+ public SoundFormatInfo? FormatInfo { get; private set; }
///
public event EventHandler? EndOfStreamReached;
-
+
///
public event EventHandler? PositionChanged;
///
public int ReadBytes(Span buffer)
{
+ if (IsDisposed || _data is null) return 0;
+
var samplesToRead = Math.Min(buffer.Length, _data.Length - _samplePosition);
if (samplesToRead <= 0)
{
EndOfStreamReached?.Invoke(this, EventArgs.Empty);
return 0;
}
-
+
_data.AsSpan(_samplePosition, samplesToRead).CopyTo(buffer);
_samplePosition += samplesToRead;
PositionChanged?.Invoke(this, new PositionChangedEventArgs(_samplePosition));
@@ -168,6 +185,8 @@ public int ReadBytes(Span buffer)
///
public void Seek(int sampleOffset)
{
+ if (IsDisposed || _data is null) return;
+
_samplePosition = Math.Clamp(sampleOffset, 0, _data.Length);
PositionChanged?.Invoke(this, new PositionChangedEventArgs(_samplePosition));
}
@@ -176,7 +195,7 @@ private float[] Decode(ISoundDecoder decoder)
{
SampleFormat = decoder.SampleFormat;
var length = decoder.Length > 0 || FormatInfo == null
- ? decoder.Length
+ ? decoder.Length
: (int)(FormatInfo.Duration.TotalSeconds * FormatInfo.SampleRate * FormatInfo.ChannelCount);
return length > 0 ? DecodeKnownLength(decoder, length) : DecodeUnknownLength(decoder);
@@ -199,8 +218,8 @@ private static float[] DecodeUnknownLength(ISoundDecoder decoder)
const int blockSize = 22050; // Approx 0.5s at 44.1kHz stereo
var blocks = new List();
var totalSamples = 0;
-
- while(true)
+
+ while (true)
{
var block = new float[blockSize * decoder.Channels];
var samplesRead = decoder.Decode(block);
@@ -229,5 +248,8 @@ public void Dispose()
{
if (IsDisposed) return;
IsDisposed = true;
+ _data = null;
+ EndOfStreamReached = null;
+ PositionChanged = null;
}
}
\ No newline at end of file
diff --git a/Src/Providers/RawDataProvider.cs b/Src/Providers/RawDataProvider.cs
index bea2545..71305b5 100644
--- a/Src/Providers/RawDataProvider.cs
+++ b/Src/Providers/RawDataProvider.cs
@@ -25,11 +25,13 @@ public class RawDataProvider : ISoundDataProvider
/// Initializes a new instance of the class from a raw float array.
///
/// The raw float array containing the audio samples.
+ /// The sample rate of the audio data. Defaults to 48000.
/// Thrown if is null.
- public RawDataProvider(float[] rawSamples)
+ public RawDataProvider(float[] rawSamples, int sampleRate = 48000)
{
_floatData = rawSamples ?? throw new ArgumentNullException(nameof(rawSamples));
_sampleFormat = SampleFormat.F32;
+ SampleRate = sampleRate;
}
///
@@ -37,13 +39,15 @@ public RawDataProvider(float[] rawSamples)
///
/// The raw PCM stream containing the audio samples.
/// The sample format of the raw PCM stream.
+ /// The sample rate of the audio data. Defaults to 48000.
/// Thrown if is null.
/// Thrown if is .
- public RawDataProvider(Stream pcmStream, SampleFormat sampleFormat)
+ public RawDataProvider(Stream pcmStream, SampleFormat sampleFormat, int sampleRate = 48000)
{
_pcmStream = pcmStream ?? throw new ArgumentNullException(nameof(pcmStream));
_sampleFormat = sampleFormat != SampleFormat.Unknown ? sampleFormat
: throw new ArgumentException("SampleFormat cannot be Unknown for RawDataProvider when using a stream.", nameof(sampleFormat));
+ SampleRate = sampleRate;
}
///
@@ -51,35 +55,41 @@ public RawDataProvider(Stream pcmStream, SampleFormat sampleFormat)
///
/// The raw byte array containing the audio samples.
/// The sample format of the raw byte array.
+ /// The sample rate of the audio data. Defaults to 48000.
/// Thrown if is null.
/// Thrown if is .
- public RawDataProvider(byte[] rawBytes, SampleFormat sampleFormat)
+ public RawDataProvider(byte[] rawBytes, SampleFormat sampleFormat, int sampleRate = 48000)
{
_byteArray = rawBytes ?? throw new ArgumentNullException(nameof(rawBytes));
_sampleFormat = sampleFormat != SampleFormat.Unknown ? sampleFormat
: throw new ArgumentException("SampleFormat cannot be Unknown for RawDataProvider when using a byte array.", nameof(sampleFormat));
+ SampleRate = sampleRate;
}
///
/// Initializes a new instance of the class from a raw int array.
///
/// The raw int array containing the audio samples.
+ /// The sample rate of the audio data. Defaults to 48000.
/// Thrown if is null.
- public RawDataProvider(int[] rawSamples)
+ public RawDataProvider(int[] rawSamples, int sampleRate = 48000)
{
_intArray = rawSamples ?? throw new ArgumentNullException(nameof(rawSamples));
_sampleFormat = SampleFormat.S32;
+ SampleRate = sampleRate;
}
///
/// Initializes a new instance of the class from a raw short array.
///
/// The raw short array containing the audio samples.
+ /// The sample rate of the audio data. Defaults to 48000.
/// Thrown if is null.
- public RawDataProvider(short[] rawSamples)
+ public RawDataProvider(short[] rawSamples, int sampleRate = 48000)
{
_shortData = rawSamples ?? throw new ArgumentNullException(nameof(rawSamples));
_sampleFormat = SampleFormat.S16;
+ SampleRate = sampleRate;
}
@@ -96,13 +106,13 @@ public RawDataProvider(short[] rawSamples)
public SampleFormat SampleFormat => _sampleFormat;
///
- public int SampleRate { get; } = 48000; // Assuming 48kHz sample rate
+ public int SampleRate { get; init; } = 48000;
///
public bool IsDisposed { get; private set; }
///
- public SoundFormatInfo? FormatInfo => null;
+ public SoundFormatInfo? FormatInfo { get; init; }
///
public event EventHandler? EndOfStreamReached;
diff --git a/Src/Security/Analyzers/ContentFingerprintAnalyzer.cs b/Src/Security/Analyzers/ContentFingerprintAnalyzer.cs
new file mode 100644
index 0000000..31b541b
--- /dev/null
+++ b/Src/Security/Analyzers/ContentFingerprintAnalyzer.cs
@@ -0,0 +1,219 @@
+using System.Numerics;
+using SoundFlow.Abstracts;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Models;
+using SoundFlow.Structs;
+using SoundFlow.Utils;
+
+namespace SoundFlow.Security.Analyzers;
+
+///
+/// Analyzes audio content to generate robust acoustic fingerprints for identification.
+/// Uses spectral peak analysis and combinatorial hashing.
+///
+public sealed class ContentFingerprintAnalyzer : AudioAnalyzer
+{
+ private readonly FingerprintConfiguration _config;
+ private readonly List _hashes = [];
+
+ // Buffering state
+ private readonly float[] _ringBuffer;
+ private int _ringBufferPos;
+ private int _totalFramesProcessed;
+
+ // FFT state
+ private readonly Complex[] _fftBuffer;
+ private readonly float[] _window;
+ private readonly int _hopSize;
+
+ // Peak tracking
+ // Index = time offset (frame index), Value = List of peak frequency bin indices in that frame.
+ private readonly Dictionary> _spectralPeaksHistory = new();
+
+ ///
+ public override string Name { get; set; } = "Content Fingerprint Analyzer";
+
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ /// The audio format.
+ /// Configuration options. If null, defaults will be used.
+ public ContentFingerprintAnalyzer(AudioFormat format, FingerprintConfiguration? config = null) : base(format)
+ {
+ _config = config ?? new FingerprintConfiguration();
+
+ if (!MathHelper.IsPowerOfTwo(_config.FftSize))
+ throw new ArgumentException("FFT size must be a power of 2.");
+
+ _fftBuffer = new Complex[_config.FftSize];
+ _window = MathHelper.HanningWindow(_config.FftSize);
+ _ringBuffer = new float[_config.FftSize];
+
+ _hopSize = _config.FftSize / _config.OverlapFactor;
+ }
+
+ ///
+ /// Gets the complete list of hashes generated so far.
+ ///
+ public IReadOnlyList GetGeneratedHashes()
+ {
+ lock (_hashes)
+ {
+ return new List(_hashes);
+ }
+ }
+
+ ///
+ protected override void Analyze(ReadOnlySpan buffer, int channels)
+ {
+ // Downmix to mono and fill ring buffer
+ for (var i = 0; i < buffer.Length; i += channels)
+ {
+ // Perform simple average downmix for channel agnosticism
+ float monoSample = 0;
+ for (var c = 0; c < channels; c++)
+ {
+ monoSample += buffer[i + c];
+ }
+ monoSample /= channels;
+
+ _ringBuffer[_ringBufferPos] = monoSample;
+ _ringBufferPos++;
+
+ // When buffer is full, process frame
+ if (_ringBufferPos >= _config.FftSize)
+ {
+ ProcessFrame();
+
+ // Shift buffer by hop size to prepare for next overlap
+ var remaining = _config.FftSize - _hopSize;
+ Array.Copy(_ringBuffer, _hopSize, _ringBuffer, 0, remaining);
+ _ringBufferPos = remaining;
+ }
+ }
+ }
+
+ ///
+ /// Processes a single FFT frame: Windowing, FFT, Peak Extraction, and Hashing.
+ ///
+ private void ProcessFrame()
+ {
+ for (var i = 0; i < _config.FftSize; i++)
+ {
+ _fftBuffer[i] = new Complex(_ringBuffer[i] * _window[i], 0);
+ }
+
+ MathHelper.Fft(_fftBuffer);
+
+ var peaks = ExtractPeaks();
+
+ // Only store if we found peaks, optimization for silence
+ if (peaks.Count > 0)
+ {
+ _spectralPeaksHistory[_totalFramesProcessed] = peaks;
+ GenerateHashesForFrame(_totalFramesProcessed);
+ }
+
+ // Cleanup old history to prevent memory leak
+ var historyHorizon = _totalFramesProcessed - _config.TargetZoneSize - 1;
+ _spectralPeaksHistory.Remove(historyHorizon);
+
+ _totalFramesProcessed++;
+ }
+
+ ///
+ /// Identifies significant spectral peaks using adaptive thresholding and band limits.
+ ///
+ private List ExtractPeaks()
+ {
+ var peaks = new List();
+ var binCount = _config.FftSize / 2;
+ var frequencyResolution = (float)Format.SampleRate / _config.FftSize;
+
+ // Calculate start and end bins based on frequency limits
+ var minBin = (int)(_config.MinFrequency / frequencyResolution);
+ var maxBin = (int)(_config.MaxFrequency / frequencyResolution);
+ minBin = Math.Max(1, minBin); // Skip DC
+ maxBin = Math.Min(binCount - 2, maxBin);
+
+ // Calculate local average magnitude for adaptive thresholding
+ double totalMag = 0;
+ var count = 0;
+ for (var i = minBin; i <= maxBin; i++)
+ {
+ totalMag += _fftBuffer[i].Magnitude;
+ count++;
+ }
+ var averageMag = count > 0 ? totalMag / count : 0;
+
+ // Adaptive threshold must be higher than floor and higher than local average * multiplier
+ var threshold = Math.Max(_config.MinPeakMagnitude, averageMag * _config.AdaptiveThresholdMultiplier);
+
+ // Divide frequency range into bands to ensure uniform peak distribution
+ const int bandCount = 4;
+ var bandWidth = (maxBin - minBin) / bandCount;
+
+ for (var b = 0; b < bandCount; b++)
+ {
+ var bandStart = minBin + b * bandWidth;
+ var bandEnd = bandStart + bandWidth;
+
+ var maxBandMag = 0.0;
+ var maxBandBin = -1;
+
+ for (var i = bandStart; i < bandEnd; i++)
+ {
+ var mag = _fftBuffer[i].Magnitude;
+
+ // Check basic magnitude threshold
+ if (mag < threshold) continue;
+
+ // Check local maxima condition
+ if (!(mag > _fftBuffer[i - 1].Magnitude) || !(mag > _fftBuffer[i + 1].Magnitude) ||
+ !(mag > maxBandMag)) continue;
+ maxBandMag = mag;
+ maxBandBin = i;
+ }
+
+ if (maxBandBin != -1)
+ {
+ peaks.Add(maxBandBin);
+ }
+ }
+
+ return peaks;
+ }
+
+ ///
+ /// Generates hashes by pairing peaks from the current frame with peaks from previous frames.
+ ///
+ private void GenerateHashesForFrame(int currentFrameIndex)
+ {
+ if (!_spectralPeaksHistory.TryGetValue(currentFrameIndex, out var targets) || targets.Count == 0) return;
+
+ // Look back in time for Anchors within the target zone
+ for (var t = 1; t <= _config.TargetZoneSize; t++)
+ {
+ var anchorFrameIndex = currentFrameIndex - t;
+ if (!_spectralPeaksHistory.TryGetValue(anchorFrameIndex, out var anchors)) continue;
+
+ foreach (var anchorBin in anchors)
+ {
+ foreach (var targetBin in targets)
+ {
+ // Construct 32-bit Hash:
+ // 12 bits: Anchor Frequency Bin (0-4095)
+ // 12 bits: Target Frequency Bin (0-4095)
+ // 8 bits: Delta Time (frames) (0-255)
+
+ var h = (uint)((anchorBin & 0xFFF) << 20 | (targetBin & 0xFFF) << 8 | (t & 0xFF));
+ var hashEntry = new FingerprintHash(h, anchorFrameIndex);
+ lock (_hashes)
+ {
+ _hashes.Add(hashEntry);
+ }
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/Src/Security/Analyzers/IntegrityWatermarkVerifyAnalyzer.cs b/Src/Security/Analyzers/IntegrityWatermarkVerifyAnalyzer.cs
new file mode 100644
index 0000000..b4956f3
--- /dev/null
+++ b/Src/Security/Analyzers/IntegrityWatermarkVerifyAnalyzer.cs
@@ -0,0 +1,95 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Utils;
+using SoundFlow.Structs;
+using SoundFlow.Utils;
+
+namespace SoundFlow.Security.Analyzers;
+
+///
+/// Verifies the integrity of an audio stream by validating block-chained watermarks.
+/// Raises an event if the hash embedded in Block N does not match the computed hash of Block N-1.
+///
+public sealed class IntegrityWatermarkVerifyAnalyzer : AudioAnalyzer
+{
+ private readonly WatermarkConfiguration _config;
+
+ private readonly float[] _currentBlock;
+ private int _blockIndex;
+
+ private byte _calculatedHashOfPreviousBlock; // Hash(Block N-1)
+ private byte _extractedHashFromCurrentBlock; // Extracted from LSB of Block N
+ private bool _isFirstBlock = true;
+ private long _totalBlocksProcessed;
+
+ ///
+ public override string Name { get; set; } = "Integrity Verifier";
+
+ ///
+ /// Occurs when an integrity violation is detected.
+ /// The argument provided is the index of the block where verification failed.
+ ///
+ public event Action? IntegrityViolationDetected;
+
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ public IntegrityWatermarkVerifyAnalyzer(AudioFormat format, WatermarkConfiguration config) : base(format)
+ {
+ _config = config;
+ _currentBlock = new float[_config.IntegrityBlockSize];
+ }
+
+ ///
+ protected override void Analyze(ReadOnlySpan buffer, int channels)
+ {
+ foreach (var sample in buffer)
+ {
+ // 1. Extract embedded hash bits from the start of the block (first 8 samples)
+ if (_blockIndex < 8)
+ {
+ var intRep = BitConverter.SingleToInt32Bits(sample);
+ var bit = intRep & 1;
+ if (bit == 1)
+ {
+ _extractedHashFromCurrentBlock |= (byte)(1 << _blockIndex);
+ }
+ }
+
+ // 2. Store sample for hashing (to verify the *next* block)
+ _currentBlock[_blockIndex] = sample;
+ _blockIndex++;
+
+ // 3. Block Complete: Perform Verification
+ if (_blockIndex >= _config.IntegrityBlockSize)
+ {
+ PerformBlockVerification();
+
+ // Reset state for next block
+ _blockIndex = 0;
+ _extractedHashFromCurrentBlock = 0;
+ }
+ }
+ }
+
+ private void PerformBlockVerification()
+ {
+ // Skip verification for the first block, as we have no previous block to compare against.
+ if (!_isFirstBlock)
+ {
+ // The watermark contract states: Block N contains the hash of Block N-1 (Previous vs Current block).
+ if (_calculatedHashOfPreviousBlock != _extractedHashFromCurrentBlock)
+ {
+ IntegrityViolationDetected?.Invoke(_totalBlocksProcessed);
+ Log.Warning(
+ $"Integrity violation at block {_totalBlocksProcessed}. Expected hash (from prev block): {_calculatedHashOfPreviousBlock}, Embedded hash: {_extractedHashFromCurrentBlock}");
+ }
+ }
+
+ // Calculate the hash of the current block to carry forward to the next iteration.
+ _calculatedHashOfPreviousBlock = WatermarkingUtils.CalculatePearsonHash(_currentBlock);
+
+ _isFirstBlock = false;
+ _totalBlocksProcessed++;
+ }
+}
\ No newline at end of file
diff --git a/Src/Security/Analyzers/OwnershipWatermarkExtractAnalyzer.cs b/Src/Security/Analyzers/OwnershipWatermarkExtractAnalyzer.cs
new file mode 100644
index 0000000..01cfdea
--- /dev/null
+++ b/Src/Security/Analyzers/OwnershipWatermarkExtractAnalyzer.cs
@@ -0,0 +1,154 @@
+using System.Collections;
+using SoundFlow.Abstracts;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Utils;
+using SoundFlow.Structs;
+using SoundFlow.Utils;
+
+namespace SoundFlow.Security.Analyzers;
+
+///
+/// Defines the internal state of the watermark extractor.
+///
+internal enum ExtractorState
+{
+ SearchingForSync,
+ ExtractingPayload,
+ Complete
+}
+
+///
+/// Analyzes an audio stream to extract hidden ownership watermarks embedded using DSSS.
+/// Implements a sliding window correlator to detect the synchronization sequence before decoding the payload.
+///
+public sealed class OwnershipWatermarkExtractAnalyzer : AudioAnalyzer
+{
+ private readonly WatermarkConfiguration _config;
+
+ // PRNG State
+ private uint _rngState;
+
+ private ExtractorState _state = ExtractorState.SearchingForSync;
+
+ // Extraction Buffers
+ private float _currentBitCorrelation;
+ private int _samplesAccumulated;
+ private readonly List _extractedBits = [];
+
+ // Sync Detection
+ private readonly bool[] _syncShiftRegister;
+ private static readonly bool[] SyncSequence =
+ [true, false, true, false, true, false, true, false, true, true, false, false, true, true, false, false];
+
+ ///
+ public override string Name { get; set; } = "Ownership Watermark Extractor";
+
+ ///
+ /// Occurs when the payload extraction is complete.
+ ///
+ public event Action? PayloadExtracted;
+
+ ///
+ /// Initializes a new instance of the class.
+ ///
+ public OwnershipWatermarkExtractAnalyzer(AudioFormat format, WatermarkConfiguration config) : base(format)
+ {
+ _config = config;
+
+ // Initialize PRNG with the exact same seed method as the Embedder
+ _rngState = WatermarkingUtils.GetStableHash(_config.Key);
+ if (_rngState == 0) _rngState = 0xCAFEBABE;
+
+ _syncShiftRegister = new bool[SyncSequence.Length];
+ }
+
+ ///
+ protected override void Analyze(ReadOnlySpan buffer, int channels)
+ {
+ if (_state == ExtractorState.Complete) return;
+
+ for (var i = 0; i < buffer.Length; i += channels)
+ {
+ // Downmix to mono for analysis (simple average)
+ float monoSample = 0;
+ for (var c = 0; c < channels; c++) monoSample += buffer[i + c];
+ monoSample /= channels;
+
+ // Generate matched chip (must match embedder's sequence)
+ var nextFloat = WatermarkingUtils.NextFloat(_rngState);
+ _rngState = nextFloat.CurrentState;
+ var chip = nextFloat.NextFloat > 0.5f ? 1.0f : -1.0f;
+
+ // Audio below the embedding threshold (-50dB) is pure noise, ignore it.
+ if (Math.Abs(monoSample) > 0.003f)
+ {
+ // Only accumulate if there is actual signal energy
+ _currentBitCorrelation += monoSample * chip;
+ }
+ _samplesAccumulated++;
+
+ // End of a bit period
+ if (_samplesAccumulated >= _config.SpreadFactor)
+ {
+ var bit = _currentBitCorrelation > 0;
+ ProcessExtractedBit(bit);
+
+ // Reset for next bit
+ _currentBitCorrelation = 0;
+ _samplesAccumulated = 0;
+ }
+ }
+ }
+
+ private void ProcessExtractedBit(bool bit)
+ {
+ switch (_state)
+ {
+ case ExtractorState.SearchingForSync:
+ // Shift bits into register
+ Array.Copy(_syncShiftRegister, 1, _syncShiftRegister, 0, _syncShiftRegister.Length - 1);
+ _syncShiftRegister[^1] = bit;
+
+ // Check if register matches SyncSequence
+ if (CheckSyncMatch())
+ {
+ _state = ExtractorState.ExtractingPayload;
+ Log.Info("Watermark Sync Sequence Detected. Starting Payload Extraction.");
+ }
+ break;
+
+ case ExtractorState.ExtractingPayload:
+ _extractedBits.Add(bit);
+ break;
+ }
+ }
+
+ private bool CheckSyncMatch()
+ {
+ // Compare the shift register with the expected sync sequence.
+ var errors = 0;
+ const int maxErrors = 3; // Allow up to 3 bit errors tolerance in the 16-bit sync word (approx 20% BER tolerance on sync) for robustness against noise.
+
+ for (var i = 0; i < SyncSequence.Length; i++)
+ {
+ if (_syncShiftRegister[i] != SyncSequence[i])
+ {
+ errors++;
+ }
+ }
+
+ return errors <= maxErrors;
+ }
+
+ ///
+ /// Finalizes extraction and attempts to parse the collected payload bits.
+ /// This should be called when the audio stream ends.
+ ///
+ public void Finish()
+ {
+ if (_state != ExtractorState.ExtractingPayload || _extractedBits.Count <= 0) return;
+ var payloadBits = new BitArray(_extractedBits.ToArray());
+ PayloadExtracted?.Invoke(payloadBits);
+ _state = ExtractorState.Complete;
+ }
+}
\ No newline at end of file
diff --git a/Src/Security/AudioEncryptor.cs b/Src/Security/AudioEncryptor.cs
new file mode 100644
index 0000000..6429ee9
--- /dev/null
+++ b/Src/Security/AudioEncryptor.cs
@@ -0,0 +1,548 @@
+using System.Buffers;
+using System.Security.Cryptography;
+using System.Text;
+using SoundFlow.Interfaces;
+using SoundFlow.Metadata.Models;
+using SoundFlow.Providers;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Containers;
+using SoundFlow.Security.Modifiers;
+using SoundFlow.Structs;
+using SoundFlow.Utils;
+
+namespace SoundFlow.Security;
+
+/// <summary>
+/// Provides high-level methods to encrypt and decrypt audio using the secure audio container format.
+/// </summary>
+public static class AudioEncryptor
+{
+ private const int BufferSize = 8192; // 8KB buffer
+
+ ///
+ /// Asynchronously reads audio from the source provider, encrypts it, and writes it to the destination stream.
+ /// Uses streaming to support large files without high memory usage.
+ /// Optionally signs the destination stream. If is true, the signature is embedded in the header; otherwise, it is returned.
+ ///
+ /// The source audio provider.
+ /// The output stream. If signing is requested, it must be readable and seekable.
+ /// The encryption configuration.
+ /// Configuration to sign the data. If null, no signing occurs.
+ /// If true and is present, the signature is embedded into the container header.
+ ///
+ /// A task containing the Base64 signature string if signing was performed and was false.
+ /// Returns null if no signing occurred or if the signature was embedded.
+ ///
+ // Streams audio from `source`, encrypts block-by-block, and writes the result
+ // into `destinationStream` behind a SecureAudioContainer header. Optionally
+ // signs the written stream (embedded into the header or returned detached).
+ // NOTE(review): generic type arguments appear stripped in this patch view
+ // (return type is presumably Task<string?>, buffers ArrayPool<float>/<byte>) — confirm against the repository.
+ public static async Task EncryptAsync(
+ ISoundDataProvider source,
+ Stream destinationStream,
+ EncryptionConfiguration config,
+ SignatureConfiguration? signingConfig = null,
+ bool embedSignature = false)
+ {
+ // Describe the outgoing audio using the source's format; a stereo layout
+ // is assumed when the provider exposes no channel information.
+ var format = new AudioFormat
+ {
+ SampleRate = source.SampleRate,
+ Channels = source.FormatInfo?.ChannelCount ?? 2,
+ Format = source.SampleFormat,
+ Layout = AudioFormat.GetLayoutFromChannels(source.FormatInfo?.ChannelCount ?? 2)
+ };
+
+ // Signing re-reads the whole written stream, so it is downgraded (with a
+ // warning) when the destination cannot seek or read.
+ var shouldSign = signingConfig != null;
+ if (shouldSign && (!destinationStream.CanSeek || !destinationStream.CanRead))
+ {
+ Log.Warning("Skipping signature generation: Destination stream is not seekable or readable.");
+ shouldSign = false;
+ }
+
+ // 1. Write Header (with placeholder if embedding)
+ var sigBlockOffset = SecureAudioContainer.WriteHeader(destinationStream, config, format, embedSignature && shouldSign);
+
+ // 2. Prepare source
+ if (source.CanSeek) source.Seek(0);
+ using var modifier = new StreamEncryptionModifier(config) { Enabled = true };
+
+ var sampleBuffer = ArrayPool.Shared.Rent(BufferSize);
+ var byteBuffer = ArrayPool.Shared.Rent(BufferSize * sizeof(float));
+
+ try
+ {
+ while (true)
+ {
+ // Process the next block of audio synchronously
+ var bytesAvailable = ProcessNextBlock(source, modifier, sampleBuffer, byteBuffer, format.Channels);
+ if (bytesAvailable == 0) break;
+
+ // Write the encrypted block to the destination stream asynchronously
+ await destinationStream.WriteAsync(byteBuffer.AsMemory(0, bytesAvailable));
+ }
+ }
+ finally
+ {
+ // Pooled buffers are returned even if the copy loop throws.
+ ArrayPool.Shared.Return(sampleBuffer);
+ ArrayPool.Shared.Return(byteBuffer);
+ }
+
+ // 3. Perform Signing if requested
+ if (shouldSign)
+ {
+ await destinationStream.FlushAsync();
+ destinationStream.Seek(0, SeekOrigin.Begin); // Rewind for full stream hashing
+
+ // If embedding, the current file state has zeros in the signature block.
+ // This is exactly what we want to hash.
+ var sigResult = await FileAuthenticator.SignStreamAsync(destinationStream, signingConfig!);
+
+ if (sigResult.IsFailure)
+ {
+ Log.Error($"Failed to generate signature: {sigResult.Error?.Message}");
+ return null;
+ }
+
+ var signatureBase64 = sigResult.Value;
+
+ if (embedSignature && sigBlockOffset >= 0 && signatureBase64 != null)
+ {
+ // Patch the header with the signature
+ var sigBytes = Convert.FromBase64String(signatureBase64);
+ // +4 accounts for the length prefix written before the signature bytes.
+ if (sigBytes.Length + 4 > SecureAudioContainer.MaxSignatureSize)
+ {
+ Log.Error("Generated signature is too large for the reserved container space.");
+ return null;
+ }
+
+ destinationStream.Seek(sigBlockOffset, SeekOrigin.Begin);
+ await using var writer = new BinaryWriter(destinationStream, Encoding.UTF8, true);
+ writer.Write(sigBytes.Length);
+ writer.Write(sigBytes);
+ writer.Flush();
+
+ // Return to end of stream
+ destinationStream.Seek(0, SeekOrigin.End);
+ return null; // Signature is embedded
+ }
+
+ // Return detached signature
+ destinationStream.Seek(0, SeekOrigin.End);
+ return signatureBase64;
+ }
+
+ return null;
+
+ // Read bytes from the source provider and encrypt them in-place, method isolated to allow using spans in async context
+ static int ProcessNextBlock(ISoundDataProvider provider, StreamEncryptionModifier encryptionModifier, float[] floatArr, byte[] byteArr, int channelCount)
+ {
+ // Create Spans here. Since this method is not async, this is perfectly valid.
+ var spanFloat = floatArr.AsSpan();
+
+ var samplesRead = provider.ReadBytes(spanFloat);
+ if (samplesRead == 0) return 0;
+
+ var validSlice = spanFloat[..samplesRead];
+
+ // Encrypt in-place
+ encryptionModifier.Process(validSlice, channelCount);
+
+ // Copy to byte buffer (F32 samples, 4 bytes each).
+ var bytesToWrite = samplesRead * sizeof(float);
+ Buffer.BlockCopy(floatArr, 0, byteArr, 0, bytesToWrite);
+
+ return bytesToWrite;
+ }
+ }
+
+ ///
+ /// Reads audio from the source provider, encrypts it, and writes it to the destination file path.
+ /// Optionally signs the result (either embedded or detached).
+ ///
+ // File-path convenience wrapper: encrypts into `destinationPath` and, when a
+ // detached signature is produced, writes it next to the file as "<path>.sig".
+ public static async Task EncryptAsync(
+ ISoundDataProvider source,
+ string destinationPath,
+ EncryptionConfiguration config,
+ SignatureConfiguration? signingConfig = null,
+ bool embedSignature = false)
+ {
+ // FileShare.Read lets the signing pass re-read the stream while it is open for writing.
+ await using var fileStream = new FileStream(destinationPath, FileMode.Create, FileAccess.Write, FileShare.Read);
+ var detachedSignature = await EncryptAsync(source, fileStream, config, signingConfig, embedSignature);
+ if (detachedSignature != null)
+ {
+ await File.WriteAllTextAsync(destinationPath + ".sig", detachedSignature);
+ }
+ }
+
+ ///
+ /// Reads an encrypted stream and returns a provider that streams the decrypted audio.
+ /// The source stream remains open and its ownership is transferred to the returned provider,
+ /// which will dispose of the stream when it is disposed.
+ ///
+ /// The stream containing the secure audio data.
+ /// The decryption key.
+ /// A result containing a streaming provider, or an error.
+ // Wraps `sourceStream` in a lazily-decrypting provider. Ownership of the
+ // stream transfers to the returned provider; on any initialization failure
+ // the stream is disposed here and a typed error is returned.
+ // NOTE(review): return type is presumably Result<RawDataProvider>; the generic
+ // argument appears stripped in this patch view — confirm against the repository.
+ public static Result Decrypt(Stream sourceStream, byte[] key)
+ {
+ try
+ {
+ var headerResult = SecureAudioContainer.ReadHeader(sourceStream);
+
+ if (headerResult.IsFailure)
+ {
+ sourceStream.Dispose();
+ return Result.Fail(headerResult.Error!);
+ }
+
+ var (format, iv, dataOffset, embeddedSigBytes, _) = headerResult.Value;
+
+ // Unauthenticated path: an embedded signature is deliberately ignored here.
+ if (embeddedSigBytes != null)
+ {
+ Log.Warning($"This file contains a digital signature that is being ignored. Use {nameof(VerifyAndDecryptAsync)} for authenticated decryption.");
+ }
+
+ var config = new EncryptionConfiguration { Key = key, Iv = iv };
+
+ // The DecryptionStream takes ownership of the sourceStream.
+ var cryptoStream = new DecryptionStream(sourceStream, config, dataOffset);
+
+ // Wrap in RawDataProvider as the stream produces raw PCM data.
+ var provider = new RawDataProvider(cryptoStream, format.Format, format.SampleRate)
+ {
+ FormatInfo = new SoundFormatInfo
+ {
+ FormatName = "Decrypted Audio",
+ FormatIdentifier = "raw",
+ SampleRate = format.SampleRate,
+ ChannelCount = format.Channels,
+ IsLossless = true
+ }
+ };
+
+ return Result.Ok(provider);
+ }
+ catch (Exception ex)
+ {
+ // Ensure the stream is disposed on any failure during initialization.
+ sourceStream.Dispose();
+ // Map known failure modes to typed errors; anything else is unexpected.
+ return ex switch
+ {
+ ObjectDisposedException => new ObjectDisposedError("sourceStream"),
+ CryptographicException cryptoEx =>
+ new ValidationError("Invalid cryptographic configuration. The provided key may have an incorrect size.", cryptoEx),
+ ArgumentException argEx =>
+ new ValidationError($"Invalid argument during decryption initialization: {argEx.Message}", argEx),
+ IOException ioEx =>
+ new IoError("Initializing decryption stream", ioEx),
+ _ => new InvalidOperationError("An unexpected error occurred while initializing the decryption provider.", ex)
+ };
+ }
+ }
+
+ ///
+ /// Opens an encrypted file and returns a provider that streams the decrypted audio.
+ /// The file remains locked until the provider is disposed.
+ ///
+ /// The path to the secure audio file.
+ /// The decryption key.
+ /// A result containing a streaming provider, or an error.
+ // File-path overload: opens the file for reading and defers to the stream
+ // overload, which assumes ownership of the opened stream. The file stays
+ // locked until the returned provider is disposed.
+ public static Result Decrypt(string sourceFilePath, byte[] key)
+ {
+ if (!File.Exists(sourceFilePath))
+ return new NotFoundError("File", $"The specified file was not found: '{sourceFilePath}'.");
+
+ try
+ {
+ var fileStream = new FileStream(sourceFilePath, FileMode.Open, FileAccess.Read);
+ return Decrypt(fileStream, key);
+ }
+ catch (Exception ex)
+ {
+ // Translate file-open failures into typed errors for the caller.
+ return ex switch
+ {
+ UnauthorizedAccessException =>
+ new AccessDeniedError(sourceFilePath),
+ DirectoryNotFoundException =>
+ new NotFoundError("Directory", $"The directory for the specified path was not found: '{sourceFilePath}'."),
+ PathTooLongException or ArgumentException =>
+ new ValidationError($"The file path is invalid: '{sourceFilePath}'.", ex),
+ NotSupportedException nsEx =>
+ new ValidationError($"The file path format is not supported: '{sourceFilePath}'.", nsEx),
+ IOException ioEx =>
+ new IoError($"opening the file '{sourceFilePath}'", ioEx),
+ _ => new HostError($"An unexpected OS error occurred when opening '{sourceFilePath}'.", ex)
+ };
+ }
+ }
+
+ ///
+ /// Opens an encrypted file, verifies its authenticity (embedded or detached), and returns a decryption provider.
+ ///
+ /// The path to the secure audio file.
+ /// The decryption key.
+ /// The configuration containing the Public Key.
+ /// Optional detached signature. If null, the method looks for an embedded signature.
+ /// A result containing the decrypted provider if verification succeeds.
+ // File-path overload of authenticated decryption. On success the opened
+ // stream's ownership passes to the returned provider; on failure it is
+ // disposed here so the file handle is not leaked.
+ // NOTE(review): return type is presumably Task<Result<RawDataProvider>>;
+ // generic arguments appear stripped in this patch view — confirm.
+ public static async Task> VerifyAndDecryptAsync(
+ string sourceFilePath,
+ byte[] key,
+ SignatureConfiguration signingConfig,
+ string? detachedSignature = null)
+ {
+ if (!File.Exists(sourceFilePath))
+ return new NotFoundError("File", $"File not found: {sourceFilePath}");
+
+ try
+ {
+ var fileStream = new FileStream(sourceFilePath, FileMode.Open, FileAccess.Read, FileShare.Read);
+ var result = await VerifyAndDecryptAsync(fileStream, key, signingConfig, detachedSignature);
+
+ // Only dispose on failure; on success the provider owns the stream.
+ if (result.IsFailure)
+ await fileStream.DisposeAsync();
+
+ return result;
+ }
+ catch (Exception ex)
+ {
+ return new IoError($"opening file '{sourceFilePath}'", ex);
+ }
+ }
+
+ ///
+ /// Verifies the authenticity of an encrypted stream and returns a decryption provider.
+ /// Handles both embedded signatures (by masking the signature block with zeros) and detached signatures.
+ ///
+ // Authenticated decryption: verifies a detached signature (preferred when
+ // provided) or the embedded one, then returns a decrypting provider.
+ // For embedded signatures the signature block is masked with zeros during
+ // hashing to reproduce the pre-signing state of the header.
+ public static async Task> VerifyAndDecryptAsync(
+ Stream sourceStream,
+ byte[] key,
+ SignatureConfiguration signingConfig,
+ string? detachedSignature = null)
+ {
+ if (!sourceStream.CanSeek)
+ return new InvalidOperationError("Stream must be seekable for verification.");
+
+ // 1. Read Header to find embedded signature info
+ var headerStart = sourceStream.Position;
+ var headerResult = SecureAudioContainer.ReadHeader(sourceStream);
+
+ if (headerResult.IsFailure)
+ return Result.Fail(headerResult.Error!);
+
+ var (format, iv, dataOffset, embeddedSigBytes, sigOffset) = headerResult.Value;
+
+ // 2. Determine which signature to use (detached takes precedence).
+ var signatureToVerify = detachedSignature;
+ var isEmbedded = false;
+
+ if (string.IsNullOrEmpty(detachedSignature) && embeddedSigBytes != null)
+ {
+ signatureToVerify = Convert.ToBase64String(embeddedSigBytes);
+ isEmbedded = true;
+ }
+
+ if (string.IsNullOrEmpty(signatureToVerify))
+ return new ValidationError("No signature found (neither embedded nor detached provided).");
+
+ // 3. Verification
+ sourceStream.Seek(headerStart, SeekOrigin.Begin);
+ Result verifyResult;
+
+ if (isEmbedded && sigOffset >= 0)
+ {
+ // We must verify using a stream that "sees" zeros where the signature currently is.
+ await using var zeroingStream = new ZeroingStream(sourceStream, sigOffset, SecureAudioContainer.MaxSignatureSize);
+ verifyResult = await FileAuthenticator.VerifyStreamAsync(zeroingStream, signatureToVerify, signingConfig);
+ }
+ else
+ {
+ // Standard verification
+ verifyResult = await FileAuthenticator.VerifyStreamAsync(sourceStream, signatureToVerify, signingConfig);
+ }
+
+ if (verifyResult.IsFailure)
+ return Result.Fail(verifyResult.Error!);
+
+ if (!verifyResult.Value)
+ return new ValidationError("Integrity check failed. Signature mismatch.");
+
+ // 4. Setup Decryption
+ // Reset stream to data start
+ sourceStream.Seek(dataOffset, SeekOrigin.Begin);
+
+ var encryptionConfig = new EncryptionConfiguration { Key = key, Iv = iv };
+ var cryptoStream = new DecryptionStream(sourceStream, encryptionConfig, dataOffset);
+
+ var provider = new RawDataProvider(cryptoStream, format.Format, format.SampleRate)
+ {
+ FormatInfo = new SoundFormatInfo
+ {
+ FormatName = "Decrypted Audio",
+ FormatIdentifier = "raw",
+ SampleRate = format.SampleRate,
+ ChannelCount = format.Channels,
+ IsLossless = true
+ }
+ };
+
+ return Result.Ok(provider);
+ }
+
+ ///
+ /// A stream wrapper that zeroes out a specific range of bytes during read.
+ /// Used to simulate the "pre-signed" state of the file header during verification.
+ ///
+ ///
+ /// A stream wrapper that zeroes out a specific range of bytes during read.
+ /// Used to simulate the "pre-signed" state of the file header during verification.
+ /// Read-only; seeks and reads pass through to the wrapped stream, which is NOT disposed by this wrapper.
+ ///
+ private sealed class ZeroingStream(Stream baseStream, long zeroStart, int zeroLength) : Stream
+ {
+ // Exclusive end of the byte range to present as zeros.
+ private readonly long _zeroEnd = zeroStart + zeroLength;
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ var bytesRead = baseStream.Read(buffer, offset, count);
+ if (bytesRead == 0) return 0;
+
+ // Reconstruct the absolute range this read covered from the post-read position.
+ var currentPos = baseStream.Position - bytesRead;
+ var endPos = baseStream.Position;
+
+ // Check intersection with zero region
+ if (endPos > zeroStart && currentPos < _zeroEnd)
+ {
+ var overlapStart = Math.Max(currentPos, zeroStart);
+ var overlapEnd = Math.Min(endPos, _zeroEnd);
+ var lengthToZero = (int)(overlapEnd - overlapStart);
+ var bufferOffset = offset + (int)(overlapStart - currentPos);
+
+ Array.Clear(buffer, bufferOffset, lengthToZero);
+ }
+
+ return bytesRead;
+ }
+
+ // Span overload: same zero-masking logic as the array overload.
+ // NOTE(review): parameter is presumably Span<byte>; the generic argument
+ // appears stripped in this patch view — confirm against the repository.
+ public override int Read(Span buffer)
+ {
+ var bytesRead = baseStream.Read(buffer);
+ if (bytesRead == 0) return 0;
+
+ var currentPos = baseStream.Position - bytesRead;
+ var endPos = baseStream.Position;
+
+ if (endPos > zeroStart && currentPos < _zeroEnd)
+ {
+ var overlapStart = Math.Max(currentPos, zeroStart);
+ var overlapEnd = Math.Min(endPos, _zeroEnd);
+ var lengthToZero = (int)(overlapEnd - overlapStart);
+ var bufferOffset = (int)(overlapStart - currentPos);
+
+ buffer.Slice(bufferOffset, lengthToZero).Clear();
+ }
+
+ return bytesRead;
+ }
+
+ // Passthrough members
+ public override bool CanRead => baseStream.CanRead;
+ public override bool CanSeek => baseStream.CanSeek;
+ public override bool CanWrite => false;
+ public override long Length => baseStream.Length;
+ public override long Position { get => baseStream.Position; set => baseStream.Position = value; }
+ public override void Flush() => baseStream.Flush();
+ public override long Seek(long offset, SeekOrigin origin) => baseStream.Seek(offset, origin);
+ public override void SetLength(long value) => throw new NotSupportedException();
+ public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
+ }
+
+ ///
+ /// Internal stream wrapper that decrypts data on-the-fly during Read operations.
+ /// Supports seeking by recalculating AES-CTR state.
+ ///
+ ///
+ /// Internal stream wrapper that decrypts data on-the-fly during Read operations.
+ /// Supports seeking by recalculating AES-CTR state.
+ /// Positions exposed to callers are relative to the start of the audio data
+ /// (i.e. offset 0 == first encrypted payload byte). Owns and disposes the base stream.
+ ///
+ private sealed class DecryptionStream : Stream
+ {
+ private readonly Stream _baseStream;
+ private readonly StreamEncryptionModifier _modifier;
+ // Absolute offset in _baseStream where the encrypted audio payload begins.
+ private readonly long _dataOffset;
+
+ public DecryptionStream(Stream baseStream, EncryptionConfiguration config, long dataOffset)
+ {
+ _baseStream = baseStream;
+ _dataOffset = dataOffset;
+ _modifier = new StreamEncryptionModifier(config) { Enabled = true };
+
+ // Ensure stream is positioned at the start of the audio data.
+ if (_baseStream.Position != _dataOffset)
+ _baseStream.Seek(_dataOffset, SeekOrigin.Begin);
+ }
+
+ public override int Read(byte[] buffer, int offset, int count)
+ {
+ var read = _baseStream.Read(buffer, offset, count);
+ if (read > 0)
+ {
+ // Decrypt the data that was just read, in-place.
+ _modifier.ProcessBytes(buffer.AsSpan(offset, read));
+ }
+
+ return read;
+ }
+
+ // Span overload; same read-then-decrypt-in-place behavior.
+ public override int Read(Span buffer)
+ {
+ var read = _baseStream.Read(buffer);
+ if (read > 0)
+ {
+ _modifier.ProcessBytes(buffer[..read]);
+ }
+
+ return read;
+ }
+
+ public override long Seek(long offset, SeekOrigin origin)
+ {
+ // Calculate the absolute target position in the base stream.
+ // Note: SeekOrigin.Begin offsets are relative to the data start.
+ var targetPos = origin switch
+ {
+ SeekOrigin.Begin => _dataOffset + offset,
+ SeekOrigin.Current => _baseStream.Position + offset,
+ SeekOrigin.End => _baseStream.Length + offset,
+ _ => throw new ArgumentOutOfRangeException(nameof(origin))
+ };
+
+ // Clamp the position to the valid range of audio data.
+ if (targetPos < _dataOffset) targetPos = _dataOffset;
+ if (targetPos > _baseStream.Length) targetPos = _baseStream.Length;
+
+ // Perform the file seek
+ _baseStream.Seek(targetPos, SeekOrigin.Begin);
+
+ // Determine the relative offset from the start of the audio data.
+ var relativeOffset = targetPos - _dataOffset;
+
+ // Re-synchronize the AES-CTR modifier to this exact byte offset.
+ _modifier.SeekTo(relativeOffset);
+
+ return relativeOffset;
+ }
+
+ public override void SetLength(long value) => throw new NotSupportedException("The stream is read-only.");
+
+ public override void Write(byte[] buffer, int offset, int count) =>
+ throw new NotSupportedException("The stream is read-only.");
+
+ public override void Flush() => _baseStream.Flush();
+ public override bool CanRead => true;
+ public override bool CanSeek => true;
+ public override bool CanWrite => false;
+ // Length/Position are reported relative to the payload, excluding the header.
+ public override long Length => _baseStream.Length - _dataOffset;
+
+ public override long Position
+ {
+ get => _baseStream.Position - _dataOffset;
+ set => Seek(value, SeekOrigin.Begin);
+ }
+
+ protected override void Dispose(bool disposing)
+ {
+ if (disposing)
+ {
+ // This wrapper owns both the modifier and the underlying stream.
+ _modifier.Dispose();
+ _baseStream.Dispose();
+ }
+
+ base.Dispose(disposing);
+ }
+ }
+}
\ No newline at end of file
diff --git a/Src/Security/AudioIdentifier.cs b/Src/Security/AudioIdentifier.cs
new file mode 100644
index 0000000..55d392e
--- /dev/null
+++ b/Src/Security/AudioIdentifier.cs
@@ -0,0 +1,159 @@
+using System.Diagnostics;
+using SoundFlow.Enums;
+using SoundFlow.Interfaces;
+using SoundFlow.Security.Analyzers;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Models;
+using SoundFlow.Security.Stores;
+using SoundFlow.Structs;
+using SoundFlow.Utils;
+
+namespace SoundFlow.Security;
+
+/// <summary>
+/// Provides high-level functionality to identify audio content using fingerprints.
+/// </summary>
+public static class AudioIdentifier
+{
+ ///
+ /// Generates a fingerprint for the provided audio data by processing the entire stream immediately.
+ ///
+ /// The audio data provider.
+ /// Optional configuration.
+ /// A generated .
+ // Runs the entire source through a ContentFingerprintAnalyzer in fixed-size
+ // blocks and packages the resulting hashes plus the computed duration.
+ public static AudioFingerprint GenerateFingerprint(ISoundDataProvider provider, FingerprintConfiguration? config = null)
+ {
+ config ??= new FingerprintConfiguration();
+
+ // Analysis is always performed as 32-bit float samples.
+ var analyzer = new ContentFingerprintAnalyzer(
+ new AudioFormat
+ {
+ SampleRate = provider.SampleRate,
+ Channels = provider.FormatInfo?.ChannelCount ?? 2,
+ Format = SampleFormat.F32,
+ Layout = AudioFormat.GetLayoutFromChannels(provider.FormatInfo?.ChannelCount ?? 2)
+ },
+ config);
+
+ // Process audio in blocks
+ const int blockSize = 4096;
+ var buffer = new float[blockSize];
+
+ if (provider.CanSeek) provider.Seek(0);
+
+ while (true)
+ {
+ var read = provider.ReadBytes(buffer);
+ if (read == 0) break;
+
+ analyzer.Process(buffer.AsSpan(0, read), provider.FormatInfo?.ChannelCount ?? 2);
+ }
+
+ var hashes = analyzer.GetGeneratedHashes();
+ // Duration in seconds: total sample count / (rate * channels).
+ var duration = provider.Length / (double)(provider.SampleRate * (provider.FormatInfo?.ChannelCount ?? 2));
+
+ return new AudioFingerprint
+ {
+ Hashes = hashes.ToList(),
+ DurationSeconds = duration
+ };
+ }
+
+ ///
+ /// Attempts to identify the audio content provided by the data provider by matching it against the store.
+ /// Uses a histogram of time-deltas to find the best alignment.
+ ///
+ /// The query audio provider.
+ /// The fingerprint store to search.
+ /// Optional configuration.
+ /// A detailing the match.
+ // Shazam-style identification: fingerprints the query, queries the store per
+ // hash, builds per-track histograms of (trackTime - queryTime) deltas, and
+ // accepts the best-aligned track if it clears both absolute and relative
+ // confidence thresholds.
+ public static async Task> IdentifyAsync(ISoundDataProvider provider, IFingerprintStore store, FingerprintConfiguration? config = null)
+ {
+ var sw = Stopwatch.StartNew();
+ config ??= new FingerprintConfiguration();
+
+ // 1. Generate Fingerprint for Query
+ var queryFingerprint = GenerateFingerprint(provider, config);
+ var queryTotalHashes = queryFingerprint.Hashes.Count;
+
+ if (queryTotalHashes == 0)
+ {
+ return Result.Fail(new NotFoundError("Fingerprint", "No hashes found in query audio."));
+ }
+
+ // 2. Query Store for Matches
+ // Map: TrackId -> Dictionary
+ // Ideally, TimeDelta = DatabaseTime - QueryTime should be constant for a matching track.
+ // NOTE(review): generic arguments are stripped in this patch view; the inner
+ // map is presumably Dictionary<int, int> (delta -> vote count) — confirm.
+ var timeDeltaHistograms = new Dictionary>();
+
+ foreach (var queryHash in queryFingerprint.Hashes)
+ {
+ var matches = await store.QueryHashAsync(queryHash.Hash);
+
+ foreach (var match in matches)
+ {
+ if (!timeDeltaHistograms.TryGetValue(match.TrackId, out var histogram))
+ {
+ histogram = new Dictionary();
+ timeDeltaHistograms[match.TrackId] = histogram;
+ }
+
+ // Calculate relative time offset
+ var delta = match.TrackTimeOffset - queryHash.TimeOffset;
+
+ // Vote for this delta: insert with count 1 or increment the existing bin.
+ if (!histogram.TryAdd(delta, 1))
+ {
+ histogram[delta]++;
+ }
+ }
+ }
+
+ // 3. Score Matches: the winner is the single largest histogram bin
+ // across all candidate tracks.
+ string? bestTrackId = null;
+ var bestScore = 0;
+ var bestDelta = 0;
+
+ foreach (var (trackId, histogram) in timeDeltaHistograms)
+ {
+ foreach (var bin in histogram)
+ {
+ if (bin.Value <= bestScore)
+ continue;
+
+ bestScore = bin.Value;
+ bestTrackId = trackId;
+ bestDelta = bin.Key;
+ }
+ }
+
+ sw.Stop();
+
+ // 4. Validate against threshold
+
+ // A. Absolute floor (MinConfidenceThreshold) to filter out very short/empty clips.
+ if (bestTrackId == null || bestScore < config.MinConfidenceThreshold) return Result.Fail(new NotFoundError("Fingerprint", $"No match found with a relative score above {config.MinConfidenceThreshold} (had {bestScore})."));
+ var relativeScore = (double)bestScore / queryTotalHashes;
+
+ // B. Relative floor (MinRelativeConfidence) to filter out random collisions in long/dense queries.
+ if (relativeScore >= config.MinRelativeConfidence)
+ {
+ // Calculate time offset in seconds: Frames * HopSize / SampleRate
+ var hopSize = config.FftSize / config.OverlapFactor;
+ var timeOffsetSeconds = (double)bestDelta * hopSize / provider.SampleRate;
+
+ Log.Debug($"Match: {bestTrackId} | Score: {bestScore} ({relativeScore:P2}) | Time: {timeOffsetSeconds:F2}s");
+
+ return Result.Ok(new FingerprintResult
+ {
+ TrackId = bestTrackId,
+ Confidence = bestScore,
+ MatchTimeSeconds = timeOffsetSeconds,
+ ProcessingTime = sw.Elapsed
+ });
+ }
+
+ Log.Info($"Rejected match '{bestTrackId}': Score {bestScore} is below relative threshold {config.MinRelativeConfidence:P0} (Actual: {relativeScore:P2}).");
+
+ return Result.Fail(new NotFoundError("Fingerprint", $"Relative score {relativeScore:P2} is below threshold {config.MinRelativeConfidence:P0}."));
+ }
+}
\ No newline at end of file
diff --git a/Src/Security/AudioWatermarker.cs b/Src/Security/AudioWatermarker.cs
new file mode 100644
index 0000000..aff08ea
--- /dev/null
+++ b/Src/Security/AudioWatermarker.cs
@@ -0,0 +1,186 @@
+using System.Buffers;
+using System.Text;
+using SoundFlow.Enums;
+using SoundFlow.Interfaces;
+using SoundFlow.Security.Analyzers;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Modifiers;
+using SoundFlow.Security.Payloads;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Security;
+
+///
+/// Provides high-level methods to apply and extract watermarks.
+///
+public static class AudioWatermarker
+{
+ private const int BufferSize = 8192;
+
+ ///
+ /// Embeds a text ownership watermark into an audio source and writes the result to a destination stream.
+ /// The output format is 32-bit Float WAV.
+ ///
+ /// The source audio provider.
+ /// The destination stream (must be writable and seekable to update headers).
+ /// The text to embed.
+ /// Configuration options.
+ /// Thrown if the destination stream does not support seeking.
+ // Writes a 32-bit-float WAV containing the source audio with a spread-spectrum
+ // text watermark applied. Header size fields are written as placeholders and
+ // patched after the audio data length is known.
+ public static void EmbedOwnershipWatermark(ISoundDataProvider source, Stream destination, string text,
+ WatermarkConfiguration config)
+ {
+ if (!destination.CanSeek || !destination.CanWrite)
+ throw new ArgumentException(
+ "Destination stream must be writable and seekable to generate a valid WAV container.",
+ nameof(destination));
+
+ var format = new AudioFormat
+ {
+ SampleRate = source.SampleRate,
+ Channels = source.FormatInfo?.ChannelCount ?? 2,
+ Format = SampleFormat.F32,
+ Layout = AudioFormat.GetLayoutFromChannels(source.FormatInfo?.ChannelCount ?? 2)
+ };
+
+ var payload = new TextPayload(text);
+ var embedder = new OwnershipWatermarkEmbedModifier(payload, config) { Enabled = true };
+ // leaveOpen: true — the caller keeps ownership of `destination`.
+ using var writer = new BinaryWriter(destination, Encoding.ASCII, true);
+
+ // 1. Write WAV Header placeholders
+ var startPos = destination.Position;
+ WriteWavHeader(writer, format.SampleRate, format.Channels, 0);
+
+ var dataChunkSizePos = destination.Position - 4; // Position of 'data' chunk size
+
+ // 2. Stream Process
+ if (source.CanSeek) source.Seek(0);
+
+ var floatBuffer = ArrayPool.Shared.Rent(BufferSize);
+ var byteBuffer = ArrayPool.Shared.Rent(BufferSize * sizeof(float));
+
+ long totalDataBytes = 0;
+
+ try
+ {
+ var spanFloat = floatBuffer.AsSpan();
+ while (true)
+ {
+ var samplesRead = source.ReadBytes(spanFloat);
+ if (samplesRead == 0) break;
+
+ var validSlice = spanFloat[..samplesRead];
+
+ // Apply watermark
+ embedder.Process(validSlice, format.Channels);
+
+ // Convert to bytes (F32 raw)
+ Buffer.BlockCopy(floatBuffer, 0, byteBuffer, 0, samplesRead * 4);
+
+ destination.Write(byteBuffer, 0, samplesRead * 4);
+ totalDataBytes += samplesRead * 4;
+ }
+ }
+ finally
+ {
+ // Return pooled buffers even if processing throws.
+ ArrayPool.Shared.Return(floatBuffer);
+ ArrayPool.Shared.Return(byteBuffer);
+ }
+
+ // 3. Update Header Sizes
+ var originalPos = destination.Position;
+
+ // Patch 'data' chunk size
+ destination.Seek(dataChunkSizePos, SeekOrigin.Begin);
+ writer.Write((uint)totalDataBytes);
+
+ // Patch 'RIFF' chunk size (File Length - 8)
+ destination.Seek(startPos + 4, SeekOrigin.Begin);
+ writer.Write((uint)(destination.Length - startPos - 8));
+
+ // Restore position
+ destination.Seek(originalPos, SeekOrigin.Begin);
+ }
+
+ ///
+ /// Wrapper to embed a watermark directly to a file path.
+ ///
+ /// The source audio provider.
+ /// The output file path.
+ /// The text to embed.
+ /// Configuration options.
+ // File-path convenience wrapper around EmbedOwnershipWatermark; creates
+ // (or truncates) the destination file and embeds into it.
+ public static void EmbedOwnershipWatermarkToFile(ISoundDataProvider source, string destinationPath, string text,
+ WatermarkConfiguration config)
+ {
+ using var fileStream = new FileStream(destinationPath, FileMode.Create, FileAccess.Write);
+ EmbedOwnershipWatermark(source, fileStream, text, config);
+ }
+
+ // Writes a canonical 44-byte WAV header for IEEE-float (format tag 3),
+ // 32 bits per sample. The RIFF and 'data' chunk sizes are placeholders
+ // that the caller patches once the payload length is known.
+ private static void WriteWavHeader(BinaryWriter writer, int sampleRate, int channels, int dataSize)
+ {
+ writer.Write("RIFF"u8.ToArray());
+ writer.Write(36 + dataSize); // Placeholder
+ writer.Write("WAVE"u8.ToArray());
+ writer.Write("fmt "u8.ToArray());
+ writer.Write(16); // Chunk size
+ writer.Write((short)3); // Format 3 = IEEE Float
+ writer.Write((short)channels);
+ writer.Write(sampleRate);
+ writer.Write(sampleRate * channels * 4); // ByteRate
+ writer.Write((short)(channels * 4)); // BlockAlign
+ writer.Write((short)32); // BitsPerSample
+ writer.Write("data"u8.ToArray());
+ writer.Write(dataSize); // Placeholder
+ }
+
+ ///
+ /// Attempts to extract a text payload from a watermarked audio source.
+ ///
+ // Feeds the source through an extraction analyzer; the analyzer raises
+ // PayloadExtracted once a complete payload is decoded, which completes the
+ // TaskCompletionSource and stops further processing.
+ // NOTE(review): return type is presumably Task<Result<string>>; generic
+ // arguments appear stripped in this patch view — confirm.
+ public static async Task> ExtractOwnershipWatermarkAsync(ISoundDataProvider source,
+ WatermarkConfiguration config)
+ {
+ var format = new AudioFormat
+ {
+ SampleRate = source.SampleRate,
+ Channels = source.FormatInfo?.ChannelCount ?? 2,
+ Format = source.SampleFormat
+ };
+
+ var extractor = new OwnershipWatermarkExtractAnalyzer(format, config);
+ var payloadResult = new TaskCompletionSource();
+
+ // Hook up event: decode the extracted bits back into text.
+ extractor.PayloadExtracted += (bits) =>
+ {
+ var payload = new TextPayload();
+ var text = payload.FromBits(bits) as string;
+ payloadResult.TrySetResult(text ?? string.Empty);
+ };
+
+ // Process audio
+ if (source.CanSeek) source.Seek(0);
+ var buffer = ArrayPool.Shared.Rent(BufferSize);
+
+ try
+ {
+ while (true)
+ {
+ var read = source.ReadBytes(buffer);
+ if (read == 0) break;
+
+ extractor.Process(buffer.AsSpan(0, read), format.Channels);
+
+ // Stop early once the payload has been fully decoded.
+ if (payloadResult.Task.IsCompleted) break;
+ }
+ }
+ finally
+ {
+ ArrayPool.Shared.Return(buffer);
+ }
+
+ // Flush any partially-accumulated payload at end of stream.
+ extractor.Finish();
+
+ return payloadResult.Task.IsCompleted
+ ? Result.Ok(await payloadResult.Task)
+ : Result.Fail(new Error("No watermark detected or payload incomplete."));
+ }
+}
\ No newline at end of file
diff --git a/Src/Security/Configuration/EncryptionConfiguration.cs b/Src/Security/Configuration/EncryptionConfiguration.cs
new file mode 100644
index 0000000..becbe94
--- /dev/null
+++ b/Src/Security/Configuration/EncryptionConfiguration.cs
@@ -0,0 +1,23 @@
+namespace SoundFlow.Security.Configuration;
+
+///
+/// Configuration settings for audio stream encryption.
+///
+public class EncryptionConfiguration
+{
+ /// <summary>
+ /// Gets or sets the encryption key. Must be 32 bytes (256 bits) for AES-256.
+ /// </summary>
+ public byte[] Key { get; set; } = [];
+
+ /// <summary>
+ /// Gets or sets the initialization vector (Nonce).
+ /// For AES-CTR, this is typically 12 bytes, with the last 4 bytes reserved for the counter.
+ /// </summary>
+ public byte[] Iv { get; set; } = [];
+
+ /// <summary>
+ /// Gets or sets a value indicating whether to compute an HMAC integrity tag during processing.
+ /// </summary>
+ public bool EnableIntegrityCheck { get; set; } = true;
+}
\ No newline at end of file
diff --git a/Src/Security/Configuration/FingerprintConfiguration.cs b/Src/Security/Configuration/FingerprintConfiguration.cs
new file mode 100644
index 0000000..f62811e
--- /dev/null
+++ b/Src/Security/Configuration/FingerprintConfiguration.cs
@@ -0,0 +1,66 @@
+namespace SoundFlow.Security.Configuration;
+
+///
+/// Configuration settings for the content fingerprint analyzer.
+///
+public class FingerprintConfiguration
+{
+ /// <summary>
+ /// Gets or sets the size of the FFT window. Must be a power of 2.
+ /// Default is 2048.
+ /// </summary>
+ public int FftSize { get; set; } = 2048;
+
+ /// <summary>
+ /// Gets or sets the overlap factor between analysis frames.
+ /// Default is 2 (50% overlap).
+ /// </summary>
+ public int OverlapFactor { get; set; } = 2;
+
+ /// <summary>
+ /// Gets or sets the minimum frequency to consider for peaks (Hz).
+ /// Default is 300Hz.
+ /// </summary>
+ public float MinFrequency { get; set; } = 300.0f;
+
+ /// <summary>
+ /// Gets or sets the maximum frequency to consider for peaks (Hz).
+ /// Default is 5000Hz.
+ /// </summary>
+ public float MaxFrequency { get; set; } = 5000.0f;
+
+ /// <summary>
+ /// Gets or sets the size of the target zone in time (frames) for combinatorial hashing.
+ /// Reducing this reduces hash density.
+ /// Default is 3 frames (reduced from 5 to optimize density).
+ /// </summary>
+ public int TargetZoneSize { get; set; } = 3;
+
+ /// <summary>
+ /// Gets or sets the minimum magnitude required for a spectral peak to be considered.
+ /// Normalized range 0.0 to 1.0.
+ /// Default is 0.01 (increased from 0.002 to reduce noise hashes).
+ /// </summary>
+ public double MinPeakMagnitude { get; set; } = 0.01;
+
+ /// <summary>
+ /// Gets or sets the multiplier for adaptive thresholding.
+ /// A peak must be this many times larger than the local average to be selected.
+ /// Default is 2.0 (increased from 1.5 to select only prominent peaks).
+ /// </summary>
+ public double AdaptiveThresholdMultiplier { get; set; } = 2.0;
+
+ /// <summary>
+ /// Gets or sets the minimum absolute number of aligned hashes required to declare a match.
+ /// Default is 25.
+ /// </summary>
+ public int MinConfidenceThreshold { get; set; } = 25;
+
+ /// <summary>
+ /// Gets or sets the minimum relative confidence score (Matched Hashes / Total Query Hashes).
+ /// Range 0.0 to 1.0.
+ /// A value of 0.05 (5%) implies that at least 5% of the query's hashes must match the target.
+ /// This effectively filters out random collisions in large queries.
+ /// </summary>
+ public double MinRelativeConfidence { get; set; } = 0.05;
+}
\ No newline at end of file
diff --git a/Src/Security/Configuration/SignatureConfiguration.cs b/Src/Security/Configuration/SignatureConfiguration.cs
new file mode 100644
index 0000000..a7dde55
--- /dev/null
+++ b/Src/Security/Configuration/SignatureConfiguration.cs
@@ -0,0 +1,52 @@
+using SoundFlow.Security.Utils;
+
+namespace SoundFlow.Security.Configuration;
+
+/// <summary>
+/// Configuration settings for digital signature operations (signing and verification).
+/// </summary>
+public class SignatureConfiguration
+{
+    /// <summary>
+    /// Gets or sets the PEM-encoded Private Key (PKCS#8).
+    /// Required for signing.
+    /// Never distribute this key with your application.
+    /// </summary>
+    public string? PrivateKeyPem { get; set; }
+
+    /// <summary>
+    /// Gets or sets the PEM-encoded Public Key (SubjectPublicKeyInfo).
+    /// Required for verification.
+    /// This key is safe to distribute.
+    /// </summary>
+    public string? PublicKeyPem { get; set; }
+
+    /// <summary>
+    /// Initializes a new instance of the <see cref="SignatureConfiguration"/> class.
+    /// </summary>
+    /// <param name="privateKeyPem">The PEM-encoded Private Key (PKCS#8).</param>
+    /// <param name="publicKeyPem">The PEM-encoded Public Key (SubjectPublicKeyInfo).</param>
+    public SignatureConfiguration(string privateKeyPem, string publicKeyPem)
+    {
+        PrivateKeyPem = privateKeyPem;
+        PublicKeyPem = publicKeyPem;
+    }
+
+    /// <summary>
+    /// Initializes a new instance of the <see cref="SignatureConfiguration"/> class with no keys set.
+    /// </summary>
+    public SignatureConfiguration()
+    {
+        PrivateKeyPem = null;
+        PublicKeyPem = null;
+    }
+
+    /// <summary>
+    /// Generates a new key pair and returns a <see cref="SignatureConfiguration"/> instance
+    /// using <see cref="SignatureKeyGenerator.Generate"/>.
+    /// </summary>
+    /// <returns>A <see cref="SignatureConfiguration"/> instance with the generated key pair.</returns>
+    public static SignatureConfiguration Generate()
+    {
+        return SignatureKeyGenerator.Generate();
+    }
+}
\ No newline at end of file
diff --git a/Src/Security/Configuration/WatermarkConfiguration.cs b/Src/Security/Configuration/WatermarkConfiguration.cs
new file mode 100644
index 0000000..48fde1b
--- /dev/null
+++ b/Src/Security/Configuration/WatermarkConfiguration.cs
@@ -0,0 +1,34 @@
+namespace SoundFlow.Security.Configuration;
+
+/// <summary>
+/// Configuration settings for audio watermarking operations.
+/// </summary>
+public class WatermarkConfiguration
+{
+    /// <summary>
+    /// Gets or sets the secret key used to seed the pseudo-random number generator for spread spectrum watermarking.
+    /// Both the embedder and extractor must use the same key.
+    /// </summary>
+    public string Key { get; set; } = "DefaultSoundFlowKey";
+
+    /// <summary>
+    /// Gets or sets the embedding strength (Alpha).
+    /// Higher values make the watermark more robust but more audible.
+    /// Range: 0.001 (invisible) to 0.1 (audible hiss).
+    /// Default is 0.08 for a good balance of robustness and quality.
+    /// </summary>
+    public float Strength { get; set; } = 0.08f;
+
+    /// <summary>
+    /// Gets or sets the spread factor (Chip Rate).
+    /// Determines how many audio samples represent a single bit of data.
+    /// Higher values drastically increase robustness against noise and compression but decrease data rate.
+    /// Default is 16384 (approx 40 bits/sec at 44.1kHz).
+    /// </summary>
+    public int SpreadFactor { get; set; } = 16384;
+
+    /// <summary>
+    /// Gets or sets the block size for integrity watermarking hashing.
+    /// </summary>
+    public int IntegrityBlockSize { get; set; } = 4096;
+}
\ No newline at end of file
diff --git a/Src/Security/Containers/SecureAudioContainer.cs b/Src/Security/Containers/SecureAudioContainer.cs
new file mode 100644
index 0000000..945de09
--- /dev/null
+++ b/Src/Security/Containers/SecureAudioContainer.cs
@@ -0,0 +1,154 @@
+using System.Text;
+using SoundFlow.Enums;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Security.Containers;
+
+/// <summary>
+/// A utility class for handling the metadata headers of encrypted audio containers.
+/// </summary>
+public static class SecureAudioContainer
+{
+    // "SFA_ENC" as raw ASCII bytes; identifies a SoundFlow encrypted container.
+    private static readonly byte[] MagicHeader = "SFA_ENC"u8.ToArray();
+    private const int HeaderVersion = 1;
+
+    /// <summary>
+    /// Fixed size reserved for the signature block (Length + Data + Padding) if embedded.
+    /// 512 bytes is sufficient for ECDSA P-384 (approx 100-120 bytes) and future proofing.
+    /// </summary>
+    public const int MaxSignatureSize = 512;
+
+    /// <summary>
+    /// Container flags to indicate the presence of an embedded digital signature.
+    /// </summary>
+    [Flags]
+    public enum ContainerFlags : uint
+    {
+        /// <summary>
+        /// No flags set.
+        /// </summary>
+        None = 0,
+        /// <summary>
+        /// The container has an embedded digital signature.
+        /// </summary>
+        HasEmbeddedSignature = 1 << 0
+    }
+
+    /// <summary>
+    /// Writes the Secure Audio container header to the stream.
+    /// </summary>
+    /// <param name="outputStream">The destination stream.</param>
+    /// <param name="config">The encryption configuration containing the IV.</param>
+    /// <param name="originalFormat">The format of the audio data.</param>
+    /// <param name="embedSignature">Whether to reserve space for an embedded digital signature.</param>
+    /// <returns>The byte offset where the signature block begins (if embedded), or -1.</returns>
+    public static long WriteHeader(Stream outputStream, EncryptionConfiguration config, AudioFormat originalFormat, bool embedSignature = false)
+    {
+        // leaveOpen: true — the caller owns the stream's lifetime.
+        using var writer = new BinaryWriter(outputStream, Encoding.UTF8, true);
+
+        // Write Header Magic
+        writer.Write(MagicHeader);
+
+        // Write Version
+        writer.Write(HeaderVersion);
+
+        // Write Flags
+        var flags = embedSignature ? ContainerFlags.HasEmbeddedSignature : ContainerFlags.None;
+        writer.Write((uint)flags);
+
+        // Write Format Metadata
+        writer.Write(originalFormat.SampleRate);
+        writer.Write(originalFormat.Channels);
+        writer.Write((int)originalFormat.Format);
+
+        // Write Encryption Metadata (length-prefixed IV)
+        writer.Write(config.Iv.Length);
+        writer.Write(config.Iv);
+
+        // Write Signature Placeholder (if requested)
+        long sigOffset = -1;
+        if (embedSignature)
+        {
+            // Flush so outputStream.Position reflects everything written so far.
+            writer.Flush();
+            sigOffset = outputStream.Position;
+            // Write zeroed placeholder
+            writer.Write(new byte[MaxSignatureSize]);
+        }
+
+        return sigOffset;
+    }
+
+    /// <summary>
+    /// Reads the Secure Audio container header from the stream.
+    /// </summary>
+    /// <param name="inputStream">The source stream.</param>
+    /// <returns>
+    /// A result containing the audio format, the IV, the data start offset,
+    /// and optionally the extracted signature bytes and its file offset if present.
+    /// </returns>
+    public static Result<(AudioFormat Format, byte[] IV, long DataStartOffset, byte[]? Signature, long SigBlockOffset)> ReadHeader(Stream inputStream)
+    {
+        try
+        {
+            using var reader = new BinaryReader(inputStream, Encoding.UTF8, true);
+
+            // Verify Magic
+            var magic = reader.ReadBytes(MagicHeader.Length);
+            if (!magic.SequenceEqual(MagicHeader))
+                return new HeaderNotFoundError("Secure Audio Container Magic");
+
+            // Check Version
+            var version = reader.ReadInt32();
+            if (version != HeaderVersion)
+                return new UnsupportedFormatError($"Unknown Container Version: {version}");
+
+            // Read Flags
+            var flags = (ContainerFlags)reader.ReadUInt32();
+
+            // Read Format
+            var sampleRate = reader.ReadInt32();
+            var channels = reader.ReadInt32();
+            var format = (SampleFormat)reader.ReadInt32();
+
+            var audioFormat = new AudioFormat
+            {
+                SampleRate = sampleRate,
+                Channels = channels,
+                Format = format,
+                Layout = AudioFormat.GetLayoutFromChannels(channels)
+            };
+
+            // Read Encryption Metadata
+            // NOTE(review): ivLength is read from untrusted input and not bounds-checked;
+            // a hostile value falls through to the catch below via ReadBytes — confirm acceptable.
+            var ivLength = reader.ReadInt32();
+            var iv = reader.ReadBytes(ivLength);
+
+            // Read Signature (if present)
+            byte[]? signature = null;
+            long sigBlockOffset = -1;
+
+            if (flags.HasFlag(ContainerFlags.HasEmbeddedSignature))
+            {
+                sigBlockOffset = inputStream.Position;
+                var sigBytesWithHeader = reader.ReadBytes(MaxSignatureSize);
+
+                if (sigBytesWithHeader.Length < 4)
+                    return new CorruptChunkError("SignatureBlock", "Truncated signature block.");
+
+                // Parse inner length (first 4 bytes of the fixed-size block).
+                var sigLen = BitConverter.ToInt32(sigBytesWithHeader, 0);
+                if (sigLen is > 0 and <= MaxSignatureSize - 4)
+                {
+                    signature = new byte[sigLen];
+                    Array.Copy(sigBytesWithHeader, 4, signature, 0, sigLen);
+                }
+            }
+
+            return (audioFormat, iv, inputStream.Position, signature, sigBlockOffset);
+        }
+        catch (Exception ex)
+        {
+            return new IoError("Failed to read container header.", ex);
+        }
+    }
+}
\ No newline at end of file
diff --git a/Src/Security/FileAuthenticator.cs b/Src/Security/FileAuthenticator.cs
new file mode 100644
index 0000000..d9893f1
--- /dev/null
+++ b/Src/Security/FileAuthenticator.cs
@@ -0,0 +1,140 @@
+using System.Security.Cryptography;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Structs;
+
+namespace SoundFlow.Security;
+
+/// <summary>
+/// Provides methods to sign and verify files using ECDSA Digital Signatures.
+/// This ensures file authenticity and integrity at the binary container level.
+/// </summary>
+public static class FileAuthenticator
+{
+    private const int BufferSize = 8192;
+
+    /// <summary>
+    /// Asynchronously calculates a digital signature for a specific file.
+    /// </summary>
+    /// <param name="filePath">The path to the file to sign.</param>
+    /// <param name="config">The configuration containing the Private Key in PEM format.</param>
+    /// <returns>A result containing the Base64-encoded signature string.</returns>
+    public static async Task<Result<string>> SignFileAsync(string filePath, SignatureConfiguration config)
+    {
+        if (string.IsNullOrWhiteSpace(config.PrivateKeyPem))
+            return new ValidationError("Private Key is required for signing.");
+
+        if (!File.Exists(filePath))
+            return new NotFoundError("File", $"File to sign not found: {filePath}");
+
+        try
+        {
+            await using var fileStream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read, BufferSize, useAsync: true);
+            return await SignStreamAsync(fileStream, config);
+        }
+        catch (IOException ex)
+        {
+            return new IoError($"reading file '{filePath}' for signing", ex);
+        }
+    }
+
+    /// <summary>
+    /// Asynchronously calculates a digital signature for a data stream.
+    /// The stream is hashed with SHA-384 and the hash is signed with the ECDSA private key.
+    /// </summary>
+    /// <param name="stream">The stream to sign. Must be readable.</param>
+    /// <param name="config">The configuration containing the Private Key in PEM format.</param>
+    /// <returns>A result containing the Base64-encoded signature string.</returns>
+    public static async Task<Result<string>> SignStreamAsync(Stream stream, SignatureConfiguration config)
+    {
+        if (string.IsNullOrWhiteSpace(config.PrivateKeyPem))
+            return new ValidationError("Private Key is required for signing.");
+
+        try
+        {
+            // Asynchronously compute the hash of the stream (static one-shot API,
+            // .NET 7+; avoids allocating a HashAlgorithm instance).
+            var dataHash = await SHA384.HashDataAsync(stream);
+
+            // Sign the computed hash.
+            using var ecdsa = ECDsa.Create();
+            ecdsa.ImportFromPem(config.PrivateKeyPem);
+            var signatureBytes = ecdsa.SignHash(dataHash);
+
+            return Convert.ToBase64String(signatureBytes);
+        }
+        catch (CryptographicException ex)
+        {
+            return new ValidationError("Invalid Private Key format or cryptographic error during signing.", ex);
+        }
+        catch (Exception ex)
+        {
+            return new Error("An unexpected error occurred during signing.", ex);
+        }
+    }
+
+    /// <summary>
+    /// Asynchronously verifies the authenticity of a file against a provided signature.
+    /// </summary>
+    /// <param name="filePath">The path to the file to verify.</param>
+    /// <param name="signatureBase64">The Base64-encoded signature to verify against.</param>
+    /// <param name="config">The configuration containing the Public Key in PEM format.</param>
+    /// <returns>A result containing true if the signature is valid, otherwise false.</returns>
+    public static async Task<Result<bool>> VerifyFileAsync(string filePath, string signatureBase64, SignatureConfiguration config)
+    {
+        if (string.IsNullOrWhiteSpace(config.PublicKeyPem))
+            return new ValidationError("Public Key is required for verification.");
+
+        if (!File.Exists(filePath))
+            return new NotFoundError("File", $"File to verify not found: {filePath}");
+
+        try
+        {
+            await using var fileStream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read, BufferSize, useAsync: true);
+            return await VerifyStreamAsync(fileStream, signatureBase64, config);
+        }
+        catch (IOException ex)
+        {
+            return new IoError($"reading file '{filePath}' for verification", ex);
+        }
+    }
+
+    /// <summary>
+    /// Asynchronously verifies the authenticity of a data stream against a provided signature.
+    /// Must use the same hash algorithm (SHA-384) as <see cref="SignStreamAsync"/>.
+    /// </summary>
+    /// <param name="stream">The stream to verify.</param>
+    /// <param name="signatureBase64">The Base64-encoded signature to verify against.</param>
+    /// <param name="config">The configuration containing the Public Key in PEM format.</param>
+    /// <returns>A result containing true if the signature is valid, otherwise false.</returns>
+    public static async Task<Result<bool>> VerifyStreamAsync(Stream stream, string signatureBase64, SignatureConfiguration config)
+    {
+        if (string.IsNullOrWhiteSpace(config.PublicKeyPem))
+            return new ValidationError("Public Key is required for verification.");
+
+        if (string.IsNullOrWhiteSpace(signatureBase64))
+            return new ValidationError("Signature cannot be empty for verification.");
+
+        try
+        {
+            var signatureBytes = Convert.FromBase64String(signatureBase64);
+
+            // Asynchronously compute the hash of the stream (must match SignStreamAsync).
+            var dataHash = await SHA384.HashDataAsync(stream);
+
+            // Verify the computed hash against the signature.
+            using var ecdsa = ECDsa.Create();
+            ecdsa.ImportFromPem(config.PublicKeyPem);
+            var isValid = ecdsa.VerifyHash(dataHash, signatureBytes);
+
+            return isValid;
+        }
+        catch (Exception ex)
+        {
+            return ex switch
+            {
+                FormatException formatEx => new ValidationError("The provided signature is not a valid Base64 string.", formatEx),
+                CryptographicException cryptEx => new ValidationError("Invalid Public Key format or cryptographic error during verification.", cryptEx),
+                _ => new Error("An unexpected error occurred during verification.", ex)
+            };
+        }
+    }
+}
\ No newline at end of file
diff --git a/Src/Security/Models/AudioFingerprint.cs b/Src/Security/Models/AudioFingerprint.cs
new file mode 100644
index 0000000..e75ddf9
--- /dev/null
+++ b/Src/Security/Models/AudioFingerprint.cs
@@ -0,0 +1,22 @@
+namespace SoundFlow.Security.Models;
+
+/// <summary>
+/// Represents the complete acoustic fingerprint of an audio source.
+/// </summary>
+public class AudioFingerprint
+{
+    /// <summary>
+    /// Gets or sets the unique identifier for the audio source.
+    /// Defaults to a newly generated GUID string.
+    /// </summary>
+    public string TrackId { get; set; } = Guid.NewGuid().ToString();
+
+    /// <summary>
+    /// Gets or sets the list of generated hashes.
+    /// </summary>
+    public List<FingerprintHash> Hashes { get; set; } = [];
+
+    /// <summary>
+    /// Gets or sets the total duration of the analyzed audio in seconds.
+    /// </summary>
+    public double DurationSeconds { get; set; }
+}
\ No newline at end of file
diff --git a/Src/Security/Models/FingerprintHash.cs b/Src/Security/Models/FingerprintHash.cs
new file mode 100644
index 0000000..11f6f8a
--- /dev/null
+++ b/Src/Security/Models/FingerprintHash.cs
@@ -0,0 +1,8 @@
+namespace SoundFlow.Security.Models;
+
+/// <summary>
+/// Represents a single feature point in an audio fingerprint.
+/// </summary>
+/// <param name="Hash">The computed hash value representing the relationship between spectral peaks.</param>
+/// <param name="TimeOffset">The time offset (in analysis frames) where the anchor point of this hash occurs.</param>
+public readonly record struct FingerprintHash(uint Hash, int TimeOffset);
\ No newline at end of file
diff --git a/Src/Security/Models/FingerprintMatchCandidate.cs b/Src/Security/Models/FingerprintMatchCandidate.cs
new file mode 100644
index 0000000..1fd97ef
--- /dev/null
+++ b/Src/Security/Models/FingerprintMatchCandidate.cs
@@ -0,0 +1,8 @@
+namespace SoundFlow.Security.Models;
+
+/// <summary>
+/// Represents a raw candidate match retrieved from the fingerprint store.
+/// </summary>
+/// <param name="TrackId">The unique identifier of the track in the database.</param>
+/// <param name="TrackTimeOffset">The time offset (in frames) where this hash occurs in the source track.</param>
+public readonly record struct FingerprintMatchCandidate(string TrackId, int TrackTimeOffset);
\ No newline at end of file
diff --git a/Src/Security/Models/FingerprintResult.cs b/Src/Security/Models/FingerprintResult.cs
new file mode 100644
index 0000000..f67d955
--- /dev/null
+++ b/Src/Security/Models/FingerprintResult.cs
@@ -0,0 +1,28 @@
+namespace SoundFlow.Security.Models;
+
+/// <summary>
+/// Represents the final result of an audio identification attempt.
+/// </summary>
+public class FingerprintResult
+{
+    /// <summary>
+    /// Gets the unique identifier of the matched track.
+    /// </summary>
+    public string TrackId { get; init; } = string.Empty;
+
+    /// <summary>
+    /// Gets the confidence score of the match.
+    /// This represents the number of hashes that temporally aligned with the database.
+    /// </summary>
+    public int Confidence { get; init; }
+
+    /// <summary>
+    /// Gets the calculated time offset (in seconds) where the query audio starts within the matched track.
+    /// </summary>
+    public double MatchTimeSeconds { get; init; }
+
+    /// <summary>
+    /// Gets the total processing time for the identification.
+    /// </summary>
+    public TimeSpan ProcessingTime { get; init; }
+}
\ No newline at end of file
diff --git a/Src/Security/Modifiers/IntegrityWatermarkEmbedModifier.cs b/Src/Security/Modifiers/IntegrityWatermarkEmbedModifier.cs
new file mode 100644
index 0000000..ebe963e
--- /dev/null
+++ b/Src/Security/Modifiers/IntegrityWatermarkEmbedModifier.cs
@@ -0,0 +1,83 @@
+using SoundFlow.Abstracts;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Utils;
+
+namespace SoundFlow.Security.Modifiers;
+
+/// <summary>
+/// Embeds a fragile integrity watermark using Block-Chained LSB Steganography.
+/// This modifier calculates a Pearson hash of the current audio block and embeds it
+/// into the Least Significant Bits (LSB) of the subsequent block.
+/// </summary>
+public sealed class IntegrityWatermarkEmbedModifier : SoundModifier
+{
+    private readonly WatermarkConfiguration _config;
+
+    // Accumulates IntegrityBlockSize samples (after embedding) to be hashed.
+    private readonly float[] _currentBlock;
+    private int _blockIndex;
+    private byte _previousBlockHash;
+
+    /// <inheritdoc />
+    public override string Name { get; set; } = "Integrity Watermark Embedder";
+
+    /// <summary>
+    /// Initializes a new instance of the <see cref="IntegrityWatermarkEmbedModifier"/> class.
+    /// </summary>
+    /// <param name="config">Configuration settings containing the block size.</param>
+    public IntegrityWatermarkEmbedModifier(WatermarkConfiguration config)
+    {
+        _config = config;
+        _currentBlock = new float[_config.IntegrityBlockSize];
+    }
+
+    /// <inheritdoc />
+    public override void Process(Span<float> buffer, int channels)
+    {
+        if (!Enabled) return;
+
+        // Interleaved processing effectively treats multichannel audio as a single continuous stream. Any channel manipulation breaks the chain.
+        for (var i = 0; i < buffer.Length; i++)
+        {
+            var sample = buffer[i];
+
+            // 1. Embed the hash of the previous block into the first 8 samples of the current block, using the LSB of the mantissa.
+            if (_blockIndex < 8)
+            {
+                var bit = (_previousBlockHash >> _blockIndex) & 1;
+                sample = EmbedBit(sample, bit);
+                buffer[i] = sample;
+            }
+
+            // 2. Accumulate the potentially modified sample into the current block buffer.
+            _currentBlock[_blockIndex] = sample;
+            _blockIndex++;
+
+            // 3. Block Completed, Calculate hash for the next block.
+            if (_blockIndex >= _config.IntegrityBlockSize)
+            {
+                _previousBlockHash = WatermarkingUtils.CalculatePearsonHash(_currentBlock);
+                _blockIndex = 0;
+            }
+        }
+    }
+
+    /// <inheritdoc />
+    public override float ProcessSample(float sample, int channel) =>
+        throw new NotSupportedException("Use the block-based Process method.");
+
+    /// <summary>
+    /// Embeds a single bit into the LSB of the float's mantissa representation.
+    /// </summary>
+    private static float EmbedBit(float sample, int bit)
+    {
+        var intRep = BitConverter.SingleToInt32Bits(sample);
+
+        // Clear the Least Significant Bit
+        intRep &= ~1;
+
+        // OR in the data bit
+        intRep |= (bit & 1);
+
+        return BitConverter.Int32BitsToSingle(intRep);
+    }
+}
\ No newline at end of file
diff --git a/Src/Security/Modifiers/OwnershipWatermarkEmbedModifier.cs b/Src/Security/Modifiers/OwnershipWatermarkEmbedModifier.cs
new file mode 100644
index 0000000..f8c373d
--- /dev/null
+++ b/Src/Security/Modifiers/OwnershipWatermarkEmbedModifier.cs
@@ -0,0 +1,121 @@
+using System.Collections;
+using SoundFlow.Abstracts;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Security.Payloads;
+using SoundFlow.Security.Utils;
+
+namespace SoundFlow.Security.Modifiers;
+
+/// <summary>
+/// Embeds a robust, invisible watermark into the audio stream using Direct Sequence Spread Spectrum (DSSS).
+/// This modifier generates a pseudo-random noise sequence seeded by a secret key and modulates
+/// the payload bits onto this noise. The noise is then added to the audio signal.
+/// </summary>
+public sealed class OwnershipWatermarkEmbedModifier : SoundModifier
+{
+    private readonly WatermarkConfiguration _config;
+    private readonly BitArray _payloadBits;
+
+    // PRNG State
+    private uint _rngState;
+
+    private int _currentBitIndex;
+    private int _currentChipIndex;
+    private bool _isComplete;
+
+    // Sync sequence (16 bits): 1010101011001100
+    private static readonly bool[] SyncSequence =
+        [true, false, true, false, true, false, true, false, true, true, false, false, true, true, false, false];
+
+    /// <inheritdoc />
+    public override string Name { get; set; } = "Ownership Watermark Embedder";
+
+    /// <summary>
+    /// Initializes a new instance of the <see cref="OwnershipWatermarkEmbedModifier"/> class.
+    /// </summary>
+    /// <param name="payload">The data to embed.</param>
+    /// <param name="config">Configuration settings.</param>
+    public OwnershipWatermarkEmbedModifier(IWatermarkPayload payload, WatermarkConfiguration config)
+    {
+        _config = config;
+
+        // Initialize PRNG with a stable hash of the key
+        _rngState = WatermarkingUtils.GetStableHash(_config.Key);
+        // Ensure non-zero seed
+        if (_rngState == 0) _rngState = 0xCAFEBABE;
+
+        // Construct payload: sync sequence first, then the data bits.
+        var dataBits = payload.ToBits();
+        _payloadBits = new BitArray(SyncSequence.Length + dataBits.Length);
+
+        for (var i = 0; i < SyncSequence.Length; i++)
+            _payloadBits[i] = SyncSequence[i];
+
+        for (var i = 0; i < dataBits.Length; i++)
+            _payloadBits[SyncSequence.Length + i] = dataBits[i];
+    }
+
+    /// <inheritdoc />
+    public override void Process(Span<float> buffer, int channels)
+    {
+        if (_isComplete || !Enabled) return;
+
+        // Process audio frame-by-frame (interleaved)
+        for (var i = 0; i < buffer.Length; i += channels)
+        {
+            // 1. Generate deterministic chip (-1 or +1)
+            var nextFloat = WatermarkingUtils.NextFloat(_rngState);
+            _rngState = nextFloat.CurrentState;
+            var chip = nextFloat.NextFloat > 0.5f ? 1.0f : -1.0f;
+
+            // 2. Retrieve the current data bit (-1 for 0, +1 for 1).
+            var bit = _payloadBits[_currentBitIndex] ? 1.0f : -1.0f;
+
+            // 3. Apply to all channels
+            for (var c = 0; c < channels; c++)
+            {
+                var sample = buffer[i + c];
+                var magnitude = Math.Abs(sample);
+
+                float adaptiveFactor;
+
+                // If signal is below ~ -50dB (0.003), then protect fade-outs, silence, and reverb tails where watermark is obvious.
+                if (magnitude < 0.003f)
+                {
+                    adaptiveFactor = 0.0f;
+                }
+                else
+                {
+                    // Square the magnitude. Audio 1.0 (Loud) -> Factor 1.0; Audio 0.5 (Med) -> Factor 0.25. This drastically reduces watermark power as volume decreases.
+                    adaptiveFactor = magnitude * magnitude;
+
+                    // Prevent it from blowing up on clipped audio (> 0dB)
+                    if (adaptiveFactor > 1.0f) adaptiveFactor = 1.0f;
+                }
+
+                // Apply the squared curve to the config strength
+                var effectiveStrength = _config.Strength * adaptiveFactor;
+
+                if (effectiveStrength > 0) buffer[i + c] += effectiveStrength * chip * bit;
+            }
+
+            // 4. Advance State
+            _currentChipIndex++;
+            if (_currentChipIndex >= _config.SpreadFactor)
+            {
+                _currentChipIndex = 0;
+                _currentBitIndex++;
+
+                if (_currentBitIndex >= _payloadBits.Length)
+                {
+                    _isComplete = true;
+                    break;
+                }
+            }
+        }
+    }
+
+    /// <inheritdoc />
+    public override float ProcessSample(float sample, int channel) =>
+        throw new NotSupportedException("Use block Process method.");
+}
\ No newline at end of file
diff --git a/Src/Security/Modifiers/StreamEncryptionModifier.cs b/Src/Security/Modifiers/StreamEncryptionModifier.cs
new file mode 100644
index 0000000..ef2a6e9
--- /dev/null
+++ b/Src/Security/Modifiers/StreamEncryptionModifier.cs
@@ -0,0 +1,200 @@
+using System.Buffers.Binary;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using System.Runtime.Intrinsics;
+using System.Runtime.Intrinsics.X86;
+using System.Security.Cryptography;
+using SoundFlow.Abstracts;
+using SoundFlow.Security.Configuration;
+using Aes = System.Security.Cryptography.Aes;
+
+namespace SoundFlow.Security.Modifiers;
+
+/// <summary>
+/// A modifier that performs real-time AES-256-CTR encryption or decryption.
+/// </summary>
+/// <remarks>
+/// <para>
+/// This implementation uses AES-CTR (Counter Mode).
+/// CTR mode is used because it transforms the block cipher into a stream cipher.
+/// This preserves the length of the data exactly (no padding), which is critical
+/// for maintaining the sample-count synchronization of the audio engine.
+/// </para>
+/// </remarks>
+public sealed class StreamEncryptionModifier : SoundModifier, IDisposable
+{
+    private readonly Aes _aes;
+    private readonly ICryptoTransform _ecbEncryptor;
+
+    // 16-byte CTR input block: nonce/IV in the leading bytes, 32-bit big-endian counter in bytes 12-15.
+    private readonly byte[] _counterBlock;
+    // Keystream produced by encrypting _counterBlock; XORed against the data.
+    private readonly byte[] _keyStreamBlock;
+    private readonly uint _initialCounter;
+    private int _keyStreamIndex;
+
+    /// <inheritdoc />
+    public override string Name { get; set; } = "AES-256-CTR Encryption";
+
+    /// <summary>
+    /// Initializes a new instance of the <see cref="StreamEncryptionModifier"/> class.
+    /// </summary>
+    /// <param name="config">The encryption configuration containing the Key (32 bytes) and IV (12 or 16 bytes).</param>
+    public StreamEncryptionModifier(EncryptionConfiguration config)
+    {
+        if (config.Key.Length != 32)
+            throw new ArgumentException("AES-256 requires a 32-byte key.", nameof(config));
+
+        if (config.Iv.Length != 12 && config.Iv.Length != 16)
+            throw new ArgumentException("AES-CTR recommends a 12-byte nonce or 16-byte IV.", nameof(config));
+
+        _aes = Aes.Create();
+        _aes.KeySize = 256;
+        _aes.Key = config.Key;
+        _aes.Mode = CipherMode.ECB; // CTR mode is implemented by encrypting a counter with ECB.
+        _aes.Padding = PaddingMode.None;
+
+        _ecbEncryptor = _aes.CreateEncryptor();
+
+        _counterBlock = new byte[16];
+        _keyStreamBlock = new byte[16];
+
+        var ivLength = Math.Min(16, config.Iv.Length);
+        Buffer.BlockCopy(config.Iv, 0, _counterBlock, 0, ivLength);
+
+        // Capture initial counter state (last 4 bytes, Big Endian) for seeking logic
+        // If IV is 12 bytes, the counter part is 0. If 16 bytes, it's the last 4 bytes.
+        _initialCounter = ivLength >= 16 ? BinaryPrimitives.ReadUInt32BigEndian(_counterBlock.AsSpan(12, 4)) : 0;
+
+        // Set index to 16 to force generation of a new keystream block on the first Process call.
+        _keyStreamIndex = 16;
+    }
+
+    /// <summary>
+    /// Resets the internal cryptographic state to a specific byte offset in the stream.
+    /// This allows for random access seeking within the encrypted stream.
+    /// </summary>
+    /// <param name="byteOffset">The absolute byte offset from the beginning of the data.</param>
+    public void SeekTo(long byteOffset)
+    {
+        // 1. Calculate which 16-byte block we are in.
+        var blockIndex = byteOffset / 16;
+
+        // 2. Calculate the offset within that block.
+        _keyStreamIndex = (int)(byteOffset % 16);
+
+        // 3. Calculate the new counter value (Counter = InitialCounter + BlockIndex)
+        var newCounter = unchecked(_initialCounter + (uint)blockIndex);
+
+        // 4. Update the counter block (Bytes 12-15)
+        BinaryPrimitives.WriteUInt32BigEndian(_counterBlock.AsSpan(12, 4), newCounter);
+
+        // 5. Pre-generate the keystream for the block we landed in, and advance the counter
+        //    so the NEXT refill encrypts block N+1. (Previously this passed false, so the
+        //    first refill after a seek re-encrypted counter N and applied block N's
+        //    keystream to block N+1's data, desynchronizing the cipher.)
+        GenerateKeyStreamBlock(incrementCounter: true);
+    }
+
+    /// <inheritdoc />
+    public override void Process(Span<float> buffer, int channels)
+    {
+        if (!Enabled) return;
+
+        // Treat the float buffer as a raw byte stream for cryptographic operations.
+        var byteBuffer = MemoryMarshal.Cast<float, byte>(buffer);
+        ProcessBytes(byteBuffer);
+    }
+
+    /// <inheritdoc />
+    public override float ProcessSample(float sample, int channel) =>
+        throw new NotSupportedException("Use the block-based Process method for high-performance encryption.");
+
+    /// <summary>
+    /// Processes the byte buffer, XORing it with the AES-CTR keystream.
+    /// Uses SSE2 SIMD instructions for full 16-byte blocks where supported.
+    /// </summary>
+    /// <param name="data">The data to encrypt/decrypt in place.</param>
+    [MethodImpl(MethodImplOptions.AggressiveOptimization)]
+    public unsafe void ProcessBytes(Span<byte> data)
+    {
+        fixed (byte* pData = data)
+        fixed (byte* pKeyStream = _keyStreamBlock)
+        {
+            var i = 0;
+            var length = data.Length;
+
+            while (i < length)
+            {
+                // 1. If we exhausted the current keystream block, generate the next one.
+                if (_keyStreamIndex >= 16)
+                {
+                    // Encrypt the current counter, then increment for the next round
+                    GenerateKeyStreamBlock(incrementCounter: true);
+                    _keyStreamIndex = 0;
+                }
+
+                // Determine number of bytes to process, limited by remaining data and current keystream.
+                var remainingInKeystream = 16 - _keyStreamIndex;
+                var remainingInData = length - i;
+                var bytesToProcess = Math.Min(remainingInKeystream, remainingInData);
+
+                // Use SIMD for full 16-byte blocks. This must be gated on Sse2.IsSupported:
+                // Vector128.IsHardwareAccelerated can be true on non-x86 hardware (e.g. ARM
+                // AdvSimd), where the Sse2.* intrinsics throw PlatformNotSupportedException.
+                if (Sse2.IsSupported && bytesToProcess == 16)
+                {
+                    // bytesToProcess == 16 implies _keyStreamIndex == 0 here.
+                    var vData = Sse2.LoadVector128(pData + i);
+                    var vKey = Sse2.LoadVector128(pKeyStream);
+                    var vResult = Sse2.Xor(vData, vKey);
+                    Sse2.Store(pData + i, vResult);
+
+                    i += 16;
+                    _keyStreamIndex += 16;
+                }
+                else
+                {
+                    // Fallback for partial blocks or non-SSE2 hardware.
+                    for (var j = 0; j < bytesToProcess; j++)
+                    {
+                        pData[i] ^= pKeyStream[_keyStreamIndex];
+                        i++;
+                        _keyStreamIndex++;
+                    }
+                }
+            }
+        }
+    }
+
+    /// <summary>
+    /// Generates the next 16 bytes of the keystream by encrypting the current counter value.
+    /// Optionally increments the counter afterwards.
+    /// </summary>
+    [MethodImpl(MethodImplOptions.AggressiveInlining)]
+    private void GenerateKeyStreamBlock(bool incrementCounter)
+    {
+        // Encrypt counter to get keystream
+        _ecbEncryptor.TransformBlock(_counterBlock, 0, 16, _keyStreamBlock, 0);
+
+        if (incrementCounter)
+        {
+            IncrementCounterBigEndian();
+        }
+    }
+
+    [MethodImpl(MethodImplOptions.AggressiveInlining)]
+    private void IncrementCounterBigEndian()
+    {
+        // Read the last 4 bytes as a UInt32 in Big Endian
+        var counterSpan = _counterBlock.AsSpan(12, 4);
+        var counterValue = BinaryPrimitives.ReadUInt32BigEndian(counterSpan);
+
+        // Standard wrapping overflow is the correct behavior for CTR mode.
+        unchecked
+        {
+            counterValue++;
+        }
+
+        // Write back
+        BinaryPrimitives.WriteUInt32BigEndian(counterSpan, counterValue);
+    }
+
+    /// <inheritdoc />
+    public void Dispose()
+    {
+        _aes.Dispose();
+        _ecbEncryptor.Dispose();
+    }
+}
\ No newline at end of file
diff --git a/Src/Security/Payloads/IWatermarkPayload.cs b/Src/Security/Payloads/IWatermarkPayload.cs
new file mode 100644
index 0000000..27b2b87
--- /dev/null
+++ b/Src/Security/Payloads/IWatermarkPayload.cs
@@ -0,0 +1,22 @@
+using System.Collections;
+
+namespace SoundFlow.Security.Payloads;
+
+/// <summary>
+/// Defines the contract for data that can be embedded into an audio watermark.
+/// </summary>
+public interface IWatermarkPayload
+{
+    /// <summary>
+    /// Converts the high-level payload data into a bit array for embedding.
+    /// </summary>
+    /// <returns>A <see cref="BitArray"/> representing the payload.</returns>
+    BitArray ToBits();
+
+    /// <summary>
+    /// Reconstructs the high-level payload data from a bit array.
+    /// </summary>
+    /// <param name="bits">The extracted bits.</param>
+    /// <returns>The reconstructed object (e.g., a string or byte array).</returns>
+    object? FromBits(BitArray bits);
+}
\ No newline at end of file
diff --git a/Src/Security/Payloads/TextPayload.cs b/Src/Security/Payloads/TextPayload.cs
new file mode 100644
index 0000000..d01d2da
--- /dev/null
+++ b/Src/Security/Payloads/TextPayload.cs
@@ -0,0 +1,49 @@
+using System.Collections;
+using System.Text;
+
+namespace SoundFlow.Security.Payloads;
+
+/// <summary>
+/// A watermark payload representing a text string.
+/// The wire format is a 4-byte length prefix followed by the UTF-8 encoded text.
+/// </summary>
+/// <param name="text">The text to embed.</param>
+public class TextPayload(string text) : IWatermarkPayload
+{
+    /// <summary>
+    /// Gets the text content of the payload.
+    /// </summary>
+    public string Text { get; } = text;
+
+    /// <summary>
+    /// Helper constructor for deserialization or empty initialization.
+    /// </summary>
+    public TextPayload() : this(string.Empty) { }
+
+    /// <inheritdoc />
+    public BitArray ToBits()
+    {
+        var encoded = Encoding.UTF8.GetBytes(Text);
+
+        // Length prefix (4-byte int) followed by the UTF-8 data.
+        var framed = new byte[sizeof(int) + encoded.Length];
+        BitConverter.GetBytes(encoded.Length).CopyTo(framed, 0);
+        encoded.CopyTo(framed, sizeof(int));
+
+        return new BitArray(framed);
+    }
+
+    /// <inheritdoc />
+    public object FromBits(BitArray bits)
+    {
+        // Repack the bits into bytes, rounding up to whole bytes.
+        var raw = new byte[(bits.Length + 7) / 8];
+        bits.CopyTo(raw, 0);
+
+        if (raw.Length < sizeof(int)) return string.Empty;
+
+        // Reject corrupt prefixes: negative lengths or lengths past the buffer end.
+        var declaredLength = BitConverter.ToInt32(raw, 0);
+        var isValid = declaredLength >= 0 && declaredLength <= raw.Length - sizeof(int);
+
+        return isValid ? Encoding.UTF8.GetString(raw, sizeof(int), declaredLength) : string.Empty;
+    }
+}
\ No newline at end of file
diff --git a/Src/Security/Stores/IFingerprintStore.cs b/Src/Security/Stores/IFingerprintStore.cs
new file mode 100644
index 0000000..6cff336
--- /dev/null
+++ b/Src/Security/Stores/IFingerprintStore.cs
@@ -0,0 +1,23 @@
+using SoundFlow.Security.Models;
+
+namespace SoundFlow.Security.Stores;
+
+/// <summary>
+/// Defines the contract for a fingerprint storage backend.
+/// Implement this interface to store fingerprints in a database (SQL, NoSQL, Redis, etc.).
+/// </summary>
+public interface IFingerprintStore
+{
+    /// <summary>
+    /// Stores a computed fingerprint in the database.
+    /// </summary>
+    /// <param name="fingerprint">The fingerprint to store.</param>
+    Task InsertAsync(AudioFingerprint fingerprint);
+
+    /// <summary>
+    /// Queries the database for tracks containing the specified hash.
+    /// </summary>
+    /// <param name="hash">The hash to look up.</param>
+    /// <returns>A list of candidates containing the track ID and the time offset where the hash occurs.</returns>
+    Task<List<FingerprintMatchCandidate>> QueryHashAsync(uint hash);
+}
\ No newline at end of file
diff --git a/Src/Security/Stores/InMemoryFingerprintStore.cs b/Src/Security/Stores/InMemoryFingerprintStore.cs
new file mode 100644
index 0000000..1f30c74
--- /dev/null
+++ b/Src/Security/Stores/InMemoryFingerprintStore.cs
@@ -0,0 +1,50 @@
+using System.Collections.Concurrent;
+using SoundFlow.Security.Models;
+
+namespace SoundFlow.Security.Stores;
+
+///
+/// <summary>
+/// A reference implementation of <see cref="IFingerprintStore"/> that stores data in memory.
+/// Suitable for small libraries, unit tests, or caching layers.
+/// </summary>
+public class InMemoryFingerprintStore : IFingerprintStore
+{
+ // The inverted index: hash -> list of (TrackId, TimeOffset) candidates.
+ // NOTE(review): the generic arguments appear stripped by extraction; presumably
+ // ConcurrentDictionary<uint, List<FingerprintMatchCandidate>> — confirm against the repository.
+ private readonly ConcurrentDictionary> _index = new();
+
+ /// <inheritdoc />
+ /// <remarks>
+ /// Shared candidate lists are mutated under their own lock. A list created by the add
+ /// factory that loses the AddOrUpdate race is discarded before it is ever published,
+ /// so the add path needs no lock.
+ /// </remarks>
+ public Task InsertAsync(AudioFingerprint fingerprint)
+ {
+ foreach (var hashData in fingerprint.Hashes)
+ {
+ var candidate = new FingerprintMatchCandidate(fingerprint.TrackId, hashData.TimeOffset);
+
+ _index.AddOrUpdate(
+ hashData.Hash,
+ _ => [candidate],
+ (_, list) =>
+ {
+ // The list may be concurrently read by QueryHashAsync; guard the mutation.
+ lock (list)
+ {
+ list.Add(candidate);
+ }
+ return list;
+ });
+ }
+
+ return Task.CompletedTask;
+ }
+
+ /// <inheritdoc />
+ public Task> QueryHashAsync(uint hash)
+ {
+ if (!_index.TryGetValue(hash, out var candidates))
+ return Task.FromResult(new List());
+
+ lock (candidates)
+ {
+ // Return a copy to ensure thread safety during iteration by the caller
+ return Task.FromResult(new List(candidates));
+ }
+ }
+}
\ No newline at end of file
diff --git a/Src/Security/Utils/SignatureKeyGenerator.cs b/Src/Security/Utils/SignatureKeyGenerator.cs
new file mode 100644
index 0000000..8c459d3
--- /dev/null
+++ b/Src/Security/Utils/SignatureKeyGenerator.cs
@@ -0,0 +1,29 @@
+using System.Security.Cryptography;
+using SoundFlow.Security.Configuration;
+
+namespace SoundFlow.Security.Utils;
+
+
+///
+/// <summary>
+/// Utility for generating secure ECDSA key pairs for file signing.
+/// </summary>
+public static class SignatureKeyGenerator
+{
+ /// <summary>
+ /// Generates a new ECDSA key pair using the NIST P-384 curve.
+ /// P-384 offers a security level approximately equivalent to AES-192.
+ /// </summary>
+ /// <returns>A <see cref="SignatureConfiguration"/> containing the Private Key (PKCS#8 PEM) and the Public Key (SPKI PEM).</returns>
+ public static SignatureConfiguration Generate()
+ {
+ using var ecdsa = ECDsa.Create(ECCurve.NamedCurves.nistP384);
+
+ // PKCS#8 is the standard container format for private keys;
+ // SubjectPublicKeyInfo (SPKI) is the X.509 standard for public keys.
+ var privateKeyPem = ecdsa.ExportPkcs8PrivateKeyPem();
+ var publicKeyPem = ecdsa.ExportSubjectPublicKeyInfoPem();
+
+ return new SignatureConfiguration(privateKeyPem, publicKeyPem);
+ }
+}
\ No newline at end of file
diff --git a/Src/Security/Utils/WatermarkingUtils.cs b/Src/Security/Utils/WatermarkingUtils.cs
new file mode 100644
index 0000000..d14115e
--- /dev/null
+++ b/Src/Security/Utils/WatermarkingUtils.cs
@@ -0,0 +1,86 @@
+using System.Runtime.InteropServices;
+
+namespace SoundFlow.Security.Utils;
+
+///
+/// <summary>
+/// Provides utility methods for Pearson hashing, a XorShift32 PRNG, and FNV-1a string hashing
+/// used by the watermarking pipeline.
+/// </summary>
+public static class WatermarkingUtils
+{
+ /// <summary>
+ /// The Pearson hashing permutation table (0-255).
+ /// </summary>
+ /// <remarks>
+ /// NOTE(review): the previous table listed 215 twice (rows 1 and 16) and omitted 61, so it
+ /// was not a true permutation as documented, which weakens the hash distribution. The
+ /// duplicate in the last row has been replaced with 61. Any hashes persisted with the old
+ /// table will no longer match — acceptable here since this file is newly introduced.
+ /// </remarks>
+ public static readonly byte[] PearsonTable =
+ [
+ 251, 175, 119, 215, 81, 14, 79, 191, 103, 49, 181, 143, 186, 157, 0, 232,
+ 31, 239, 229, 55, 129, 28, 99, 69, 23, 165, 32, 145, 20, 87, 24, 96,
+ 253, 169, 109, 223, 50, 67, 130, 92, 152, 36, 208, 230, 206, 196, 71, 252,
+ 64, 91, 45, 190, 85, 12, 106, 240, 111, 211, 197, 101, 154, 53, 209, 217,
+ 112, 29, 247, 48, 249, 133, 113, 203, 238, 201, 227, 214, 136, 108, 16, 128,
+ 192, 156, 193, 218, 177, 245, 84, 6, 19, 107, 195, 167, 1, 95, 62, 52,
+ 187, 33, 116, 56, 13, 10, 221, 222, 125, 42, 17, 189, 58, 207, 144, 254,
+ 155, 199, 172, 162, 148, 117, 185, 118, 140, 124, 25, 171, 90, 233, 228, 131,
+ 122, 188, 77, 163, 153, 37, 237, 242, 3, 15, 246, 26, 134, 183, 158, 66,
+ 231, 150, 147, 86, 216, 220, 102, 224, 164, 204, 30, 126, 11, 22, 135, 100,
+ 57, 115, 93, 120, 159, 132, 114, 21, 210, 123, 72, 59, 243, 27, 7, 8,
+ 40, 236, 68, 73, 63, 198, 225, 76, 255, 41, 38, 18, 88, 65, 105, 139,
+ 9, 127, 226, 78, 160, 5, 235, 46, 74, 39, 2, 248, 142, 205, 47, 241,
+ 146, 180, 250, 149, 138, 212, 121, 166, 104, 89, 137, 194, 219, 70, 244, 184,
+ 60, 4, 170, 213, 176, 80, 234, 173, 168, 200, 178, 97, 141, 94, 75, 43,
+ 83, 35, 161, 202, 110, 61, 174, 82, 34, 179, 151, 44, 98, 182, 51, 54
+ ];
+
+ /// <summary>
+ /// Calculates the 8-bit Pearson hash of the float buffer.
+ /// Pearson hashing provides a good distribution for data integrity checks with low collision rates for small changes.
+ /// </summary>
+ public static byte CalculatePearsonHash(Span data)
+ {
+ byte h = 0;
+ // NOTE(review): the generic arguments here appear stripped by extraction; presumably
+ // Span<float> and MemoryMarshal.Cast<float, byte> — confirm against the repository.
+ var byteData = MemoryMarshal.Cast(data);
+
+ // Process every byte of the float array
+ foreach (var t in byteData)
+ {
+ h = PearsonTable[h ^ t];
+ }
+
+ return h;
+ }
+
+ /// <summary>
+ /// Advances a XorShift32 PRNG and returns the next value normalized to [0, 1].
+ /// </summary>
+ /// <param name="rngState">The current state of the PRNG. A zero state yields a degenerate all-zero sequence.</param>
+ /// <returns>The next float in the sequence and the updated state.</returns>
+ public static (float NextFloat, uint CurrentState) NextFloat(uint rngState)
+ {
+ // XorShift32 (Marsaglia), canonical shift triple (13, 17, 5).
+ var x = rngState;
+ x ^= x << 13;
+ x ^= x >> 17;
+ x ^= x << 5;
+ rngState = x;
+
+ // Normalize to [0, 1]
+ return ((float)x / uint.MaxValue, rngState);
+ }
+
+ /// <summary>
+ /// Calculates the FNV-1a hash of a string.
+ /// FNV-1a is a fast non-cryptographic hash algorithm with good distribution for stable seeding.
+ /// </summary>
+ /// <param name="str">The string to hash.</param>
+ /// <returns>The FNV-1a hash of the string.</returns>
+ public static uint GetStableHash(string str)
+ {
+ // 2166136261 = 32-bit FNV offset basis; 16777619 = 32-bit FNV prime.
+ // The literal does not fit in int, so 'hash' is inferred as uint and wraps on overflow.
+ var hash = 2166136261;
+ foreach (var c in str)
+ {
+ hash ^= c;
+ hash *= 16777619;
+ }
+
+ return hash;
+ }
+}
\ No newline at end of file
diff --git a/Src/Security/WatermarkTuner.cs b/Src/Security/WatermarkTuner.cs
new file mode 100644
index 0000000..52def26
--- /dev/null
+++ b/Src/Security/WatermarkTuner.cs
@@ -0,0 +1,305 @@
+using System.Text;
+using SoundFlow.Interfaces;
+using SoundFlow.Metadata.Models;
+using SoundFlow.Providers;
+using SoundFlow.Security.Configuration;
+using SoundFlow.Structs;
+using SoundFlow.Utils;
+
+namespace SoundFlow.Security;
+
+///
+/// Provides static methods to automatically determine the optimal watermarking configuration
+/// for a given audio source and payload. This "tuner" simulates embedding and extracting
+/// at various parameters to find a balance between robustness and inaudibility.
+///
+public static class WatermarkTuner
+{
+ /// <summary>
+ /// Defines the discrete spread factor levels to test, ordered from most robust (and slowest)
+ /// to least robust (and fastest). A higher spread factor disperses the watermark signal
+ /// over more audio frames, increasing its resilience to damage.
+ /// </summary>
+ private static readonly int[] SpreadLevels = [16384, 8192, 4096, 2048];
+
+ /// <summary>
+ /// Asynchronously analyzes an audio source to find the most robust watermarking configuration.
+ /// It iterates through predefined spread factors and strength levels, simulating an embed/extract
+ /// cycle at strategic audio positions to verify the payload's integrity.
+ /// </summary>
+ /// <param name="source">The audio data provider to analyze.</param>
+ /// <param name="payload">The string data to be embedded in the watermark.</param>
+ /// <param name="secretKey">The secret key used for encryption and hashing within the watermark.</param>
+ /// <param name="applySafetyMargin">If true, the tuner will apply a safety margin to the final strength.</param>
+ /// <returns>
+ /// A task resolving to the recommended configuration. If no candidate survives the simulated
+ /// attack, a safe default (Spread=16384, Strength=0.10) is returned instead.
+ /// </returns>
+ // NOTE(review): the generic argument of the return type appears stripped by extraction;
+ // presumably Task<WatermarkConfiguration> — confirm against the repository.
+ public static async Task TuneConfigurationAsync(
+ ISoundDataProvider source,
+ string payload,
+ string secretKey,
+ bool applySafetyMargin = false)
+ {
+ Log.Info("WatermarkTuner: Starting comprehensive auto-tuning process.");
+
+ // Fall back to mono if the provider reports no (or zero) channels.
+ var channels = source.FormatInfo?.ChannelCount ?? 1;
+ if (channels == 0) channels = 1;
+
+ foreach (var spread in SpreadLevels)
+ {
+ Log.Debug($"Tuner: Testing Spread Factor {spread}");
+
+ // 1. Calculate required duration for the watermark at the current spread factor.
+ var payloadBytes = Encoding.UTF8.GetByteCount(payload);
+ // The total bits include CRC16 (16), payload length (32), and the payload itself.
+ var totalBits = 16 + 32 + (payloadBytes * 8);
+ var framesNeeded = totalBits * spread;
+
+ // Add a 5-second buffer to prevent the watermark from ending abruptly at the file's edge.
+ var bufferFrames = (int)(source.SampleRate * 5.0);
+ var totalFramesNeeded = framesNeeded + bufferFrames;
+
+ var fileLengthFrames = source.Length / channels;
+ if (totalFramesNeeded > fileLengthFrames)
+ {
+ var requiredSeconds = (double)totalFramesNeeded / source.SampleRate;
+ var fileSeconds = (double)fileLengthFrames / source.SampleRate;
+ Log.Debug($"Tuner: Payload too long for spread factor {spread}. Requires {requiredSeconds:F1}s, file is {fileSeconds:F1}s. Skipping.");
+ continue;
+ }
+
+ // 2. Identify strategic locations in the audio file to test embedding.
+ var candidates = GetCandidateOffsets(source, totalFramesNeeded, channels);
+
+ // 3. Test various strength levels at each candidate location.
+ const float startStrength = 0.02f;
+ const float maxStrength = 0.12f;
+ const float step = 0.02f;
+
+ // Float accumulation; the +0.001f epsilon keeps the final 0.12f iteration in range.
+ for (var s = startStrength; s <= maxStrength + 0.001f; s += step)
+ {
+ var config = new WatermarkConfiguration
+ {
+ Key = secretKey,
+ SpreadFactor = spread,
+ Strength = s
+ };
+
+ foreach (var startFrame in candidates)
+ {
+ var sliceSamples = ReadSlice(source, startFrame, totalFramesNeeded);
+ var watermarkedSlice = EmbedToMemory(sliceSamples, source.FormatInfo, payload, config);
+
+ // Simulate a volume reduction attack to test watermark resilience.
+ ApplyVolumeAttack(watermarkedSlice, 0.75f);
+
+ var result = await ExtractFromMemory(watermarkedSlice, source.FormatInfo, config);
+
+ if (result.IsSuccess && result.Value == payload)
+ {
+ // Success! Apply a safety margin to the strength to account for real-world distortions.
+ // Weaker base strengths get a proportionally larger margin.
+ var margin = 1f;
+ if (applySafetyMargin)
+ {
+ margin = s switch
+ {
+ <= 0.04f => 1.4f,
+ <= 0.08f => 1.2f,
+ _ => 1.1f
+ };
+ }
+
+ var finalStrength = (float)Math.Round(s * margin, 3);
+ // Hard cap so the margin never pushes the watermark into audibility.
+ if (finalStrength > 0.14f) finalStrength = 0.14f;
+
+ config.Strength = finalStrength;
+
+ Log.Info($"Tuner: Resolved optimal configuration at position {(float)startFrame / source.SampleRate:F1}s. Spread={spread}, BaseStrength={s:F3}, FinalStrength={finalStrength:F3}");
+ return config;
+ }
+ }
+ }
+ Log.Debug($"Tuner: Spread factor {spread} failed at all candidate positions and strengths.");
+ }
+
+ Log.Warning("Tuner: Auto-tuning failed to find a robust configuration. Defaulting to Spread=16384, Strength=0.10")
+ return new WatermarkConfiguration
+ {
+ Key = secretKey,
+ SpreadFactor = 16384,
+ Strength = 0.10f
+ };
+ }
+
+ /// <summary>
+ /// Identifies strategic starting frame offsets within the audio source for testing:
+ /// the file start, ~10 seconds in (to skip intros/fade-ins), and the densest audio region.
+ /// </summary>
+ /// <param name="source">The audio data provider.</param>
+ /// <param name="frameCount">The total number of frames the watermark requires.</param>
+ /// <param name="channels">The number of audio channels.</param>
+ /// <returns>A de-duplicated list of starting frame indices (presumably List&lt;int&gt; — the generic argument appears stripped by extraction).</returns>
+ private static List GetCandidateOffsets(ISoundDataProvider source, int frameCount, int channels)
+ {
+ var results = new List();
+ var fileLengthFrames = source.Length / channels;
+ var validRegion = fileLengthFrames - frameCount;
+
+ // Watermark barely fits (or not at all): the start is the only possible offset.
+ if (validRegion <= 0) return [0];
+
+ // Candidate 1: The very beginning of the file.
+ results.Add(0);
+
+ // Candidate 2: 10 seconds in, to skip potential silent intros or fade-ins.
+ var offset10S = Math.Min(validRegion, (int)(source.SampleRate * 10.0));
+ if (offset10S > source.SampleRate) results.Add(offset10S);
+
+ // Candidate 3: The most acoustically "dense" region for maximum resilience.
+ var denseOffset = FindDenseAudioSlice(source, frameCount, channels);
+ if (denseOffset != -1 && Math.Abs(denseOffset - 0) > source.SampleRate * 5) // Ensure it's not too close to the start
+ {
+ results.Add(denseOffset);
+ }
+
+ return results.Distinct().ToList();
+ }
+
+ /// <summary>
+ /// Scans the audio source to find the slice that is least likely to contain silence.
+ /// Watermarks are generally more robust when embedded in louder, more complex audio segments.
+ /// </summary>
+ /// <param name="source">The audio data provider.</param>
+ /// <param name="frameCount">The size of the slice to evaluate, in frames.</param>
+ /// <param name="channels">The number of audio channels.</param>
+ /// <returns>The starting frame index of the "densest" slice, or -1 if no full slice could be read.</returns>
+ private static int FindDenseAudioSlice(ISoundDataProvider source, int frameCount, int channels)
+ {
+ var fileLengthFrames = source.Length / channels;
+ var validRegion = fileLengthFrames - frameCount;
+ if (validRegion <= 0) return 0;
+
+ var bestStart = -1;
+ var minSilenceCount = int.MaxValue;
+ const float silenceThreshold = 0.003f;
+
+ // Evaluate (up to) five evenly spaced candidate positions across the valid region.
+ const int candidatesToCheck = 5;
+ var candidateStride = validRegion / candidatesToCheck;
+ if (candidateStride == 0) candidateStride = 1;
+
+ var buffer = new float[frameCount * channels];
+
+ for (var i = 0; i < validRegion; i += candidateStride)
+ {
+ // NOTE(review): despite its name, ReadBytes appears to fill a float buffer and return
+ // a sample count (read is compared against buffer.Length) — confirm against ISoundDataProvider.
+ if(source.CanSeek) source.Seek(i * channels);
+ var read = source.ReadBytes(buffer);
+ if (read < buffer.Length) break;
+
+ // Sample every 200th value as a cheap proxy for the slice's silence ratio.
+ var silenceCount = 0;
+ for (var k = 0; k < read; k += 200)
+ {
+ if (Math.Abs(buffer[k]) < silenceThreshold) silenceCount++;
+ }
+
+ if (silenceCount < minSilenceCount)
+ {
+ minSilenceCount = silenceCount;
+ bestStart = i;
+ // If we find a slice with no silence at all, it's a perfect candidate.
+ if (minSilenceCount == 0) break;
+ }
+ }
+
+ return bestStart;
+ }
+
+ /// <summary>
+ /// Reads a specific slice of audio data from a provider into an in-memory buffer.
+ /// </summary>
+ /// <param name="source">The audio data provider.</param>
+ /// <param name="startFrameIndex">The frame index to start reading from.</param>
+ /// <param name="frameCount">The number of frames to read.</param>
+ /// <returns>A float array containing the requested audio samples (interleaved across channels).</returns>
+ private static float[] ReadSlice(ISoundDataProvider source, int startFrameIndex, int frameCount)
+ {
+ var channels = source.FormatInfo?.ChannelCount ?? 1;
+ // Convert the frame index into an absolute interleaved-sample offset.
+ var absoluteSampleOffset = startFrameIndex * channels;
+
+ if (source.CanSeek) source.Seek(absoluteSampleOffset);
+
+ var totalSamplesToRead = frameCount * channels;
+ var buffer = new float[totalSamplesToRead];
+ var samplesRead = source.ReadBytes(buffer);
+
+ // Return a potentially smaller array if the read operation hit the end of the stream.
+ return samplesRead < buffer.Length ? buffer.AsSpan(0, samplesRead).ToArray() : buffer;
+ }
+
+ /// <summary>
+ /// Helper method to embed a watermark into an in-memory sample buffer by round-tripping
+ /// through an in-memory WAV stream produced by <c>AudioWatermarker</c>.
+ /// </summary>
+ /// <param name="sourceSamples">The raw audio samples to modify.</param>
+ /// <param name="info">The format information for the audio.</param>
+ /// <param name="text">The payload text to embed.</param>
+ /// <param name="config">The watermarking configuration to use.</param>
+ /// <returns>A new float array containing the watermarked audio samples, or an empty array if embedding produced no data.</returns>
+ private static float[] EmbedToMemory(float[] sourceSamples, SoundFormatInfo? info, string text, WatermarkConfiguration config)
+ {
+ using var sourceProvider = new RawDataProvider(sourceSamples)
+ {
+ FormatInfo = info
+ };
+ using var memoryStream = new MemoryStream();
+
+ // 1. Write watermarked WAV to MemoryStream
+ AudioWatermarker.EmbedOwnershipWatermark(sourceProvider, memoryStream, text, config);
+
+ // 2. Read back as raw float samples
+ const int headerSize = 44; // AudioWatermarker uses a deterministic writer: 44 bytes header.
+
+ if (memoryStream.Length <= headerSize) return [];
+
+ var dataLengthBytes = memoryStream.Length - headerSize;
+ // NOTE(review): assumes the WAV data chunk holds 32-bit float samples — confirm against AudioWatermarker.
+ var floatCount = dataLengthBytes / sizeof(float);
+ var resultBuffer = new float[floatCount];
+
+ memoryStream.Position = headerSize;
+
+ // Read directly into byte buffer then block copy to float[]
+ var byteBuffer = new byte[dataLengthBytes];
+ var read = memoryStream.Read(byteBuffer, 0, (int)dataLengthBytes);
+
+ Buffer.BlockCopy(byteBuffer, 0, resultBuffer, 0, read);
+
+ return resultBuffer;
+ }
+
+ /// <summary>
+ /// Simulates a simple volume reduction attack on an in-memory sample buffer.
+ /// This is used during tuning to test watermark resilience.
+ /// </summary>
+ /// <param name="samples">The audio sample buffer to modify in-place.</param>
+ /// <param name="volume">The volume multiplier (e.g., 0.75 for 75% volume).</param>
+ private static void ApplyVolumeAttack(float[] samples, float volume)
+ {
+ for (var i = 0; i < samples.Length; i++)
+ {
+ samples[i] *= volume;
+ }
+ }
+
+ /// <summary>
+ /// Helper method to extract a watermark from an in-memory sample buffer.
+ /// </summary>
+ /// <param name="watermarkedSamples">The raw audio samples containing a watermark.</param>
+ /// <param name="info">The format information for the audio.</param>
+ /// <param name="config">The watermarking configuration to use for extraction.</param>
+ /// <returns>A task resolving to the extraction result (presumably Result&lt;string&gt; — the generic argument appears stripped by extraction).</returns>
+ private static async Task> ExtractFromMemory(float[] watermarkedSamples, SoundFormatInfo? info, WatermarkConfiguration config)
+ {
+ using var provider = new RawDataProvider(watermarkedSamples)
+ {
+ FormatInfo = info
+ };
+ return await AudioWatermarker.ExtractOwnershipWatermarkAsync(provider, config);
+ }
+}
\ No newline at end of file
diff --git a/Src/SoundFlow.csproj b/Src/SoundFlow.csproj
index 4bbe714..aa407cb 100644
--- a/Src/SoundFlow.csproj
+++ b/Src/SoundFlow.csproj
@@ -5,6 +5,8 @@
enableenabletrue
+ true
+ trueSoundFlowA powerful and extensible cross-platform .NET audio engine. provides comprehensive audio processing capabilities including playback, recording, effects, analysis, and visualization, built with a modular and high-performance architecture.Copyright (c) 2025 LSXPrime
@@ -15,7 +17,7 @@
logo.pngREADME.mdaudio, sound, mp3, wav, playback, record, voice, volume, fft, simd, crossplatform, miniaudio, c#, .net, echo, noise
- 1.3.0
+ 1.4.0https://github.com/LSXPrime/SoundFlow/releasestrueLSXPrime
@@ -28,6 +30,10 @@
Alwaystrue
+
@@ -37,4 +43,10 @@
+
+
+
+ STATEMENT.md
+
+
diff --git a/Src/Structs/Errors.cs b/Src/Structs/Errors.cs
index bff2b4a..db9b5c3 100644
--- a/Src/Structs/Errors.cs
+++ b/Src/Structs/Errors.cs
@@ -27,14 +27,16 @@ public record Error(string Message, Exception? InnerException = null) : IError;
/// Represents errors related to invalid input arguments or preconditions.
///
/// A message that describes the validation error.
-public record ValidationError(string Message) : Error(Message);
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public record ValidationError(string Message, Exception? InnerException = null) : Error(Message, InnerException);
///
/// Represents an error when a required resource (like a file) is not found.
///
/// The name or identifier of the resource that was not found.
/// A message that describes the error.
-public record NotFoundError(string ResourceName, string Message) : Error(Message);
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public record NotFoundError(string ResourceName, string Message, Exception? InnerException = null) : Error(Message, InnerException);
///
/// Represents errors that occur during the parsing or writing of a specific file format.
@@ -48,16 +50,18 @@ public abstract record FileFormatError(string Message, Exception? InnerException
/// The error that occurs when attempting to read an audio format that is not supported by the library.
///
/// Specific details about the unsupported format (e.g., codec name, sample format).
-public sealed record UnsupportedFormatError(string FormatDetails)
- : FileFormatError($"The provided audio format is not supported. Details: {FormatDetails}");
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record UnsupportedFormatError(string FormatDetails, Exception? InnerException = null)
+ : FileFormatError($"The provided audio format is not supported. Details: {FormatDetails}", InnerException);
///
/// The error that occurs when a mandatory header, marker, or chunk is missing from a file.
///
/// A description of the mandatory header that was not found (e.g., "RIFF chunk", "ID3v2 tag").
-public sealed record HeaderNotFoundError(string HeaderDescription)
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record HeaderNotFoundError(string HeaderDescription, Exception? InnerException = null)
: FileFormatError(
- $"Could not find the mandatory '{HeaderDescription}'. The file may be corrupt or not a valid audio file.");
+ $"Could not find the mandatory '{HeaderDescription}'. The file may be corrupt or not a valid audio file.", InnerException);
///
/// The error that occurs when a recognized audio file's structural component (chunk, atom, etc.) is malformed.
@@ -81,8 +85,9 @@ public sealed record CorruptFrameError(string FrameDescription, string Reason, E
/// The error that occurs when an object has been disposed.
///
/// A description of the disposed object (e.g., class name).
-public sealed record ObjectDisposedError(string ObjectDescription)
- : Error($"The '{ObjectDescription}' object has been disposed.");
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record ObjectDisposedError(string ObjectDescription, Exception? InnerException = null)
+ : Error($"The '{ObjectDescription}' object has been disposed.", InnerException);
///
/// Represents errors related to an audio or MIDI device.
@@ -95,8 +100,9 @@ public abstract record DeviceError(string Message, Exception? InnerException = n
/// The error that occurs when an operation is performed on a device that is in an invalid state for that operation.
///
/// A message describing why the state is invalid for the operation.
-public sealed record DeviceStateError(string Reason)
- : DeviceError(Reason);
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record DeviceStateError(string Reason, Exception? InnerException = null)
+ : DeviceError(Reason, InnerException);
///
/// The error that occurs when a core device operation (like open, start, or stop) fails.
@@ -111,45 +117,51 @@ public sealed record DeviceOperationError(string Operation, string Reason, Excep
/// The error that occurs when a requested audio device cannot be found.
///
/// The identifier (name, ID, etc.) of the device that was not found.
-public sealed record DeviceNotFoundError(string DeviceIdentifier)
- : DeviceError($"The device '{DeviceIdentifier}' could not be found.");
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record DeviceNotFoundError(string DeviceIdentifier, Exception? InnerException = null)
+ : DeviceError($"The device '{DeviceIdentifier}' could not be found.", InnerException);
///
/// The error that occurs when a required audio backend (like WASAPI, CoreAudio, etc.) is not available or enabled.
///
/// The name of the specific backend that was not found, if applicable.
-public sealed record BackendNotFoundError(string? BackendName = null)
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record BackendNotFoundError(string? BackendName = null, Exception? InnerException = null)
: Error(string.IsNullOrEmpty(BackendName)
? "No suitable audio backend was found."
- : $"The audio backend '{BackendName}' was not found or is not enabled.");
+ : $"The audio backend '{BackendName}' was not found or is not enabled.", InnerException);
///
/// The error that occurs when an operation attempts to use a resource that is already in use.
///
/// The name or description of the busy resource.
-public sealed record ResourceBusyError(string ResourceName)
- : Error($"The resource '{ResourceName}' is busy or already in use.");
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record ResourceBusyError(string ResourceName, Exception? InnerException = null)
+ : Error($"The resource '{ResourceName}' is busy or already in use.", InnerException);
///
/// The error that occurs when an attempt to allocate memory fails.
///
-public sealed record OutOfMemoryError()
- : Error("Insufficient memory to complete the operation.");
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record OutOfMemoryError(Exception? InnerException = null)
+ : Error("Insufficient memory to complete the operation.", InnerException);
///
/// The error that occurs when a method call is invalid for the object's current state.
/// This is analogous to .
///
/// The message that describes the error.
-public sealed record InvalidOperationError(string Message)
- : Error(Message);
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record InvalidOperationError(string Message, Exception? InnerException = null)
+ : Error(Message, InnerException);
///
/// The error that occurs when a requested feature or method is not implemented.
///
/// The name of the unimplemented feature.
-public sealed record NotImplementedError(string FeatureName)
- : Error($"The feature '{FeatureName}' is not implemented.");
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record NotImplementedError(string FeatureName, Exception? InnerException = null)
+ : Error($"The feature '{FeatureName}' is not implemented.", InnerException);
///
/// Represents a generic error reported by the underlying operating system or host environment.
@@ -173,19 +185,32 @@ public sealed record InternalLibraryError(string LibraryName, string Reason, Exc
///
/// A description of the I/O operation that failed (e.g., "reading from file stream").
/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
-public sealed record IOError(string OperationDescription, Exception? InnerException = null)
+public sealed record IoError(string OperationDescription, Exception? InnerException = null)
: Error($"An I/O error occurred during '{OperationDescription}'.", InnerException);
///
/// The error that occurs when an operation times out.
///
/// A description of the operation that timed out.
-public sealed record TimeoutError(string OperationDescription)
- : Error($"The operation '{OperationDescription}' timed out.");
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record TimeoutError(string OperationDescription, Exception? InnerException = null)
+ : Error($"The operation '{OperationDescription}' timed out.", InnerException);
///
/// The error that occurs when access to a requested resource is denied.
///
/// The path or identifier of the resource to which access was denied.
-public sealed record AccessDeniedError(string ResourceIdentifier)
- : Error($"Access to the resource '{ResourceIdentifier}' was denied.");
\ No newline at end of file
+/// The exception that is the cause of the current error, or a null reference if no inner exception is specified.
+public sealed record AccessDeniedError(string ResourceIdentifier, Exception? InnerException = null)
+ : Error($"Access to the resource '{ResourceIdentifier}' was denied.", InnerException);
+
+
+/// <summary>
+/// The error that occurs when a duplicate request is made, even though the state is preserved.
+/// This means that the same method or operation should only be called once, as subsequent calls will have no effect,
+/// and the state should be changed to be able to perform the operation.
+/// </summary>
+/// <param name="RequestDescription">A description of the request that was made twice.</param>
+/// <param name="InnerException">The exception that is the cause of the current error, or a null reference if no inner exception is specified.</param>
+public sealed record DuplicateRequestError(string RequestDescription, Exception? InnerException = null)
+ : Error($"The request '{RequestDescription}' was made twice, but only needs to be called once. Change the preserved state to allow performing the operation before making a second call.", InnerException);
\ No newline at end of file
diff --git a/Src/Synthesis/Voices/Voice.cs b/Src/Synthesis/Voices/Voice.cs
index e255685..5005776 100644
--- a/Src/Synthesis/Voices/Voice.cs
+++ b/Src/Synthesis/Voices/Voice.cs
@@ -1,4 +1,5 @@
using System.Buffers;
+using SoundFlow.Enums;
using SoundFlow.Midi.Structs;
using SoundFlow.Modifiers;
using SoundFlow.Structs;
@@ -82,7 +83,7 @@ public Voice(VoiceDefinition definition, VoiceContext context)
{
_filter = new Filter(new AudioFormat { SampleRate = context.SampleRate, Channels = 2 })
{
- Type = Filter.FilterType.LowPass,
+ Type = FilterType.LowPass,
Resonance = 0.5f
};
diff --git a/Src/Utils/BiquadFilter.cs b/Src/Utils/BiquadFilter.cs
new file mode 100644
index 0000000..eea4885
--- /dev/null
+++ b/Src/Utils/BiquadFilter.cs
@@ -0,0 +1,169 @@
+using System.Runtime.CompilerServices;
+using SoundFlow.Enums;
+
+namespace SoundFlow.Utils;
+
+///
+/// A standalone Biquad filter implementation.
+/// Handles state and coefficient calculation for a single audio channel based on the RBJ Audio EQ Cookbook.
+///
+public sealed class BiquadFilter
+{
+ // Coefficients of the difference equation, normalized by a0 in Update().
+ private float _a1, _a2, _b0, _b1, _b2;
+
+ // State: previous two inputs (x) and outputs (y) of the Direct Form I structure.
+ private float _x1, _x2, _y1, _y2;
+
+ /// <summary>
+ /// Updates the filter coefficients based on the specified parameters, following the
+ /// RBJ Audio EQ Cookbook formulas. The resulting coefficients are normalized by a0.
+ /// </summary>
+ /// <param name="type">The type of filter to apply.</param>
+ /// <param name="sampleRate">The sample rate of the audio data.</param>
+ /// <param name="frequency">The center or cutoff frequency in Hz (clamped to [10, 0.49 * sampleRate]).</param>
+ /// <param name="q">The quality factor (Q) or resonance (clamped to a minimum of 0.01).</param>
+ /// <param name="gainDb">The gain in decibels (used for Peaking and Shelf filters).</param>
+ /// <param name="shelfSlope">The shelf slope parameter (used for Shelf filters).</param>
+ public void Update(FilterType type, float sampleRate, float frequency, float q, float gainDb = 0f, float shelfSlope = 1f)
+ {
+ // Clamp frequency to prevent stability issues near Nyquist
+ frequency = Math.Clamp(frequency, 10f, sampleRate * 0.49f);
+
+ // Ensure Q is valid to avoid division by zero
+ q = Math.Max(0.01f, q);
+
+ var omega = 2.0f * MathF.PI * frequency / sampleRate;
+ var sinOmega = MathF.Sin(omega);
+ var cosOmega = MathF.Cos(omega);
+ var alpha = sinOmega / (2.0f * q);
+
+ // A is used for gain calculations (A = 10^(gainDb/40) per the cookbook)
+ var a = MathF.Pow(10, gainDb / 40);
+ float a0;
+
+ switch (type)
+ {
+ case FilterType.LowPass:
+ _b0 = (1 - cosOmega) / 2;
+ _b1 = 1 - cosOmega;
+ _b2 = (1 - cosOmega) / 2;
+ a0 = 1 + alpha;
+ _a1 = -2 * cosOmega;
+ _a2 = 1 - alpha;
+ break;
+
+ case FilterType.HighPass:
+ _b0 = (1 + cosOmega) / 2;
+ _b1 = -(1 + cosOmega);
+ _b2 = (1 + cosOmega) / 2;
+ a0 = 1 + alpha;
+ _a1 = -2 * cosOmega;
+ _a2 = 1 - alpha;
+ break;
+
+ case FilterType.BandPass:
+ _b0 = alpha;
+ _b1 = 0;
+ _b2 = -alpha;
+ a0 = 1 + alpha;
+ _a1 = -2 * cosOmega;
+ _a2 = 1 - alpha;
+ break;
+
+ case FilterType.Notch:
+ _b0 = 1;
+ _b1 = -2 * cosOmega;
+ _b2 = 1;
+ a0 = 1 + alpha;
+ _a1 = -2 * cosOmega;
+ _a2 = 1 - alpha;
+ break;
+
+ case FilterType.Peaking:
+ _b0 = 1 + alpha * a;
+ _b1 = -2 * cosOmega;
+ _b2 = 1 - alpha * a;
+ a0 = 1 + alpha / a;
+ _a1 = -2 * cosOmega;
+ _a2 = 1 - alpha / a;
+ break;
+
+ case FilterType.LowShelf:
+ {
+ var sqrtA = MathF.Sqrt(a);
+ var alphaShelf = sinOmega / 2 * MathF.Sqrt((a + 1 / a) * (1 / shelfSlope - 1) + 2);
+
+ _b0 = a * ((a + 1) - (a - 1) * cosOmega + 2 * sqrtA * alphaShelf);
+ _b1 = 2 * a * ((a - 1) - (a + 1) * cosOmega);
+ _b2 = a * ((a + 1) - (a - 1) * cosOmega - 2 * sqrtA * alphaShelf);
+ a0 = (a + 1) + (a - 1) * cosOmega + 2 * sqrtA * alphaShelf;
+ _a1 = -2 * ((a - 1) + (a + 1) * cosOmega);
+ _a2 = (a + 1) + (a - 1) * cosOmega - 2 * sqrtA * alphaShelf;
+ break;
+ }
+
+ case FilterType.HighShelf:
+ {
+ var sqrtA = MathF.Sqrt(a);
+ var alphaShelf = sinOmega / 2 * MathF.Sqrt((a + 1 / a) * (1 / shelfSlope - 1) + 2);
+
+ _b0 = a * ((a + 1) + (a - 1) * cosOmega + 2 * sqrtA * alphaShelf);
+ _b1 = -2 * a * ((a - 1) + (a + 1) * cosOmega);
+ _b2 = a * ((a + 1) + (a - 1) * cosOmega - 2 * sqrtA * alphaShelf);
+ a0 = (a + 1) - (a - 1) * cosOmega + 2 * sqrtA * alphaShelf;
+ _a1 = 2 * ((a - 1) - (a + 1) * cosOmega);
+ _a2 = (a + 1) - (a - 1) * cosOmega - 2 * sqrtA * alphaShelf;
+ break;
+ }
+
+ case FilterType.AllPass:
+ _b0 = 1 - alpha;
+ _b1 = -2 * cosOmega;
+ _b2 = 1 + alpha;
+ a0 = 1 + alpha;
+ _a1 = -2 * cosOmega;
+ _a2 = 1 - alpha;
+ break;
+
+ default:
+ // Pass-through if unknown
+ _b0 = 1; _b1 = 0; _b2 = 0; a0 = 1; _a1 = 0; _a2 = 0;
+ break;
+ }
+
+ // Normalize coefficients by a0
+ var invA0 = 1.0f / a0;
+ _b0 *= invA0;
+ _b1 *= invA0;
+ _b2 *= invA0;
+ _a1 *= invA0;
+ _a2 *= invA0;
+ }
+
+ /// <summary>
+ /// Processes a single audio sample through the filter (Direct Form I):
+ /// y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2].
+ /// </summary>
+ /// <param name="sample">The input sample.</param>
+ /// <returns>The filtered output sample.</returns>
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ public float Process(float sample)
+ {
+ var y = _b0 * sample
+ + _b1 * _x1 + _b2 * _x2
+ - _a1 * _y1 - _a2 * _y2;
+
+ // Age the delay line: the current input/output become the most recent history.
+ (_x2, _x1) = (_x1, sample);
+ (_y2, _y1) = (_y1, y);
+
+ return y;
+ }
+
+ /// <summary>
+ /// Resets the internal state of the filter, clearing the input/output history
+ /// without touching the coefficients.
+ /// </summary>
+ public void Reset()
+ {
+ _x1 = 0;
+ _x2 = 0;
+ _y1 = 0;
+ _y2 = 0;
+ }
+}
\ No newline at end of file
diff --git a/Src/Utils/Log.cs b/Src/Utils/Log.cs
index 3b67de4..cc7ca9a 100644
--- a/Src/Utils/Log.cs
+++ b/Src/Utils/Log.cs
@@ -1,4 +1,6 @@
-namespace SoundFlow.Utils;
+using System.Runtime.CompilerServices;
+
+namespace SoundFlow.Utils;
///
/// Defines the severity levels for log messages.
@@ -27,6 +29,54 @@ public enum LogLevel
Critical
}
+/// <summary>
+/// Represents a single log event.
+/// Using a 'readonly struct' avoids heap allocation and GC pressure for high-performance scenarios.
+/// </summary>
+public readonly struct LogEntry
+{
+ /// <summary>
+ /// Gets the severity level of the log entry.
+ /// </summary>
+ public LogLevel Level { get; }
+
+ /// <summary>
+ /// Gets the log message.
+ /// </summary>
+ public string Message { get; }
+
+ /// <summary>
+ /// Gets the date and time when the log entry was created.
+ /// </summary>
+ public DateTime Timestamp { get; }
+
+ /// <summary>
+ /// Gets information about the calling member (e.g., 'ClassName.MethodName').
+ /// </summary>
+ public string Caller { get; }
+
+ /// <summary>
+ /// Initializes a new instance of the <see cref="LogEntry"/> struct.
+ /// </summary>
+ /// <param name="level">The severity level of the log entry.</param>
+ /// <param name="message">The log message.</param>
+ /// <param name="timestamp">The date and time of the log entry.</param>
+ /// <param name="caller">Information about the calling member.</param>
+ public LogEntry(LogLevel level, string message, DateTime timestamp, string caller)
+ {
+ Level = level;
+ Message = message;
+ Timestamp = timestamp;
+ Caller = caller;
+ }
+
+ /// <inheritdoc />
+ public override string ToString()
+ {
+ return $"[{Timestamp:HH:mm:ss}] {Level.ToString().ToUpper()} {Caller}: {Message}";
+ }
+}
+
///
/// Provides a centralized, decoupled logging mechanism for the SoundFlow library.
/// End-users can subscribe to the static OnLog event to capture and handle log messages.
@@ -37,35 +87,76 @@ public static class Log
/// Occurs when the SoundFlow library generates a log message.
/// Subscribe to this event in your application to route logs to a console, file, or UI.
///
- public static event Action<LogLevel, string>? OnLog;
+ public static event Action<LogEntry>? OnLog;
+
+ /// <summary>
+ /// Creates a <see cref="LogEntry"/> and invokes the <see cref="OnLog"/> event.
+ /// </summary>
+ /// <param name="level">The severity level for the log entry.</param>
+ /// <param name="message">The log message.</param>
+ /// <param name="memberName">The name of the calling member. Populated by the compiler.</param>
+ /// <param name="filePath">The path of the source file of the caller. Populated by the compiler.</param>
+ private static void Dispatch(LogLevel level, string message, string memberName, string filePath)
+ {
+ // Early exit if no one is listening.
+ if (OnLog == null) return;
+
+ var className = Path.GetFileNameWithoutExtension(filePath);
+ var fullCaller = $"{className}.{memberName}";
+
+ // Constructing a LogEntry struct here is cheap: a stack value, no heap allocation.
+ var entry = new LogEntry(level, message, DateTime.Now, fullCaller);
+
+ OnLog.Invoke(entry);
+ }
/// <summary>
- /// For public library use. Invokes the OnLog event with a Debug level message.
+ /// Logs a message with the <see cref="LogLevel.Debug"/> severity level.
/// </summary>
/// <param name="message">The log message.</param>
- public static void Debug(string message) => OnLog?.Invoke(LogLevel.Debug, message);
+ /// <param name="member">The name of the calling member. This is automatically populated by the compiler and should not be set manually.</param>
+ /// <param name="path">The path of the source file of the caller. This is automatically populated by the compiler and should not be set manually.</param>
+ public static void Debug(string message,
+ [CallerMemberName] string member = "", [CallerFilePath] string path = "")
+ => Dispatch(LogLevel.Debug, message, member, path);
/// <summary>
- /// For public library use. Invokes the OnLog event with an Info level message.
+ /// Logs a message with the <see cref="LogLevel.Info"/> severity level.
/// </summary>
/// <param name="message">The log message.</param>
- public static void Info(string message) => OnLog?.Invoke(LogLevel.Info, message);
+ /// <param name="member">The name of the calling member. This is automatically populated by the compiler and should not be set manually.</param>
+ /// <param name="path">The path of the source file of the caller. This is automatically populated by the compiler and should not be set manually.</param>
+ public static void Info(string message,
+ [CallerMemberName] string member = "", [CallerFilePath] string path = "")
+ => Dispatch(LogLevel.Info, message, member, path);
/// <summary>
- /// For public library use. Invokes the OnLog event with a Warning level message.
+ /// Logs a message with the <see cref="LogLevel.Warning"/> severity level.
/// </summary>
/// <param name="message">The log message.</param>
- public static void Warning(string message) => OnLog?.Invoke(LogLevel.Warning, message);
+ /// <param name="member">The name of the calling member. This is automatically populated by the compiler and should not be set manually.</param>
+ /// <param name="path">The path of the source file of the caller. This is automatically populated by the compiler and should not be set manually.</param>
+ public static void Warning(string message,
+ [CallerMemberName] string member = "", [CallerFilePath] string path = "")
+ => Dispatch(LogLevel.Warning, message, member, path);
/// <summary>
- /// For public library use. Invokes the OnLog event with an Error level message.
+ /// Logs a message with the <see cref="LogLevel.Error"/> severity level.
/// </summary>
/// <param name="message">The log message.</param>
- public static void Error(string message) => OnLog?.Invoke(LogLevel.Error, message);
+ /// <param name="member">The name of the calling member. This is automatically populated by the compiler and should not be set manually.</param>
+ /// <param name="path">The path of the source file of the caller. This is automatically populated by the compiler and should not be set manually.</param>
+ public static void Error(string message,
+ [CallerMemberName] string member = "", [CallerFilePath] string path = "")
+ => Dispatch(LogLevel.Error, message, member, path);
/// <summary>
- /// For public library use. Invokes the OnLog event with a Critical level message.
+ /// Logs a message with the <see cref="LogLevel.Critical"/> severity level.
/// </summary>
/// <param name="message">The log message.</param>
- public static void Critical(string message) => OnLog?.Invoke(LogLevel.Critical, message);
+ /// <param name="member">The name of the calling member. This is automatically populated by the compiler and should not be set manually.</param>
+ /// <param name="path">The path of the source file of the caller. This is automatically populated by the compiler and should not be set manually.</param>
+ public static void Critical(string message,
+ [CallerMemberName] string member = "", [CallerFilePath] string path = "")
+ => Dispatch(LogLevel.Critical, message, member, path);
}
\ No newline at end of file
diff --git a/Src/Utils/MathHelper.cs b/Src/Utils/MathHelper.cs
index 20b39cc..7f39b74 100644
--- a/Src/Utils/MathHelper.cs
+++ b/Src/Utils/MathHelper.cs
@@ -560,30 +560,12 @@ private static unsafe float[] HanningWindowAvx(int size)
return window;
}
-
- ///
- /// Performs linear interpolation between two values
- ///
- public static float Lerp(float a, float b, float t) => a + (b - a) * Math.Clamp(t, 0, 1);
-
+
///
/// Checks if a number is a power of two (2, 4, 8, 16, etc.).
///
public static bool IsPowerOfTwo(long n) => (n > 0) && ((n & (n - 1)) == 0);
- ///
- /// Returns the remainder after division, in the range [0, y).
- ///
- public static double Mod(this double x, double y) => x - y * Math.Floor(x / y);
-
- ///
- /// Returns the principal angle of a number in the range [-PI, PI).
- ///
- public static float PrincipalAngle(float angle)
- {
- return angle - (2 * MathF.PI * MathF.Floor((angle + MathF.PI) / (2 * MathF.PI)));
- }
-
///
/// Approximates the cosine of a vector using a highly accurate polynomial on a reduced quadrant.
/// Requires SSE4.1 for Floor/BlendVariable.
diff --git a/Src/Utils/TypeRegistry.cs b/Src/Utils/TypeRegistry.cs
new file mode 100644
index 0000000..008d954
--- /dev/null
+++ b/Src/Utils/TypeRegistry.cs
@@ -0,0 +1,91 @@
+using System.Diagnostics.CodeAnalysis;
+using SoundFlow.Components;
+using SoundFlow.Midi.Modifier;
+using SoundFlow.Modifiers;
+using SoundFlow.Security.Analyzers;
+using SoundFlow.Security.Modifiers;
+using SoundFlow.Visualization;
+
+namespace SoundFlow.Utils;
+
+/// <summary>
+/// A registry for resolving types by name at runtime in a NativeAOT-compatible way.
+/// This prevents the linker from trimming required types and avoids the use of unsafe reflection.
+/// </summary>
+public static class TypeRegistry
+{
+ private static readonly Dictionary<string, Type> Registry = new()
+ {
+ // Sound Modifiers
+ { typeof(AlgorithmicReverbModifier).FullName!, typeof(AlgorithmicReverbModifier) },
+ { typeof(BassBoosterModifier).FullName!, typeof(BassBoosterModifier) },
+ { typeof(ChorusModifier).FullName!, typeof(ChorusModifier) },
+ { typeof(CompressorModifier).FullName!, typeof(CompressorModifier) },
+ { typeof(DelayModifier).FullName!, typeof(DelayModifier) },
+ { typeof(Filter).FullName!, typeof(Filter) },
+ { typeof(FrequencyBandModifier).FullName!, typeof(FrequencyBandModifier) },
+ { typeof(HighPassModifier).FullName!, typeof(HighPassModifier) },
+ { typeof(LowPassModifier).FullName!, typeof(LowPassModifier) },
+ { typeof(MultiChannelChorusModifier).FullName!, typeof(MultiChannelChorusModifier) },
+ { typeof(ParametricEqualizer).FullName!, typeof(ParametricEqualizer) },
+ { typeof(ResamplerModifier).FullName!, typeof(ResamplerModifier) },
+ { typeof(TrebleBoosterModifier).FullName!, typeof(TrebleBoosterModifier) },
+ { typeof(VocalExtractorModifier).FullName!, typeof(VocalExtractorModifier) },
+
+ // Security Modifiers
+ { typeof(OwnershipWatermarkEmbedModifier).FullName!, typeof(OwnershipWatermarkEmbedModifier) },
+ { typeof(IntegrityWatermarkEmbedModifier).FullName!, typeof(IntegrityWatermarkEmbedModifier) },
+ { typeof(StreamEncryptionModifier).FullName!, typeof(StreamEncryptionModifier) },
+
+ // Audio Analyzers
+ { typeof(LevelMeterAnalyzer).FullName!, typeof(LevelMeterAnalyzer) },
+ { typeof(SpectrumAnalyzer).FullName!, typeof(SpectrumAnalyzer) },
+ { typeof(VoiceActivityDetector).FullName!, typeof(VoiceActivityDetector) },
+ { typeof(ContentFingerprintAnalyzer).FullName!, typeof(ContentFingerprintAnalyzer) },
+
+ // Security Analyzers
+ { typeof(OwnershipWatermarkExtractAnalyzer).FullName!, typeof(OwnershipWatermarkExtractAnalyzer) },
+ { typeof(IntegrityWatermarkVerifyAnalyzer).FullName!, typeof(IntegrityWatermarkVerifyAnalyzer) },
+
+ // MIDI Modifiers
+ { typeof(ArpeggiatorModifier).FullName!, typeof(ArpeggiatorModifier) },
+ { typeof(ChannelFilterModifier).FullName!, typeof(ChannelFilterModifier) },
+ { typeof(HarmonizerModifier).FullName!, typeof(HarmonizerModifier) },
+ { typeof(RandomizerModifier).FullName!, typeof(RandomizerModifier) },
+ { typeof(TransposeModifier).FullName!, typeof(TransposeModifier) },
+ { typeof(VelocityModifier).FullName!, typeof(VelocityModifier) }
+ };
+
+ /// <summary>
+ /// Registers a custom type to ensure it can be resolved during project loading.
+ /// This method must be called for any user-defined Modifiers or Analyzers when running in NativeAOT.
+ /// </summary>
+ /// <typeparam name="T">The type to register. Must have public properties and constructors.</typeparam>
+ public static void RegisterType<[DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicConstructors)] T>()
+ {
+ var type = typeof(T);
+ if (type.FullName != null)
+ {
+ Registry[type.FullName] = type;
+ }
+ }
+
+ /// <summary>
+ /// Resolves a type by its full name from the registry.
+ /// </summary>
+ /// <param name="typeName">The full name of the type.</param>
+ /// <returns>The resolved <see cref="Type"/> or null if not found.</returns>
+ [return: DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicProperties | DynamicallyAccessedMemberTypes.PublicConstructors)]
+ public static Type? ResolveType(string typeName)
+ {
+ if (Registry.TryGetValue(typeName, out var type))
+ return type;
+
+ // Fallback for JIT environments (non-AOT) where Type.GetType might still work for unregistered types.
+#pragma warning disable IL2057
+#pragma warning disable IL2026 // Suppress warning about requires unreferenced code
+ return Type.GetType(typeName);
+#pragma warning restore IL2026
+#pragma warning restore IL2057
+ }
+}
\ No newline at end of file
diff --git a/Src/Visualization/LevelMeterAnalyzer.cs b/Src/Visualization/LevelMeterAnalyzer.cs
index 79c5a7e..d52c3e0 100644
--- a/Src/Visualization/LevelMeterAnalyzer.cs
+++ b/Src/Visualization/LevelMeterAnalyzer.cs
@@ -33,7 +33,7 @@ public LevelMeterAnalyzer(AudioFormat format, IVisualizer? visualizer = null) :
public float Peak { get; private set; }
///
- protected override void Analyze(Span<float> buffer, int channels)
+ protected override void Analyze(ReadOnlySpan<float> buffer, int channels)
{
var peak = 0f;
var sumSquares = 0f;
diff --git a/Src/Visualization/LevelMeterVisualizer.cs b/Src/Visualization/LevelMeterVisualizer.cs
index 8e7a6c8..ced148c 100644
--- a/Src/Visualization/LevelMeterVisualizer.cs
+++ b/Src/Visualization/LevelMeterVisualizer.cs
@@ -61,7 +61,7 @@ public LevelMeterVisualizer(LevelMeterAnalyzer levelMeterAnalyzer)
}
///
- public void ProcessOnAudioData(Span<float> audioData)
+ public void ProcessOnAudioData(ReadOnlySpan<float> audioData)
{
_level = _levelMeterAnalyzer.Rms;
diff --git a/Src/Visualization/SpectrumAnalyzer.cs b/Src/Visualization/SpectrumAnalyzer.cs
index 45cc034..c558008 100644
--- a/Src/Visualization/SpectrumAnalyzer.cs
+++ b/Src/Visualization/SpectrumAnalyzer.cs
@@ -42,10 +42,10 @@ public SpectrumAnalyzer(AudioFormat format, int fftSize, IVisualizer? visualizer
///
/// Gets the spectrum data.
///
- public ReadOnlySpan<float> SpectrumData => _spectrumData;
+ public float[] SpectrumData => _spectrumData;
///
- protected override void Analyze(Span<float> buffer, int channels)
+ protected override void Analyze(ReadOnlySpan<float> buffer, int channels)
{
// Apply window function and copy to FFT buffer
var numSamples = Math.Min(buffer.Length, _fftSize);
diff --git a/Src/Visualization/SpectrumVisualizer.cs b/Src/Visualization/SpectrumVisualizer.cs
index 83c7ecb..132fb69 100644
--- a/Src/Visualization/SpectrumVisualizer.cs
+++ b/Src/Visualization/SpectrumVisualizer.cs
@@ -42,7 +42,7 @@ public SpectrumVisualizer(SpectrumAnalyzer spectrumAnalyzer)
}
///
- public void ProcessOnAudioData(Span<float> audioData)
+ public void ProcessOnAudioData(ReadOnlySpan<float> audioData)
{
// No need to do anything here, the spectrum analyzer already has the data.
VisualizationUpdated?.Invoke(this, EventArgs.Empty);
diff --git a/Src/Visualization/WaveformVisualizer.cs b/Src/Visualization/WaveformVisualizer.cs
index 6e0ad22..0e84d40 100644
--- a/Src/Visualization/WaveformVisualizer.cs
+++ b/Src/Visualization/WaveformVisualizer.cs
@@ -37,7 +37,7 @@ public Color WaveformColor
public Vector2 Size => new(800, 200);
///
- public void ProcessOnAudioData(Span<float> audioData)
+ public void ProcessOnAudioData(ReadOnlySpan<float> audioData)
{
Waveform.Clear();
Waveform.AddRange(audioData.ToArray());