diff --git a/.gitignore b/.gitignore
index 4ecd141..ba653b0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 /webrtc-audio-processing-0.3.1.tar.xz
+/webrtc-audio-processing-1.3.tar.xz
diff --git a/65f002e.patch b/65f002e.patch
new file mode 100644
index 0000000..f9098ab
--- /dev/null
+++ b/65f002e.patch
@@ -0,0 +1,313 @@
+--- a/webrtc/common_audio/wav_file.cc
++++ b/webrtc/common_audio/wav_file.cc
+@@ -10,6 +10,7 @@
+ 
+ #include "common_audio/wav_file.h"
+ 
++#include <byteswap.h>
+ #include <errno.h>
+ 
+ #include <algorithm>
+@@ -34,6 +35,38 @@ bool FormatSupported(WavFormat format) {
+          format == WavFormat::kWavFormatIeeeFloat;
+ }
+ 
++template <typename T>
++void TranslateEndianness(T* destination, const T* source, size_t length) {
++  static_assert(sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
++                "no converter, use integral types");
++  if (sizeof(T) == 2) {
++    const uint16_t* src = reinterpret_cast<const uint16_t*>(source);
++    uint16_t* dst = reinterpret_cast<uint16_t*>(destination);
++    for (size_t index = 0; index < length; index++) {
++      dst[index] = bswap_16(src[index]);
++    }
++  }
++  if (sizeof(T) == 4) {
++    const uint32_t* src = reinterpret_cast<const uint32_t*>(source);
++    uint32_t* dst = reinterpret_cast<uint32_t*>(destination);
++    for (size_t index = 0; index < length; index++) {
++      dst[index] = bswap_32(src[index]);
++    }
++  }
++  if (sizeof(T) == 8) {
++    const uint64_t* src = reinterpret_cast<const uint64_t*>(source);
++    uint64_t* dst = reinterpret_cast<uint64_t*>(destination);
++    for (size_t index = 0; index < length; index++) {
++      dst[index] = bswap_64(src[index]);
++    }
++  }
++}
++
++template <typename T>
++void TranslateEndianness(T* buffer, size_t length) {
++  TranslateEndianness(buffer, buffer, length);
++}
++
+ // Doesn't take ownership of the file handle and won't close it.
+ class WavHeaderFileReader : public WavHeaderReader {
+  public:
+@@ -89,10 +122,6 @@ void WavReader::Reset() {
+ 
+ size_t WavReader::ReadSamples(const size_t num_samples,
+                               int16_t* const samples) {
+-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+-#error "Need to convert samples to big-endian when reading from WAV file"
+-#endif
+-
+   size_t num_samples_left_to_read = num_samples;
+   size_t next_chunk_start = 0;
+   while (num_samples_left_to_read > 0 && num_unread_samples_ > 0) {
+@@ -105,6 +134,9 @@ size_t WavReader::ReadSamples(const size_t num_samples,
+       num_bytes_read = file_.Read(samples_to_convert.data(),
+                                   chunk_size * sizeof(samples_to_convert[0]));
+       num_samples_read = num_bytes_read / sizeof(samples_to_convert[0]);
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++      TranslateEndianness(samples_to_convert.data(), num_samples_read);
++#endif
+ 
+       for (size_t j = 0; j < num_samples_read; ++j) {
+         samples[next_chunk_start + j] = FloatToS16(samples_to_convert[j]);
+@@ -114,6 +146,10 @@ size_t WavReader::ReadSamples(const size_t num_samples,
+       num_bytes_read = file_.Read(&samples[next_chunk_start],
+                                   chunk_size * sizeof(samples[0]));
+       num_samples_read = num_bytes_read / sizeof(samples[0]);
++
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++      TranslateEndianness(&samples[next_chunk_start], num_samples_read);
++#endif
+     }
+     RTC_CHECK(num_samples_read == 0 || (num_bytes_read % num_samples_read) == 0)
+         << "Corrupt file: file ended in the middle of a sample.";
+@@ -129,10 +165,6 @@ size_t WavReader::ReadSamples(const size_t num_samples,
+ }
+ 
+ size_t WavReader::ReadSamples(const size_t num_samples, float* const samples) {
+-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+-#error "Need to convert samples to big-endian when reading from WAV file"
+-#endif
+-
+   size_t num_samples_left_to_read = num_samples;
+   size_t next_chunk_start = 0;
+   while (num_samples_left_to_read > 0 && num_unread_samples_ > 0) {
+@@ -145,6 +177,9 @@ size_t WavReader::ReadSamples(const size_t num_samples, float* const samples) {
+       num_bytes_read = file_.Read(samples_to_convert.data(),
+                                   chunk_size * sizeof(samples_to_convert[0]));
+       num_samples_read = num_bytes_read / sizeof(samples_to_convert[0]);
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++      TranslateEndianness(samples_to_convert.data(), num_samples_read);
++#endif
+ 
+       for (size_t j = 0; j < num_samples_read; ++j) {
+         samples[next_chunk_start + j] =
+@@ -155,6 +190,9 @@ size_t WavReader::ReadSamples(const size_t num_samples, float* const samples) {
+       num_bytes_read = file_.Read(&samples[next_chunk_start],
+                                   chunk_size * sizeof(samples[0]));
+       num_samples_read = num_bytes_read / sizeof(samples[0]);
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++      TranslateEndianness(&samples[next_chunk_start], num_samples_read);
++#endif
+ 
+       for (size_t j = 0; j < num_samples_read; ++j) {
+         samples[next_chunk_start + j] =
+@@ -213,24 +251,32 @@ WavWriter::WavWriter(FileWrapper file,
+ }
+ 
+ void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) {
+-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+-#error "Need to convert samples to little-endian when writing to WAV file"
+-#endif
+-
+   for (size_t i = 0; i < num_samples; i += kMaxChunksize) {
+     const size_t num_remaining_samples = num_samples - i;
+     const size_t num_samples_to_write =
+         std::min(kMaxChunksize, num_remaining_samples);
+ 
+     if (format_ == WavFormat::kWavFormatPcm) {
++#ifndef WEBRTC_ARCH_BIG_ENDIAN
+       RTC_CHECK(
+           file_.Write(&samples[i], num_samples_to_write * sizeof(samples[0])));
++#else
++      std::array<int16_t, kMaxChunksize> converted_samples;
++      TranslateEndianness(converted_samples.data(), &samples[i],
++                          num_samples_to_write);
++      RTC_CHECK(
++          file_.Write(converted_samples.data(),
++                      num_samples_to_write * sizeof(converted_samples[0])));
++#endif
+     } else {
+       RTC_CHECK_EQ(format_, WavFormat::kWavFormatIeeeFloat);
+       std::array<float, kMaxChunksize> converted_samples;
+       for (size_t j = 0; j < num_samples_to_write; ++j) {
+         converted_samples[j] = S16ToFloat(samples[i + j]);
+       }
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++      TranslateEndianness(converted_samples.data(), num_samples_to_write);
++#endif
+       RTC_CHECK(
+           file_.Write(converted_samples.data(),
+                       num_samples_to_write * sizeof(converted_samples[0])));
+@@ -243,10 +289,6 @@ void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) {
+ }
+ 
+ void WavWriter::WriteSamples(const float* samples, size_t num_samples) {
+-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+-#error "Need to convert samples to little-endian when writing to WAV file"
+-#endif
+-
+   for (size_t i = 0; i < num_samples; i += kMaxChunksize) {
+     const size_t num_remaining_samples = num_samples - i;
+     const size_t num_samples_to_write =
+@@ -257,6 +299,9 @@ void WavWriter::WriteSamples(const float* samples, size_t num_samples) {
+       for (size_t j = 0; j < num_samples_to_write; ++j) {
+         converted_samples[j] = FloatS16ToS16(samples[i + j]);
+       }
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++      TranslateEndianness(converted_samples.data(), num_samples_to_write);
++#endif
+       RTC_CHECK(
+           file_.Write(converted_samples.data(),
+                       num_samples_to_write * sizeof(converted_samples[0])));
+@@ -266,6 +311,9 @@ void WavWriter::WriteSamples(const float* samples, size_t num_samples) {
+       for (size_t j = 0; j < num_samples_to_write; ++j) {
+         converted_samples[j] = FloatS16ToFloat(samples[i + j]);
+       }
++#ifdef WEBRTC_ARCH_BIG_ENDIAN
++      TranslateEndianness(converted_samples.data(), num_samples_to_write);
++#endif
+       RTC_CHECK(
+           file_.Write(converted_samples.data(),
+                       num_samples_to_write * sizeof(converted_samples[0])));
+--- a/webrtc/common_audio/wav_header.cc
++++ b/webrtc/common_audio/wav_header.cc
+@@ -14,6 +14,8 @@
+ 
+ #include "common_audio/wav_header.h"
+ 
++#include <endian.h>
++
+ #include <cstring>
+ #include <limits>
+ #include <string>
+@@ -26,10 +28,6 @@
+ namespace webrtc {
+ namespace {
+ 
+-#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
+-#error "Code not working properly for big endian platforms."
+-#endif
+-
+ #pragma pack(2)
+ struct ChunkHeader {
+   uint32_t ID;
+@@ -174,6 +172,8 @@ bool FindWaveChunk(ChunkHeader* chunk_header,
+   if (readable->Read(chunk_header, sizeof(*chunk_header)) !=
+       sizeof(*chunk_header))
+     return false;  // EOF.
++  chunk_header->Size = le32toh(chunk_header->Size);
++
+   if (ReadFourCC(chunk_header->ID) == sought_chunk_id)
+     return true;  // Sought chunk found.
+   // Ignore current chunk by skipping its payload.
+@@ -187,6 +187,13 @@ bool ReadFmtChunkData(FmtPcmSubchunk* fmt_subchunk, WavHeaderReader* readable) {
+   if (readable->Read(&(fmt_subchunk->AudioFormat), kFmtPcmSubchunkSize) !=
+       kFmtPcmSubchunkSize)
+     return false;
++  fmt_subchunk->AudioFormat = le16toh(fmt_subchunk->AudioFormat);
++  fmt_subchunk->NumChannels = le16toh(fmt_subchunk->NumChannels);
++  fmt_subchunk->SampleRate = le32toh(fmt_subchunk->SampleRate);
++  fmt_subchunk->ByteRate = le32toh(fmt_subchunk->ByteRate);
++  fmt_subchunk->BlockAlign = le16toh(fmt_subchunk->BlockAlign);
++  fmt_subchunk->BitsPerSample = le16toh(fmt_subchunk->BitsPerSample);
++
+   const uint32_t fmt_size = fmt_subchunk->header.Size;
+   if (fmt_size != kFmtPcmSubchunkSize) {
+     // There is an optional two-byte extension field permitted to be present
+@@ -214,19 +221,22 @@ void WritePcmWavHeader(size_t num_channels,
+   auto header = rtc::MsanUninitialized<WavHeaderPcm>({});
+   const size_t bytes_in_payload = bytes_per_sample * num_samples;
+ 
+-  header.riff.header.ID = PackFourCC('R', 'I', 'F', 'F');
+-  header.riff.header.Size = RiffChunkSize(bytes_in_payload, *header_size);
+-  header.riff.Format = PackFourCC('W', 'A', 'V', 'E');
+-  header.fmt.header.ID = PackFourCC('f', 'm', 't', ' ');
+-  header.fmt.header.Size = kFmtPcmSubchunkSize;
+-  header.fmt.AudioFormat = MapWavFormatToHeaderField(WavFormat::kWavFormatPcm);
+-  header.fmt.NumChannels = static_cast<uint16_t>(num_channels);
+-  header.fmt.SampleRate = sample_rate;
+-  header.fmt.ByteRate = ByteRate(num_channels, sample_rate, bytes_per_sample);
+-  header.fmt.BlockAlign = BlockAlign(num_channels, bytes_per_sample);
+-  header.fmt.BitsPerSample = static_cast<uint16_t>(8 * bytes_per_sample);
+-  header.data.header.ID = PackFourCC('d', 'a', 't', 'a');
+-  header.data.header.Size = static_cast<uint32_t>(bytes_in_payload);
++  header.riff.header.ID = htole32(PackFourCC('R', 'I', 'F', 'F'));
++  header.riff.header.Size =
++      htole32(RiffChunkSize(bytes_in_payload, *header_size));
++  header.riff.Format = htole32(PackFourCC('W', 'A', 'V', 'E'));
++  header.fmt.header.ID = htole32(PackFourCC('f', 'm', 't', ' '));
++  header.fmt.header.Size = htole32(kFmtPcmSubchunkSize);
++  header.fmt.AudioFormat =
++      htole16(MapWavFormatToHeaderField(WavFormat::kWavFormatPcm));
++  header.fmt.NumChannels = htole16(num_channels);
++  header.fmt.SampleRate = htole32(sample_rate);
++  header.fmt.ByteRate =
++      htole32(ByteRate(num_channels, sample_rate, bytes_per_sample));
++  header.fmt.BlockAlign = htole16(BlockAlign(num_channels, bytes_per_sample));
++  header.fmt.BitsPerSample = htole16(8 * bytes_per_sample);
++  header.data.header.ID = htole32(PackFourCC('d', 'a', 't', 'a'));
++  header.data.header.Size = htole32(bytes_in_payload);
+ 
+   // Do an extra copy rather than writing everything to buf directly, since buf
+   // might not be correctly aligned.
+@@ -245,24 +255,26 @@ void WriteIeeeFloatWavHeader(size_t num_channels,
+   auto header = rtc::MsanUninitialized<WavHeaderIeeeFloat>({});
+   const size_t bytes_in_payload = bytes_per_sample * num_samples;
+ 
+-  header.riff.header.ID = PackFourCC('R', 'I', 'F', 'F');
+-  header.riff.header.Size = RiffChunkSize(bytes_in_payload, *header_size);
+-  header.riff.Format = PackFourCC('W', 'A', 'V', 'E');
+-  header.fmt.header.ID = PackFourCC('f', 'm', 't', ' ');
+-  header.fmt.header.Size = kFmtIeeeFloatSubchunkSize;
++  header.riff.header.ID = htole32(PackFourCC('R', 'I', 'F', 'F'));
++  header.riff.header.Size =
++      htole32(RiffChunkSize(bytes_in_payload, *header_size));
++  header.riff.Format = htole32(PackFourCC('W', 'A', 'V', 'E'));
++  header.fmt.header.ID = htole32(PackFourCC('f', 'm', 't', ' '));
++  header.fmt.header.Size = htole32(kFmtIeeeFloatSubchunkSize);
+   header.fmt.AudioFormat =
+-      MapWavFormatToHeaderField(WavFormat::kWavFormatIeeeFloat);
+-  header.fmt.NumChannels = static_cast<uint16_t>(num_channels);
+-  header.fmt.SampleRate = sample_rate;
+-  header.fmt.ByteRate = ByteRate(num_channels, sample_rate, bytes_per_sample);
+-  header.fmt.BlockAlign = BlockAlign(num_channels, bytes_per_sample);
+-  header.fmt.BitsPerSample = static_cast<uint16_t>(8 * bytes_per_sample);
+-  header.fmt.ExtensionSize = 0;
+-  header.fact.header.ID = PackFourCC('f', 'a', 'c', 't');
+-  header.fact.header.Size = 4;
+-  header.fact.SampleLength = static_cast<uint32_t>(num_channels * num_samples);
+-  header.data.header.ID = PackFourCC('d', 'a', 't', 'a');
+-  header.data.header.Size = static_cast<uint32_t>(bytes_in_payload);
++      htole16(MapWavFormatToHeaderField(WavFormat::kWavFormatIeeeFloat));
++  header.fmt.NumChannels = htole16(num_channels);
++  header.fmt.SampleRate = htole32(sample_rate);
++  header.fmt.ByteRate =
++      htole32(ByteRate(num_channels, sample_rate, bytes_per_sample));
++  header.fmt.BlockAlign = htole16(BlockAlign(num_channels, bytes_per_sample));
++  header.fmt.BitsPerSample = htole16(8 * bytes_per_sample);
++  header.fmt.ExtensionSize = htole16(0);
++  header.fact.header.ID = htole32(PackFourCC('f', 'a', 'c', 't'));
++  header.fact.header.Size = htole32(4);
++  header.fact.SampleLength = htole32(num_channels * num_samples);
++  header.data.header.ID = htole32(PackFourCC('d', 'a', 't', 'a'));
++  header.data.header.Size = htole32(bytes_in_payload);
+ 
+   // Do an extra copy rather than writing everything to buf directly, since buf
+   // might not be correctly aligned.
+@@ -391,6 +403,7 @@ bool ReadWavHeader(WavHeaderReader* readable,
+     return false;
+   if (ReadFourCC(header.riff.Format) != "WAVE")
+     return false;
++  header.riff.header.Size = le32toh(header.riff.header.Size);
+ 
+   // Find "fmt " and "data" chunks. While the official Wave file specification
+   // does not put requirements on the chunks order, it is uncommon to find the
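A note on 65f002e.patch above: WAV data is little-endian on disk, so the patch byte-swaps every 16-, 32- or 64-bit value whenever the host is big-endian (WEBRTC_ARCH_BIG_ENDIAN). The standalone sketch below illustrates that same swapping idea for 16-bit PCM samples. It assumes the glibc <byteswap.h> that provides bswap_16 and is illustrative only, not code taken from the patch.

```cpp
// Sketch of the byte-swapping approach used in 65f002e.patch (illustrative).
// Assumes glibc's <byteswap.h>; __builtin_bswap16 would work elsewhere.
#include <byteswap.h>
#include <cstdint>
#include <cstdio>
#include <vector>

// Swap every 16-bit sample in place, as TranslateEndianness does for int16_t.
void SwapSamples16(int16_t* samples, size_t length) {
  uint16_t* raw = reinterpret_cast<uint16_t*>(samples);
  for (size_t i = 0; i < length; ++i) {
    raw[i] = bswap_16(raw[i]);
  }
}

int main() {
  // 0x1234 stored little-endian on disk reads back as 0x3412 on a big-endian
  // host; swapping restores the intended value (and vice versa when writing).
  std::vector<int16_t> samples = {0x1234, 0x0001, -2};
  SwapSamples16(samples.data(), samples.size());
  for (int16_t s : samples) {
    std::printf("0x%04x\n", static_cast<unsigned>(static_cast<uint16_t>(s)));
  }
  return 0;
}
```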
diff --git a/arches.patch b/arches.patch
new file mode 100644
index 0000000..eaf43df
--- /dev/null
+++ b/arches.patch
@@ -0,0 +1,86 @@
+--- webrtc-audio-processing-1.3/webrtc/rtc_base/system/arch.h 2023-09-05 10:19:47.000000000 -0500
++++ webrtc-audio-processing-1.3/webrtc/rtc_base/system/arch.h 2024-02-12 10:04:12.114812565 -0600
+@@ -15,8 +15,9 @@
+ #define RTC_BASE_SYSTEM_ARCH_H_
+ 
+ // Processor architecture detection. For more info on what's defined, see:
+-// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+-// http://www.agner.org/optimize/calling_conventions.pdf
++// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
++// https://www.agner.org/optimize/calling_conventions.pdf
++// https://sourceforge.net/p/predef/wiki/Architectures/
+ // or with gcc, run: "echo | gcc -E -dM -"
+ #if defined(_M_X64) || defined(__x86_64__)
+ #define WEBRTC_ARCH_X86_FAMILY
+@@ -27,29 +28,50 @@
+ #define WEBRTC_ARCH_ARM_FAMILY
+ #define WEBRTC_ARCH_64_BITS
+ #define WEBRTC_ARCH_LITTLE_ENDIAN
+-#elif defined(__riscv) || defined(__riscv__)
+-#define WEBRTC_ARCH_LITTLE_ENDIAN
+-#if __riscv_xlen == 64
+-#define WEBRTC_ARCH_64_BITS
+-#else
+-#define WEBRTC_ARCH_32_BITS
+-#endif
+ #elif defined(_M_IX86) || defined(__i386__)
+ #define WEBRTC_ARCH_X86_FAMILY
+ #define WEBRTC_ARCH_X86
+ #define WEBRTC_ARCH_32_BITS
+ #define WEBRTC_ARCH_LITTLE_ENDIAN
+-#elif defined(__ARMEL__)
++#elif defined(_M_ARM) || defined(__ARMEL__)
+ #define WEBRTC_ARCH_ARM_FAMILY
+ #define WEBRTC_ARCH_32_BITS
+ #define WEBRTC_ARCH_LITTLE_ENDIAN
+-#elif defined(__MIPSEL__)
++#elif defined(__MIPSEL__) || defined(__MIPSEB__)
+ #define WEBRTC_ARCH_MIPS_FAMILY
+ #if defined(__LP64__)
+ #define WEBRTC_ARCH_64_BITS
+ #else
+ #define WEBRTC_ARCH_32_BITS
+ #endif
++#if defined(__MIPSEL__)
++#define WEBRTC_ARCH_LITTLE_ENDIAN
++#else
++#define WEBRTC_ARCH_BIG_ENDIAN
++#endif
++#elif defined(__PPC__)
++#if defined(__PPC64__)
++#define WEBRTC_ARCH_64_BITS
++#else
++#define WEBRTC_ARCH_32_BITS
++#endif
++#if defined(__LITTLE_ENDIAN__)
++#define WEBRTC_ARCH_LITTLE_ENDIAN
++#else
++#define WEBRTC_ARCH_BIG_ENDIAN
++#endif
++#elif defined(__sparc) || defined(__sparc__)
++#if __SIZEOF_LONG__ == 8
++#define WEBRTC_ARCH_64_BITS
++#else
++#define WEBRTC_ARCH_32_BITS
++#endif
++#define WEBRTC_ARCH_BIG_ENDIAN
++#elif defined(__riscv) && __riscv_xlen == 64
++#define WEBRTC_ARCH_64_BITS
++#define WEBRTC_ARCH_LITTLE_ENDIAN
++#elif defined(__riscv) && __riscv_xlen == 32
++#define WEBRTC_ARCH_32_BITS
+ #define WEBRTC_ARCH_LITTLE_ENDIAN
+ #elif defined(__pnacl__)
+ #define WEBRTC_ARCH_32_BITS
+--- webrtc-audio-processing-1.3/webrtc/rtc_base/system/arch.h~ 2024-02-12 10:14:11.277835532 -0600
++++ webrtc-audio-processing-1.3/webrtc/rtc_base/system/arch.h 2024-02-12 10:25:11.558554823 -0600
+@@ -79,6 +79,9 @@
+ #elif defined(__EMSCRIPTEN__)
+ #define WEBRTC_ARCH_32_BITS
+ #define WEBRTC_ARCH_LITTLE_ENDIAN
++#elif defined(__s390x__)
++#define WEBRTC_ARCH_64_BITS
++#define WEBRTC_ARCH_BIG_ENDIAN
+ #else
+ #error Please add support for your architecture in rtc_base/system/arch.h
+ #endif
diff --git a/sources b/sources
index 37600c9..b25567a 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-SHA512 (webrtc-audio-processing-0.3.1.tar.xz) = 1c7a2d16f7f6c03cf6d60405d0dcd224caae6e80c9c4d43f8373bad2446affcdf49a02efb0085387328289aa79c8981dcaedff876cde55be9602dbde9c3f440b
+SHA512 (webrtc-audio-processing-1.3.tar.xz) = daabaed06ff9d1d4076b2abba14efbbebeb8930b14a99fb47974399d2812f3f851e3d6a691b09fbcfb1a3535c6ade967bac4c17a3728f3138b302e3b844c5c67
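arches.patch above only defines WEBRTC_ARCH_BIG_ENDIAN or WEBRTC_ARCH_LITTLE_ENDIAN for each CPU family; the wav_file.cc and wav_header.cc code then branches on those macros. The minimal sketch below shows how a consumer can verify and use them. The macro names are taken from the patch; deriving them from the compiler's __BYTE_ORDER__ here is only so the sketch compiles on its own, it is not how the library obtains them.

```cpp
// Illustrative only: branching on the endianness macros that arches.patch
// defines in rtc_base/system/arch.h.
#include <cstdio>

// Stand-in for including "rtc_base/system/arch.h": derive the macros from the
// compiler's own __BYTE_ORDER__ so this sketch is self-contained.
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define WEBRTC_ARCH_BIG_ENDIAN
#else
#define WEBRTC_ARCH_LITTLE_ENDIAN
#endif

// Exactly one of the two macros must be defined, as arch.h itself verifies.
#if !(defined(WEBRTC_ARCH_LITTLE_ENDIAN) ^ defined(WEBRTC_ARCH_BIG_ENDIAN))
#error "Define either WEBRTC_ARCH_LITTLE_ENDIAN or WEBRTC_ARCH_BIG_ENDIAN"
#endif

int main() {
#ifdef WEBRTC_ARCH_BIG_ENDIAN
  std::puts("big-endian target: WAV samples will be byte-swapped");
#else
  std::puts("little-endian target: WAV samples are used as-is");
#endif
  return 0;
}
```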
diff --git a/webrtc-audio-processing-0.2-big-endian.patch b/webrtc-audio-processing-0.2-big-endian.patch
deleted file mode 100644
index 9361725..0000000
--- a/webrtc-audio-processing-0.2-big-endian.patch
+++ /dev/null
@@ -1,90 +0,0 @@
-diff -up webrtc-audio-processing-0.2/webrtc/common_audio/wav_file.cc.than webrtc-audio-processing-0.2/webrtc/common_audio/wav_file.cc
---- webrtc-audio-processing-0.2/webrtc/common_audio/wav_file.cc.than 2016-05-24 08:28:45.749940095 -0400
-+++ webrtc-audio-processing-0.2/webrtc/common_audio/wav_file.cc 2016-05-24 08:50:30.361020010 -0400
-@@ -64,9 +64,6 @@ WavReader::~WavReader() {
- }
- 
- size_t WavReader::ReadSamples(size_t num_samples, int16_t* samples) {
---#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
---#error "Need to convert samples to big-endian when reading from WAV file"
---#endif
-   // There could be metadata after the audio; ensure we don't read it.
-   num_samples = std::min(rtc::checked_cast<uint32_t>(num_samples),
-                          num_samples_remaining_);
-@@ -76,6 +73,12 @@ size_t WavReader::ReadSamples(size_t num
-   RTC_CHECK(read == num_samples || feof(file_handle_));
-   RTC_CHECK_LE(read, num_samples_remaining_);
-   num_samples_remaining_ -= rtc::checked_cast<uint32_t>(read);
-+#ifndef WEBRTC_ARCH_LITTLE_ENDIAN
-+  //convert to big-endian
-+  for(size_t idx = 0; idx < num_samples; idx++) {
-+    samples[idx] = (samples[idx]<<8) | (samples[idx]>>8);
-+  }
-+#endif
-   return read;
- }
- 
-@@ -120,10 +123,17 @@ WavWriter::~WavWriter() {
- 
- void WavWriter::WriteSamples(const int16_t* samples, size_t num_samples) {
- #ifndef WEBRTC_ARCH_LITTLE_ENDIAN
---#error "Need to convert samples to little-endian when writing to WAV file"
---#endif
-+  int16_t * le_samples = new int16_t[num_samples];
-+  for(size_t idx = 0; idx < num_samples; idx++) {
-+    le_samples[idx] = (samples[idx]<<8) | (samples[idx]>>8);
-+  }
-+  const size_t written =
-+      fwrite(le_samples, sizeof(*le_samples), num_samples, file_handle_);
-+  delete []le_samples;
-+#else
-   const size_t written =
-       fwrite(samples, sizeof(*samples), num_samples, file_handle_);
-+#endif
-   RTC_CHECK_EQ(num_samples, written);
-   num_samples_ += static_cast<uint32_t>(written);
-   RTC_CHECK(written <= std::numeric_limits<uint32_t>::max() ||
-diff -up webrtc-audio-processing-0.2/webrtc/common_audio/wav_header.cc.than webrtc-audio-processing-0.2/webrtc/common_audio/wav_header.cc
---- webrtc-audio-processing-0.2/webrtc/common_audio/wav_header.cc.than 2016-05-24 08:50:52.591379263 -0400
-+++ webrtc-audio-processing-0.2/webrtc/common_audio/wav_header.cc 2016-05-24 08:52:08.552606848 -0400
-@@ -129,7 +129,39 @@ static inline std::string ReadFourCC(uin
-   return std::string(reinterpret_cast<char*>(&x), 4);
- }
- #else
---#error "Write be-to-le conversion functions"
-+static inline void WriteLE16(uint16_t* f, uint16_t x) {
-+  *f = ((x << 8) & 0xff00) | ( ( x >> 8) & 0x00ff);
-+}
-+
-+static inline void WriteLE32(uint32_t* f, uint32_t x) {
-+  *f = ( (x & 0x000000ff) << 24 )
-+      | ((x & 0x0000ff00) << 8)
-+      | ((x & 0x00ff0000) >> 8)
-+      | ((x & 0xff000000) >> 24 );
-+}
-+
-+static inline void WriteFourCC(uint32_t* f, char a, char b, char c, char d) {
-+  *f = (static_cast<uint32_t>(a) << 24 )
-+      | (static_cast<uint32_t>(b) << 16)
-+      | (static_cast<uint32_t>(c) << 8)
-+      | (static_cast<uint32_t>(d) );
-+}
-+
-+static inline uint16_t ReadLE16(uint16_t x) {
-+  return (( x & 0x00ff) << 8 )| ((x & 0xff00)>>8);
-+}
-+
-+static inline uint32_t ReadLE32(uint32_t x) {
-+  return ( (x & 0x000000ff) << 24 )
-+      | ( (x & 0x0000ff00) << 8 )
-+      | ( (x & 0x00ff0000) >> 8)
-+      | ( (x & 0xff000000) >> 24 );
-+}
-+
-+static inline std::string ReadFourCC(uint32_t x) {
-+  x = ReadLE32(x);
-+  return std::string(reinterpret_cast<char*>(&x), 4);
-+}
- #endif
- 
- static inline uint32_t RiffChunkSize(uint32_t bytes_in_payload) {
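The deleted webrtc-audio-processing-0.2-big-endian.patch above hand-rolled WriteLE16/ReadLE32 helpers for the old wav_header.cc; the 1.3 packaging instead relies on the <endian.h> conversions (htole16/htole32, le16toh/le32toh) used by 65f002e.patch earlier in this diff. Below is a small self-contained sketch of that approach; the ChunkHeader struct here is a simplified stand-in for illustration, not the library's definition, and glibc's <endian.h> is assumed.

```cpp
// Illustrative sketch of the <endian.h>-based approach used by 65f002e.patch:
// numeric header fields are converted to little-endian with htole*() before
// being written and back to host order with le*toh() after being read, so the
// on-disk layout is identical on little- and big-endian machines.
#include <endian.h>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical stand-in for webrtc's ChunkHeader (the real one is packed).
struct ChunkHeader {
  uint32_t ID;    // FourCC, stored as raw bytes
  uint32_t Size;  // payload size, little-endian on disk
};

int main() {
  ChunkHeader on_disk{};
  std::memcpy(&on_disk.ID, "data", 4);  // FourCCs are byte strings; no swap needed
  on_disk.Size = htole32(48000);        // numeric fields are stored little-endian

  // Reading side: convert back to host order before using the value.
  uint32_t payload_bytes = le32toh(on_disk.Size);
  std::printf("chunk ID = %.4s, payload = %u bytes\n",
              reinterpret_cast<const char*>(&on_disk.ID), payload_bytes);
  return 0;
}
```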
diff --git a/webrtc-audio-processing.spec b/webrtc-audio-processing.spec
index a6f0294..ff6e2ef 100644
--- a/webrtc-audio-processing.spec
+++ b/webrtc-audio-processing.spec
@@ -1,21 +1,19 @@
 Name: webrtc-audio-processing
-Version: 0.3.1
-Release: 12%{?dist}
+Version: 1.3
+Release: 1%{?dist}
 Summary: Library for echo cancellation
 
 License: BSD-3-Clause
 URL: http://www.freedesktop.org/software/pulseaudio/webrtc-audio-processing/
 Source0: http://freedesktop.org/software/pulseaudio/webrtc-audio-processing/%{name}-%{version}.tar.xz
 
-## upstream patches
+Patch0: arches.patch
+Patch1: 65f002e.patch
 
-Patch100: webrtc-fix-typedefs-on-other-arches.patch
-# bz#1336466, https://bugs.freedesktop.org/show_bug.cgi?id=95738
-Patch104: webrtc-audio-processing-0.2-big-endian.patch
-
-BuildRequires: make
-BuildRequires: autoconf automake libtool
+BuildRequires: meson
 BuildRequires: gcc gcc-c++
+BuildRequires: abseil-cpp-devel
+#BuildRequires: neon-devel
 
 %description
 %{name} is a library derived from Google WebRTC project that
@@ -34,24 +32,15 @@ files for developing applications that use %{name}.
 %prep
 %autosetup -p1
 
 %build
-# for patch1
-autoreconf -vif
-
-%configure \
-%ifarch %{arm} aarch64
-  --enable-neon=no \
-%endif
-  --disable-silent-rules \
-  --disable-static
-
-%make_build
+%meson
+%meson_build \
+#%%ifarch %%{arm} aarch64
+# -Dneon=no \
+#%endif
 
 %install
-%make_install
-
-# remove libtool archives
-find %{buildroot} -type f -name "*.la" -delete
+%meson_install
 
 %ldconfig_scriptlets
 
 %files
 %doc NEWS AUTHORS README.md
 %license COPYING
-%{_libdir}/libwebrtc_audio_processing.so.1*
+%{_libdir}/libwebrtc-audio-coding-1.so.3*
+%{_libdir}/libwebrtc-audio-processing-1.so.3*
 
 %files devel
-%{_libdir}/libwebrtc_audio_processing.so
-%{_libdir}/pkgconfig/webrtc-audio-processing.pc
-%{_includedir}/webrtc_audio_processing/
+%{_libdir}/libwebrtc-audio-coding-1.so
+%{_libdir}/libwebrtc-audio-processing-1.so
+%{_libdir}/pkgconfig/webrtc-audio-coding-1.pc
+%{_libdir}/pkgconfig/webrtc-audio-processing-1.pc
+%{_includedir}/webrtc-audio-processing-1/
 
 %changelog
+* Fri Feb 09 2024 Gwyn Ciesla <gwync@protonmail.com> - 1.3-1
+- 1.3
+
 * Sat Jan 27 2024 Fedora Release Engineering <releng@fedoraproject.org> - 0.3.1-12
 - Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild
diff --git a/webrtc-fix-typedefs-on-other-arches.patch b/webrtc-fix-typedefs-on-other-arches.patch
deleted file mode 100644
index 81e5ae5..0000000
--- a/webrtc-fix-typedefs-on-other-arches.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-diff -up webrtc-audio-processing-0.2/webrtc/typedefs.h.typedef webrtc-audio-processing-0.2/webrtc/typedefs.h
---- webrtc-audio-processing-0.2/webrtc/typedefs.h.typedef 2016-05-12 09:08:53.885000410 -0500
-+++ webrtc-audio-processing-0.2/webrtc/typedefs.h 2016-05-12 09:12:38.006851953 -0500
-@@ -48,7 +48,19 @@
- #define WEBRTC_ARCH_32_BITS
- #define WEBRTC_ARCH_LITTLE_ENDIAN
- #else
--#error Please add support for your architecture in typedefs.h
-+/* instead of failing, use typical unix defines... */
-+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
-+#define WEBRTC_ARCH_LITTLE_ENDIAN
-+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
-+#define WEBRTC_ARCH_BIG_ENDIAN
-+#else
-+#error __BYTE_ORDER__ is not defined
-+#endif
-+#if defined(__LP64__)
-+#define WEBRTC_ARCH_64_BITS
-+#else
-+#define WEBRTC_ARCH_32_BITS
-+#endif
- #endif
- 
- #if !(defined(WEBRTC_ARCH_LITTLE_ENDIAN) ^ defined(WEBRTC_ARCH_BIG_ENDIAN))
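One last aside on the sample conversion itself: the removed 0.2-era patch swapped each sample with a shift/or expression, while the code added by 65f002e.patch goes through bswap_16. On the unsigned representation the two forms agree for every 16-bit value, which the sketch below (not package code; glibc <byteswap.h> assumed) checks exhaustively. Note that applying the shifts directly to signed int16_t, as the old patch did, lets the right shift sign-extend, so the unsigned form used here, or bswap_16 itself, is the safer formulation.

```cpp
// Sketch: confirm that the shift-based 16-bit swap matches glibc's bswap_16
// when performed on the unsigned representation of a sample.
#include <byteswap.h>
#include <cassert>
#include <cstdint>
#include <cstdio>

static uint16_t ShiftSwap(uint16_t x) {
  // Same shift/or idea as the old patch, done on uint16_t so the right shift
  // cannot sign-extend.
  return static_cast<uint16_t>((x << 8) | (x >> 8));
}

int main() {
  for (uint32_t v = 0; v <= 0xFFFF; ++v) {
    const uint16_t x = static_cast<uint16_t>(v);
    assert(ShiftSwap(x) == bswap_16(x));
  }
  std::puts("shift swap and bswap_16 agree for all 65536 values");
  return 0;
}
```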