Compare commits


No commits in common. "c8" and "c9-beta" have entirely different histories.
c8 ... c9-beta

13 changed files with 74 additions and 1067 deletions

.gitignore vendored
View File

@@ -1 +1 @@
SOURCES/libwebp-1.0.0.tar.gz
SOURCES/libwebp-1.2.0.tar.gz

View File

@@ -1 +1 @@
49ddec20de631f7a26587801b474f129e953f444 SOURCES/libwebp-1.0.0.tar.gz
350503d8ffea6cb1cb3ac1eaa9bc371458de10ae SOURCES/libwebp-1.2.0.tar.gz

View File

@@ -1,572 +0,0 @@
diff -up libwebp-1.0.0/src/dec/vp8l_dec.c.old libwebp-1.0.0/src/dec/vp8l_dec.c
--- libwebp-1.0.0/src/dec/vp8l_dec.c.old 2023-09-15 11:07:05.229790896 +0200
+++ libwebp-1.0.0/src/dec/vp8l_dec.c 2023-09-15 10:54:47.046025885 +0200
@@ -253,11 +253,11 @@ static int ReadHuffmanCodeLengths(
int symbol;
int max_symbol;
int prev_code_len = DEFAULT_CODE_LENGTH;
- HuffmanCode table[1 << LENGTHS_TABLE_BITS];
+ HuffmanTables tables;
- if (!VP8LBuildHuffmanTable(table, LENGTHS_TABLE_BITS,
- code_length_code_lengths,
- NUM_CODE_LENGTH_CODES)) {
+ if (!VP8LHuffmanTablesAllocate(1 << LENGTHS_TABLE_BITS, &tables) ||
+ !VP8LBuildHuffmanTable(&tables, LENGTHS_TABLE_BITS,
+ code_length_code_lengths, NUM_CODE_LENGTH_CODES)) {
goto End;
}
@@ -277,7 +277,7 @@ static int ReadHuffmanCodeLengths(
int code_len;
if (max_symbol-- == 0) break;
VP8LFillBitWindow(br);
- p = &table[VP8LPrefetchBits(br) & LENGTHS_TABLE_MASK];
+ p = &tables.curr_segment->start[VP8LPrefetchBits(br) & LENGTHS_TABLE_MASK];
VP8LSetBitPos(br, br->bit_pos_ + p->bits);
code_len = p->value;
if (code_len < kCodeLengthLiterals) {
@@ -300,6 +300,7 @@ static int ReadHuffmanCodeLengths(
ok = 1;
End:
+ VP8LHuffmanTablesDeallocate(&tables);
if (!ok) dec->status_ = VP8_STATUS_BITSTREAM_ERROR;
return ok;
}
@@ -307,7 +308,8 @@ static int ReadHuffmanCodeLengths(
// 'code_lengths' is pre-allocated temporary buffer, used for creating Huffman
// tree.
static int ReadHuffmanCode(int alphabet_size, VP8LDecoder* const dec,
- int* const code_lengths, HuffmanCode* const table) {
+ int* const code_lengths,
+ HuffmanTables* const table) {
int ok = 0;
int size = 0;
VP8LBitReader* const br = &dec->br_;
@@ -362,12 +364,7 @@ static int ReadHuffmanCodes(VP8LDecoder*
VP8LMetadata* const hdr = &dec->hdr_;
uint32_t* huffman_image = NULL;
HTreeGroup* htree_groups = NULL;
- // When reading htrees, some might be unused, as the format allows it.
- // We will still read them but put them in this htree_group_bogus.
- HTreeGroup htree_group_bogus;
- HuffmanCode* huffman_tables = NULL;
- HuffmanCode* huffman_tables_bogus = NULL;
- HuffmanCode* next = NULL;
+ HuffmanTables* huffman_tables = &hdr->huffman_tables_;
int num_htree_groups = 1;
int num_htree_groups_max = 1;
int max_alphabet_size = 0;
@@ -376,6 +373,10 @@ static int ReadHuffmanCodes(VP8LDecoder*
int* mapping = NULL;
int ok = 0;
+ // Check the table has been 0 initialized (through InitMetadata).
+ assert(huffman_tables->root.start == NULL);
+ assert(huffman_tables->curr_segment == NULL);
+
if (allow_recursion && VP8LReadBits(br, 1)) {
// use meta Huffman codes.
const int huffman_precision = VP8LReadBits(br, 3) + 2;
@@ -418,12 +419,6 @@ static int ReadHuffmanCodes(VP8LDecoder*
if (*mapped_group == -1) *mapped_group = num_htree_groups++;
huffman_image[i] = *mapped_group;
}
- huffman_tables_bogus = (HuffmanCode*)WebPSafeMalloc(
- table_size, sizeof(*huffman_tables_bogus));
- if (huffman_tables_bogus == NULL) {
- dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
- goto Error;
- }
} else {
num_htree_groups = num_htree_groups_max;
}
@@ -444,72 +439,80 @@ static int ReadHuffmanCodes(VP8LDecoder*
code_lengths = (int*)WebPSafeCalloc((uint64_t)max_alphabet_size,
sizeof(*code_lengths));
- huffman_tables = (HuffmanCode*)WebPSafeMalloc(num_htree_groups * table_size,
- sizeof(*huffman_tables));
htree_groups = VP8LHtreeGroupsNew(num_htree_groups);
- if (htree_groups == NULL || code_lengths == NULL || huffman_tables == NULL) {
+ if (htree_groups == NULL || code_lengths == NULL ||
+ !VP8LHuffmanTablesAllocate(num_htree_groups * table_size,
+ huffman_tables)) {
dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
goto Error;
}
- next = huffman_tables;
for (i = 0; i < num_htree_groups_max; ++i) {
- // If the index "i" is unused in the Huffman image, read the coefficients
- // but store them to a bogus htree_group.
- const int is_bogus = (mapping != NULL && mapping[i] == -1);
- HTreeGroup* const htree_group =
- is_bogus ? &htree_group_bogus :
- &htree_groups[(mapping == NULL) ? i : mapping[i]];
- HuffmanCode** const htrees = htree_group->htrees;
- HuffmanCode* huffman_tables_i = is_bogus ? huffman_tables_bogus : next;
- int size;
- int total_size = 0;
- int is_trivial_literal = 1;
- int max_bits = 0;
- for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
- int alphabet_size = kAlphabetSize[j];
- htrees[j] = huffman_tables_i;
- if (j == 0 && color_cache_bits > 0) {
- alphabet_size += 1 << color_cache_bits;
- }
- size =
- ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_tables_i);
- if (size == 0) {
- goto Error;
- }
- if (is_trivial_literal && kLiteralMap[j] == 1) {
- is_trivial_literal = (huffman_tables_i->bits == 0);
+ // If the index "i" is unused in the Huffman image, just make sure the
+ // coefficients are valid but do not store them.
+ if (mapping != NULL && mapping[i] == -1) {
+ for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
+ int alphabet_size = kAlphabetSize[j];
+ if (j == 0 && color_cache_bits > 0) {
+ alphabet_size += (1 << color_cache_bits);
+ }
+ // Passing in NULL so that nothing gets filled.
+ if (!ReadHuffmanCode(alphabet_size, dec, code_lengths, NULL)) {
+ goto Error;
+ }
}
- total_size += huffman_tables_i->bits;
- huffman_tables_i += size;
- if (j <= ALPHA) {
- int local_max_bits = code_lengths[0];
- int k;
- for (k = 1; k < alphabet_size; ++k) {
- if (code_lengths[k] > local_max_bits) {
- local_max_bits = code_lengths[k];
+ } else {
+ HTreeGroup* const htree_group =
+ &htree_groups[(mapping == NULL) ? i : mapping[i]];
+ HuffmanCode** const htrees = htree_group->htrees;
+ int size;
+ int total_size = 0;
+ int is_trivial_literal = 1;
+ int max_bits = 0;
+ for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
+ int alphabet_size = kAlphabetSize[j];
+ if (j == 0 && color_cache_bits > 0) {
+ alphabet_size += (1 << color_cache_bits);
+ }
+ size =
+ ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_tables);
+ htrees[j] = huffman_tables->curr_segment->curr_table;
+ if (size == 0) {
+ goto Error;
+ }
+ if (is_trivial_literal && kLiteralMap[j] == 1) {
+ is_trivial_literal = (htrees[j]->bits == 0);
+ }
+ total_size += htrees[j]->bits;
+ huffman_tables->curr_segment->curr_table += size;
+ if (j <= ALPHA) {
+ int local_max_bits = code_lengths[0];
+ int k;
+ for (k = 1; k < alphabet_size; ++k) {
+ if (code_lengths[k] > local_max_bits) {
+ local_max_bits = code_lengths[k];
+ }
}
+ max_bits += local_max_bits;
}
- max_bits += local_max_bits;
}
- }
- if (!is_bogus) next = huffman_tables_i;
- htree_group->is_trivial_literal = is_trivial_literal;
- htree_group->is_trivial_code = 0;
- if (is_trivial_literal) {
- const int red = htrees[RED][0].value;
- const int blue = htrees[BLUE][0].value;
- const int alpha = htrees[ALPHA][0].value;
- htree_group->literal_arb = ((uint32_t)alpha << 24) | (red << 16) | blue;
- if (total_size == 0 && htrees[GREEN][0].value < NUM_LITERAL_CODES) {
- htree_group->is_trivial_code = 1;
- htree_group->literal_arb |= htrees[GREEN][0].value << 8;
+ htree_group->is_trivial_literal = is_trivial_literal;
+ htree_group->is_trivial_code = 0;
+ if (is_trivial_literal) {
+ const int red = htrees[RED][0].value;
+ const int blue = htrees[BLUE][0].value;
+ const int alpha = htrees[ALPHA][0].value;
+ htree_group->literal_arb = ((uint32_t)alpha << 24) | (red << 16) | blue;
+ if (total_size == 0 && htrees[GREEN][0].value < NUM_LITERAL_CODES) {
+ htree_group->is_trivial_code = 1;
+ htree_group->literal_arb |= htrees[GREEN][0].value << 8;
+ }
}
+ htree_group->use_packed_table =
+ !htree_group->is_trivial_code && (max_bits < HUFFMAN_PACKED_BITS);
+ if (htree_group->use_packed_table) BuildPackedTable(htree_group);
}
- htree_group->use_packed_table =
- !htree_group->is_trivial_code && (max_bits < HUFFMAN_PACKED_BITS);
- if (htree_group->use_packed_table) BuildPackedTable(htree_group);
}
ok = 1;
@@ -517,15 +520,13 @@ static int ReadHuffmanCodes(VP8LDecoder*
hdr->huffman_image_ = huffman_image;
hdr->num_htree_groups_ = num_htree_groups;
hdr->htree_groups_ = htree_groups;
- hdr->huffman_tables_ = huffman_tables;
Error:
WebPSafeFree(code_lengths);
- WebPSafeFree(huffman_tables_bogus);
WebPSafeFree(mapping);
if (!ok) {
WebPSafeFree(huffman_image);
- WebPSafeFree(huffman_tables);
+ VP8LHuffmanTablesDeallocate(huffman_tables);
VP8LHtreeGroupsFree(htree_groups);
}
return ok;
@@ -757,11 +758,11 @@ static WEBP_INLINE HTreeGroup* GetHtreeG
typedef void (*ProcessRowsFunc)(VP8LDecoder* const dec, int row);
-static void ApplyInverseTransforms(VP8LDecoder* const dec, int num_rows,
+static void ApplyInverseTransforms(VP8LDecoder* const dec,
+ int start_row, int num_rows,
const uint32_t* const rows) {
int n = dec->next_transform_;
const int cache_pixs = dec->width_ * num_rows;
- const int start_row = dec->last_row_;
const int end_row = start_row + num_rows;
const uint32_t* rows_in = rows;
uint32_t* const rows_out = dec->argb_cache_;
@@ -792,8 +793,7 @@ static void ProcessRows(VP8LDecoder* con
VP8Io* const io = dec->io_;
uint8_t* rows_data = (uint8_t*)dec->argb_cache_;
const int in_stride = io->width * sizeof(uint32_t); // in unit of RGBA
-
- ApplyInverseTransforms(dec, num_rows, rows);
+ ApplyInverseTransforms(dec, dec->last_row_, num_rows, rows);
if (!SetCropWindow(io, dec->last_row_, row, &rows_data, in_stride)) {
// Nothing to output (this time).
} else {
@@ -951,7 +951,6 @@ static WEBP_INLINE void CopyBlock8b(uint
break;
default:
goto Copy;
- break;
}
CopySmallPattern8b(src, dst, length, pattern);
return;
@@ -1196,6 +1195,7 @@ static int DecodeImageData(VP8LDecoder*
VP8LFillBitWindow(br);
dist_code = GetCopyDistance(dist_symbol, br);
dist = PlaneCodeToDistance(width, dist_code);
+
if (VP8LIsEndOfStream(br)) break;
if (src - data < (ptrdiff_t)dist || src_end - src < (ptrdiff_t)length) {
goto Error;
@@ -1357,7 +1357,7 @@ static void ClearMetadata(VP8LMetadata*
assert(hdr != NULL);
WebPSafeFree(hdr->huffman_image_);
- WebPSafeFree(hdr->huffman_tables_);
+ VP8LHuffmanTablesDeallocate(&hdr->huffman_tables_);
VP8LHtreeGroupsFree(hdr->htree_groups_);
VP8LColorCacheClear(&hdr->color_cache_);
VP8LColorCacheClear(&hdr->saved_color_cache_);
@@ -1556,7 +1556,7 @@ static void ExtractAlphaRows(VP8LDecoder
const int cache_pixs = width * num_rows_to_process;
uint8_t* const dst = output + width * cur_row;
const uint32_t* const src = dec->argb_cache_;
- ApplyInverseTransforms(dec, num_rows_to_process, in);
+ ApplyInverseTransforms(dec, cur_row, num_rows_to_process, in);
WebPExtractGreen(src, dst, cache_pixs);
AlphaApplyFilter(alph_dec,
cur_row, cur_row + num_rows_to_process, dst, width);
@@ -1673,7 +1673,7 @@ int VP8LDecodeImage(VP8LDecoder* const d
// Sanity checks.
if (dec == NULL) return 0;
- assert(dec->hdr_.huffman_tables_ != NULL);
+ assert(dec->hdr_.huffman_tables_.root.start != NULL);
assert(dec->hdr_.htree_groups_ != NULL);
assert(dec->hdr_.num_htree_groups_ > 0);
diff -up libwebp-1.0.0/src/dec/vp8li_dec.h.old libwebp-1.0.0/src/dec/vp8li_dec.h
--- libwebp-1.0.0/src/dec/vp8li_dec.h.old 2023-09-15 11:07:13.032063220 +0200
+++ libwebp-1.0.0/src/dec/vp8li_dec.h 2023-09-15 10:54:47.046025885 +0200
@@ -37,7 +37,7 @@ struct VP8LTransform {
int bits_; // subsampling bits defining transform window.
int xsize_; // transform window X index.
int ysize_; // transform window Y index.
- uint32_t *data_; // transform data.
+ uint32_t* data_; // transform data.
};
typedef struct {
@@ -48,23 +48,23 @@ typedef struct {
int huffman_mask_;
int huffman_subsample_bits_;
int huffman_xsize_;
- uint32_t *huffman_image_;
+ uint32_t* huffman_image_;
int num_htree_groups_;
- HTreeGroup *htree_groups_;
- HuffmanCode *huffman_tables_;
+ HTreeGroup* htree_groups_;
+ HuffmanTables huffman_tables_;
} VP8LMetadata;
typedef struct VP8LDecoder VP8LDecoder;
struct VP8LDecoder {
VP8StatusCode status_;
VP8LDecodeState state_;
- VP8Io *io_;
+ VP8Io* io_;
- const WebPDecBuffer *output_; // shortcut to io->opaque->output
+ const WebPDecBuffer* output_; // shortcut to io->opaque->output
- uint32_t *pixels_; // Internal data: either uint8_t* for alpha
+ uint32_t* pixels_; // Internal data: either uint8_t* for alpha
// or uint32_t* for BGRA.
- uint32_t *argb_cache_; // Scratch buffer for temporary BGRA storage.
+ uint32_t* argb_cache_; // Scratch buffer for temporary BGRA storage.
VP8LBitReader br_;
int incremental_; // if true, incremental decoding is expected
@@ -86,8 +86,8 @@ struct VP8LDecoder {
// or'd bitset storing the transforms types.
uint32_t transforms_seen_;
- uint8_t *rescaler_memory; // Working memory for rescaling work.
- WebPRescaler *rescaler; // Common rescaler for all channels.
+ uint8_t* rescaler_memory; // Working memory for rescaling work.
+ WebPRescaler* rescaler; // Common rescaler for all channels.
};
//------------------------------------------------------------------------------
@@ -132,4 +132,4 @@ void VP8LDelete(VP8LDecoder* const dec);
} // extern "C"
#endif
-#endif /* WEBP_DEC_VP8LI_DEC_H_ */
+#endif // WEBP_DEC_VP8LI_DEC_H_
diff -up libwebp-1.0.0/src/utils/huffman_utils.c.old libwebp-1.0.0/src/utils/huffman_utils.c
--- libwebp-1.0.0/src/utils/huffman_utils.c.old 2018-04-21 05:04:55.000000000 +0200
+++ libwebp-1.0.0/src/utils/huffman_utils.c 2023-09-15 10:54:47.047025920 +0200
@@ -91,7 +91,8 @@ static int BuildHuffmanTable(HuffmanCode
assert(code_lengths_size != 0);
assert(code_lengths != NULL);
- assert(root_table != NULL);
+ assert((root_table != NULL && sorted != NULL) ||
+ (root_table == NULL && sorted == NULL));
assert(root_bits > 0);
// Build histogram of code lengths.
@@ -120,16 +121,22 @@ static int BuildHuffmanTable(HuffmanCode
for (symbol = 0; symbol < code_lengths_size; ++symbol) {
const int symbol_code_length = code_lengths[symbol];
if (code_lengths[symbol] > 0) {
- sorted[offset[symbol_code_length]++] = symbol;
+ if (sorted != NULL) {
+ sorted[offset[symbol_code_length]++] = symbol;
+ } else {
+ offset[symbol_code_length]++;
+ }
}
}
// Special case code with only one value.
if (offset[MAX_ALLOWED_CODE_LENGTH] == 1) {
- HuffmanCode code;
- code.bits = 0;
- code.value = (uint16_t)sorted[0];
- ReplicateValue(table, 1, total_size, code);
+ if (sorted != NULL) {
+ HuffmanCode code;
+ code.bits = 0;
+ code.value = (uint16_t)sorted[0];
+ ReplicateValue(table, 1, total_size, code);
+ }
return total_size;
}
@@ -151,6 +158,7 @@ static int BuildHuffmanTable(HuffmanCode
if (num_open < 0) {
return 0;
}
+ if (root_table == NULL) continue;
for (; count[len] > 0; --count[len]) {
HuffmanCode code;
code.bits = (uint8_t)len;
@@ -172,17 +180,21 @@ static int BuildHuffmanTable(HuffmanCode
for (; count[len] > 0; --count[len]) {
HuffmanCode code;
if ((key & mask) != low) {
- table += table_size;
+ if (root_table != NULL) table += table_size;
table_bits = NextTableBitSize(count, len, root_bits);
table_size = 1 << table_bits;
total_size += table_size;
low = key & mask;
- root_table[low].bits = (uint8_t)(table_bits + root_bits);
- root_table[low].value = (uint16_t)((table - root_table) - low);
+ if (root_table != NULL) {
+ root_table[low].bits = (uint8_t)(table_bits + root_bits);
+ root_table[low].value = (uint16_t)((table - root_table) - low);
+ }
+ }
+ if (root_table != NULL) {
+ code.bits = (uint8_t)(len - root_bits);
+ code.value = (uint16_t)sorted[symbol++];
+ ReplicateValue(&table[key >> root_bits], step, table_size, code);
}
- code.bits = (uint8_t)(len - root_bits);
- code.value = (uint16_t)sorted[symbol++];
- ReplicateValue(&table[key >> root_bits], step, table_size, code);
key = GetNextKey(key, len);
}
}
@@ -202,22 +214,83 @@ static int BuildHuffmanTable(HuffmanCode
((1 << MAX_CACHE_BITS) + NUM_LITERAL_CODES + NUM_LENGTH_CODES)
// Cut-off value for switching between heap and stack allocation.
#define SORTED_SIZE_CUTOFF 512
-int VP8LBuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+int VP8LBuildHuffmanTable(HuffmanTables* const root_table, int root_bits,
const int code_lengths[], int code_lengths_size) {
- int total_size;
+ const int total_size =
+ BuildHuffmanTable(NULL, root_bits, code_lengths, code_lengths_size, NULL);
assert(code_lengths_size <= MAX_CODE_LENGTHS_SIZE);
+ if (total_size == 0 || root_table == NULL) return total_size;
+
+ if (root_table->curr_segment->curr_table + total_size >=
+ root_table->curr_segment->start + root_table->curr_segment->size) {
+ // If 'root_table' does not have enough memory, allocate a new segment.
+ // The available part of root_table->curr_segment is left unused because we
+ // need a contiguous buffer.
+ const int segment_size = root_table->curr_segment->size;
+ struct HuffmanTablesSegment* next =
+ (HuffmanTablesSegment*)WebPSafeMalloc(1, sizeof(*next));
+ if (next == NULL) return 0;
+ // Fill the new segment.
+ // We need at least 'total_size' but if that value is small, it is better to
+ // allocate a big chunk to prevent more allocations later. 'segment_size' is
+ // therefore chosen (any other arbitrary value could be chosen).
+ next->size = total_size > segment_size ? total_size : segment_size;
+ next->start =
+ (HuffmanCode*)WebPSafeMalloc(next->size, sizeof(*next->start));
+ if (next->start == NULL) {
+ WebPSafeFree(next);
+ return 0;
+ }
+ next->curr_table = next->start;
+ next->next = NULL;
+ // Point to the new segment.
+ root_table->curr_segment->next = next;
+ root_table->curr_segment = next;
+ }
if (code_lengths_size <= SORTED_SIZE_CUTOFF) {
// use local stack-allocated array.
uint16_t sorted[SORTED_SIZE_CUTOFF];
- total_size = BuildHuffmanTable(root_table, root_bits,
- code_lengths, code_lengths_size, sorted);
- } else { // rare case. Use heap allocation.
+ BuildHuffmanTable(root_table->curr_segment->curr_table, root_bits,
+ code_lengths, code_lengths_size, sorted);
+ } else { // rare case. Use heap allocation.
uint16_t* const sorted =
(uint16_t*)WebPSafeMalloc(code_lengths_size, sizeof(*sorted));
if (sorted == NULL) return 0;
- total_size = BuildHuffmanTable(root_table, root_bits,
- code_lengths, code_lengths_size, sorted);
+ BuildHuffmanTable(root_table->curr_segment->curr_table, root_bits,
+ code_lengths, code_lengths_size, sorted);
WebPSafeFree(sorted);
}
return total_size;
}
+
+int VP8LHuffmanTablesAllocate(int size, HuffmanTables* huffman_tables) {
+ // Have 'segment' point to the first segment for now, 'root'.
+ HuffmanTablesSegment* const root = &huffman_tables->root;
+ huffman_tables->curr_segment = root;
+ // Allocate root.
+ root->start = (HuffmanCode*)WebPSafeMalloc(size, sizeof(*root->start));
+ if (root->start == NULL) return 0;
+ root->curr_table = root->start;
+ root->next = NULL;
+ root->size = size;
+ return 1;
+}
+
+void VP8LHuffmanTablesDeallocate(HuffmanTables* const huffman_tables) {
+ HuffmanTablesSegment *current, *next;
+ if (huffman_tables == NULL) return;
+ // Free the root node.
+ current = &huffman_tables->root;
+ next = current->next;
+ WebPSafeFree(current->start);
+ current->start = NULL;
+ current->next = NULL;
+ current = next;
+ // Free the following nodes.
+ while (current != NULL) {
+ next = current->next;
+ WebPSafeFree(current->start);
+ WebPSafeFree(current);
+ current = next;
+ }
+}
diff -up libwebp-1.0.0/src/utils/huffman_utils.h.old libwebp-1.0.0/src/utils/huffman_utils.h
--- libwebp-1.0.0/src/utils/huffman_utils.h.old 2018-04-21 05:04:55.000000000 +0200
+++ libwebp-1.0.0/src/utils/huffman_utils.h 2023-09-15 10:54:47.047025920 +0200
@@ -43,6 +43,29 @@ typedef struct {
// or non-literal symbol otherwise
} HuffmanCode32;
+// Contiguous memory segment of HuffmanCodes.
+typedef struct HuffmanTablesSegment {
+ HuffmanCode* start;
+ // Pointer to where we are writing into the segment. Starts at 'start' and
+ // cannot go beyond 'start' + 'size'.
+ HuffmanCode* curr_table;
+ // Pointer to the next segment in the chain.
+ struct HuffmanTablesSegment* next;
+ int size;
+} HuffmanTablesSegment;
+
+// Chained memory segments of HuffmanCodes.
+typedef struct HuffmanTables {
+ HuffmanTablesSegment root;
+ // Currently processed segment. At first, this is 'root'.
+ HuffmanTablesSegment* curr_segment;
+} HuffmanTables;
+
+// Allocates a HuffmanTables with 'size' contiguous HuffmanCodes. Returns 0 on
+// memory allocation error, 1 otherwise.
+int VP8LHuffmanTablesAllocate(int size, HuffmanTables* huffman_tables);
+void VP8LHuffmanTablesDeallocate(HuffmanTables* const huffman_tables);
+
#define HUFFMAN_PACKED_BITS 6
#define HUFFMAN_PACKED_TABLE_SIZE (1u << HUFFMAN_PACKED_BITS)
@@ -78,7 +101,7 @@ void VP8LHtreeGroupsFree(HTreeGroup* con
// the huffman table.
// Returns built table size or 0 in case of error (invalid tree or
// memory error).
-int VP8LBuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
+int VP8LBuildHuffmanTable(HuffmanTables* const root_table, int root_bits,
const int code_lengths[], int code_lengths_size);
#ifdef __cplusplus
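
The hunks above are the backport of the chained HuffmanTables fix (the CVE-2023-4863 change referenced in the spec changelog): the single flat HuffmanCode buffer is replaced by segments that grow on demand. For orientation, here is a minimal hypothetical caller sketch of the allocate/build/free lifecycle declared in huffman_utils.h; BuildOneTable, the root_bits argument and the initial segment size are illustrative assumptions, not part of the patch, and the include path assumes compilation inside the libwebp source tree.

/* Hypothetical sketch, not part of the patch: allocate -> build -> free for
 * the segmented Huffman tables introduced above. */
#include "src/utils/huffman_utils.h"

/* Builds one table from 'code_lengths' into a temporary HuffmanTables chain.
 * Returns the size reported by VP8LBuildHuffmanTable, or 0 on error. */
static int BuildOneTable(const int code_lengths[], int code_lengths_size,
                         int root_bits) {
  HuffmanTables tables;
  int size;
  /* Reserve an initial contiguous segment; VP8LBuildHuffmanTable chains a new
   * segment automatically if this one turns out to be too small. */
  if (!VP8LHuffmanTablesAllocate(1 << root_bits, &tables)) return 0;
  size = VP8LBuildHuffmanTable(&tables, root_bits,
                               code_lengths, code_lengths_size);
  /* ... a decoder would now read symbols via tables.curr_segment->start ... */
  VP8LHuffmanTablesDeallocate(&tables);  /* frees every segment in the chain */
  return size;
}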

View File

@@ -0,0 +1,26 @@
diff -rupN --no-dereference libwebp-1.2.0/CMakeLists.txt libwebp-1.2.0-new/CMakeLists.txt
--- libwebp-1.2.0/CMakeLists.txt 2021-01-21 04:43:45.000000000 +0100
+++ libwebp-1.2.0-new/CMakeLists.txt 2021-02-01 11:37:20.760764115 +0100
@@ -506,8 +506,8 @@ endif()
if(WEBP_BUILD_VWEBP)
# vwebp
- find_package(GLUT)
- if(GLUT_FOUND)
+ find_package(FreeGLUT)
+ if(FreeGLUT_FOUND)
include_directories(${WEBP_DEP_IMG_INCLUDE_DIRS})
parse_makefile_am(${CMAKE_CURRENT_SOURCE_DIR}/examples "VWEBP_SRCS" "vwebp")
add_executable(vwebp ${VWEBP_SRCS})
diff -rupN --no-dereference libwebp-1.2.0/examples/vwebp.c libwebp-1.2.0-new/examples/vwebp.c
--- libwebp-1.2.0/examples/vwebp.c 2021-01-21 04:43:45.000000000 +0100
+++ libwebp-1.2.0-new/examples/vwebp.c 2021-02-01 11:37:20.760764115 +0100
@@ -27,7 +27,7 @@
#if defined(HAVE_GLUT_GLUT_H)
#include <GLUT/glut.h>
#else
-#include <GL/glut.h>
+#include <GL/freeglut.h>
#ifdef FREEGLUT
#include <GL/freeglut.h>
#endif

View File

@@ -1,68 +0,0 @@
# HG changeset patch
# User Timothy Nikkel <tnikkel@gmail.com>
# Date 1678835815 0
# Node ID 53b805c752ff23080e100eda2b3b4280d4370b2e
# Parent 8fcdaf8d685d5903b127e041feb1716637b6008f
Bug 1819244. Cherry pick webp commit fix. r=aosmond, a=dsmith
https://github.com/webmproject/libwebp/commit/a486d800b60d0af4cc0836bf7ed8f21e12974129
Differential Revision: https://phabricator.services.mozilla.com/D171814
diff --git a/media/libwebp/src/enc/alpha_enc.c b/media/libwebp/src/enc/alpha_enc.c
--- a/src/enc/alpha_enc.c
+++ b/src/enc/alpha_enc.c
@@ -8,16 +8,17 @@
// -----------------------------------------------------------------------------
//
// Alpha-plane compression.
//
// Author: Skal (pascal.massimino@gmail.com)
#include <assert.h>
#include <stdlib.h>
+#include <string.h>
#include "src/enc/vp8i_enc.h"
#include "src/dsp/dsp.h"
#include "src/utils/filters_utils.h"
#include "src/utils/quant_levels_utils.h"
#include "src/utils/utils.h"
#include "src/webp/format_constants.h"
@@ -143,31 +144,32 @@ static int EncodeAlphaInternal(const uin
output_size = VP8LBitWriterNumBytes(&tmp_bw);
if (output_size > data_size) {
// compressed size is larger than source! Revert to uncompressed mode.
method = ALPHA_NO_COMPRESSION;
VP8LBitWriterWipeOut(&tmp_bw);
}
} else {
VP8LBitWriterWipeOut(&tmp_bw);
+ memset(&result->bw, 0, sizeof(result->bw));
return 0;
}
}
if (method == ALPHA_NO_COMPRESSION) {
output = alpha_src;
output_size = data_size;
ok = 1;
}
// Emit final result.
header = method | (filter << 2);
if (reduce_levels) header |= ALPHA_PREPROCESSED_LEVELS << 4;
- VP8BitWriterInit(&result->bw, ALPHA_HEADER_LEN + output_size);
+ if (!VP8BitWriterInit(&result->bw, ALPHA_HEADER_LEN + output_size)) ok = 0;
ok = ok && VP8BitWriterAppend(&result->bw, &header, ALPHA_HEADER_LEN);
ok = ok && VP8BitWriterAppend(&result->bw, output, output_size);
if (method != ALPHA_NO_COMPRESSION) {
VP8LBitWriterWipeOut(&tmp_bw);
}
ok = ok && !result->bw.error_;
result->score = VP8BitWriterSize(&result->bw);

View File

@@ -1,15 +0,0 @@
diff --git a/src/dec/buffer_dec.c b/src/dec/buffer_dec.c
index 75eb3c4..3cd94eb 100644
--- a/src/dec/buffer_dec.c
+++ b/src/dec/buffer_dec.c
@@ -74,7 +74,8 @@
} else { // RGB checks
const WebPRGBABuffer* const buf = &buffer->u.RGBA;
const int stride = abs(buf->stride);
- const uint64_t size = MIN_BUFFER_SIZE(width, height, stride);
+ const uint64_t size =
+ MIN_BUFFER_SIZE(width * kModeBpp[mode], height, stride);
ok &= (size <= buf->size);
ok &= (stride >= width * kModeBpp[mode]);
ok &= (buf->rgba != NULL);

View File

@@ -1,19 +0,0 @@
diff --git a/src/dec/idec_dec.c b/src/dec/idec_dec.c
index a371ed7..258d15b 100644
--- a/src/dec/idec_dec.c
+++ b/src/dec/idec_dec.c
@@ -283,10 +283,8 @@
static VP8StatusCode IDecError(WebPIDecoder* const idec, VP8StatusCode error) {
if (idec->state_ == STATE_VP8_DATA) {
- VP8Io* const io = &idec->io_;
- if (io->teardown != NULL) {
- io->teardown(io);
- }
+ // Synchronize the thread, clean-up and check for errors.
+ VP8ExitCritical((VP8Decoder*)idec->dec_, &idec->io_);
}
idec->state_ = STATE_ERROR;
return error;

View File

@@ -1,42 +0,0 @@
diff --git a/src/mux/muxread.c b/src/mux/muxread.c
index 0b55286..eb5070b 100644
--- a/src/mux/muxread.c
+++ b/src/mux/muxread.c
@@ -187,7 +187,7 @@
size = bitstream->size;
if (data == NULL) return NULL;
- if (size < RIFF_HEADER_SIZE) return NULL;
+ if (size < RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE) return NULL;
if (GetLE32(data + 0) != MKFOURCC('R', 'I', 'F', 'F') ||
GetLE32(data + CHUNK_HEADER_SIZE) != MKFOURCC('W', 'E', 'B', 'P')) {
return NULL;
@@ -196,8 +196,6 @@
mux = WebPMuxNew();
if (mux == NULL) return NULL;
- if (size < RIFF_HEADER_SIZE + TAG_SIZE) goto Err;
-
tag = GetLE32(data + RIFF_HEADER_SIZE);
if (tag != kChunks[IDX_VP8].tag &&
tag != kChunks[IDX_VP8L].tag &&
@@ -206,12 +204,11 @@
}
riff_size = SizeWithPadding(GetLE32(data + TAG_SIZE));
- if (riff_size > MAX_CHUNK_PAYLOAD || riff_size > size) {
- goto Err;
- } else {
- if (riff_size < size) { // Redundant data after last chunk.
- size = riff_size; // To make sure we don't read any data beyond mux_size.
- }
+ if (riff_size < CHUNK_HEADER_SIZE) goto Err;
+ if (riff_size > MAX_CHUNK_PAYLOAD || riff_size > size) goto Err;
+ // There's no point in reading past the end of the RIFF chunk.
+ if (size > riff_size + CHUNK_HEADER_SIZE) {
+ size = riff_size + CHUNK_HEADER_SIZE;
}
end = data + size;

View File

@@ -1,61 +0,0 @@
diff --git a/src/mux/muxi.h b/src/mux/muxi.h
index 6b57eea..14fd6e2 100644
--- a/src/mux/muxi.h
+++ b/src/mux/muxi.h
@@ -14,6 +14,7 @@
#ifndef WEBP_MUX_MUXI_H_
#define WEBP_MUX_MUXI_H_
+#include <assert.h>
#include <stdlib.h>
#include "src/dec/vp8i_dec.h"
#include "src/dec/vp8li_dec.h"
@@ -143,13 +144,13 @@
// Returns size of the chunk including chunk header and padding byte (if any).
static WEBP_INLINE size_t SizeWithPadding(size_t chunk_size) {
+ assert(chunk_size <= MAX_CHUNK_PAYLOAD);
return CHUNK_HEADER_SIZE + ((chunk_size + 1) & ~1U);
}
// Size of a chunk including header and padding.
static WEBP_INLINE size_t ChunkDiskSize(const WebPChunk* chunk) {
const size_t data_size = chunk->data_.size;
- assert(data_size < MAX_CHUNK_PAYLOAD);
return SizeWithPadding(data_size);
}
diff --git a/src/mux/muxread.c b/src/mux/muxread.c
index eb5070b..ef50dae 100644
--- a/src/mux/muxread.c
+++ b/src/mux/muxread.c
@@ -59,6 +59,7 @@
// Sanity checks.
if (data_size < CHUNK_HEADER_SIZE) return WEBP_MUX_NOT_ENOUGH_DATA;
chunk_size = GetLE32(data + TAG_SIZE);
+ if (chunk_size > MAX_CHUNK_PAYLOAD) return WEBP_MUX_BAD_DATA;
{
const size_t chunk_disk_size = SizeWithPadding(chunk_size);
@@ -203,9 +204,14 @@
goto Err; // First chunk should be VP8, VP8L or VP8X.
}
- riff_size = SizeWithPadding(GetLE32(data + TAG_SIZE));
+ riff_size = GetLE32(data + TAG_SIZE);
+ if (riff_size > MAX_CHUNK_PAYLOAD) goto Err;
+
+ // Note this padding is historical and differs from demux.c which does not
+ // pad the file size.
+ riff_size = SizeWithPadding(riff_size);
if (riff_size < CHUNK_HEADER_SIZE) goto Err;
- if (riff_size > MAX_CHUNK_PAYLOAD || riff_size > size) goto Err;
+ if (riff_size > size) goto Err;
// There's no point in reading past the end of the RIFF chunk.
if (size > riff_size + CHUNK_HEADER_SIZE) {
size = riff_size + CHUNK_HEADER_SIZE;
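
The muxread.c hunks above tighten RIFF header validation: the payload size is bounded before padding, a minimum size is enforced, and parsing is clamped to the RIFF chunk. Below is a standalone, hypothetical sketch of the resulting checks; the constants and the GetLE32/SizeWithPadding helpers are simplified stand-ins assumed to match libwebp's mux definitions, and CheckRiffSize is an illustrative wrapper, not a libwebp function.

/* Hypothetical standalone sketch of the RIFF size checks introduced above.
 * Constants/helpers are simplified stand-ins for libwebp's mux definitions. */
#include <stddef.h>
#include <stdint.h>

#define TAG_SIZE           4
#define CHUNK_HEADER_SIZE  8   /* 4-byte tag + 4-byte size field */
#define RIFF_HEADER_SIZE   12  /* "RIFF" + size + "WEBP" */
#define MAX_CHUNK_PAYLOAD  (~0U - CHUNK_HEADER_SIZE - 1)

static uint32_t GetLE32(const uint8_t* const data) {
  return (uint32_t)data[0] | ((uint32_t)data[1] << 8) |
         ((uint32_t)data[2] << 16) | ((uint32_t)data[3] << 24);
}

/* Chunk size including chunk header and padding byte (if any). */
static size_t SizeWithPadding(size_t chunk_size) {
  return CHUNK_HEADER_SIZE + ((chunk_size + 1) & ~1U);
}

/* Returns how many bytes of 'data' may be parsed as the RIFF container,
 * or 0 if the header fails the patched checks. */
static size_t CheckRiffSize(const uint8_t* const data, size_t size) {
  size_t riff_size;
  if (size < RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE) return 0;
  riff_size = GetLE32(data + TAG_SIZE);
  if (riff_size > MAX_CHUNK_PAYLOAD) return 0;  /* bound before padding */
  riff_size = SizeWithPadding(riff_size);
  if (riff_size < CHUNK_HEADER_SIZE) return 0;
  if (riff_size > size) return 0;
  /* No point in reading past the end of the RIFF chunk. */
  if (size > riff_size + CHUNK_HEADER_SIZE) size = riff_size + CHUNK_HEADER_SIZE;
  return size;
}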

View File

@@ -1,213 +0,0 @@
diff -up libwebp-1.0.0/src/dec/vp8l_dec.c.old libwebp-1.0.0/src/dec/vp8l_dec.c
--- libwebp-1.0.0/src/dec/vp8l_dec.c.old 2018-04-21 05:04:55.000000000 +0200
+++ libwebp-1.0.0/src/dec/vp8l_dec.c 2018-11-09 00:29:51.000000000 +0100
@@ -362,12 +362,19 @@ static int ReadHuffmanCodes(VP8LDecoder*
VP8LMetadata* const hdr = &dec->hdr_;
uint32_t* huffman_image = NULL;
HTreeGroup* htree_groups = NULL;
+ // When reading htrees, some might be unused, as the format allows it.
+ // We will still read them but put them in this htree_group_bogus.
+ HTreeGroup htree_group_bogus;
HuffmanCode* huffman_tables = NULL;
+ HuffmanCode* huffman_tables_bogus = NULL;
HuffmanCode* next = NULL;
int num_htree_groups = 1;
+ int num_htree_groups_max = 1;
int max_alphabet_size = 0;
int* code_lengths = NULL;
const int table_size = kTableSize[color_cache_bits];
+ int* mapping = NULL;
+ int ok = 0;
if (allow_recursion && VP8LReadBits(br, 1)) {
// use meta Huffman codes.
@@ -384,10 +391,42 @@ static int ReadHuffmanCodes(VP8LDecoder*
// The huffman data is stored in red and green bytes.
const int group = (huffman_image[i] >> 8) & 0xffff;
huffman_image[i] = group;
- if (group >= num_htree_groups) {
- num_htree_groups = group + 1;
+ if (group >= num_htree_groups_max) {
+ num_htree_groups_max = group + 1;
}
}
+ // Check the validity of num_htree_groups_max. If it seems too big, use a
+ // smaller value for later. This will prevent big memory allocations to end
+ // up with a bad bitstream anyway.
+ // The value of 1000 is totally arbitrary. We know that num_htree_groups_max
+ // is smaller than (1 << 16) and should be smaller than the number of pixels
+ // (though the format allows it to be bigger).
+ if (num_htree_groups_max > 1000 || num_htree_groups_max > xsize * ysize) {
+ // Create a mapping from the used indices to the minimal set of used
+ // values [0, num_htree_groups)
+ mapping = (int*)WebPSafeMalloc(num_htree_groups_max, sizeof(*mapping));
+ if (mapping == NULL) {
+ dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
+ goto Error;
+ }
+ // -1 means a value is unmapped, and therefore unused in the Huffman
+ // image.
+ memset(mapping, 0xff, num_htree_groups_max * sizeof(*mapping));
+ for (num_htree_groups = 0, i = 0; i < huffman_pixs; ++i) {
+ // Get the current mapping for the group and remap the Huffman image.
+ int* const mapped_group = &mapping[huffman_image[i]];
+ if (*mapped_group == -1) *mapped_group = num_htree_groups++;
+ huffman_image[i] = *mapped_group;
+ }
+ huffman_tables_bogus = (HuffmanCode*)WebPSafeMalloc(
+ table_size, sizeof(*huffman_tables_bogus));
+ if (huffman_tables_bogus == NULL) {
+ dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
+ goto Error;
+ }
+ } else {
+ num_htree_groups = num_htree_groups_max;
+ }
}
if (br->eos_) goto Error;
@@ -403,11 +442,11 @@ static int ReadHuffmanCodes(VP8LDecoder*
}
}
+ code_lengths = (int*)WebPSafeCalloc((uint64_t)max_alphabet_size,
+ sizeof(*code_lengths));
huffman_tables = (HuffmanCode*)WebPSafeMalloc(num_htree_groups * table_size,
sizeof(*huffman_tables));
htree_groups = VP8LHtreeGroupsNew(num_htree_groups);
- code_lengths = (int*)WebPSafeCalloc((uint64_t)max_alphabet_size,
- sizeof(*code_lengths));
if (htree_groups == NULL || code_lengths == NULL || huffman_tables == NULL) {
dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
@@ -415,28 +454,35 @@ static int ReadHuffmanCodes(VP8LDecoder*
}
next = huffman_tables;
- for (i = 0; i < num_htree_groups; ++i) {
- HTreeGroup* const htree_group = &htree_groups[i];
+ for (i = 0; i < num_htree_groups_max; ++i) {
+ // If the index "i" is unused in the Huffman image, read the coefficients
+ // but store them to a bogus htree_group.
+ const int is_bogus = (mapping != NULL && mapping[i] == -1);
+ HTreeGroup* const htree_group =
+ is_bogus ? &htree_group_bogus :
+ &htree_groups[(mapping == NULL) ? i : mapping[i]];
HuffmanCode** const htrees = htree_group->htrees;
+ HuffmanCode* huffman_tables_i = is_bogus ? huffman_tables_bogus : next;
int size;
int total_size = 0;
int is_trivial_literal = 1;
int max_bits = 0;
for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
int alphabet_size = kAlphabetSize[j];
- htrees[j] = next;
+ htrees[j] = huffman_tables_i;
if (j == 0 && color_cache_bits > 0) {
alphabet_size += 1 << color_cache_bits;
}
- size = ReadHuffmanCode(alphabet_size, dec, code_lengths, next);
+ size =
+ ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_tables_i);
if (size == 0) {
goto Error;
}
if (is_trivial_literal && kLiteralMap[j] == 1) {
- is_trivial_literal = (next->bits == 0);
+ is_trivial_literal = (huffman_tables_i->bits == 0);
}
- total_size += next->bits;
- next += size;
+ total_size += huffman_tables_i->bits;
+ huffman_tables_i += size;
if (j <= ALPHA) {
int local_max_bits = code_lengths[0];
int k;
@@ -448,38 +494,41 @@ static int ReadHuffmanCodes(VP8LDecoder*
max_bits += local_max_bits;
}
}
+ if (!is_bogus) next = huffman_tables_i;
htree_group->is_trivial_literal = is_trivial_literal;
htree_group->is_trivial_code = 0;
if (is_trivial_literal) {
const int red = htrees[RED][0].value;
const int blue = htrees[BLUE][0].value;
const int alpha = htrees[ALPHA][0].value;
- htree_group->literal_arb =
- ((uint32_t)alpha << 24) | (red << 16) | blue;
+ htree_group->literal_arb = ((uint32_t)alpha << 24) | (red << 16) | blue;
if (total_size == 0 && htrees[GREEN][0].value < NUM_LITERAL_CODES) {
htree_group->is_trivial_code = 1;
htree_group->literal_arb |= htrees[GREEN][0].value << 8;
}
}
- htree_group->use_packed_table = !htree_group->is_trivial_code &&
- (max_bits < HUFFMAN_PACKED_BITS);
+ htree_group->use_packed_table =
+ !htree_group->is_trivial_code && (max_bits < HUFFMAN_PACKED_BITS);
if (htree_group->use_packed_table) BuildPackedTable(htree_group);
}
- WebPSafeFree(code_lengths);
+ ok = 1;
- // All OK. Finalize pointers and return.
+ // All OK. Finalize pointers.
hdr->huffman_image_ = huffman_image;
hdr->num_htree_groups_ = num_htree_groups;
hdr->htree_groups_ = htree_groups;
hdr->huffman_tables_ = huffman_tables;
- return 1;
Error:
WebPSafeFree(code_lengths);
- WebPSafeFree(huffman_image);
- WebPSafeFree(huffman_tables);
- VP8LHtreeGroupsFree(htree_groups);
- return 0;
+ WebPSafeFree(huffman_tables_bogus);
+ WebPSafeFree(mapping);
+ if (!ok) {
+ WebPSafeFree(huffman_image);
+ WebPSafeFree(huffman_tables);
+ VP8LHtreeGroupsFree(htree_groups);
+ }
+ return ok;
}
//------------------------------------------------------------------------------
@@ -884,7 +933,11 @@ static WEBP_INLINE void CopyBlock8b(uint
#endif
break;
case 2:
+#if !defined(WORDS_BIGENDIAN)
memcpy(&pattern, src, sizeof(uint16_t));
+#else
+ pattern = ((uint32_t)src[0] << 8) | src[1];
+#endif
#if defined(__arm__) || defined(_M_ARM)
pattern |= pattern << 16;
#elif defined(WEBP_USE_MIPS_DSP_R2)
@@ -1523,7 +1576,6 @@ int VP8LDecodeAlphaHeader(ALPHDecoder* c
if (dec == NULL) return 0;
assert(alph_dec != NULL);
- alph_dec->vp8l_dec_ = dec;
dec->width_ = alph_dec->width_;
dec->height_ = alph_dec->height_;
@@ -1555,11 +1607,12 @@ int VP8LDecodeAlphaHeader(ALPHDecoder* c
if (!ok) goto Err;
+ // Only set here, once we are sure it is valid (to avoid thread races).
+ alph_dec->vp8l_dec_ = dec;
return 1;
Err:
- VP8LDelete(alph_dec->vp8l_dec_);
- alph_dec->vp8l_dec_ = NULL;
+ VP8LDelete(dec);
return 0;
}

View File

@@ -1,14 +0,0 @@
diff --git a/src/mux/muxread.c b/src/mux/muxread.c
index fbe9f05..ea07dbf 100644
--- a/src/mux/muxread.c
+++ b/src/mux/muxread.c
@@ -264,6 +264,7 @@
chunk_list = MuxGetChunkListFromId(mux, id); // List to add this chunk.
if (ChunkSetNth(&chunk, chunk_list, 0) != WEBP_MUX_OK) goto Err;
if (id == WEBP_CHUNK_VP8X) { // grab global specs
+ if (data_size < CHUNK_HEADER_SIZE + VP8X_CHUNK_SIZE) goto Err;
mux->canvas_width_ = GetLE24(data + 12) + 1;
mux->canvas_height_ = GetLE24(data + 15) + 1;
}

View File

@@ -1,36 +0,0 @@
diff --git a/src/utils/quant_levels_dec_utils.c b/src/utils/quant_levels_dec_utils.c
index 3818a78..f65b6cd 100644
--- a/src/utils/quant_levels_dec_utils.c
+++ b/src/utils/quant_levels_dec_utils.c
@@ -261,9 +261,15 @@
int WebPDequantizeLevels(uint8_t* const data, int width, int height, int stride,
int strength) {
- const int radius = 4 * strength / 100;
+ int radius = 4 * strength / 100;
+
if (strength < 0 || strength > 100) return 0;
if (data == NULL || width <= 0 || height <= 0) return 0; // bad params
+
+ // limit the filter size to not exceed the image dimensions
+ if (2 * radius + 1 > width) radius = (width - 1) >> 1;
+ if (2 * radius + 1 > height) radius = (height - 1) >> 1;
+
if (radius > 0) {
SmoothParams p;
memset(&p, 0, sizeof(p));
diff --git a/src/mux/muxread.c b/src/mux/muxread.c
index ef50dae..fbe9f05 100644
--- a/src/mux/muxread.c
+++ b/src/mux/muxread.c
@@ -138,6 +138,7 @@
wpi->is_partial_ = 1; // Waiting for a VP8 chunk.
break;
case WEBP_CHUNK_IMAGE:
+ if (wpi->img_ != NULL) goto Fail; // Only 1 image chunk allowed.
if (ChunkSetNth(&subchunk, &wpi->img_, 1) != WEBP_MUX_OK) goto Fail;
if (!MuxImageFinalize(wpi)) goto Fail;
wpi->is_partial_ = 0; // wpi is completely filled.

View File

@@ -1,23 +1,15 @@
%global _hardened_build 1
Name: libwebp
Version: 1.0.0
Release: 9%{?dist}.1
Version: 1.2.0
Release: 3%{?dist}
URL: http://webmproject.org/
Summary: Library and tools for the WebP graphics format
# Additional IPR is licensed as well. See PATENTS file for details
License: BSD
Source0: http://downloads.webmproject.org/releases/webp/%{name}-%{version}.tar.gz
Source1: libwebp_jni_example.java
Patch0: rhbz-1956829.patch
Patch1: rhbz-1956843.patch
Patch2: rhbz-1956919.patch
Patch3: rhbz-1956853.patch
Patch4: rhbz-1956856.patch
Patch5: rhbz-1956917.patch
Patch6: rhbz-1956868.patch
Patch7: mozilla-1819244.patch
Patch8: 4619a48fc-1.0.0.patch
Patch0: libwebp-freeglut.patch
BuildRequires: libjpeg-devel
BuildRequires: libpng-devel
@@ -28,6 +20,7 @@ BuildRequires: jpackage-utils
BuildRequires: swig
BuildRequires: autoconf automake libtool
BuildRequires: freeglut-devel
BuildRequires: make
%description
WebP is an image format that does lossy compression of digital
@@ -39,7 +32,6 @@ images more efficiently.
%package tools
Summary: The WebP command line tools
Requires: %{name}%{?_isa} = %{version}-%{release}
%description tools
WebP is an image format that does lossy compression of digital
@@ -86,6 +78,7 @@ export CFLAGS="%{optflags} -frename-registers"
--enable-libwebpdemux --enable-libwebpdecoder \
--disable-neon
%make_build V=1
make -C examples vwebp
# swig generated Java bindings
cp %{SOURCE1} .
@@ -150,23 +143,51 @@ cp swig/*.jar swig/*.so %{buildroot}/%{_libdir}/%{name}-java/
%changelog
* Fri Sep 15 2023 Martin Stransky <stransky@redhat.com> - 1.0.0-9.el8_9.1
- Added fix for CVE-2023-4863
* Mon Aug 09 2021 Mohan Boddu <mboddu@redhat.com> - 1.2.0-3
- Rebuilt for IMA sigs, glibc 2.34, aarch64 flags
Related: rhbz#1991688
* Wed May 03 2023 Tomas Popela <tpopela@redhat.com> - 1.0.0-9
- Bump the release to "9" to accommodate the 9.1.0.z release bumps as
libwebp-1.0.0-8.el9 < libwebp-1.0.0-8.el8_7
* Fri Apr 16 2021 Mohan Boddu <mboddu@redhat.com> - 1.2.0-2
- Rebuilt for RHEL 9 BETA on Apr 15th 2021. Related: rhbz#1947937
* Wed May 03 2023 Tomas Popela <tpopela@redhat.com> - 1.0.0-8
- Add fix for mzbz#1819244
- Fix tools subpackage dependency
- Bump the release to "8" to accommodate the 8.7.0.z release bumps
* Mon Feb 01 2021 Sandro Mani <manisandro@gmail.com> - 1.2.0-1
- Update to 1.2.0
* Thu May 27 2021 Martin Stransky <stransky@redhat.com> - 1.0.0-5
- Added fixes for rhbz#1956853, rhbz#1956856, rhbz#1956868, rhbz#1956917
* Tue Jan 26 2021 Fedora Release Engineering <releng@fedoraproject.org> - 1.1.0-6
- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
* Mon May 17 2021 Martin Stransky <stransky@redhat.com> - 1.0.0-3
- Added fixes for rhbz#1956829, rhbz#1956843, rhbz#1956919
* Tue Jul 28 2020 Fedora Release Engineering <releng@fedoraproject.org> - 1.1.0-5
- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
* Sat Jul 11 2020 Jiri Vanek <jvanek@redhat.com> - 1.1.0-4
- Rebuilt for JDK-11, see https://fedoraproject.org/wiki/Changes/Java11
* Mon May 18 2020 Sandro Mani <manisandro@gmail.com> - 1.1.0-3
- Don't manually and incorrectly install vwebp, Makefile already does it correctly (#1836640)
* Wed Jan 29 2020 Fedora Release Engineering <releng@fedoraproject.org> - 1.1.0-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
* Tue Jan 07 2020 Sandro Mani <manisandro@gmail.com> - 1.1.0-1
- Update to 1.1.0
* Tue Sep 17 2019 Gwyn Ciesla <gwync@protonmail.com> - 1.0.3-3
- Rebuilt for new freeglut
* Thu Jul 25 2019 Fedora Release Engineering <releng@fedoraproject.org> - 1.0.3-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
* Mon Jul 15 2019 Sandro Mani <manisandro@gmail.com> - 1.0.3-1
- Update to 1.0.3
* Fri Feb 01 2019 Fedora Release Engineering <releng@fedoraproject.org> - 1.0.2-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
* Tue Jan 22 2019 Sandro Mani <manisandro@gmail.com> - 1.0.2-1
- Update to 1.0.2
* Fri Jul 13 2018 Fedora Release Engineering <releng@fedoraproject.org> - 1.0.0-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
* Thu Apr 26 2018 Sandro Mani <manisandro@gmail.com> - 1.0.0-1
- Update to 1.0.0