3rdparty: update libwebp 1.0.0 => 1.0.2
https://github.com/webmproject/libwebp/releases/tag/v1.0.2
parent f355b3505f
commit d58f9ae824
30  3rdparty/libwebp/COPYING  (vendored, new file)
@@ -0,0 +1,30 @@
+Copyright (c) 2010, Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+  * Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+  * Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+  * Neither the name of Google nor the names of its contributors may
+    be used to endorse or promote products derived from this software
+    without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
2  3rdparty/libwebp/src/dec/alphai_dec.h  (vendored)
@@ -51,4 +51,4 @@ void WebPDeallocateAlphaMemory(VP8Decoder* const dec);
} // extern "C"
#endif

-#endif /* WEBP_DEC_ALPHAI_DEC_H_ */
+#endif // WEBP_DEC_ALPHAI_DEC_H_
3  3rdparty/libwebp/src/dec/buffer_dec.c  (vendored)
@@ -74,7 +74,8 @@ static VP8StatusCode CheckDecBuffer(const WebPDecBuffer* const buffer) {
} else { // RGB checks
const WebPRGBABuffer* const buf = &buffer->u.RGBA;
const int stride = abs(buf->stride);
-const uint64_t size = MIN_BUFFER_SIZE(width, height, stride);
+const uint64_t size =
+MIN_BUFFER_SIZE(width * kModeBpp[mode], height, stride);
ok &= (size <= buf->size);
ok &= (stride >= width * kModeBpp[mode]);
ok &= (buf->rgba != NULL);
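A quick worked example of the tightened check above (illustrative only; it assumes MIN_BUFFER_SIZE(W, H, S) expands to (uint64_t)(S) * ((H) - 1) + (W) and that kModeBpp[MODE_RGBA] == 4): for a 100x10 RGBA output with stride 400, the old expression evaluated MIN_BUFFER_SIZE(100, 10, 400) = 400 * 9 + 100 = 3700 bytes, so an undersized 3700-byte buffer passed even though the last row alone needs 400 bytes; the new expression evaluates MIN_BUFFER_SIZE(100 * 4, 10, 400) = 400 * 9 + 400 = 4000 bytes, which is the real minimum.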
9  3rdparty/libwebp/src/dec/frame_dec.c  (vendored)
@@ -338,7 +338,6 @@ void VP8InitDithering(const WebPDecoderOptions* const options,
for (s = 0; s < NUM_MB_SEGMENTS; ++s) {
VP8QuantMatrix* const dqm = &dec->dqm_[s];
if (dqm->uv_quant_ < DITHER_AMP_TAB_SIZE) {
-// TODO(skal): should we specially dither more for uv_quant_ < 0?
const int idx = (dqm->uv_quant_ < 0) ? 0 : dqm->uv_quant_;
dqm->dither_ = (f * kQuantToDitherAmp[idx]) >> 3;
}
@@ -669,15 +668,9 @@ int VP8GetThreadMethod(const WebPDecoderOptions* const options,
(void)height;
assert(headers == NULL || !headers->is_lossless);
#if defined(WEBP_USE_THREAD)
-if (width < MIN_WIDTH_FOR_THREADS) return 0;
-// TODO(skal): tune the heuristic further
-#if 0
-if (height < 2 * width) return 2;
+if (width >= MIN_WIDTH_FOR_THREADS) return 2;
#endif
-return 2;
-#else // !WEBP_USE_THREAD
return 0;
-#endif
}

#undef MT_CACHE_LINES
25  3rdparty/libwebp/src/dec/idec_dec.c  (vendored)
@@ -140,10 +140,9 @@ static void DoRemap(WebPIDecoder* const idec, ptrdiff_t offset) {
if (NeedCompressedAlpha(idec)) {
ALPHDecoder* const alph_dec = dec->alph_dec_;
dec->alpha_data_ += offset;
-if (alph_dec != NULL) {
+if (alph_dec != NULL && alph_dec->vp8l_dec_ != NULL) {
if (alph_dec->method_ == ALPHA_LOSSLESS_COMPRESSION) {
VP8LDecoder* const alph_vp8l_dec = alph_dec->vp8l_dec_;
-assert(alph_vp8l_dec != NULL);
assert(dec->alpha_data_size_ >= ALPHA_HEADER_LEN);
VP8LBitReaderSetBuffer(&alph_vp8l_dec->br_,
dec->alpha_data_ + ALPHA_HEADER_LEN,
@@ -283,10 +282,8 @@ static void RestoreContext(const MBContext* context, VP8Decoder* const dec,

static VP8StatusCode IDecError(WebPIDecoder* const idec, VP8StatusCode error) {
if (idec->state_ == STATE_VP8_DATA) {
-VP8Io* const io = &idec->io_;
-if (io->teardown != NULL) {
-io->teardown(io);
-}
+// Synchronize the thread, clean-up and check for errors.
+VP8ExitCritical((VP8Decoder*)idec->dec_, &idec->io_);
}
idec->state_ = STATE_ERROR;
return error;
@@ -451,7 +448,10 @@ static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
VP8Io* const io = &idec->io_;

-assert(dec->ready_);
+// Make sure partition #0 has been read before, to set dec to ready_.
+if (!dec->ready_) {
+return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
+}
for (; dec->mb_y_ < dec->mb_h_; ++dec->mb_y_) {
if (idec->last_mb_y_ != dec->mb_y_) {
if (!VP8ParseIntraModeRow(&dec->br_, dec)) {
@@ -473,6 +473,12 @@ static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
MemDataSize(&idec->mem_) > MAX_MB_SIZE) {
return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
}
+// Synchronize the threads.
+if (dec->mt_method_ > 0) {
+if (!WebPGetWorkerInterface()->Sync(&dec->worker_)) {
+return IDecError(idec, VP8_STATUS_BITSTREAM_ERROR);
+}
+}
RestoreContext(&context, dec, token_br);
return VP8_STATUS_SUSPENDED;
}
@@ -491,6 +497,7 @@ static VP8StatusCode DecodeRemaining(WebPIDecoder* const idec) {
}
// Synchronize the thread and check for errors.
if (!VP8ExitCritical(dec, io)) {
+idec->state_ = STATE_ERROR; // prevent re-entry in IDecError
return IDecError(idec, VP8_STATUS_USER_ABORT);
}
dec->ready_ = 0;
@@ -571,6 +578,10 @@ static VP8StatusCode IDecode(WebPIDecoder* idec) {
status = DecodePartition0(idec);
}
if (idec->state_ == STATE_VP8_DATA) {
+const VP8Decoder* const dec = (VP8Decoder*)idec->dec_;
+if (dec == NULL) {
+return VP8_STATUS_SUSPENDED; // can't continue if we have no decoder.
+}
status = DecodeRemaining(idec);
}
if (idec->state_ == STATE_VP8L_HEADER) {
2  3rdparty/libwebp/src/dec/vp8_dec.h  (vendored)
@@ -182,4 +182,4 @@ WEBP_EXTERN int VP8LGetInfo(
} // extern "C"
#endif

-#endif /* WEBP_DEC_VP8_DEC_H_ */
+#endif // WEBP_DEC_VP8_DEC_H_
4  3rdparty/libwebp/src/dec/vp8i_dec.h  (vendored)
@@ -32,7 +32,7 @@ extern "C" {
// version numbers
#define DEC_MAJ_VERSION 1
#define DEC_MIN_VERSION 0
-#define DEC_REV_VERSION 0
+#define DEC_REV_VERSION 2

// YUV-cache parameters. Cache is 32-bytes wide (= one cacheline).
// Constraints are: We need to store one 16x16 block of luma samples (y),
@@ -316,4 +316,4 @@ const uint8_t* VP8DecompressAlphaRows(VP8Decoder* const dec,
} // extern "C"
#endif

-#endif /* WEBP_DEC_VP8I_DEC_H_ */
+#endif // WEBP_DEC_VP8I_DEC_H_
97  3rdparty/libwebp/src/dec/vp8l_dec.c  (vendored)
@@ -362,12 +362,19 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
VP8LMetadata* const hdr = &dec->hdr_;
uint32_t* huffman_image = NULL;
HTreeGroup* htree_groups = NULL;
+// When reading htrees, some might be unused, as the format allows it.
+// We will still read them but put them in this htree_group_bogus.
+HTreeGroup htree_group_bogus;
HuffmanCode* huffman_tables = NULL;
+HuffmanCode* huffman_tables_bogus = NULL;
HuffmanCode* next = NULL;
int num_htree_groups = 1;
+int num_htree_groups_max = 1;
int max_alphabet_size = 0;
int* code_lengths = NULL;
const int table_size = kTableSize[color_cache_bits];
+int* mapping = NULL;
+int ok = 0;

if (allow_recursion && VP8LReadBits(br, 1)) {
// use meta Huffman codes.
@@ -384,10 +391,42 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
// The huffman data is stored in red and green bytes.
const int group = (huffman_image[i] >> 8) & 0xffff;
huffman_image[i] = group;
-if (group >= num_htree_groups) {
-num_htree_groups = group + 1;
+if (group >= num_htree_groups_max) {
+num_htree_groups_max = group + 1;
}
}
+// Check the validity of num_htree_groups_max. If it seems too big, use a
+// smaller value for later. This will prevent big memory allocations to end
+// up with a bad bitstream anyway.
+// The value of 1000 is totally arbitrary. We know that num_htree_groups_max
+// is smaller than (1 << 16) and should be smaller than the number of pixels
+// (though the format allows it to be bigger).
+if (num_htree_groups_max > 1000 || num_htree_groups_max > xsize * ysize) {
+// Create a mapping from the used indices to the minimal set of used
+// values [0, num_htree_groups)
+mapping = (int*)WebPSafeMalloc(num_htree_groups_max, sizeof(*mapping));
+if (mapping == NULL) {
+dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
+goto Error;
+}
+// -1 means a value is unmapped, and therefore unused in the Huffman
+// image.
+memset(mapping, 0xff, num_htree_groups_max * sizeof(*mapping));
+for (num_htree_groups = 0, i = 0; i < huffman_pixs; ++i) {
+// Get the current mapping for the group and remap the Huffman image.
+int* const mapped_group = &mapping[huffman_image[i]];
+if (*mapped_group == -1) *mapped_group = num_htree_groups++;
+huffman_image[i] = *mapped_group;
+}
+huffman_tables_bogus = (HuffmanCode*)WebPSafeMalloc(
+table_size, sizeof(*huffman_tables_bogus));
+if (huffman_tables_bogus == NULL) {
+dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
+goto Error;
+}
+} else {
+num_htree_groups = num_htree_groups_max;
+}
}

if (br->eos_) goto Error;
@@ -403,11 +442,11 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
}
}

+code_lengths = (int*)WebPSafeCalloc((uint64_t)max_alphabet_size,
+sizeof(*code_lengths));
huffman_tables = (HuffmanCode*)WebPSafeMalloc(num_htree_groups * table_size,
sizeof(*huffman_tables));
htree_groups = VP8LHtreeGroupsNew(num_htree_groups);
-code_lengths = (int*)WebPSafeCalloc((uint64_t)max_alphabet_size,
-sizeof(*code_lengths));

if (htree_groups == NULL || code_lengths == NULL || huffman_tables == NULL) {
dec->status_ = VP8_STATUS_OUT_OF_MEMORY;
@@ -415,28 +454,35 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
}

next = huffman_tables;
-for (i = 0; i < num_htree_groups; ++i) {
-HTreeGroup* const htree_group = &htree_groups[i];
+for (i = 0; i < num_htree_groups_max; ++i) {
+// If the index "i" is unused in the Huffman image, read the coefficients
+// but store them to a bogus htree_group.
+const int is_bogus = (mapping != NULL && mapping[i] == -1);
+HTreeGroup* const htree_group =
+is_bogus ? &htree_group_bogus :
+&htree_groups[(mapping == NULL) ? i : mapping[i]];
HuffmanCode** const htrees = htree_group->htrees;
+HuffmanCode* huffman_tables_i = is_bogus ? huffman_tables_bogus : next;
int size;
int total_size = 0;
int is_trivial_literal = 1;
int max_bits = 0;
for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; ++j) {
int alphabet_size = kAlphabetSize[j];
-htrees[j] = next;
+htrees[j] = huffman_tables_i;
if (j == 0 && color_cache_bits > 0) {
alphabet_size += 1 << color_cache_bits;
}
-size = ReadHuffmanCode(alphabet_size, dec, code_lengths, next);
+size =
+ReadHuffmanCode(alphabet_size, dec, code_lengths, huffman_tables_i);
if (size == 0) {
goto Error;
}
if (is_trivial_literal && kLiteralMap[j] == 1) {
-is_trivial_literal = (next->bits == 0);
+is_trivial_literal = (huffman_tables_i->bits == 0);
}
-total_size += next->bits;
-next += size;
+total_size += huffman_tables_i->bits;
+huffman_tables_i += size;
if (j <= ALPHA) {
int local_max_bits = code_lengths[0];
int k;
@@ -448,38 +494,41 @@ static int ReadHuffmanCodes(VP8LDecoder* const dec, int xsize, int ysize,
max_bits += local_max_bits;
}
}
+if (!is_bogus) next = huffman_tables_i;
htree_group->is_trivial_literal = is_trivial_literal;
htree_group->is_trivial_code = 0;
if (is_trivial_literal) {
const int red = htrees[RED][0].value;
const int blue = htrees[BLUE][0].value;
const int alpha = htrees[ALPHA][0].value;
-htree_group->literal_arb =
-((uint32_t)alpha << 24) | (red << 16) | blue;
+htree_group->literal_arb = ((uint32_t)alpha << 24) | (red << 16) | blue;
if (total_size == 0 && htrees[GREEN][0].value < NUM_LITERAL_CODES) {
htree_group->is_trivial_code = 1;
htree_group->literal_arb |= htrees[GREEN][0].value << 8;
}
}
-htree_group->use_packed_table = !htree_group->is_trivial_code &&
-(max_bits < HUFFMAN_PACKED_BITS);
+htree_group->use_packed_table =
+!htree_group->is_trivial_code && (max_bits < HUFFMAN_PACKED_BITS);
if (htree_group->use_packed_table) BuildPackedTable(htree_group);
}
-WebPSafeFree(code_lengths);
+ok = 1;

-// All OK. Finalize pointers and return.
+// All OK. Finalize pointers.
hdr->huffman_image_ = huffman_image;
hdr->num_htree_groups_ = num_htree_groups;
hdr->htree_groups_ = htree_groups;
hdr->huffman_tables_ = huffman_tables;
-return 1;

Error:
WebPSafeFree(code_lengths);
+WebPSafeFree(huffman_tables_bogus);
+WebPSafeFree(mapping);
+if (!ok) {
WebPSafeFree(huffman_image);
WebPSafeFree(huffman_tables);
VP8LHtreeGroupsFree(htree_groups);
-return 0;
+}
+return ok;
}

//------------------------------------------------------------------------------
@@ -884,7 +933,11 @@ static WEBP_INLINE void CopyBlock8b(uint8_t* const dst, int dist, int length) {
#endif
break;
case 2:
+#if !defined(WORDS_BIGENDIAN)
memcpy(&pattern, src, sizeof(uint16_t));
+#else
+pattern = ((uint32_t)src[0] << 8) | src[1];
+#endif
#if defined(__arm__) || defined(_M_ARM)
pattern |= pattern << 16;
#elif defined(WEBP_USE_MIPS_DSP_R2)
@@ -1523,7 +1576,6 @@ int VP8LDecodeAlphaHeader(ALPHDecoder* const alph_dec,
if (dec == NULL) return 0;

assert(alph_dec != NULL);
-alph_dec->vp8l_dec_ = dec;

dec->width_ = alph_dec->width_;
dec->height_ = alph_dec->height_;
@@ -1555,11 +1607,12 @@ int VP8LDecodeAlphaHeader(ALPHDecoder* const alph_dec,

if (!ok) goto Err;

+// Only set here, once we are sure it is valid (to avoid thread races).
+alph_dec->vp8l_dec_ = dec;
return 1;

Err:
-VP8LDelete(alph_dec->vp8l_dec_);
-alph_dec->vp8l_dec_ = NULL;
+VP8LDelete(dec);
return 0;
}

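The index remapping added to ReadHuffmanCodes above is easier to follow outside the diff. The following standalone sketch (illustrative only, with hypothetical data; not libwebp code) shows the same compaction trick: entries that never occur stay at -1, and every group id that does occur is renumbered consecutively.

#include <stdio.h>
#include <string.h>

int main(void) {
  int image[8] = { 7, 7, 2, 9, 2, 7, 9, 9 };  /* hypothetical sparse group ids */
  int mapping[16];
  int num_groups = 0;
  int i;
  memset(mapping, 0xff, sizeof(mapping));     /* 0xff bytes == -1 == "unused" */
  for (i = 0; i < 8; ++i) {
    if (mapping[image[i]] == -1) mapping[image[i]] = num_groups++;
    image[i] = mapping[image[i]];             /* remap in place */
  }
  for (i = 0; i < 8; ++i) printf("%d ", image[i]);  /* prints: 0 0 1 2 1 0 2 2 */
  printf("\nnum_groups = %d\n", num_groups);        /* prints: num_groups = 3 */
  return 0;
}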
2  3rdparty/libwebp/src/dec/vp8li_dec.h  (vendored)
@@ -132,4 +132,4 @@ void VP8LDelete(VP8LDecoder* const dec);
} // extern "C"
#endif

-#endif /* WEBP_DEC_VP8LI_DEC_H_ */
+#endif // WEBP_DEC_VP8LI_DEC_H_
2  3rdparty/libwebp/src/dec/webpi_dec.h  (vendored)
@@ -130,4 +130,4 @@ int WebPAvoidSlowMemory(const WebPDecBuffer* const output,
} // extern "C"
#endif

-#endif /* WEBP_DEC_WEBPI_DEC_H_ */
+#endif // WEBP_DEC_WEBPI_DEC_H_
2  3rdparty/libwebp/src/demux/demux.c  (vendored)
@@ -25,7 +25,7 @@

#define DMUX_MAJ_VERSION 1
#define DMUX_MIN_VERSION 0
-#define DMUX_REV_VERSION 0
+#define DMUX_REV_VERSION 2

typedef struct {
size_t start_; // start location of the data
6  3rdparty/libwebp/src/dsp/cost.c  (vendored)
@@ -377,6 +377,7 @@ VP8SetResidualCoeffsFunc VP8SetResidualCoeffs;
extern void VP8EncDspCostInitMIPS32(void);
extern void VP8EncDspCostInitMIPSdspR2(void);
extern void VP8EncDspCostInitSSE2(void);
+extern void VP8EncDspCostInitNEON(void);

WEBP_DSP_INIT_FUNC(VP8EncDspCostInit) {
VP8GetResidualCost = GetResidualCost_C;
@@ -398,6 +399,11 @@ WEBP_DSP_INIT_FUNC(VP8EncDspCostInit) {
if (VP8GetCPUInfo(kSSE2)) {
VP8EncDspCostInitSSE2();
}
+#endif
+#if defined(WEBP_USE_NEON)
+if (VP8GetCPUInfo(kNEON)) {
+VP8EncDspCostInitNEON();
+}
#endif
}
}
122  3rdparty/libwebp/src/dsp/cost_neon.c  (vendored, new file)
@@ -0,0 +1,122 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+//
+// ARM NEON version of cost functions
+
+#include "src/dsp/dsp.h"
+
+#if defined(WEBP_USE_NEON)
+
+#include "src/dsp/neon.h"
+#include "src/enc/cost_enc.h"
+
+static const uint8_t position[16] = { 1, 2, 3, 4, 5, 6, 7, 8,
+9, 10, 11, 12, 13, 14, 15, 16 };
+
+static void SetResidualCoeffs_NEON(const int16_t* const coeffs,
+VP8Residual* const res) {
+const int16x8_t minus_one = vdupq_n_s16(-1);
+const int16x8_t coeffs_0 = vld1q_s16(coeffs);
+const int16x8_t coeffs_1 = vld1q_s16(coeffs + 8);
+const uint16x8_t eob_0 = vtstq_s16(coeffs_0, minus_one);
+const uint16x8_t eob_1 = vtstq_s16(coeffs_1, minus_one);
+const uint8x16_t eob = vcombine_u8(vqmovn_u16(eob_0), vqmovn_u16(eob_1));
+const uint8x16_t masked = vandq_u8(eob, vld1q_u8(position));
+
+#ifdef __aarch64__
+res->last = vmaxvq_u8(masked) - 1;
+#else
+const uint8x8_t eob_8x8 = vmax_u8(vget_low_u8(masked), vget_high_u8(masked));
+const uint16x8_t eob_16x8 = vmovl_u8(eob_8x8);
+const uint16x4_t eob_16x4 =
+vmax_u16(vget_low_u16(eob_16x8), vget_high_u16(eob_16x8));
+const uint32x4_t eob_32x4 = vmovl_u16(eob_16x4);
+uint32x2_t eob_32x2 =
+vmax_u32(vget_low_u32(eob_32x4), vget_high_u32(eob_32x4));
+eob_32x2 = vpmax_u32(eob_32x2, eob_32x2);
+
+vst1_lane_s32(&res->last, vreinterpret_s32_u32(eob_32x2), 0);
+--res->last;
+#endif // __aarch64__
+
+res->coeffs = coeffs;
+}
+
+static int GetResidualCost_NEON(int ctx0, const VP8Residual* const res) {
+uint8_t levels[16], ctxs[16];
+uint16_t abs_levels[16];
+int n = res->first;
+// should be prob[VP8EncBands[n]], but it's equivalent for n=0 or 1
+const int p0 = res->prob[n][ctx0][0];
+CostArrayPtr const costs = res->costs;
+const uint16_t* t = costs[n][ctx0];
+// bit_cost(1, p0) is already incorporated in t[] tables, but only if ctx != 0
+// (as required by the syntax). For ctx0 == 0, we need to add it here or it'll
+// be missing during the loop.
+int cost = (ctx0 == 0) ? VP8BitCost(1, p0) : 0;
+
+if (res->last < 0) {
+return VP8BitCost(0, p0);
+}
+
+{ // precompute clamped levels and contexts, packed to 8b.
+const uint8x16_t kCst2 = vdupq_n_u8(2);
+const uint8x16_t kCst67 = vdupq_n_u8(MAX_VARIABLE_LEVEL);
+const int16x8_t c0 = vld1q_s16(res->coeffs);
+const int16x8_t c1 = vld1q_s16(res->coeffs + 8);
+const uint16x8_t E0 = vreinterpretq_u16_s16(vabsq_s16(c0));
+const uint16x8_t E1 = vreinterpretq_u16_s16(vabsq_s16(c1));
+const uint8x16_t F = vcombine_u8(vqmovn_u16(E0), vqmovn_u16(E1));
+const uint8x16_t G = vminq_u8(F, kCst2); // context = 0,1,2
+const uint8x16_t H = vminq_u8(F, kCst67); // clamp_level in [0..67]
+
+vst1q_u8(ctxs, G);
+vst1q_u8(levels, H);
+
+vst1q_u16(abs_levels, E0);
+vst1q_u16(abs_levels + 8, E1);
+}
+for (; n < res->last; ++n) {
+const int ctx = ctxs[n];
+const int level = levels[n];
+const int flevel = abs_levels[n]; // full level
+cost += VP8LevelFixedCosts[flevel] + t[level]; // simplified VP8LevelCost()
+t = costs[n + 1][ctx];
+}
+// Last coefficient is always non-zero
+{
+const int level = levels[n];
+const int flevel = abs_levels[n];
+assert(flevel != 0);
+cost += VP8LevelFixedCosts[flevel] + t[level];
+if (n < 15) {
+const int b = VP8EncBands[n + 1];
+const int ctx = ctxs[n];
+const int last_p0 = res->prob[b][ctx][0];
+cost += VP8BitCost(0, last_p0);
+}
+}
+return cost;
+}
+
+//------------------------------------------------------------------------------
+// Entry point
+
+extern void VP8EncDspCostInitNEON(void);
+
+WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspCostInitNEON(void) {
+VP8SetResidualCoeffs = SetResidualCoeffs_NEON;
+VP8GetResidualCost = GetResidualCost_NEON;
+}
+
+#else // !WEBP_USE_NEON
+
+WEBP_DSP_INIT_STUB(VP8EncDspCostInitNEON)
+
+#endif // WEBP_USE_NEON
6  3rdparty/libwebp/src/dsp/dsp.h  (vendored)
@@ -76,10 +76,6 @@ extern "C" {
#define WEBP_USE_SSE41
#endif

-#if defined(__AVX2__) || defined(WEBP_HAVE_AVX2)
-#define WEBP_USE_AVX2
-#endif
-
// The intrinsics currently cause compiler errors with arm-nacl-gcc and the
// inline assembly would need to be modified for use with Native Client.
#if (defined(__ARM_NEON__) || \
@@ -679,4 +675,4 @@ void VP8FiltersInit(void);
} // extern "C"
#endif

-#endif /* WEBP_DSP_DSP_H_ */
+#endif // WEBP_DSP_DSP_H_
6  3rdparty/libwebp/src/dsp/enc.c  (vendored)
@@ -734,7 +734,6 @@ VP8BlockCopy VP8Copy16x8;

extern void VP8EncDspInitSSE2(void);
extern void VP8EncDspInitSSE41(void);
-extern void VP8EncDspInitAVX2(void);
extern void VP8EncDspInitNEON(void);
extern void VP8EncDspInitMIPS32(void);
extern void VP8EncDspInitMIPSdspR2(void);
@@ -784,11 +783,6 @@ WEBP_DSP_INIT_FUNC(VP8EncDspInit) {
#endif
}
#endif
-#if defined(WEBP_USE_AVX2)
-if (VP8GetCPUInfo(kAVX2)) {
-VP8EncDspInitAVX2();
-}
-#endif
#if defined(WEBP_USE_MIPS32)
if (VP8GetCPUInfo(kMIPS32)) {
VP8EncDspInitMIPS32();
21  3rdparty/libwebp/src/dsp/enc_avx2.c  (vendored, deleted)
@@ -1,21 +0,0 @@
-// Copyright 2014 Google Inc. All Rights Reserved.
-//
-// Use of this source code is governed by a BSD-style license
-// that can be found in the COPYING file in the root of the source
-// tree. An additional intellectual property rights grant can be found
-// in the file PATENTS. All contributing project authors may
-// be found in the AUTHORS file in the root of the source tree.
-// -----------------------------------------------------------------------------
-//
-// AVX2 version of speed-critical encoding functions.
-
-#include "src/dsp/dsp.h"
-
-#if defined(WEBP_USE_AVX2)
-
-#endif // WEBP_USE_AVX2
-
-//------------------------------------------------------------------------------
-// Entry point
-
-WEBP_DSP_INIT_STUB(VP8EncDspInitAVX2)
2  3rdparty/libwebp/src/dsp/lossless.c  (vendored)
@@ -23,8 +23,6 @@
#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"

-#define MAX_DIFF_COST (1e30f)
-
//------------------------------------------------------------------------------
// Image transforms.

10  3rdparty/libwebp/src/dsp/lossless.h  (vendored)
@@ -163,7 +163,7 @@ extern VP8LCostCombinedFunc VP8LExtraCostCombined;
extern VP8LCombinedShannonEntropyFunc VP8LCombinedShannonEntropy;

typedef struct { // small struct to hold counters
-int counts[2]; // index: 0=zero steak, 1=non-zero streak
+int counts[2]; // index: 0=zero streak, 1=non-zero streak
int streaks[2][2]; // [zero/non-zero][streak<3 / streak>=3]
} VP8LStreaks;

@@ -194,10 +194,14 @@ extern VP8LGetEntropyUnrefinedFunc VP8LGetEntropyUnrefined;
void VP8LBitsEntropyUnrefined(const uint32_t* const array, int n,
VP8LBitEntropy* const entropy);

-typedef void (*VP8LHistogramAddFunc)(const VP8LHistogram* const a,
+typedef void (*VP8LAddVectorFunc)(const uint32_t* a, const uint32_t* b,
+uint32_t* out, int size);
+extern VP8LAddVectorFunc VP8LAddVector;
+typedef void (*VP8LAddVectorEqFunc)(const uint32_t* a, uint32_t* out, int size);
+extern VP8LAddVectorEqFunc VP8LAddVectorEq;
+void VP8LHistogramAdd(const VP8LHistogram* const a,
const VP8LHistogram* const b,
VP8LHistogram* const out);
-extern VP8LHistogramAddFunc VP8LHistogramAdd;

// -----------------------------------------------------------------------------
// PrefixEncode()
86  3rdparty/libwebp/src/dsp/lossless_enc.c  (vendored)
@@ -632,38 +632,67 @@ static double ExtraCostCombined_C(const uint32_t* X, const uint32_t* Y,

//------------------------------------------------------------------------------

-static void HistogramAdd_C(const VP8LHistogram* const a,
-const VP8LHistogram* const b,
-VP8LHistogram* const out) {
+static void AddVector_C(const uint32_t* a, const uint32_t* b, uint32_t* out,
+int size) {
+int i;
+for (i = 0; i < size; ++i) out[i] = a[i] + b[i];
+}
+
+static void AddVectorEq_C(const uint32_t* a, uint32_t* out, int size) {
+int i;
+for (i = 0; i < size; ++i) out[i] += a[i];
+}
+
+#define ADD(X, ARG, LEN) do { \
+if (a->is_used_[X]) { \
+if (b->is_used_[X]) { \
+VP8LAddVector(a->ARG, b->ARG, out->ARG, (LEN)); \
+} else { \
+memcpy(&out->ARG[0], &a->ARG[0], (LEN) * sizeof(out->ARG[0])); \
+} \
+} else if (b->is_used_[X]) { \
+memcpy(&out->ARG[0], &b->ARG[0], (LEN) * sizeof(out->ARG[0])); \
+} else { \
+memset(&out->ARG[0], 0, (LEN) * sizeof(out->ARG[0])); \
+} \
+} while (0)
+
+#define ADD_EQ(X, ARG, LEN) do { \
+if (a->is_used_[X]) { \
+if (out->is_used_[X]) { \
+VP8LAddVectorEq(a->ARG, out->ARG, (LEN)); \
+} else { \
+memcpy(&out->ARG[0], &a->ARG[0], (LEN) * sizeof(out->ARG[0])); \
+} \
+} \
+} while (0)
+
+void VP8LHistogramAdd(const VP8LHistogram* const a,
+const VP8LHistogram* const b, VP8LHistogram* const out) {
int i;
const int literal_size = VP8LHistogramNumCodes(a->palette_code_bits_);
assert(a->palette_code_bits_ == b->palette_code_bits_);
+
if (b != out) {
-for (i = 0; i < literal_size; ++i) {
-out->literal_[i] = a->literal_[i] + b->literal_[i];
-}
-for (i = 0; i < NUM_DISTANCE_CODES; ++i) {
-out->distance_[i] = a->distance_[i] + b->distance_[i];
-}
-for (i = 0; i < NUM_LITERAL_CODES; ++i) {
-out->red_[i] = a->red_[i] + b->red_[i];
-out->blue_[i] = a->blue_[i] + b->blue_[i];
-out->alpha_[i] = a->alpha_[i] + b->alpha_[i];
+ADD(0, literal_, literal_size);
+ADD(1, red_, NUM_LITERAL_CODES);
+ADD(2, blue_, NUM_LITERAL_CODES);
+ADD(3, alpha_, NUM_LITERAL_CODES);
+ADD(4, distance_, NUM_DISTANCE_CODES);
+for (i = 0; i < 5; ++i) {
+out->is_used_[i] = (a->is_used_[i] | b->is_used_[i]);
}
} else {
-for (i = 0; i < literal_size; ++i) {
-out->literal_[i] += a->literal_[i];
-}
-for (i = 0; i < NUM_DISTANCE_CODES; ++i) {
-out->distance_[i] += a->distance_[i];
-}
-for (i = 0; i < NUM_LITERAL_CODES; ++i) {
-out->red_[i] += a->red_[i];
-out->blue_[i] += a->blue_[i];
-out->alpha_[i] += a->alpha_[i];
-}
+ADD_EQ(0, literal_, literal_size);
+ADD_EQ(1, red_, NUM_LITERAL_CODES);
+ADD_EQ(2, blue_, NUM_LITERAL_CODES);
+ADD_EQ(3, alpha_, NUM_LITERAL_CODES);
+ADD_EQ(4, distance_, NUM_DISTANCE_CODES);
+for (i = 0; i < 5; ++i) out->is_used_[i] |= a->is_used_[i];
}
}
+#undef ADD
+#undef ADD_EQ

//------------------------------------------------------------------------------
// Image transforms.
@@ -848,7 +877,8 @@ VP8LCombinedShannonEntropyFunc VP8LCombinedShannonEntropy;
VP8LGetEntropyUnrefinedFunc VP8LGetEntropyUnrefined;
VP8LGetCombinedEntropyUnrefinedFunc VP8LGetCombinedEntropyUnrefined;

-VP8LHistogramAddFunc VP8LHistogramAdd;
+VP8LAddVectorFunc VP8LAddVector;
+VP8LAddVectorEqFunc VP8LAddVectorEq;

VP8LVectorMismatchFunc VP8LVectorMismatch;
VP8LBundleColorMapFunc VP8LBundleColorMap;
@@ -885,7 +915,8 @@ WEBP_DSP_INIT_FUNC(VP8LEncDspInit) {
VP8LGetEntropyUnrefined = GetEntropyUnrefined_C;
VP8LGetCombinedEntropyUnrefined = GetCombinedEntropyUnrefined_C;

-VP8LHistogramAdd = HistogramAdd_C;
+VP8LAddVector = AddVector_C;
+VP8LAddVectorEq = AddVectorEq_C;

VP8LVectorMismatch = VectorMismatch_C;
VP8LBundleColorMap = VP8LBundleColorMap_C;
@@ -971,7 +1002,8 @@ WEBP_DSP_INIT_FUNC(VP8LEncDspInit) {
assert(VP8LCombinedShannonEntropy != NULL);
assert(VP8LGetEntropyUnrefined != NULL);
assert(VP8LGetCombinedEntropyUnrefined != NULL);
-assert(VP8LHistogramAdd != NULL);
+assert(VP8LAddVector != NULL);
+assert(VP8LAddVectorEq != NULL);
assert(VP8LVectorMismatch != NULL);
assert(VP8LBundleColorMap != NULL);
assert(VP8LPredictorsSub[0] != NULL);
79  3rdparty/libwebp/src/dsp/lossless_enc_mips32.c  (vendored)
@@ -344,65 +344,29 @@ static void GetCombinedEntropyUnrefined_MIPS32(const uint32_t X[],
ASM_END_COMMON_0 \
ASM_END_COMMON_1

-#define ADD_VECTOR(A, B, OUT, SIZE, EXTRA_SIZE) do { \
-const uint32_t* pa = (const uint32_t*)(A); \
-const uint32_t* pb = (const uint32_t*)(B); \
-uint32_t* pout = (uint32_t*)(OUT); \
-const uint32_t* const LoopEnd = pa + (SIZE); \
-assert((SIZE) % 4 == 0); \
-ASM_START \
-ADD_TO_OUT(0, 4, 8, 12, 1, pa, pb, pout) \
-ASM_END_0 \
-if ((EXTRA_SIZE) > 0) { \
-const int last = (EXTRA_SIZE); \
-int i; \
-for (i = 0; i < last; ++i) pout[i] = pa[i] + pb[i]; \
-} \
-} while (0)
-
-#define ADD_VECTOR_EQ(A, OUT, SIZE, EXTRA_SIZE) do { \
-const uint32_t* pa = (const uint32_t*)(A); \
-uint32_t* pout = (uint32_t*)(OUT); \
-const uint32_t* const LoopEnd = pa + (SIZE); \
-assert((SIZE) % 4 == 0); \
-ASM_START \
-ADD_TO_OUT(0, 4, 8, 12, 0, pa, pout, pout) \
-ASM_END_1 \
-if ((EXTRA_SIZE) > 0) { \
-const int last = (EXTRA_SIZE); \
-int i; \
-for (i = 0; i < last; ++i) pout[i] += pa[i]; \
-} \
-} while (0)
-
-static void HistogramAdd_MIPS32(const VP8LHistogram* const a,
-const VP8LHistogram* const b,
-VP8LHistogram* const out) {
+static void AddVector_MIPS32(const uint32_t* pa, const uint32_t* pb,
+uint32_t* pout, int size) {
uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
-const int extra_cache_size = VP8LHistogramNumCodes(a->palette_code_bits_)
-- (NUM_LITERAL_CODES + NUM_LENGTH_CODES);
-assert(a->palette_code_bits_ == b->palette_code_bits_);
-if (b != out) {
-ADD_VECTOR(a->literal_, b->literal_, out->literal_,
-NUM_LITERAL_CODES + NUM_LENGTH_CODES, extra_cache_size);
-ADD_VECTOR(a->distance_, b->distance_, out->distance_,
-NUM_DISTANCE_CODES, 0);
-ADD_VECTOR(a->red_, b->red_, out->red_, NUM_LITERAL_CODES, 0);
-ADD_VECTOR(a->blue_, b->blue_, out->blue_, NUM_LITERAL_CODES, 0);
-ADD_VECTOR(a->alpha_, b->alpha_, out->alpha_, NUM_LITERAL_CODES, 0);
-} else {
-ADD_VECTOR_EQ(a->literal_, out->literal_,
-NUM_LITERAL_CODES + NUM_LENGTH_CODES, extra_cache_size);
-ADD_VECTOR_EQ(a->distance_, out->distance_, NUM_DISTANCE_CODES, 0);
-ADD_VECTOR_EQ(a->red_, out->red_, NUM_LITERAL_CODES, 0);
-ADD_VECTOR_EQ(a->blue_, out->blue_, NUM_LITERAL_CODES, 0);
-ADD_VECTOR_EQ(a->alpha_, out->alpha_, NUM_LITERAL_CODES, 0);
-}
+const uint32_t end = ((size) / 4) * 4;
+const uint32_t* const LoopEnd = pa + end;
+int i;
+ASM_START
+ADD_TO_OUT(0, 4, 8, 12, 1, pa, pb, pout)
+ASM_END_0
+for (i = end; i < size; ++i) pout[i] = pa[i] + pb[i];
+}
+
+static void AddVectorEq_MIPS32(const uint32_t* pa, uint32_t* pout, int size) {
+uint32_t temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+const uint32_t end = ((size) / 4) * 4;
+const uint32_t* const LoopEnd = pa + end;
+int i;
+ASM_START
+ADD_TO_OUT(0, 4, 8, 12, 0, pa, pout, pout)
+ASM_END_1
+for (i = end; i < size; ++i) pout[i] += pa[i];
}

-#undef ADD_VECTOR_EQ
-#undef ADD_VECTOR
#undef ASM_END_1
#undef ASM_END_0
#undef ASM_END_COMMON_1
@@ -422,7 +386,8 @@ WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitMIPS32(void) {
VP8LExtraCostCombined = ExtraCostCombined_MIPS32;
VP8LGetEntropyUnrefined = GetEntropyUnrefined_MIPS32;
VP8LGetCombinedEntropyUnrefined = GetCombinedEntropyUnrefined_MIPS32;
-VP8LHistogramAdd = HistogramAdd_MIPS32;
+VP8LAddVector = AddVector_MIPS32;
+VP8LAddVectorEq = AddVectorEq_MIPS32;
}

#else // !WEBP_USE_MIPS32
44  3rdparty/libwebp/src/dsp/lossless_enc_sse2.c  (vendored)
@@ -170,12 +170,13 @@ static void CollectColorRedTransforms_SSE2(const uint32_t* argb, int stride,

//------------------------------------------------------------------------------

+// Note we are adding uint32_t's as *signed* int32's (using _mm_add_epi32). But
+// that's ok since the histogram values are less than 1<<28 (max picture size).
#define LINE_SIZE 16 // 8 or 16
static void AddVector_SSE2(const uint32_t* a, const uint32_t* b, uint32_t* out,
int size) {
int i;
-assert(size % LINE_SIZE == 0);
-for (i = 0; i < size; i += LINE_SIZE) {
+for (i = 0; i + LINE_SIZE <= size; i += LINE_SIZE) {
const __m128i a0 = _mm_loadu_si128((const __m128i*)&a[i + 0]);
const __m128i a1 = _mm_loadu_si128((const __m128i*)&a[i + 4]);
#if (LINE_SIZE == 16)
@@ -195,12 +196,14 @@ static void AddVector_SSE2(const uint32_t* a, const uint32_t* b, uint32_t* out,
_mm_storeu_si128((__m128i*)&out[i + 12], _mm_add_epi32(a3, b3));
#endif
}
+for (; i < size; ++i) {
+out[i] = a[i] + b[i];
+}
}

static void AddVectorEq_SSE2(const uint32_t* a, uint32_t* out, int size) {
int i;
-assert(size % LINE_SIZE == 0);
-for (i = 0; i < size; i += LINE_SIZE) {
+for (i = 0; i + LINE_SIZE <= size; i += LINE_SIZE) {
const __m128i a0 = _mm_loadu_si128((const __m128i*)&a[i + 0]);
const __m128i a1 = _mm_loadu_si128((const __m128i*)&a[i + 4]);
#if (LINE_SIZE == 16)
@@ -220,36 +223,12 @@ static void AddVectorEq_SSE2(const uint32_t* a, uint32_t* out, int size) {
_mm_storeu_si128((__m128i*)&out[i + 12], _mm_add_epi32(a3, b3));
#endif
}
+for (; i < size; ++i) {
+out[i] += a[i];
+}
}
#undef LINE_SIZE

-// Note we are adding uint32_t's as *signed* int32's (using _mm_add_epi32). But
-// that's ok since the histogram values are less than 1<<28 (max picture size).
-static void HistogramAdd_SSE2(const VP8LHistogram* const a,
-const VP8LHistogram* const b,
-VP8LHistogram* const out) {
-int i;
-const int literal_size = VP8LHistogramNumCodes(a->palette_code_bits_);
-assert(a->palette_code_bits_ == b->palette_code_bits_);
-if (b != out) {
-AddVector_SSE2(a->literal_, b->literal_, out->literal_, NUM_LITERAL_CODES);
-AddVector_SSE2(a->red_, b->red_, out->red_, NUM_LITERAL_CODES);
-AddVector_SSE2(a->blue_, b->blue_, out->blue_, NUM_LITERAL_CODES);
-AddVector_SSE2(a->alpha_, b->alpha_, out->alpha_, NUM_LITERAL_CODES);
-} else {
-AddVectorEq_SSE2(a->literal_, out->literal_, NUM_LITERAL_CODES);
-AddVectorEq_SSE2(a->red_, out->red_, NUM_LITERAL_CODES);
-AddVectorEq_SSE2(a->blue_, out->blue_, NUM_LITERAL_CODES);
-AddVectorEq_SSE2(a->alpha_, out->alpha_, NUM_LITERAL_CODES);
-}
-for (i = NUM_LITERAL_CODES; i < literal_size; ++i) {
-out->literal_[i] = a->literal_[i] + b->literal_[i];
-}
-for (i = 0; i < NUM_DISTANCE_CODES; ++i) {
-out->distance_[i] = a->distance_[i] + b->distance_[i];
-}
-}
-
//------------------------------------------------------------------------------
// Entropy

@@ -675,7 +654,8 @@ WEBP_TSAN_IGNORE_FUNCTION void VP8LEncDspInitSSE2(void) {
VP8LTransformColor = TransformColor_SSE2;
VP8LCollectColorBlueTransforms = CollectColorBlueTransforms_SSE2;
VP8LCollectColorRedTransforms = CollectColorRedTransforms_SSE2;
-VP8LHistogramAdd = HistogramAdd_SSE2;
+VP8LAddVector = AddVector_SSE2;
+VP8LAddVectorEq = AddVectorEq_SSE2;
VP8LCombinedShannonEntropy = CombinedShannonEntropy_SSE2;
VP8LVectorMismatch = VectorMismatch_SSE2;
VP8LBundleColorMap = BundleColorMap_SSE2;
2  3rdparty/libwebp/src/dsp/msa_macro.h  (vendored)
@@ -1389,4 +1389,4 @@ static WEBP_INLINE uint32_t func_hadd_uh_u32(v8u16 in) {
} while (0)
#define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)

-#endif /* WEBP_DSP_MSA_MACRO_H_ */
+#endif // WEBP_DSP_MSA_MACRO_H_
70  3rdparty/libwebp/src/dsp/quant.h  (vendored, new file)
@@ -0,0 +1,70 @@
+// Copyright 2018 Google Inc. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the COPYING file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+// -----------------------------------------------------------------------------
+
+#ifndef WEBP_DSP_QUANT_H_
+#define WEBP_DSP_QUANT_H_
+
+#include "src/dsp/dsp.h"
+#include "src/webp/types.h"
+
+#if defined(WEBP_USE_NEON) && !defined(WEBP_ANDROID_NEON) && \
+!defined(WEBP_HAVE_NEON_RTCD)
+#include <arm_neon.h>
+
+#define IsFlat IsFlat_NEON
+
+static uint32x2_t horizontal_add_uint32x4(const uint32x4_t a) {
+const uint64x2_t b = vpaddlq_u32(a);
+return vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+vreinterpret_u32_u64(vget_high_u64(b)));
+}
+
+static WEBP_INLINE int IsFlat(const int16_t* levels, int num_blocks,
+int thresh) {
+const int16x8_t tst_ones = vdupq_n_s16(-1);
+uint32x4_t sum = vdupq_n_u32(0);
+
+for (int i = 0; i < num_blocks; ++i) {
+// Set DC to zero.
+const int16x8_t a_0 = vsetq_lane_s16(0, vld1q_s16(levels), 0);
+const int16x8_t a_1 = vld1q_s16(levels + 8);
+
+const uint16x8_t b_0 = vshrq_n_u16(vtstq_s16(a_0, tst_ones), 15);
+const uint16x8_t b_1 = vshrq_n_u16(vtstq_s16(a_1, tst_ones), 15);
+
+sum = vpadalq_u16(sum, b_0);
+sum = vpadalq_u16(sum, b_1);
+
+levels += 16;
+}
+return thresh >= (int32_t)vget_lane_u32(horizontal_add_uint32x4(sum), 0);
+}
+
+#else
+
+#define IsFlat IsFlat_C
+
+static WEBP_INLINE int IsFlat(const int16_t* levels, int num_blocks,
+int thresh) {
+int score = 0;
+while (num_blocks-- > 0) { // TODO(skal): refine positional scoring?
+int i;
+for (i = 1; i < 16; ++i) { // omit DC, we're only interested in AC
+score += (levels[i] != 0);
+if (score > thresh) return 0;
+}
+levels += 16;
+}
+return 1;
+}
+
+#endif // defined(WEBP_USE_NEON) && !defined(WEBP_ANDROID_NEON) &&
+// !defined(WEBP_HAVE_NEON_RTCD)
+
+#endif // WEBP_DSP_QUANT_H_
4  3rdparty/libwebp/src/dsp/rescaler.c  (vendored)
@@ -21,6 +21,7 @@

#define ROUNDER (WEBP_RESCALER_ONE >> 1)
#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)

//------------------------------------------------------------------------------
// Row import
@@ -138,7 +139,7 @@ void WebPRescalerExportRowShrink_C(WebPRescaler* const wrk) {
if (yscale) {
for (x_out = 0; x_out < x_out_max; ++x_out) {
const uint32_t frac = (uint32_t)MULT_FIX(frow[x_out], yscale);
-const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale);
+const int v = (int)MULT_FIX_FLOOR(irow[x_out] - frac, wrk->fxy_scale);
assert(v >= 0 && v <= 255);
dst[x_out] = v;
irow[x_out] = frac; // new fractional start
@@ -153,6 +154,7 @@ void WebPRescalerExportRowShrink_C(WebPRescaler* const wrk) {
}
}

+#undef MULT_FIX_FLOOR
#undef MULT_FIX
#undef ROUNDER

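A minimal sketch of what the new MULT_FIX_FLOOR macro changes (illustrative only: WEBP_RESCALER_RFIX is assumed to be 32 here and the input values are made up). The rounding variant can land one step higher than the flooring variant, which the export-row code now avoids, presumably so v cannot be rounded past 255 right before the assert above.

#include <stdint.h>
#include <stdio.h>

#define RFIX 32                                /* assumed WEBP_RESCALER_RFIX */
#define ROUNDER (1ull << (RFIX - 1))
#define MULT_FIX(x, y)       (((uint64_t)(x) * (y) + ROUNDER) >> RFIX)  /* round */
#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> RFIX)            /* floor */

int main(void) {
  const uint32_t acc = 0xffffff80u;    /* hypothetical accumulator value */
  const uint32_t scale = 0x01000000u;  /* hypothetical fxy_scale (~1/256) */
  /* The product sits exactly on a .5 boundary: rounding bumps it up by one. */
  printf("MULT_FIX:       %llu\n", (unsigned long long)MULT_FIX(acc, scale));       /* 16777216 */
  printf("MULT_FIX_FLOOR: %llu\n", (unsigned long long)MULT_FIX_FLOOR(acc, scale)); /* 16777215 */
  return 0;
}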
4  3rdparty/libwebp/src/dsp/rescaler_mips32.c  (vendored)
@@ -209,6 +209,7 @@ static void ExportRowExpand_MIPS32(WebPRescaler* const wrk) {
}
}

+#if 0 // disabled for now. TODO(skal): make match the C-code
static void ExportRowShrink_MIPS32(WebPRescaler* const wrk) {
const int x_out_max = wrk->dst_width * wrk->num_channels;
uint8_t* dst = wrk->dst;
@@ -273,6 +274,7 @@ static void ExportRowShrink_MIPS32(WebPRescaler* const wrk) {
);
}
}
+#endif // 0

//------------------------------------------------------------------------------
// Entry point
@@ -283,7 +285,7 @@ WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitMIPS32(void) {
WebPRescalerImportRowExpand = ImportRowExpand_MIPS32;
WebPRescalerImportRowShrink = ImportRowShrink_MIPS32;
WebPRescalerExportRowExpand = ExportRowExpand_MIPS32;
-WebPRescalerExportRowShrink = ExportRowShrink_MIPS32;
+// WebPRescalerExportRowShrink = ExportRowShrink_MIPS32;
}

#else // !WEBP_USE_MIPS32
10
3rdparty/libwebp/src/dsp/rescaler_mips_dsp_r2.c
vendored
@@ -20,10 +20,12 @@
 
 #define ROUNDER (WEBP_RESCALER_ONE >> 1)
 #define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
 
 //------------------------------------------------------------------------------
 // Row export
 
+#if 0  // disabled for now. TODO(skal): make match the C-code
 static void ExportRowShrink_MIPSdspR2(WebPRescaler* const wrk) {
   int i;
   const int x_out_max = wrk->dst_width * wrk->num_channels;
@@ -106,7 +108,7 @@ static void ExportRowShrink_MIPSdspR2(WebPRescaler* const wrk) {
   }
   for (i = 0; i < (x_out_max & 0x3); ++i) {
     const uint32_t frac = (uint32_t)MULT_FIX(*frow++, yscale);
-    const int v = (int)MULT_FIX(*irow - frac, wrk->fxy_scale);
+    const int v = (int)MULT_FIX_FLOOR(*irow - frac, wrk->fxy_scale);
     assert(v >= 0 && v <= 255);
     *dst++ = v;
     *irow++ = frac;   // new fractional start
@@ -154,13 +156,14 @@ static void ExportRowShrink_MIPSdspR2(WebPRescaler* const wrk) {
     );
   }
   for (i = 0; i < (x_out_max & 0x3); ++i) {
-    const int v = (int)MULT_FIX(*irow, wrk->fxy_scale);
+    const int v = (int)MULT_FIX_FLOOR(*irow, wrk->fxy_scale);
     assert(v >= 0 && v <= 255);
     *dst++ = v;
     *irow++ = 0;
   }
  }
 }
+#endif  // 0
 
 static void ExportRowExpand_MIPSdspR2(WebPRescaler* const wrk) {
   int i;
@@ -294,6 +297,7 @@ static void ExportRowExpand_MIPSdspR2(WebPRescaler* const wrk) {
   }
 }
 
+#undef MULT_FIX_FLOOR
 #undef MULT_FIX
 #undef ROUNDER
 
@@ -304,7 +308,7 @@ extern void WebPRescalerDspInitMIPSdspR2(void);
 
 WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitMIPSdspR2(void) {
   WebPRescalerExportRowExpand = ExportRowExpand_MIPSdspR2;
-  WebPRescalerExportRowShrink = ExportRowShrink_MIPSdspR2;
+  // WebPRescalerExportRowShrink = ExportRowShrink_MIPSdspR2;
 }
 
 #else  // !WEBP_USE_MIPS_DSP_R2
7
3rdparty/libwebp/src/dsp/rescaler_msa.c
vendored
@@ -22,6 +22,7 @@
 
 #define ROUNDER (WEBP_RESCALER_ONE >> 1)
 #define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
 
 #define CALC_MULT_FIX_16(in0, in1, in2, in3, scale, shift, dst) do {  \
     v4u32 tmp0, tmp1, tmp2, tmp3;                                     \
@@ -262,6 +263,7 @@ static void RescalerExportRowExpand_MIPSdspR2(WebPRescaler* const wrk) {
   }
 }
 
+#if 0  // disabled for now. TODO(skal): make match the C-code
 static WEBP_INLINE void ExportRowShrink_0(const uint32_t* frow, uint32_t* irow,
                                           uint8_t* dst, int length,
                                           const uint32_t yscale,
@@ -341,7 +343,7 @@ static WEBP_INLINE void ExportRowShrink_0(const uint32_t* frow, uint32_t* irow,
   }
   for (x_out = 0; x_out < length; ++x_out) {
     const uint32_t frac = (uint32_t)MULT_FIX(frow[x_out], yscale);
-    const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale);
+    const int v = (int)MULT_FIX_FLOOR(irow[x_out] - frac, wrk->fxy_scale);
     assert(v >= 0 && v <= 255);
     dst[x_out] = v;
     irow[x_out] = frac;
@@ -426,6 +428,7 @@ static void RescalerExportRowShrink_MIPSdspR2(WebPRescaler* const wrk) {
     ExportRowShrink_1(irow, dst, x_out_max, wrk);
   }
 }
+#endif  // 0
 
 //------------------------------------------------------------------------------
 // Entry point
@@ -434,7 +437,7 @@ extern void WebPRescalerDspInitMSA(void);
 
 WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitMSA(void) {
   WebPRescalerExportRowExpand = RescalerExportRowExpand_MIPSdspR2;
-  WebPRescalerExportRowShrink = RescalerExportRowShrink_MIPSdspR2;
+  // WebPRescalerExportRowShrink = RescalerExportRowShrink_MIPSdspR2;
 }
 
 #else  // !WEBP_USE_MSA
18
3rdparty/libwebp/src/dsp/rescaler_neon.c
vendored
@@ -22,6 +22,7 @@
 
 #define ROUNDER (WEBP_RESCALER_ONE >> 1)
 #define MULT_FIX_C(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR_C(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
 
 #define LOAD_32x4(SRC, DST) const uint32x4_t DST = vld1q_u32((SRC))
 #define LOAD_32x8(SRC, DST0, DST1) \
@@ -35,8 +36,11 @@
 
 #if (WEBP_RESCALER_RFIX == 32)
 #define MAKE_HALF_CST(C) vdupq_n_s32((int32_t)((C) >> 1))
-#define MULT_FIX(A, B) /* note: B is actualy scale>>1. See MAKE_HALF_CST */ \
+// note: B is actualy scale>>1. See MAKE_HALF_CST
+#define MULT_FIX(A, B) \
   vreinterpretq_u32_s32(vqrdmulhq_s32(vreinterpretq_s32_u32((A)), (B)))
+#define MULT_FIX_FLOOR(A, B) \
+  vreinterpretq_u32_s32(vqdmulhq_s32(vreinterpretq_s32_u32((A)), (B)))
 #else
 #error "MULT_FIX/WEBP_RESCALER_RFIX need some more work"
 #endif
@@ -135,8 +139,8 @@ static void RescalerExportRowShrink_NEON(WebPRescaler* const wrk) {
     const uint32x4_t A1 = MULT_FIX(in1, yscale_half);
     const uint32x4_t B0 = vqsubq_u32(in2, A0);
     const uint32x4_t B1 = vqsubq_u32(in3, A1);
-    const uint32x4_t C0 = MULT_FIX(B0, fxy_scale_half);
-    const uint32x4_t C1 = MULT_FIX(B1, fxy_scale_half);
+    const uint32x4_t C0 = MULT_FIX_FLOOR(B0, fxy_scale_half);
+    const uint32x4_t C1 = MULT_FIX_FLOOR(B1, fxy_scale_half);
     const uint16x4_t D0 = vmovn_u32(C0);
     const uint16x4_t D1 = vmovn_u32(C1);
     const uint8x8_t E = vmovn_u16(vcombine_u16(D0, D1));
@@ -145,7 +149,7 @@ static void RescalerExportRowShrink_NEON(WebPRescaler* const wrk) {
   }
   for (; x_out < x_out_max; ++x_out) {
     const uint32_t frac = (uint32_t)MULT_FIX_C(frow[x_out], yscale);
-    const int v = (int)MULT_FIX_C(irow[x_out] - frac, wrk->fxy_scale);
+    const int v = (int)MULT_FIX_FLOOR_C(irow[x_out] - frac, fxy_scale);
     assert(v >= 0 && v <= 255);
     dst[x_out] = v;
     irow[x_out] = frac;   // new fractional start
@@ -170,6 +174,12 @@ static void RescalerExportRowShrink_NEON(WebPRescaler* const wrk) {
   }
 }
 
+#undef MULT_FIX_FLOOR_C
+#undef MULT_FIX_C
+#undef MULT_FIX_FLOOR
+#undef MULT_FIX
+#undef ROUNDER
+
 //------------------------------------------------------------------------------
 
 extern void WebPRescalerDspInitNEON(void);
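On the NEON side the same floor behaviour is obtained by swapping the rounding doubling multiply-high intrinsic (vqrdmulhq_s32) for the non-rounding one (vqdmulhq_s32); since MAKE_HALF_CST pre-halves the scale, the doubling restores the full product. A rough scalar model of the two intrinsics, ignoring the saturation they also perform (this sketch is not part of the diff):

#include <stdint.h>

// Both return the high 32 bits of 2*a*b; the 'r' variant adds a rounding bias
// first. With b = scale/2 these correspond to MULT_FIX_FLOOR and MULT_FIX.
static int32_t qdmulh_model(int32_t a, int32_t b) {
  return (int32_t)(((int64_t)a * b * 2) >> 32);
}
static int32_t qrdmulh_model(int32_t a, int32_t b) {
  return (int32_t)((((int64_t)a * b * 2) + (1ll << 31)) >> 32);
}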
35
3rdparty/libwebp/src/dsp/rescaler_sse2.c
vendored
@@ -25,6 +25,7 @@
 
 #define ROUNDER (WEBP_RESCALER_ONE >> 1)
 #define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
+#define MULT_FIX_FLOOR(x, y) (((uint64_t)(x) * (y)) >> WEBP_RESCALER_RFIX)
 
 // input: 8 bytes ABCDEFGH -> output: A0E0B0F0C0G0D0H0
 static void LoadTwoPixels_SSE2(const uint8_t* const src, __m128i* out) {
@@ -224,6 +225,35 @@ static WEBP_INLINE void ProcessRow_SSE2(const __m128i* const A0,
   _mm_storel_epi64((__m128i*)dst, G);
 }
 
+static WEBP_INLINE void ProcessRow_Floor_SSE2(const __m128i* const A0,
+                                              const __m128i* const A1,
+                                              const __m128i* const A2,
+                                              const __m128i* const A3,
+                                              const __m128i* const mult,
+                                              uint8_t* const dst) {
+  const __m128i mask = _mm_set_epi32(0xffffffffu, 0, 0xffffffffu, 0);
+  const __m128i B0 = _mm_mul_epu32(*A0, *mult);
+  const __m128i B1 = _mm_mul_epu32(*A1, *mult);
+  const __m128i B2 = _mm_mul_epu32(*A2, *mult);
+  const __m128i B3 = _mm_mul_epu32(*A3, *mult);
+  const __m128i D0 = _mm_srli_epi64(B0, WEBP_RESCALER_RFIX);
+  const __m128i D1 = _mm_srli_epi64(B1, WEBP_RESCALER_RFIX);
+#if (WEBP_RESCALER_RFIX < 32)
+  const __m128i D2 =
+      _mm_and_si128(_mm_slli_epi64(B2, 32 - WEBP_RESCALER_RFIX), mask);
+  const __m128i D3 =
+      _mm_and_si128(_mm_slli_epi64(B3, 32 - WEBP_RESCALER_RFIX), mask);
+#else
+  const __m128i D2 = _mm_and_si128(B2, mask);
+  const __m128i D3 = _mm_and_si128(B3, mask);
+#endif
+  const __m128i E0 = _mm_or_si128(D0, D2);
+  const __m128i E1 = _mm_or_si128(D1, D3);
+  const __m128i F = _mm_packs_epi32(E0, E1);
+  const __m128i G = _mm_packus_epi16(F, F);
+  _mm_storel_epi64((__m128i*)dst, G);
+}
+
 static void RescalerExportRowExpand_SSE2(WebPRescaler* const wrk) {
   int x_out;
   uint8_t* const dst = wrk->dst;
@@ -322,12 +352,12 @@ static void RescalerExportRowShrink_SSE2(WebPRescaler* const wrk) {
       const __m128i G1 = _mm_or_si128(D1, F3);
       _mm_storeu_si128((__m128i*)(irow + x_out + 0), G0);
       _mm_storeu_si128((__m128i*)(irow + x_out + 4), G1);
-      ProcessRow_SSE2(&E0, &E1, &E2, &E3, &mult_xy, dst + x_out);
+      ProcessRow_Floor_SSE2(&E0, &E1, &E2, &E3, &mult_xy, dst + x_out);
     }
   }
   for (; x_out < x_out_max; ++x_out) {
     const uint32_t frac = (int)MULT_FIX(frow[x_out], yscale);
-    const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale);
+    const int v = (int)MULT_FIX_FLOOR(irow[x_out] - frac, wrk->fxy_scale);
     assert(v >= 0 && v <= 255);
     dst[x_out] = v;
     irow[x_out] = frac;   // new fractional start
@@ -352,6 +382,7 @@ static void RescalerExportRowShrink_SSE2(WebPRescaler* const wrk) {
   }
 }
 
+#undef MULT_FIX_FLOOR
 #undef MULT_FIX
 #undef ROUNDER
 
2
3rdparty/libwebp/src/dsp/yuv.h
vendored
@@ -207,4 +207,4 @@ static WEBP_INLINE int VP8RGBToV(int r, int g, int b, int rounding) {
 }    // extern "C"
 #endif
 
-#endif  /* WEBP_DSP_YUV_H_ */
+#endif  // WEBP_DSP_YUV_H_
2
3rdparty/libwebp/src/enc/analysis_enc.c
vendored
@@ -458,7 +458,7 @@ static void MergeJobs(const SegmentJob* const src, SegmentJob* const dst) {
   dst->uv_alpha += src->uv_alpha;
 }
 
-// initialize the job struct with some TODOs
+// initialize the job struct with some tasks to perform
 static void InitSegmentJob(VP8Encoder* const enc, SegmentJob* const job,
                            int start_row, int end_row) {
   WebPGetWorkerInterface()->Init(&job->worker);

@@ -67,7 +67,7 @@ static int CostModelBuild(CostModel* const m, int xsize, int cache_bits,
 
   // The following code is similar to VP8LHistogramCreate but converts the
   // distance to plane code.
-  VP8LHistogramInit(histo, cache_bits);
+  VP8LHistogramInit(histo, cache_bits, /*init_arrays=*/ 1);
   while (VP8LRefsCursorOk(&c)) {
     VP8LHistogramAddSinglePixOrCopy(histo, c.cur_pos, VP8LDistanceToPlaneCode,
                                     xsize);
@@ -715,6 +715,7 @@ static int CalculateBestCacheSize(const uint32_t* argb, int quality,
   for (i = 0; i <= cache_bits_max; ++i) {
     histos[i] = VP8LAllocateHistogram(i);
     if (histos[i] == NULL) goto Error;
+    VP8LHistogramInit(histos[i], i, /*init_arrays=*/ 1);
     if (i == 0) continue;
     cc_init[i] = VP8LColorCacheInit(&hashers[i], i);
     if (!cc_init[i]) goto Error;
2
3rdparty/libwebp/src/enc/cost_enc.h
vendored
@@ -79,4 +79,4 @@ extern const uint16_t VP8FixedCostsI4[NUM_BMODES][NUM_BMODES][NUM_BMODES];
 }    // extern "C"
 #endif
 
-#endif  /* WEBP_ENC_COST_ENC_H_ */
+#endif  // WEBP_ENC_COST_ENC_H_
542
3rdparty/libwebp/src/enc/histogram_enc.c
vendored
@@ -51,10 +51,12 @@ static void HistogramCopy(const VP8LHistogram* const src,
                           VP8LHistogram* const dst) {
   uint32_t* const dst_literal = dst->literal_;
   const int dst_cache_bits = dst->palette_code_bits_;
+  const int literal_size = VP8LHistogramNumCodes(dst_cache_bits);
   const int histo_size = VP8LGetHistogramSize(dst_cache_bits);
   assert(src->palette_code_bits_ == dst_cache_bits);
   memcpy(dst, src, histo_size);
   dst->literal_ = dst_literal;
+  memcpy(dst->literal_, src->literal_, literal_size * sizeof(*dst->literal_));
 }
 
 int VP8LGetHistogramSize(int cache_bits) {
@@ -91,9 +93,19 @@ void VP8LHistogramCreate(VP8LHistogram* const p,
   VP8LHistogramStoreRefs(refs, p);
 }
 
-void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits) {
+void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits,
+                       int init_arrays) {
   p->palette_code_bits_ = palette_code_bits;
+  if (init_arrays) {
     HistogramClear(p);
+  } else {
+    p->trivial_symbol_ = 0;
+    p->bit_cost_ = 0.;
+    p->literal_cost_ = 0.;
+    p->red_cost_ = 0.;
+    p->blue_cost_ = 0.;
+    memset(p->is_used_, 0, sizeof(p->is_used_));
+  }
 }
 
 VP8LHistogram* VP8LAllocateHistogram(int cache_bits) {
@@ -104,37 +116,84 @@ VP8LHistogram* VP8LAllocateHistogram(int cache_bits) {
   histo = (VP8LHistogram*)memory;
   // literal_ won't necessary be aligned.
   histo->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
-  VP8LHistogramInit(histo, cache_bits);
+  VP8LHistogramInit(histo, cache_bits, /*init_arrays=*/ 0);
   return histo;
 }
 
+// Resets the pointers of the histograms to point to the bit buffer in the set.
+static void HistogramSetResetPointers(VP8LHistogramSet* const set,
+                                      int cache_bits) {
+  int i;
+  const int histo_size = VP8LGetHistogramSize(cache_bits);
+  uint8_t* memory = (uint8_t*) (set->histograms);
+  memory += set->max_size * sizeof(*set->histograms);
+  for (i = 0; i < set->max_size; ++i) {
+    memory = (uint8_t*) WEBP_ALIGN(memory);
+    set->histograms[i] = (VP8LHistogram*) memory;
+    // literal_ won't necessary be aligned.
+    set->histograms[i]->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
+    memory += histo_size;
+  }
+}
+
+// Returns the total size of the VP8LHistogramSet.
+static size_t HistogramSetTotalSize(int size, int cache_bits) {
+  const int histo_size = VP8LGetHistogramSize(cache_bits);
+  return (sizeof(VP8LHistogramSet) + size * (sizeof(VP8LHistogram*) +
+          histo_size + WEBP_ALIGN_CST));
+}
+
 VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits) {
   int i;
   VP8LHistogramSet* set;
-  const int histo_size = VP8LGetHistogramSize(cache_bits);
-  const size_t total_size =
-      sizeof(*set) + size * (sizeof(*set->histograms) +
-      histo_size + WEBP_ALIGN_CST);
+  const size_t total_size = HistogramSetTotalSize(size, cache_bits);
   uint8_t* memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));
   if (memory == NULL) return NULL;
 
   set = (VP8LHistogramSet*)memory;
   memory += sizeof(*set);
   set->histograms = (VP8LHistogram**)memory;
-  memory += size * sizeof(*set->histograms);
   set->max_size = size;
   set->size = size;
+  HistogramSetResetPointers(set, cache_bits);
   for (i = 0; i < size; ++i) {
-    memory = (uint8_t*)WEBP_ALIGN(memory);
-    set->histograms[i] = (VP8LHistogram*)memory;
-    // literal_ won't necessary be aligned.
-    set->histograms[i]->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
-    VP8LHistogramInit(set->histograms[i], cache_bits);
-    memory += histo_size;
+    VP8LHistogramInit(set->histograms[i], cache_bits, /*init_arrays=*/ 0);
   }
   return set;
 }
 
+void VP8LHistogramSetClear(VP8LHistogramSet* const set) {
+  int i;
+  const int cache_bits = set->histograms[0]->palette_code_bits_;
+  const int size = set->max_size;
+  const size_t total_size = HistogramSetTotalSize(size, cache_bits);
+  uint8_t* memory = (uint8_t*)set;
+
+  memset(memory, 0, total_size);
+  memory += sizeof(*set);
+  set->histograms = (VP8LHistogram**)memory;
+  set->max_size = size;
+  set->size = size;
+  HistogramSetResetPointers(set, cache_bits);
+  for (i = 0; i < size; ++i) {
+    set->histograms[i]->palette_code_bits_ = cache_bits;
+  }
+}
+
+// Removes the histogram 'i' from 'set' by setting it to NULL.
+static void HistogramSetRemoveHistogram(VP8LHistogramSet* const set, int i,
+                                        int* const num_used) {
+  assert(set->histograms[i] != NULL);
+  set->histograms[i] = NULL;
+  --*num_used;
+  // If we remove the last valid one, shrink until the next valid one.
+  if (i == set->size - 1) {
+    while (set->size >= 1 && set->histograms[set->size - 1] == NULL) {
+      --set->size;
+    }
+  }
+}
+
 // -----------------------------------------------------------------------------
 
 void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
|
|||||||
// Get the symbol entropy for the distribution 'population'.
|
// Get the symbol entropy for the distribution 'population'.
|
||||||
// Set 'trivial_sym', if there's only one symbol present in the distribution.
|
// Set 'trivial_sym', if there's only one symbol present in the distribution.
|
||||||
static double PopulationCost(const uint32_t* const population, int length,
|
static double PopulationCost(const uint32_t* const population, int length,
|
||||||
uint32_t* const trivial_sym) {
|
uint32_t* const trivial_sym,
|
||||||
|
uint8_t* const is_used) {
|
||||||
VP8LBitEntropy bit_entropy;
|
VP8LBitEntropy bit_entropy;
|
||||||
VP8LStreaks stats;
|
VP8LStreaks stats;
|
||||||
VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats);
|
VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats);
|
||||||
@ -245,6 +305,8 @@ static double PopulationCost(const uint32_t* const population, int length,
|
|||||||
*trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code
|
*trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code
|
||||||
: VP8L_NON_TRIVIAL_SYM;
|
: VP8L_NON_TRIVIAL_SYM;
|
||||||
}
|
}
|
||||||
|
// The histogram is used if there is at least one non-zero streak.
|
||||||
|
*is_used = (stats.streaks[1][0] != 0 || stats.streaks[1][1] != 0);
|
||||||
|
|
||||||
return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
|
return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
|
||||||
}
|
}
|
||||||
@ -253,7 +315,9 @@ static double PopulationCost(const uint32_t* const population, int length,
|
|||||||
// non-zero: both the zero-th one, or both the last one.
|
// non-zero: both the zero-th one, or both the last one.
|
||||||
static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
|
static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
|
||||||
const uint32_t* const Y,
|
const uint32_t* const Y,
|
||||||
int length, int trivial_at_end) {
|
int length, int is_X_used,
|
||||||
|
int is_Y_used,
|
||||||
|
int trivial_at_end) {
|
||||||
VP8LStreaks stats;
|
VP8LStreaks stats;
|
||||||
if (trivial_at_end) {
|
if (trivial_at_end) {
|
||||||
// This configuration is due to palettization that transforms an indexed
|
// This configuration is due to palettization that transforms an indexed
|
||||||
@ -262,28 +326,43 @@ static WEBP_INLINE double GetCombinedEntropy(const uint32_t* const X,
|
|||||||
// Only FinalHuffmanCost needs to be evaluated.
|
// Only FinalHuffmanCost needs to be evaluated.
|
||||||
memset(&stats, 0, sizeof(stats));
|
memset(&stats, 0, sizeof(stats));
|
||||||
// Deal with the non-zero value at index 0 or length-1.
|
// Deal with the non-zero value at index 0 or length-1.
|
||||||
stats.streaks[1][0] += 1;
|
stats.streaks[1][0] = 1;
|
||||||
// Deal with the following/previous zero streak.
|
// Deal with the following/previous zero streak.
|
||||||
stats.counts[0] += 1;
|
stats.counts[0] = 1;
|
||||||
stats.streaks[0][1] += length - 1;
|
stats.streaks[0][1] = length - 1;
|
||||||
return FinalHuffmanCost(&stats);
|
return FinalHuffmanCost(&stats);
|
||||||
} else {
|
} else {
|
||||||
VP8LBitEntropy bit_entropy;
|
VP8LBitEntropy bit_entropy;
|
||||||
|
if (is_X_used) {
|
||||||
|
if (is_Y_used) {
|
||||||
VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats);
|
VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats);
|
||||||
|
} else {
|
||||||
|
VP8LGetEntropyUnrefined(X, length, &bit_entropy, &stats);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (is_Y_used) {
|
||||||
|
VP8LGetEntropyUnrefined(Y, length, &bit_entropy, &stats);
|
||||||
|
} else {
|
||||||
|
memset(&stats, 0, sizeof(stats));
|
||||||
|
stats.counts[0] = 1;
|
||||||
|
stats.streaks[0][length > 3] = length;
|
||||||
|
VP8LBitEntropyInit(&bit_entropy);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
|
return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Estimates the Entropy + Huffman + other block overhead size cost.
|
// Estimates the Entropy + Huffman + other block overhead size cost.
|
||||||
double VP8LHistogramEstimateBits(const VP8LHistogram* const p) {
|
double VP8LHistogramEstimateBits(VP8LHistogram* const p) {
|
||||||
return
|
return
|
||||||
PopulationCost(
|
PopulationCost(p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_),
|
||||||
p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_), NULL)
|
NULL, &p->is_used_[0])
|
||||||
+ PopulationCost(p->red_, NUM_LITERAL_CODES, NULL)
|
+ PopulationCost(p->red_, NUM_LITERAL_CODES, NULL, &p->is_used_[1])
|
||||||
+ PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL)
|
+ PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL, &p->is_used_[2])
|
||||||
+ PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL)
|
+ PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL, &p->is_used_[3])
|
||||||
+ PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL)
|
+ PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL, &p->is_used_[4])
|
||||||
+ VP8LExtraCost(p->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES)
|
+ VP8LExtraCost(p->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES)
|
||||||
+ VP8LExtraCost(p->distance_, NUM_DISTANCE_CODES);
|
+ VP8LExtraCost(p->distance_, NUM_DISTANCE_CODES);
|
||||||
}
|
}
|
||||||
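For orientation, the is_used_[] slots filled by VP8LHistogramEstimateBits above (and reused throughout the rest of this file) map to the five symbol channels as follows:

// is_used_[0] : literal_   (VP8LHistogramNumCodes(palette_code_bits_) codes)
// is_used_[1] : red_       (NUM_LITERAL_CODES)
// is_used_[2] : blue_      (NUM_LITERAL_CODES)
// is_used_[3] : alpha_     (NUM_LITERAL_CODES)
// is_used_[4] : distance_  (NUM_DISTANCE_CODES)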
@@ -299,7 +378,8 @@ static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
   int trivial_at_end = 0;
   assert(a->palette_code_bits_ == b->palette_code_bits_);
   *cost += GetCombinedEntropy(a->literal_, b->literal_,
-                              VP8LHistogramNumCodes(palette_code_bits), 0);
+                              VP8LHistogramNumCodes(palette_code_bits),
+                              a->is_used_[0], b->is_used_[0], 0);
   *cost += VP8LExtraCostCombined(a->literal_ + NUM_LITERAL_CODES,
                                  b->literal_ + NUM_LITERAL_CODES,
                                  NUM_LENGTH_CODES);
@@ -319,19 +399,23 @@ static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
   }
 
   *cost +=
-      GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES, trivial_at_end);
+      GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES, a->is_used_[1],
+                         b->is_used_[1], trivial_at_end);
   if (*cost > cost_threshold) return 0;
 
   *cost +=
-      GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES, trivial_at_end);
+      GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES, a->is_used_[2],
+                         b->is_used_[2], trivial_at_end);
   if (*cost > cost_threshold) return 0;
 
-  *cost += GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES,
-                              trivial_at_end);
+  *cost +=
+      GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES,
+                         a->is_used_[3], b->is_used_[3], trivial_at_end);
   if (*cost > cost_threshold) return 0;
 
   *cost +=
-      GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES, 0);
+      GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES,
+                         a->is_used_[4], b->is_used_[4], 0);
   *cost +=
       VP8LExtraCostCombined(a->distance_, b->distance_, NUM_DISTANCE_CODES);
   if (*cost > cost_threshold) return 0;
@@ -377,7 +461,9 @@ static double HistogramAddEval(const VP8LHistogram* const a,
 static double HistogramAddThresh(const VP8LHistogram* const a,
                                  const VP8LHistogram* const b,
                                  double cost_threshold) {
-  double cost = -a->bit_cost_;
+  double cost;
+  assert(a != NULL && b != NULL);
+  cost = -a->bit_cost_;
   GetCombinedHistogramEntropy(a, b, cost_threshold, &cost);
   return cost;
 }
@@ -419,16 +505,19 @@ static void UpdateDominantCostRange(
 static void UpdateHistogramCost(VP8LHistogram* const h) {
   uint32_t alpha_sym, red_sym, blue_sym;
   const double alpha_cost =
-      PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym);
+      PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym,
+                     &h->is_used_[3]);
   const double distance_cost =
-      PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL) +
+      PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL, &h->is_used_[4]) +
       VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES);
   const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_);
-  h->literal_cost_ = PopulationCost(h->literal_, num_codes, NULL) +
-                     VP8LExtraCost(h->literal_ + NUM_LITERAL_CODES,
-                                   NUM_LENGTH_CODES);
-  h->red_cost_ = PopulationCost(h->red_, NUM_LITERAL_CODES, &red_sym);
-  h->blue_cost_ = PopulationCost(h->blue_, NUM_LITERAL_CODES, &blue_sym);
+  h->literal_cost_ =
+      PopulationCost(h->literal_, num_codes, NULL, &h->is_used_[0]) +
+      VP8LExtraCost(h->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES);
+  h->red_cost_ =
+      PopulationCost(h->red_, NUM_LITERAL_CODES, &red_sym, &h->is_used_[1]);
+  h->blue_cost_ =
+      PopulationCost(h->blue_, NUM_LITERAL_CODES, &blue_sym, &h->is_used_[2]);
   h->bit_cost_ = h->literal_cost_ + h->red_cost_ + h->blue_cost_ +
                  alpha_cost + distance_cost;
   if ((alpha_sym | red_sym | blue_sym) == VP8L_NON_TRIVIAL_SYM) {
@@ -473,6 +562,7 @@ static void HistogramBuild(
   VP8LHistogram** const histograms = image_histo->histograms;
   VP8LRefsCursor c = VP8LRefsCursorInit(backward_refs);
   assert(histo_bits > 0);
+  VP8LHistogramSetClear(image_histo);
   while (VP8LRefsCursorOk(&c)) {
     const PixOrCopy* const v = c.cur_pos;
     const int ix = (y >> histo_bits) * histo_xsize + (x >> histo_bits);
@@ -487,17 +577,37 @@ static void HistogramBuild(
 }
 
 // Copies the histograms and computes its bit_cost.
-static void HistogramCopyAndAnalyze(
-    VP8LHistogramSet* const orig_histo, VP8LHistogramSet* const image_histo) {
-  int i;
-  const int histo_size = orig_histo->size;
+static const uint16_t kInvalidHistogramSymbol = (uint16_t)(-1);
+static void HistogramCopyAndAnalyze(VP8LHistogramSet* const orig_histo,
+                                    VP8LHistogramSet* const image_histo,
+                                    int* const num_used,
+                                    uint16_t* const histogram_symbols) {
+  int i, cluster_id;
+  int num_used_orig = *num_used;
   VP8LHistogram** const orig_histograms = orig_histo->histograms;
   VP8LHistogram** const histograms = image_histo->histograms;
-  for (i = 0; i < histo_size; ++i) {
+  assert(image_histo->max_size == orig_histo->max_size);
+  for (cluster_id = 0, i = 0; i < orig_histo->max_size; ++i) {
     VP8LHistogram* const histo = orig_histograms[i];
     UpdateHistogramCost(histo);
+
+    // Skip the histogram if it is completely empty, which can happen for tiles
+    // with no information (when they are skipped because of LZ77).
+    if (!histo->is_used_[0] && !histo->is_used_[1] && !histo->is_used_[2]
+        && !histo->is_used_[3] && !histo->is_used_[4]) {
+      // The first histogram is always used. If an histogram is empty, we set
+      // its id to be the same as the previous one: this will improve
+      // compressibility for later LZ77.
+      assert(i > 0);
+      HistogramSetRemoveHistogram(image_histo, i, num_used);
+      HistogramSetRemoveHistogram(orig_histo, i, &num_used_orig);
+      histogram_symbols[i] = kInvalidHistogramSymbol;
+    } else {
       // Copy histograms from orig_histo[] to image_histo[].
       HistogramCopy(histo, histograms[i]);
+      histogram_symbols[i] = cluster_id++;
+      assert(cluster_id <= image_histo->max_size);
+    }
   }
 }
@@ -514,29 +624,33 @@ static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo,
 
   // Analyze the dominant (literal, red and blue) entropy costs.
   for (i = 0; i < histo_size; ++i) {
+    if (histograms[i] == NULL) continue;
     UpdateDominantCostRange(histograms[i], &cost_range);
   }
 
   // bin-hash histograms on three of the dominant (literal, red and blue)
   // symbol costs and store the resulting bin_id for each histogram.
   for (i = 0; i < histo_size; ++i) {
+    // bin_map[i] is not set to a special value as its use will later be guarded
+    // by another (histograms[i] == NULL).
+    if (histograms[i] == NULL) continue;
     bin_map[i] = GetHistoBinIndex(histograms[i], &cost_range, low_effort);
   }
 }
 
-// Compact image_histo[] by merging some histograms with same bin_id together if
-// it's advantageous.
+// Merges some histograms with same bin_id together if it's advantageous.
+// Sets the remaining histograms to NULL.
 static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
+                                       int *num_used,
+                                       const uint16_t* const clusters,
+                                       uint16_t* const cluster_mappings,
                                        VP8LHistogram* cur_combo,
                                        const uint16_t* const bin_map,
-                                       int bin_map_size, int num_bins,
+                                       int num_bins,
                                        double combine_cost_factor,
                                        int low_effort) {
   VP8LHistogram** const histograms = image_histo->histograms;
   int idx;
-  // Work in-place: processed histograms are put at the beginning of
-  // image_histo[]. At the end, we just have to truncate the array.
-  int size = 0;
   struct {
     int16_t first;    // position of the histogram that accumulates all
                       // histograms with the same bin_id
@@ -549,16 +663,19 @@ static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
     bin_info[idx].num_combine_failures = 0;
   }
 
-  for (idx = 0; idx < bin_map_size; ++idx) {
-    const int bin_id = bin_map[idx];
-    const int first = bin_info[bin_id].first;
-    assert(size <= idx);
+  // By default, a cluster matches itself.
+  for (idx = 0; idx < *num_used; ++idx) cluster_mappings[idx] = idx;
+  for (idx = 0; idx < image_histo->size; ++idx) {
+    int bin_id, first;
+    if (histograms[idx] == NULL) continue;
+    bin_id = bin_map[idx];
+    first = bin_info[bin_id].first;
     if (first == -1) {
-      // just move histogram #idx to its final position
-      histograms[size] = histograms[idx];
-      bin_info[bin_id].first = size++;
+      bin_info[bin_id].first = idx;
     } else if (low_effort) {
       HistogramAdd(histograms[idx], histograms[first], histograms[first]);
+      HistogramSetRemoveHistogram(image_histo, idx, num_used);
+      cluster_mappings[clusters[idx]] = clusters[first];
    } else {
      // try to merge #idx into #first (both share the same bin_id)
      const double bit_cost = histograms[idx]->bit_cost_;
@@ -581,19 +698,18 @@ static void HistogramCombineEntropyBin(VP8LHistogramSet* const image_histo,
           bin_info[bin_id].num_combine_failures >= max_combine_failures) {
         // move the (better) merged histogram to its final slot
         HistogramSwap(&cur_combo, &histograms[first]);
+        HistogramSetRemoveHistogram(image_histo, idx, num_used);
+        cluster_mappings[clusters[idx]] = clusters[first];
       } else {
-        histograms[size++] = histograms[idx];
         ++bin_info[bin_id].num_combine_failures;
       }
-    } else {
-      histograms[size++] = histograms[idx];
     }
   }
-  image_histo->size = size;
   if (low_effort) {
     // for low_effort case, update the final cost when everything is merged
-    for (idx = 0; idx < size; ++idx) {
+    for (idx = 0; idx < image_histo->size; ++idx) {
+      if (histograms[idx] == NULL) continue;
       UpdateHistogramCost(histograms[idx]);
     }
   }
@@ -624,16 +740,9 @@ typedef struct {
   int max_size;
 } HistoQueue;
 
-static int HistoQueueInit(HistoQueue* const histo_queue, const int max_index) {
+static int HistoQueueInit(HistoQueue* const histo_queue, const int max_size) {
   histo_queue->size = 0;
-  // max_index^2 for the queue size is safe. If you look at
-  // HistogramCombineGreedy, and imagine that UpdateQueueFront always pushes
-  // data to the queue, you insert at most:
-  // - max_index*(max_index-1)/2 (the first two for loops)
-  // - max_index - 1 in the last for loop at the first iteration of the while
-  //   loop, max_index - 2 at the second iteration ... therefore
-  //   max_index*(max_index-1)/2 overall too
-  histo_queue->max_size = max_index * max_index;
+  histo_queue->max_size = max_size;
   // We allocate max_size + 1 because the last element at index "size" is
   // used as temporary data (and it could be up to max_size).
   histo_queue->queue = (HistogramPair*)WebPSafeMalloc(
@@ -674,6 +783,18 @@ static void HistoQueueUpdateHead(HistoQueue* const histo_queue,
   }
 }
 
+// Update the cost diff and combo of a pair of histograms. This needs to be
+// called when the the histograms have been merged with a third one.
+static void HistoQueueUpdatePair(const VP8LHistogram* const h1,
+                                 const VP8LHistogram* const h2,
+                                 double threshold,
+                                 HistogramPair* const pair) {
+  const double sum_cost = h1->bit_cost_ + h2->bit_cost_;
+  pair->cost_combo = 0.;
+  GetCombinedHistogramEntropy(h1, h2, sum_cost + threshold, &pair->cost_combo);
+  pair->cost_diff = pair->cost_combo - sum_cost;
+}
+
 // Create a pair from indices "idx1" and "idx2" provided its cost
 // is inferior to "threshold", a negative entropy.
 // It returns the cost of the pair, or 0. if it superior to threshold.
@@ -683,8 +804,9 @@ static double HistoQueuePush(HistoQueue* const histo_queue,
   const VP8LHistogram* h1;
   const VP8LHistogram* h2;
   HistogramPair pair;
-  double sum_cost;
 
+  // Stop here if the queue is full.
+  if (histo_queue->size == histo_queue->max_size) return 0.;
   assert(threshold <= 0.);
   if (idx1 > idx2) {
     const int tmp = idx2;
@@ -695,16 +817,12 @@ static double HistoQueuePush(HistoQueue* const histo_queue,
   pair.idx2 = idx2;
   h1 = histograms[idx1];
   h2 = histograms[idx2];
-  sum_cost = h1->bit_cost_ + h2->bit_cost_;
-  pair.cost_combo = 0.;
-  GetCombinedHistogramEntropy(h1, h2, sum_cost + threshold, &pair.cost_combo);
-  pair.cost_diff = pair.cost_combo - sum_cost;
+
+  HistoQueueUpdatePair(h1, h2, threshold, &pair);
 
   // Do not even consider the pair if it does not improve the entropy.
   if (pair.cost_diff >= threshold) return 0.;
 
-  // We cannot add more elements than the capacity.
-  assert(histo_queue->size < histo_queue->max_size);
   histo_queue->queue[histo_queue->size++] = pair;
   HistoQueueUpdateHead(histo_queue, &histo_queue->queue[histo_queue->size - 1]);
 
@@ -715,42 +833,43 @@ static double HistoQueuePush(HistoQueue* const histo_queue,
 
 // Combines histograms by continuously choosing the one with the highest cost
 // reduction.
-static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo) {
+static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo,
+                                  int* const num_used) {
   int ok = 0;
-  int image_histo_size = image_histo->size;
+  const int image_histo_size = image_histo->size;
   int i, j;
   VP8LHistogram** const histograms = image_histo->histograms;
-  // Indexes of remaining histograms.
-  int* const clusters =
-      (int*)WebPSafeMalloc(image_histo_size, sizeof(*clusters));
   // Priority queue of histogram pairs.
   HistoQueue histo_queue;
 
-  if (!HistoQueueInit(&histo_queue, image_histo_size) || clusters == NULL) {
+  // image_histo_size^2 for the queue size is safe. If you look at
+  // HistogramCombineGreedy, and imagine that UpdateQueueFront always pushes
+  // data to the queue, you insert at most:
+  // - image_histo_size*(image_histo_size-1)/2 (the first two for loops)
+  // - image_histo_size - 1 in the last for loop at the first iteration of
+  //   the while loop, image_histo_size - 2 at the second iteration ...
+  //   therefore image_histo_size*(image_histo_size-1)/2 overall too
+  if (!HistoQueueInit(&histo_queue, image_histo_size * image_histo_size)) {
     goto End;
   }
 
   for (i = 0; i < image_histo_size; ++i) {
-    // Initialize clusters indexes.
-    clusters[i] = i;
+    if (image_histo->histograms[i] == NULL) continue;
     for (j = i + 1; j < image_histo_size; ++j) {
-      // Initialize positions array.
+      // Initialize queue.
+      if (image_histo->histograms[j] == NULL) continue;
       HistoQueuePush(&histo_queue, histograms, i, j, 0.);
     }
   }
 
-  while (image_histo_size > 1 && histo_queue.size > 0) {
+  while (histo_queue.size > 0) {
     const int idx1 = histo_queue.queue[0].idx1;
     const int idx2 = histo_queue.queue[0].idx2;
     HistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]);
     histograms[idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;
 
     // Remove merged histogram.
-    for (i = 0; i + 1 < image_histo_size; ++i) {
-      if (clusters[i] >= idx2) {
-        clusters[i] = clusters[i + 1];
-      }
-    }
-    --image_histo_size;
+    HistogramSetRemoveHistogram(image_histo, idx2, num_used);
 
     // Remove pairs intersecting the just combined best pair.
     for (i = 0; i < histo_queue.size;) {
@@ -765,24 +884,15 @@ static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo) {
     }
 
     // Push new pairs formed with combined histogram to the queue.
-    for (i = 0; i < image_histo_size; ++i) {
-      if (clusters[i] != idx1) {
-        HistoQueuePush(&histo_queue, histograms, idx1, clusters[i], 0.);
-      }
-    }
-  }
-  // Move remaining histograms to the beginning of the array.
-  for (i = 0; i < image_histo_size; ++i) {
-    if (i != clusters[i]) {  // swap the two histograms
-      HistogramSwap(&histograms[i], &histograms[clusters[i]]);
+    for (i = 0; i < image_histo->size; ++i) {
+      if (i == idx1 || image_histo->histograms[i] == NULL) continue;
+      HistoQueuePush(&histo_queue, image_histo->histograms, idx1, i, 0.);
     }
   }
 
-  image_histo->size = image_histo_size;
   ok = 1;
 
  End:
-  WebPSafeFree(clusters);
   HistoQueueClear(&histo_queue);
   return ok;
 }
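The capacity comment removed from HistoQueueInit reappears at the greedy call site above; as a quick sanity check of the bound with n = image_histo_size = 4: the initial double loop pushes at most n*(n-1)/2 = 6 pairs, and the merge loop pushes at most (n-1) + (n-2) + ... = 6 more, so 12 <= n*n = 16 slots always suffice. Since HistoQueuePush now also refuses to grow past max_size, the stochastic combiner below can reuse the same queue with a small fixed capacity (kHistoQueueSize = 9).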
@@ -790,47 +900,69 @@ static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo) {
 // Perform histogram aggregation using a stochastic approach.
 // 'do_greedy' is set to 1 if a greedy approach needs to be performed
 // afterwards, 0 otherwise.
+static int PairComparison(const void* idx1, const void* idx2) {
+  // To be used with bsearch: <0 when *idx1<*idx2, >0 if >, 0 when ==.
+  return (*(int*) idx1 - *(int*) idx2);
+}
 static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
-                                      int min_cluster_size,
+                                      int* const num_used, int min_cluster_size,
                                       int* const do_greedy) {
-  int iter;
+  int j, iter;
   uint32_t seed = 1;
   int tries_with_no_success = 0;
-  int image_histo_size = image_histo->size;
-  const int outer_iters = image_histo_size;
+  const int outer_iters = *num_used;
   const int num_tries_no_success = outer_iters / 2;
   VP8LHistogram** const histograms = image_histo->histograms;
-  // Priority queue of histogram pairs. Its size of "kCostHeapSizeSqrt"^2
+  // Priority queue of histogram pairs. Its size of 'kHistoQueueSize'
   // impacts the quality of the compression and the speed: the smaller the
   // faster but the worse for the compression.
   HistoQueue histo_queue;
-  const int kHistoQueueSizeSqrt = 3;
+  const int kHistoQueueSize = 9;
   int ok = 0;
+  // mapping from an index in image_histo with no NULL histogram to the full
+  // blown image_histo.
+  int* mappings;
 
-  if (!HistoQueueInit(&histo_queue, kHistoQueueSizeSqrt)) {
+  if (*num_used < min_cluster_size) {
+    *do_greedy = 1;
+    return 1;
+  }
+
+  mappings = (int*) WebPSafeMalloc(*num_used, sizeof(*mappings));
+  if (mappings == NULL || !HistoQueueInit(&histo_queue, kHistoQueueSize)) {
     goto End;
   }
+  // Fill the initial mapping.
+  for (j = 0, iter = 0; iter < image_histo->size; ++iter) {
+    if (histograms[iter] == NULL) continue;
+    mappings[j++] = iter;
+  }
+  assert(j == *num_used);
 
   // Collapse similar histograms in 'image_histo'.
-  ++min_cluster_size;
-  for (iter = 0; iter < outer_iters && image_histo_size >= min_cluster_size &&
+  for (iter = 0;
+       iter < outer_iters && *num_used >= min_cluster_size &&
            ++tries_with_no_success < num_tries_no_success;
        ++iter) {
+    int* mapping_index;
     double best_cost =
         (histo_queue.size == 0) ? 0. : histo_queue.queue[0].cost_diff;
     int best_idx1 = -1, best_idx2 = 1;
-    int j;
-    const uint32_t rand_range = (image_histo_size - 1) * image_histo_size;
-    // image_histo_size / 2 was chosen empirically. Less means faster but worse
+    const uint32_t rand_range = (*num_used - 1) * (*num_used);
+    // (*num_used) / 2 was chosen empirically. Less means faster but worse
     // compression.
-    const int num_tries = image_histo_size / 2;
+    const int num_tries = (*num_used) / 2;
 
-    for (j = 0; j < num_tries; ++j) {
+    // Pick random samples.
+    for (j = 0; *num_used >= 2 && j < num_tries; ++j) {
       double curr_cost;
       // Choose two different histograms at random and try to combine them.
       const uint32_t tmp = MyRand(&seed) % rand_range;
-      const uint32_t idx1 = tmp / (image_histo_size - 1);
-      uint32_t idx2 = tmp % (image_histo_size - 1);
+      uint32_t idx1 = tmp / (*num_used - 1);
+      uint32_t idx2 = tmp % (*num_used - 1);
       if (idx2 >= idx1) ++idx2;
+      idx1 = mappings[idx1];
+      idx2 = mappings[idx2];
 
       // Calculate cost reduction on combination.
       curr_cost =
@@ -843,18 +975,21 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
     }
     if (histo_queue.size == 0) continue;
 
-    // Merge the two best histograms.
+    // Get the best histograms.
     best_idx1 = histo_queue.queue[0].idx1;
     best_idx2 = histo_queue.queue[0].idx2;
     assert(best_idx1 < best_idx2);
-    HistogramAddEval(histograms[best_idx1], histograms[best_idx2],
-                     histograms[best_idx1], 0);
-    // Swap the best_idx2 histogram with the last one (which is now unused).
-    --image_histo_size;
-    if (best_idx2 != image_histo_size) {
-      HistogramSwap(&histograms[image_histo_size], &histograms[best_idx2]);
-    }
-    histograms[image_histo_size] = NULL;
+    // Pop best_idx2 from mappings.
+    mapping_index = (int*) bsearch(&best_idx2, mappings, *num_used,
+                                   sizeof(best_idx2), &PairComparison);
+    assert(mapping_index != NULL);
+    memmove(mapping_index, mapping_index + 1, sizeof(*mapping_index) *
+            ((*num_used) - (mapping_index - mappings) - 1));
+    // Merge the histograms and remove best_idx2 from the queue.
+    HistogramAdd(histograms[best_idx2], histograms[best_idx1],
|
||||||
|
histograms[best_idx1]);
|
||||||
|
histograms[best_idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;
|
||||||
|
HistogramSetRemoveHistogram(image_histo, best_idx2, num_used);
|
||||||
// Parse the queue and update each pair that deals with best_idx1,
|
// Parse the queue and update each pair that deals with best_idx1,
|
||||||
// best_idx2 or image_histo_size.
|
// best_idx2 or image_histo_size.
|
||||||
for (j = 0; j < histo_queue.size;) {
|
for (j = 0; j < histo_queue.size;) {
|
||||||
@ -877,12 +1012,6 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
|
|||||||
p->idx2 = best_idx1;
|
p->idx2 = best_idx1;
|
||||||
do_eval = 1;
|
do_eval = 1;
|
||||||
}
|
}
|
||||||
if (p->idx2 == image_histo_size) {
|
|
||||||
// No need to re-evaluate here as it does not involve a pair
|
|
||||||
// containing best_idx1 or best_idx2.
|
|
||||||
p->idx2 = best_idx2;
|
|
||||||
}
|
|
||||||
assert(p->idx2 < image_histo_size);
|
|
||||||
// Make sure the index order is respected.
|
// Make sure the index order is respected.
|
||||||
if (p->idx1 > p->idx2) {
|
if (p->idx1 > p->idx2) {
|
||||||
const int tmp = p->idx2;
|
const int tmp = p->idx2;
|
||||||
@ -891,8 +1020,7 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
|
|||||||
}
|
}
|
||||||
if (do_eval) {
|
if (do_eval) {
|
||||||
// Re-evaluate the cost of an updated pair.
|
// Re-evaluate the cost of an updated pair.
|
||||||
GetCombinedHistogramEntropy(histograms[p->idx1], histograms[p->idx2], 0,
|
HistoQueueUpdatePair(histograms[p->idx1], histograms[p->idx2], 0., p);
|
||||||
&p->cost_diff);
|
|
||||||
if (p->cost_diff >= 0.) {
|
if (p->cost_diff >= 0.) {
|
||||||
HistoQueuePopPair(&histo_queue, p);
|
HistoQueuePopPair(&histo_queue, p);
|
||||||
continue;
|
continue;
|
||||||
@ -901,15 +1029,14 @@ static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
|
|||||||
HistoQueueUpdateHead(&histo_queue, p);
|
HistoQueueUpdateHead(&histo_queue, p);
|
||||||
++j;
|
++j;
|
||||||
}
|
}
|
||||||
|
|
||||||
tries_with_no_success = 0;
|
tries_with_no_success = 0;
|
||||||
}
|
}
|
||||||
image_histo->size = image_histo_size;
|
*do_greedy = (*num_used <= min_cluster_size);
|
||||||
*do_greedy = (image_histo->size <= min_cluster_size);
|
|
||||||
ok = 1;
|
ok = 1;
|
||||||
|
|
||||||
End:
|
End:
|
||||||
HistoQueueClear(&histo_queue);
|
HistoQueueClear(&histo_queue);
|
||||||
|
WebPSafeFree(mappings);
|
||||||
return ok;
|
return ok;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -917,23 +1044,29 @@ End:
|
|||||||
// Histogram refinement
|
// Histogram refinement
|
||||||
|
|
||||||
// Find the best 'out' histogram for each of the 'in' histograms.
|
// Find the best 'out' histogram for each of the 'in' histograms.
|
||||||
|
// At call-time, 'out' contains the histograms of the clusters.
|
||||||
// Note: we assume that out[]->bit_cost_ is already up-to-date.
|
// Note: we assume that out[]->bit_cost_ is already up-to-date.
|
||||||
static void HistogramRemap(const VP8LHistogramSet* const in,
|
static void HistogramRemap(const VP8LHistogramSet* const in,
|
||||||
const VP8LHistogramSet* const out,
|
VP8LHistogramSet* const out,
|
||||||
uint16_t* const symbols) {
|
uint16_t* const symbols) {
|
||||||
int i;
|
int i;
|
||||||
VP8LHistogram** const in_histo = in->histograms;
|
VP8LHistogram** const in_histo = in->histograms;
|
||||||
VP8LHistogram** const out_histo = out->histograms;
|
VP8LHistogram** const out_histo = out->histograms;
|
||||||
const int in_size = in->size;
|
const int in_size = out->max_size;
|
||||||
const int out_size = out->size;
|
const int out_size = out->size;
|
||||||
if (out_size > 1) {
|
if (out_size > 1) {
|
||||||
for (i = 0; i < in_size; ++i) {
|
for (i = 0; i < in_size; ++i) {
|
||||||
int best_out = 0;
|
int best_out = 0;
|
||||||
double best_bits = MAX_COST;
|
double best_bits = MAX_COST;
|
||||||
int k;
|
int k;
|
||||||
|
if (in_histo[i] == NULL) {
|
||||||
|
// Arbitrarily set to the previous value if unused to help future LZ77.
|
||||||
|
symbols[i] = symbols[i - 1];
|
||||||
|
continue;
|
||||||
|
}
|
||||||
for (k = 0; k < out_size; ++k) {
|
for (k = 0; k < out_size; ++k) {
|
||||||
const double cur_bits =
|
double cur_bits;
|
||||||
HistogramAddThresh(out_histo[k], in_histo[i], best_bits);
|
cur_bits = HistogramAddThresh(out_histo[k], in_histo[i], best_bits);
|
||||||
if (k == 0 || cur_bits < best_bits) {
|
if (k == 0 || cur_bits < best_bits) {
|
||||||
best_bits = cur_bits;
|
best_bits = cur_bits;
|
||||||
best_out = k;
|
best_out = k;
|
||||||
@ -949,12 +1082,13 @@ static void HistogramRemap(const VP8LHistogramSet* const in,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Recompute each out based on raw and symbols.
|
// Recompute each out based on raw and symbols.
|
||||||
for (i = 0; i < out_size; ++i) {
|
VP8LHistogramSetClear(out);
|
||||||
HistogramClear(out_histo[i]);
|
out->size = out_size;
|
||||||
}
|
|
||||||
|
|
||||||
for (i = 0; i < in_size; ++i) {
|
for (i = 0; i < in_size; ++i) {
|
||||||
const int idx = symbols[i];
|
int idx;
|
||||||
|
if (in_histo[i] == NULL) continue;
|
||||||
|
idx = symbols[i];
|
||||||
HistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]);
|
HistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -970,6 +1104,70 @@ static double GetCombineCostFactor(int histo_size, int quality) {
|
|||||||
return combine_cost_factor;
|
return combine_cost_factor;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Given a HistogramSet 'set', the mapping of clusters 'cluster_mapping' and the
|
||||||
|
// current assignment of the cells in 'symbols', merge the clusters and
|
||||||
|
// assign the smallest possible clusters values.
|
||||||
|
static void OptimizeHistogramSymbols(const VP8LHistogramSet* const set,
|
||||||
|
uint16_t* const cluster_mappings,
|
||||||
|
int num_clusters,
|
||||||
|
uint16_t* const cluster_mappings_tmp,
|
||||||
|
uint16_t* const symbols) {
|
||||||
|
int i, cluster_max;
|
||||||
|
int do_continue = 1;
|
||||||
|
// First, assign the lowest cluster to each pixel.
|
||||||
|
while (do_continue) {
|
||||||
|
do_continue = 0;
|
||||||
|
for (i = 0; i < num_clusters; ++i) {
|
||||||
|
int k;
|
||||||
|
k = cluster_mappings[i];
|
||||||
|
while (k != cluster_mappings[k]) {
|
||||||
|
cluster_mappings[k] = cluster_mappings[cluster_mappings[k]];
|
||||||
|
k = cluster_mappings[k];
|
||||||
|
}
|
||||||
|
if (k != cluster_mappings[i]) {
|
||||||
|
do_continue = 1;
|
||||||
|
cluster_mappings[i] = k;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Create a mapping from a cluster id to its minimal version.
|
||||||
|
cluster_max = 0;
|
||||||
|
memset(cluster_mappings_tmp, 0,
|
||||||
|
set->max_size * sizeof(*cluster_mappings_tmp));
|
||||||
|
assert(cluster_mappings[0] == 0);
|
||||||
|
// Re-map the ids.
|
||||||
|
for (i = 0; i < set->max_size; ++i) {
|
||||||
|
int cluster;
|
||||||
|
if (symbols[i] == kInvalidHistogramSymbol) continue;
|
||||||
|
cluster = cluster_mappings[symbols[i]];
|
||||||
|
assert(symbols[i] < num_clusters);
|
||||||
|
if (cluster > 0 && cluster_mappings_tmp[cluster] == 0) {
|
||||||
|
++cluster_max;
|
||||||
|
cluster_mappings_tmp[cluster] = cluster_max;
|
||||||
|
}
|
||||||
|
symbols[i] = cluster_mappings_tmp[cluster];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure all cluster values are used.
|
||||||
|
cluster_max = 0;
|
||||||
|
for (i = 0; i < set->max_size; ++i) {
|
||||||
|
if (symbols[i] == kInvalidHistogramSymbol) continue;
|
||||||
|
if (symbols[i] <= cluster_max) continue;
|
||||||
|
++cluster_max;
|
||||||
|
assert(symbols[i] == cluster_max);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void RemoveEmptyHistograms(VP8LHistogramSet* const image_histo) {
|
||||||
|
uint32_t size;
|
||||||
|
int i;
|
||||||
|
for (i = 0, size = 0; i < image_histo->size; ++i) {
|
||||||
|
if (image_histo->histograms[i] == NULL) continue;
|
||||||
|
image_histo->histograms[size++] = image_histo->histograms[i];
|
||||||
|
}
|
||||||
|
image_histo->size = size;
|
||||||
|
}
|
||||||
|
|
||||||
int VP8LGetHistoImageSymbols(int xsize, int ysize,
|
int VP8LGetHistoImageSymbols(int xsize, int ysize,
|
||||||
const VP8LBackwardRefs* const refs,
|
const VP8LBackwardRefs* const refs,
|
||||||
int quality, int low_effort,
|
int quality, int low_effort,
|
||||||
@ -987,28 +1185,37 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
|
|||||||
// histograms of small sizes (as bin_map will be very sparse) and
|
// histograms of small sizes (as bin_map will be very sparse) and
|
||||||
// maximum quality q==100 (to preserve the compression gains at that level).
|
// maximum quality q==100 (to preserve the compression gains at that level).
|
||||||
const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE;
|
const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE;
|
||||||
const int entropy_combine =
|
int entropy_combine;
|
||||||
(orig_histo->size > entropy_combine_num_bins * 2) && (quality < 100);
|
uint16_t* const map_tmp =
|
||||||
|
WebPSafeMalloc(2 * image_histo_raw_size, sizeof(map_tmp));
|
||||||
if (orig_histo == NULL) goto Error;
|
uint16_t* const cluster_mappings = map_tmp + image_histo_raw_size;
|
||||||
|
int num_used = image_histo_raw_size;
|
||||||
|
if (orig_histo == NULL || map_tmp == NULL) goto Error;
|
||||||
|
|
||||||
// Construct the histograms from backward references.
|
// Construct the histograms from backward references.
|
||||||
HistogramBuild(xsize, histo_bits, refs, orig_histo);
|
HistogramBuild(xsize, histo_bits, refs, orig_histo);
|
||||||
// Copies the histograms and computes its bit_cost.
|
// Copies the histograms and computes its bit_cost.
|
||||||
HistogramCopyAndAnalyze(orig_histo, image_histo);
|
// histogram_symbols is optimized
|
||||||
|
HistogramCopyAndAnalyze(orig_histo, image_histo, &num_used,
|
||||||
|
histogram_symbols);
|
||||||
|
|
||||||
|
entropy_combine =
|
||||||
|
(num_used > entropy_combine_num_bins * 2) && (quality < 100);
|
||||||
|
|
||||||
if (entropy_combine) {
|
if (entropy_combine) {
|
||||||
const int bin_map_size = orig_histo->size;
|
uint16_t* const bin_map = map_tmp;
|
||||||
// Reuse histogram_symbols storage. By definition, it's guaranteed to be ok.
|
|
||||||
uint16_t* const bin_map = histogram_symbols;
|
|
||||||
const double combine_cost_factor =
|
const double combine_cost_factor =
|
||||||
GetCombineCostFactor(image_histo_raw_size, quality);
|
GetCombineCostFactor(image_histo_raw_size, quality);
|
||||||
|
const uint32_t num_clusters = num_used;
|
||||||
|
|
||||||
HistogramAnalyzeEntropyBin(orig_histo, bin_map, low_effort);
|
HistogramAnalyzeEntropyBin(image_histo, bin_map, low_effort);
|
||||||
// Collapse histograms with similar entropy.
|
// Collapse histograms with similar entropy.
|
||||||
HistogramCombineEntropyBin(image_histo, tmp_histo, bin_map, bin_map_size,
|
HistogramCombineEntropyBin(image_histo, &num_used, histogram_symbols,
|
||||||
|
cluster_mappings, tmp_histo, bin_map,
|
||||||
entropy_combine_num_bins, combine_cost_factor,
|
entropy_combine_num_bins, combine_cost_factor,
|
||||||
low_effort);
|
low_effort);
|
||||||
|
OptimizeHistogramSymbols(image_histo, cluster_mappings, num_clusters,
|
||||||
|
map_tmp, histogram_symbols);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Don't combine the histograms using stochastic and greedy heuristics for
|
// Don't combine the histograms using stochastic and greedy heuristics for
|
||||||
@ -1018,21 +1225,26 @@ int VP8LGetHistoImageSymbols(int xsize, int ysize,
|
|||||||
// cubic ramp between 1 and MAX_HISTO_GREEDY:
|
// cubic ramp between 1 and MAX_HISTO_GREEDY:
|
||||||
const int threshold_size = (int)(1 + (x * x * x) * (MAX_HISTO_GREEDY - 1));
|
const int threshold_size = (int)(1 + (x * x * x) * (MAX_HISTO_GREEDY - 1));
|
||||||
int do_greedy;
|
int do_greedy;
|
||||||
if (!HistogramCombineStochastic(image_histo, threshold_size, &do_greedy)) {
|
if (!HistogramCombineStochastic(image_histo, &num_used, threshold_size,
|
||||||
|
&do_greedy)) {
|
||||||
goto Error;
|
goto Error;
|
||||||
}
|
}
|
||||||
if (do_greedy && !HistogramCombineGreedy(image_histo)) {
|
if (do_greedy) {
|
||||||
|
RemoveEmptyHistograms(image_histo);
|
||||||
|
if (!HistogramCombineGreedy(image_histo, &num_used)) {
|
||||||
goto Error;
|
goto Error;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// TODO(vrabaud): Optimize HistogramRemap for low-effort compression mode.
|
|
||||||
// Find the optimal map from original histograms to the final ones.
|
// Find the optimal map from original histograms to the final ones.
|
||||||
|
RemoveEmptyHistograms(image_histo);
|
||||||
HistogramRemap(orig_histo, image_histo, histogram_symbols);
|
HistogramRemap(orig_histo, image_histo, histogram_symbols);
|
||||||
|
|
||||||
ok = 1;
|
ok = 1;
|
||||||
|
|
||||||
Error:
|
Error:
|
||||||
VP8LFreeHistogramSet(orig_histo);
|
VP8LFreeHistogramSet(orig_histo);
|
||||||
|
WebPSafeFree(map_tmp);
|
||||||
return ok;
|
return ok;
|
||||||
}
|
}
|
||||||
|
10  3rdparty/libwebp/src/enc/histogram_enc.h  vendored
@@ -44,6 +44,7 @@ typedef struct {
   double literal_cost_;     // Cached values of dominant entropy costs:
   double red_cost_;         // literal, red & blue.
   double blue_cost_;
+  uint8_t is_used_[5];      // 5 for literal, red, blue, alpha, distance
 } VP8LHistogram;
 
 // Collection of histograms with fixed capacity, allocated as one
@@ -67,7 +68,9 @@ void VP8LHistogramCreate(VP8LHistogram* const p,
 int VP8LGetHistogramSize(int palette_code_bits);
 
 // Set the palette_code_bits and reset the stats.
-void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits);
+// If init_arrays is true, the arrays are also filled with 0's.
+void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits,
+                       int init_arrays);
 
 // Collect all the references into a histogram (without reset)
 void VP8LHistogramStoreRefs(const VP8LBackwardRefs* const refs,
@@ -83,6 +86,9 @@ void VP8LFreeHistogramSet(VP8LHistogramSet* const histo);
 // using 'cache_bits'. Return NULL in case of memory error.
 VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits);
 
+// Set the histograms in set to 0.
+void VP8LHistogramSetClear(VP8LHistogramSet* const set);
+
 // Allocate and initialize histogram object with specified 'cache_bits'.
 // Returns NULL in case of memory error.
 // Special case of VP8LAllocateHistogramSet, with size equals 1.
@@ -113,7 +119,7 @@ double VP8LBitsEntropy(const uint32_t* const array, int n);
 
 // Estimate how many bits the combined entropy of literals and distance
 // approximately maps to.
-double VP8LHistogramEstimateBits(const VP8LHistogram* const p);
+double VP8LHistogramEstimateBits(VP8LHistogram* const p);
 
 #ifdef __cplusplus
 }
2  3rdparty/libwebp/src/enc/iterator_enc.c  vendored
@@ -128,7 +128,7 @@ static void ImportLine(const uint8_t* src, int src_stride,
   for (; i < total_len; ++i) dst[i] = dst[len - 1];
 }
 
-void VP8IteratorImport(VP8EncIterator* const it, uint8_t* tmp_32) {
+void VP8IteratorImport(VP8EncIterator* const it, uint8_t* const tmp_32) {
   const VP8Encoder* const enc = it->enc_;
   const int x = it->x_, y = it->y_;
   const WebPPicture* const pic = enc->pic_;
41  3rdparty/libwebp/src/enc/picture_tools_enc.c  vendored
@@ -16,10 +16,6 @@
 #include "src/enc/vp8i_enc.h"
 #include "src/dsp/yuv.h"
 
-static WEBP_INLINE uint32_t MakeARGB32(int r, int g, int b) {
-  return (0xff000000u | (r << 16) | (g << 8) | b);
-}
-
 //------------------------------------------------------------------------------
 // Helper: clean up fully transparent area to help compressibility.
 
@@ -195,6 +191,10 @@ void WebPCleanupTransparentAreaLossless(WebPPicture* const pic) {
 #define BLEND_10BIT(V0, V1, ALPHA) \
     ((((V0) * (1020 - (ALPHA)) + (V1) * (ALPHA)) * 0x101 + 1024) >> 18)
 
+static WEBP_INLINE uint32_t MakeARGB32(int r, int g, int b) {
+  return (0xff000000u | (r << 16) | (g << 8) | b);
+}
+
 void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
   const int red = (background_rgb >> 16) & 0xff;
   const int green = (background_rgb >> 8) & 0xff;
@@ -208,39 +208,44 @@ void WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb) {
     const int U0 = VP8RGBToU(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF);
     const int V0 = VP8RGBToV(4 * red, 4 * green, 4 * blue, 4 * YUV_HALF);
     const int has_alpha = pic->colorspace & WEBP_CSP_ALPHA_BIT;
-    if (!has_alpha || pic->a == NULL) return;    // nothing to do
+    uint8_t* y_ptr = pic->y;
+    uint8_t* u_ptr = pic->u;
+    uint8_t* v_ptr = pic->v;
+    uint8_t* a_ptr = pic->a;
+    if (!has_alpha || a_ptr == NULL) return;    // nothing to do
     for (y = 0; y < pic->height; ++y) {
       // Luma blending
-      uint8_t* const y_ptr = pic->y + y * pic->y_stride;
-      uint8_t* const a_ptr = pic->a + y * pic->a_stride;
       for (x = 0; x < pic->width; ++x) {
-        const int alpha = a_ptr[x];
+        const uint8_t alpha = a_ptr[x];
         if (alpha < 0xff) {
-          y_ptr[x] = BLEND(Y0, y_ptr[x], a_ptr[x]);
+          y_ptr[x] = BLEND(Y0, y_ptr[x], alpha);
         }
       }
       // Chroma blending every even line
       if ((y & 1) == 0) {
-        uint8_t* const u = pic->u + (y >> 1) * pic->uv_stride;
-        uint8_t* const v = pic->v + (y >> 1) * pic->uv_stride;
         uint8_t* const a_ptr2 =
            (y + 1 == pic->height) ? a_ptr : a_ptr + pic->a_stride;
        for (x = 0; x < uv_width; ++x) {
          // Average four alpha values into a single blending weight.
          // TODO(skal): might lead to visible contouring. Can we do better?
-          const int alpha =
+          const uint32_t alpha =
              a_ptr[2 * x + 0] + a_ptr[2 * x + 1] +
              a_ptr2[2 * x + 0] + a_ptr2[2 * x + 1];
-          u[x] = BLEND_10BIT(U0, u[x], alpha);
-          v[x] = BLEND_10BIT(V0, v[x], alpha);
+          u_ptr[x] = BLEND_10BIT(U0, u_ptr[x], alpha);
+          v_ptr[x] = BLEND_10BIT(V0, v_ptr[x], alpha);
        }
        if (pic->width & 1) {   // rightmost pixel
-          const int alpha = 2 * (a_ptr[2 * x + 0] + a_ptr2[2 * x + 0]);
-          u[x] = BLEND_10BIT(U0, u[x], alpha);
-          v[x] = BLEND_10BIT(V0, v[x], alpha);
+          const uint32_t alpha = 2 * (a_ptr[2 * x + 0] + a_ptr2[2 * x + 0]);
+          u_ptr[x] = BLEND_10BIT(U0, u_ptr[x], alpha);
+          v_ptr[x] = BLEND_10BIT(V0, v_ptr[x], alpha);
        }
+      } else {
+        u_ptr += pic->uv_stride;
+        v_ptr += pic->uv_stride;
      }
-      memset(a_ptr, 0xff, pic->width);
+      memset(a_ptr, 0xff, pic->width);  // reset alpha value to opaque
+      a_ptr += pic->a_stride;
+      y_ptr += pic->y_stride;
    }
  } else {
    uint32_t* argb = pic->argb;
14  3rdparty/libwebp/src/enc/predictor_enc.c  vendored
@@ -177,12 +177,15 @@ static uint8_t NearLosslessComponent(uint8_t value, uint8_t predict,
   }
 }
 
+static WEBP_INLINE uint8_t NearLosslessDiff(uint8_t a, uint8_t b) {
+  return (uint8_t)((((int)(a) - (int)(b))) & 0xff);
+}
+
 // Quantize every component of the difference between the actual pixel value and
 // its prediction to a multiple of a quantization (a power of 2, not larger than
 // max_quantization which is a power of 2, smaller than max_diff). Take care if
 // value and predict have undergone subtract green, which means that red and
 // blue are represented as offsets from green.
-#define NEAR_LOSSLESS_DIFF(a, b) (uint8_t)((((int)(a) - (int)(b))) & 0xff)
 static uint32_t NearLossless(uint32_t value, uint32_t predict,
                              int max_quantization, int max_diff,
                              int used_subtract_green) {
@@ -199,7 +202,7 @@ static uint32_t NearLossless(uint32_t value, uint32_t predict,
   }
   if ((value >> 24) == 0 || (value >> 24) == 0xff) {
     // Preserve transparency of fully transparent or fully opaque pixels.
-    a = NEAR_LOSSLESS_DIFF(value >> 24, predict >> 24);
+    a = NearLosslessDiff(value >> 24, predict >> 24);
   } else {
     a = NearLosslessComponent(value >> 24, predict >> 24, 0xff, quantization);
   }
@@ -212,16 +215,15 @@ static uint32_t NearLossless(uint32_t value, uint32_t predict,
     // The amount by which green has been adjusted during quantization. It is
     // subtracted from red and blue for compensation, to avoid accumulating two
     // quantization errors in them.
-    green_diff = NEAR_LOSSLESS_DIFF(new_green, value >> 8);
+    green_diff = NearLosslessDiff(new_green, value >> 8);
   }
-  r = NearLosslessComponent(NEAR_LOSSLESS_DIFF(value >> 16, green_diff),
+  r = NearLosslessComponent(NearLosslessDiff(value >> 16, green_diff),
                             (predict >> 16) & 0xff, 0xff - new_green,
                             quantization);
-  b = NearLosslessComponent(NEAR_LOSSLESS_DIFF(value, green_diff),
+  b = NearLosslessComponent(NearLosslessDiff(value, green_diff),
                             predict & 0xff, 0xff - new_green, quantization);
   return ((uint32_t)a << 24) | ((uint32_t)r << 16) | ((uint32_t)g << 8) | b;
 }
-#undef NEAR_LOSSLESS_DIFF
 #endif  // (WEBP_NEAR_LOSSLESS == 1)
 
 // Stores the difference between the pixel and its prediction in "out".
14  3rdparty/libwebp/src/enc/quant_enc.c  vendored
@@ -15,6 +15,7 @@
 #include <math.h>
 #include <stdlib.h>  // for abs()
 
+#include "src/dsp/quant.h"
 #include "src/enc/vp8i_enc.h"
 #include "src/enc/cost_enc.h"
 
@@ -977,19 +978,6 @@ static void SwapOut(VP8EncIterator* const it) {
   SwapPtr(&it->yuv_out_, &it->yuv_out2_);
 }
 
-static score_t IsFlat(const int16_t* levels, int num_blocks, score_t thresh) {
-  score_t score = 0;
-  while (num_blocks-- > 0) {      // TODO(skal): refine positional scoring?
-    int i;
-    for (i = 1; i < 16; ++i) {    // omit DC, we're only interested in AC
-      score += (levels[i] != 0);
-      if (score > thresh) return 0;
-    }
-    levels += 16;
-  }
-  return 1;
-}
-
 static void PickBestIntra16(VP8EncIterator* const it, VP8ModeScore* rd) {
   const int kNumBlocks = 16;
   VP8SegmentInfo* const dqm = &it->enc_->dqm_[it->mb_->segment_];
3rdparty/libwebp/src/enc/vp8i_enc.h
vendored
6
3rdparty/libwebp/src/enc/vp8i_enc.h
vendored
@ -32,7 +32,7 @@ extern "C" {
|
|||||||
// version numbers
|
// version numbers
|
||||||
#define ENC_MAJ_VERSION 1
|
#define ENC_MAJ_VERSION 1
|
||||||
#define ENC_MIN_VERSION 0
|
#define ENC_MIN_VERSION 0
|
||||||
#define ENC_REV_VERSION 0
|
#define ENC_REV_VERSION 2
|
||||||
|
|
||||||
enum { MAX_LF_LEVELS = 64, // Maximum loop filter level
|
enum { MAX_LF_LEVELS = 64, // Maximum loop filter level
|
||||||
MAX_VARIABLE_LEVEL = 67, // last (inclusive) level with variable cost
|
MAX_VARIABLE_LEVEL = 67, // last (inclusive) level with variable cost
|
||||||
@ -278,7 +278,7 @@ int VP8IteratorIsDone(const VP8EncIterator* const it);
|
|||||||
// Import uncompressed samples from source.
|
// Import uncompressed samples from source.
|
||||||
// If tmp_32 is not NULL, import boundary samples too.
|
// If tmp_32 is not NULL, import boundary samples too.
|
||||||
// tmp_32 is a 32-bytes scratch buffer that must be aligned in memory.
|
// tmp_32 is a 32-bytes scratch buffer that must be aligned in memory.
|
||||||
void VP8IteratorImport(VP8EncIterator* const it, uint8_t* tmp_32);
|
void VP8IteratorImport(VP8EncIterator* const it, uint8_t* const tmp_32);
|
||||||
// export decimated samples
|
// export decimated samples
|
||||||
void VP8IteratorExport(const VP8EncIterator* const it);
|
void VP8IteratorExport(const VP8EncIterator* const it);
|
||||||
// go to next macroblock. Returns false if not finished.
|
// go to next macroblock. Returns false if not finished.
|
||||||
@ -515,4 +515,4 @@ void WebPCleanupTransparentAreaLossless(WebPPicture* const pic);
|
|||||||
} // extern "C"
|
} // extern "C"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#endif /* WEBP_ENC_VP8I_ENC_H_ */
|
#endif // WEBP_ENC_VP8I_ENC_H_
|
||||||
|
16  3rdparty/libwebp/src/enc/vp8l_enc.c  vendored
@@ -462,6 +462,7 @@ static int GetHuffBitLengthsAndCodes(
   for (i = 0; i < histogram_image_size; ++i) {
     const VP8LHistogram* const histo = histogram_image->histograms[i];
     HuffmanTreeCode* const codes = &huffman_codes[5 * i];
+    assert(histo != NULL);
     for (k = 0; k < 5; ++k) {
       const int num_symbols =
           (k == 0) ? VP8LHistogramNumCodes(histo->palette_code_bits_) :
@@ -809,6 +810,7 @@ static WebPEncodingError EncodeImageNoHuffman(VP8LBitWriter* const bw,
     err = VP8_ENC_ERROR_OUT_OF_MEMORY;
     goto Error;
   }
+  VP8LHistogramSetClear(histogram_image);
 
   // Build histogram image and symbols from backward references.
   VP8LHistogramStoreRefs(refs, histogram_image->histograms[0]);
@@ -1248,14 +1250,20 @@ static WebPEncodingError MakeInputImageCopy(VP8LEncoder* const enc) {
   const WebPPicture* const picture = enc->pic_;
   const int width = picture->width;
   const int height = picture->height;
-  int y;
   err = AllocateTransformBuffer(enc, width, height);
   if (err != VP8_ENC_OK) return err;
   if (enc->argb_content_ == kEncoderARGB) return VP8_ENC_OK;
 
+  {
+    uint32_t* dst = enc->argb_;
+    const uint32_t* src = picture->argb;
+    int y;
     for (y = 0; y < height; ++y) {
-      memcpy(enc->argb_ + y * width,
-             picture->argb + y * picture->argb_stride,
-             width * sizeof(*enc->argb_));
+      memcpy(dst, src, width * sizeof(*dst));
+      dst += width;
+      src += picture->argb_stride;
+    }
   }
   enc->argb_content_ = kEncoderARGB;
   assert(enc->current_width_ == width);
2  3rdparty/libwebp/src/enc/vp8li_enc.h  vendored
@@ -115,4 +115,4 @@ void VP8LColorSpaceTransform(int width, int height, int bits, int quality,
 }    // extern "C"
 #endif
 
-#endif  /* WEBP_ENC_VP8LI_ENC_H_ */
+#endif  // WEBP_ENC_VP8LI_ENC_H_
2  3rdparty/libwebp/src/mux/animi.h  vendored
@@ -40,4 +40,4 @@ int WebPAnimEncoderRefineRect(
 }    // extern "C"
 #endif
 
-#endif  /* WEBP_MUX_ANIMI_H_ */
+#endif  // WEBP_MUX_ANIMI_H_
18  3rdparty/libwebp/src/mux/muxedit.c  vendored
@@ -69,12 +69,12 @@ void WebPMuxDelete(WebPMux* mux) {
   if (idx == (INDEX)) { \
     err = ChunkAssignData(&chunk, data, copy_data, tag); \
     if (err == WEBP_MUX_OK) { \
-      err = ChunkSetNth(&chunk, (LIST), nth); \
+      err = ChunkSetHead(&chunk, (LIST)); \
     } \
     return err; \
   }
 
-static WebPMuxError MuxSet(WebPMux* const mux, uint32_t tag, uint32_t nth,
+static WebPMuxError MuxSet(WebPMux* const mux, uint32_t tag,
                            const WebPData* const data, int copy_data) {
   WebPChunk chunk;
   WebPMuxError err = WEBP_MUX_NOT_FOUND;
@@ -190,7 +190,7 @@ WebPMuxError WebPMuxSetChunk(WebPMux* mux, const char fourcc[4],
   if (err != WEBP_MUX_OK && err != WEBP_MUX_NOT_FOUND) return err;
 
   // Add the given chunk.
-  return MuxSet(mux, tag, 1, chunk_data, copy_data);
+  return MuxSet(mux, tag, chunk_data, copy_data);
 }
 
 // Creates a chunk from given 'data' and sets it as 1st chunk in 'chunk_list'.
@@ -202,7 +202,7 @@ static WebPMuxError AddDataToChunkList(
   ChunkInit(&chunk);
   err = ChunkAssignData(&chunk, data, copy_data, tag);
   if (err != WEBP_MUX_OK) goto Err;
-  err = ChunkSetNth(&chunk, chunk_list, 1);
+  err = ChunkSetHead(&chunk, chunk_list);
   if (err != WEBP_MUX_OK) goto Err;
   return WEBP_MUX_OK;
 Err:
@@ -266,14 +266,14 @@ WebPMuxError WebPMuxPushFrame(WebPMux* mux, const WebPMuxFrameInfo* info,
                               int copy_data) {
   WebPMuxImage wpi;
   WebPMuxError err;
-  const WebPData* const bitstream = &info->bitstream;
 
   // Sanity checks.
   if (mux == NULL || info == NULL) return WEBP_MUX_INVALID_ARGUMENT;
 
   if (info->id != WEBP_CHUNK_ANMF) return WEBP_MUX_INVALID_ARGUMENT;
 
-  if (bitstream->bytes == NULL || bitstream->size > MAX_CHUNK_PAYLOAD) {
+  if (info->bitstream.bytes == NULL ||
+      info->bitstream.size > MAX_CHUNK_PAYLOAD) {
     return WEBP_MUX_INVALID_ARGUMENT;
   }
 
@@ -287,7 +287,7 @@ WebPMuxError WebPMuxPushFrame(WebPMux* mux, const WebPMuxFrameInfo* info,
   }
 
   MuxImageInit(&wpi);
-  err = SetAlphaAndImageChunks(bitstream, copy_data, &wpi);
+  err = SetAlphaAndImageChunks(&info->bitstream, copy_data, &wpi);
   if (err != WEBP_MUX_OK) goto Err;
   assert(wpi.img_ != NULL);  // As SetAlphaAndImageChunks() was successful.
 
@@ -342,7 +342,7 @@ WebPMuxError WebPMuxSetAnimationParams(WebPMux* mux,
   // Set the animation parameters.
   PutLE32(data, params->bgcolor);
   PutLE16(data + 4, params->loop_count);
-  return MuxSet(mux, kChunks[IDX_ANIM].tag, 1, &anim, 1);
+  return MuxSet(mux, kChunks[IDX_ANIM].tag, &anim, 1);
 }
 
 WebPMuxError WebPMuxSetCanvasSize(WebPMux* mux,
@@ -540,7 +540,7 @@ static WebPMuxError CreateVP8XChunk(WebPMux* const mux) {
   PutLE24(data + 4, width - 1);   // canvas width.
   PutLE24(data + 7, height - 1);  // canvas height.
 
-  return MuxSet(mux, kChunks[IDX_VP8X].tag, 1, &vp8x, 1);
+  return MuxSet(mux, kChunks[IDX_VP8X].tag, &vp8x, 1);
 }
 
 // Cleans up 'mux' by removing any unnecessary chunks.
18  3rdparty/libwebp/src/mux/muxi.h  vendored
@@ -14,6 +14,7 @@
 #ifndef WEBP_MUX_MUXI_H_
 #define WEBP_MUX_MUXI_H_
 
+#include <assert.h>
 #include <stdlib.h>
 #include "src/dec/vp8i_dec.h"
 #include "src/dec/vp8li_dec.h"
@@ -28,7 +29,7 @@ extern "C" {
 
 #define MUX_MAJ_VERSION 1
 #define MUX_MIN_VERSION 0
-#define MUX_REV_VERSION 0
+#define MUX_REV_VERSION 2
 
 // Chunk object.
 typedef struct WebPChunk WebPChunk;
@@ -126,11 +127,14 @@ WebPChunk* ChunkSearchList(WebPChunk* first, uint32_t nth, uint32_t tag);
 WebPMuxError ChunkAssignData(WebPChunk* chunk, const WebPData* const data,
                              int copy_data, uint32_t tag);
 
-// Sets 'chunk' at nth position in the 'chunk_list'.
-// nth = 0 has the special meaning "last of the list".
+// Sets 'chunk' as the only element in 'chunk_list' if it is empty.
 // On success ownership is transferred from 'chunk' to the 'chunk_list'.
-WebPMuxError ChunkSetNth(WebPChunk* chunk, WebPChunk** chunk_list,
-                         uint32_t nth);
+WebPMuxError ChunkSetHead(WebPChunk* const chunk, WebPChunk** const chunk_list);
+// Sets 'chunk' at last position in the 'chunk_list'.
+// On success ownership is transferred from 'chunk' to the 'chunk_list'.
+// *chunk_list also points towards the last valid element of the initial
+// *chunk_list.
+WebPMuxError ChunkAppend(WebPChunk* const chunk, WebPChunk*** const chunk_list);
 
 // Releases chunk and returns chunk->next_.
 WebPChunk* ChunkRelease(WebPChunk* const chunk);
@@ -143,13 +147,13 @@ void ChunkListDelete(WebPChunk** const chunk_list);
 
 // Returns size of the chunk including chunk header and padding byte (if any).
 static WEBP_INLINE size_t SizeWithPadding(size_t chunk_size) {
+  assert(chunk_size <= MAX_CHUNK_PAYLOAD);
   return CHUNK_HEADER_SIZE + ((chunk_size + 1) & ~1U);
 }
 
 // Size of a chunk including header and padding.
 static WEBP_INLINE size_t ChunkDiskSize(const WebPChunk* chunk) {
   const size_t data_size = chunk->data_.size;
-  assert(data_size < MAX_CHUNK_PAYLOAD);
   return SizeWithPadding(data_size);
 }
 
@@ -227,4 +231,4 @@ WebPMuxError MuxValidate(const WebPMux* const mux);
 }    // extern "C"
 #endif
 
-#endif  /* WEBP_MUX_MUXI_H_ */
+#endif  // WEBP_MUX_MUXI_H_
53  3rdparty/libwebp/src/mux/muxinternal.c  vendored
@@ -111,27 +111,6 @@ WebPChunk* ChunkSearchList(WebPChunk* first, uint32_t nth, uint32_t tag) {
   return ((nth > 0) && (iter > 0)) ? NULL : first;
 }
 
-// Outputs a pointer to 'prev_chunk->next_',
-// where 'prev_chunk' is the pointer to the chunk at position (nth - 1).
-// Returns true if nth chunk was found.
-static int ChunkSearchListToSet(WebPChunk** chunk_list, uint32_t nth,
-                                WebPChunk*** const location) {
-  uint32_t count = 0;
-  assert(chunk_list != NULL);
-  *location = chunk_list;
-
-  while (*chunk_list != NULL) {
-    WebPChunk* const cur_chunk = *chunk_list;
-    ++count;
-    if (count == nth) return 1;  // Found.
-    chunk_list = &cur_chunk->next_;
-    *location = chunk_list;
-  }
-
-  // *chunk_list is ok to be NULL if adding at last location.
-  return (nth == 0 || (count == nth - 1)) ? 1 : 0;
-}
-
 //------------------------------------------------------------------------------
 // Chunk writer methods.
 
@@ -156,11 +135,12 @@ WebPMuxError ChunkAssignData(WebPChunk* chunk, const WebPData* const data,
   return WEBP_MUX_OK;
 }
 
-WebPMuxError ChunkSetNth(WebPChunk* chunk, WebPChunk** chunk_list,
-                         uint32_t nth) {
+WebPMuxError ChunkSetHead(WebPChunk* const chunk,
+                          WebPChunk** const chunk_list) {
   WebPChunk* new_chunk;
 
-  if (!ChunkSearchListToSet(chunk_list, nth, &chunk_list)) {
+  assert(chunk_list != NULL);
+  if (*chunk_list != NULL) {
     return WEBP_MUX_NOT_FOUND;
   }
 
@@ -168,11 +148,26 @@ WebPMuxError ChunkSetNth(WebPChunk* chunk, WebPChunk** chunk_list,
   if (new_chunk == NULL) return WEBP_MUX_MEMORY_ERROR;
   *new_chunk = *chunk;
   chunk->owner_ = 0;
-  new_chunk->next_ = *chunk_list;
+  new_chunk->next_ = NULL;
   *chunk_list = new_chunk;
   return WEBP_MUX_OK;
 }
 
+WebPMuxError ChunkAppend(WebPChunk* const chunk,
+                         WebPChunk*** const chunk_list) {
+  assert(chunk_list != NULL && *chunk_list != NULL);
+
+  if (**chunk_list == NULL) {
+    ChunkSetHead(chunk, *chunk_list);
+  } else {
+    WebPChunk* last_chunk = **chunk_list;
+    while (last_chunk->next_ != NULL) last_chunk = last_chunk->next_;
+    ChunkSetHead(chunk, &last_chunk->next_);
+    *chunk_list = &last_chunk->next_;
+  }
+  return WEBP_MUX_OK;
+}
+
 //------------------------------------------------------------------------------
 // Chunk deletion method(s).
 
@@ -232,9 +227,11 @@ void MuxImageInit(WebPMuxImage* const wpi) {
 WebPMuxImage* MuxImageRelease(WebPMuxImage* const wpi) {
   WebPMuxImage* next;
   if (wpi == NULL) return NULL;
-  ChunkDelete(wpi->header_);
-  ChunkDelete(wpi->alpha_);
-  ChunkDelete(wpi->img_);
+  // There should be at most one chunk of header_, alpha_, img_ but we call
+  // ChunkListDelete to be safe
+  ChunkListDelete(&wpi->header_);
+  ChunkListDelete(&wpi->alpha_);
+  ChunkListDelete(&wpi->img_);
   ChunkListDelete(&wpi->unknown_);
 
   next = wpi->next_;
59
3rdparty/libwebp/src/mux/muxread.c
vendored
59
3rdparty/libwebp/src/mux/muxread.c
vendored
@ -59,6 +59,7 @@ static WebPMuxError ChunkVerifyAndAssign(WebPChunk* chunk,
|
|||||||
// Sanity checks.
|
// Sanity checks.
|
||||||
if (data_size < CHUNK_HEADER_SIZE) return WEBP_MUX_NOT_ENOUGH_DATA;
|
if (data_size < CHUNK_HEADER_SIZE) return WEBP_MUX_NOT_ENOUGH_DATA;
|
||||||
chunk_size = GetLE32(data + TAG_SIZE);
|
chunk_size = GetLE32(data + TAG_SIZE);
|
||||||
|
if (chunk_size > MAX_CHUNK_PAYLOAD) return WEBP_MUX_BAD_DATA;
|
||||||
|
|
||||||
{
|
{
|
||||||
const size_t chunk_disk_size = SizeWithPadding(chunk_size);
|
const size_t chunk_disk_size = SizeWithPadding(chunk_size);
|
||||||
@ -102,6 +103,7 @@ static int MuxImageParse(const WebPChunk* const chunk, int copy_data,
|
|||||||
const uint8_t* const last = bytes + size;
|
const uint8_t* const last = bytes + size;
|
||||||
WebPChunk subchunk;
|
WebPChunk subchunk;
|
||||||
size_t subchunk_size;
|
size_t subchunk_size;
|
||||||
|
WebPChunk** unknown_chunk_list = &wpi->unknown_;
|
||||||
ChunkInit(&subchunk);
|
ChunkInit(&subchunk);
|
||||||
|
|
||||||
assert(chunk->tag_ == kChunks[IDX_ANMF].tag);
|
assert(chunk->tag_ == kChunks[IDX_ANMF].tag);
|
||||||
@ -116,7 +118,7 @@ static int MuxImageParse(const WebPChunk* const chunk, int copy_data,
|
|||||||
if (size < hdr_size) goto Fail;
|
if (size < hdr_size) goto Fail;
|
||||||
ChunkAssignData(&subchunk, &temp, copy_data, chunk->tag_);
|
ChunkAssignData(&subchunk, &temp, copy_data, chunk->tag_);
|
||||||
}
|
}
|
||||||
ChunkSetNth(&subchunk, &wpi->header_, 1);
|
ChunkSetHead(&subchunk, &wpi->header_);
|
||||||
wpi->is_partial_ = 1; // Waiting for ALPH and/or VP8/VP8L chunks.
|
wpi->is_partial_ = 1; // Waiting for ALPH and/or VP8/VP8L chunks.
|
||||||
|
|
||||||
// Rest of the chunks.
|
// Rest of the chunks.
|
||||||
@ -133,18 +135,23 @@ static int MuxImageParse(const WebPChunk* const chunk, int copy_data,
|
|||||||
switch (ChunkGetIdFromTag(subchunk.tag_)) {
|
switch (ChunkGetIdFromTag(subchunk.tag_)) {
|
||||||
case WEBP_CHUNK_ALPHA:
|
case WEBP_CHUNK_ALPHA:
|
||||||
if (wpi->alpha_ != NULL) goto Fail; // Consecutive ALPH chunks.
|
if (wpi->alpha_ != NULL) goto Fail; // Consecutive ALPH chunks.
|
||||||
if (ChunkSetNth(&subchunk, &wpi->alpha_, 1) != WEBP_MUX_OK) goto Fail;
|
if (ChunkSetHead(&subchunk, &wpi->alpha_) != WEBP_MUX_OK) goto Fail;
|
||||||
wpi->is_partial_ = 1; // Waiting for a VP8 chunk.
|
wpi->is_partial_ = 1; // Waiting for a VP8 chunk.
|
||||||
break;
|
break;
|
||||||
case WEBP_CHUNK_IMAGE:
|
case WEBP_CHUNK_IMAGE:
|
||||||
if (ChunkSetNth(&subchunk, &wpi->img_, 1) != WEBP_MUX_OK) goto Fail;
|
if (wpi->img_ != NULL) goto Fail; // Only 1 image chunk allowed.
|
||||||
|
if (ChunkSetHead(&subchunk, &wpi->img_) != WEBP_MUX_OK) goto Fail;
|
||||||
if (!MuxImageFinalize(wpi)) goto Fail;
|
if (!MuxImageFinalize(wpi)) goto Fail;
|
||||||
wpi->is_partial_ = 0; // wpi is completely filled.
|
wpi->is_partial_ = 0; // wpi is completely filled.
|
||||||
break;
|
break;
|
||||||
case WEBP_CHUNK_UNKNOWN:
|
case WEBP_CHUNK_UNKNOWN:
|
||||||
if (wpi->is_partial_) goto Fail; // Encountered an unknown chunk
|
if (wpi->is_partial_) {
|
||||||
|
goto Fail; // Encountered an unknown chunk
|
||||||
// before some image chunks.
|
// before some image chunks.
|
||||||
if (ChunkSetNth(&subchunk, &wpi->unknown_, 0) != WEBP_MUX_OK) goto Fail;
|
}
|
||||||
|
if (ChunkAppend(&subchunk, &unknown_chunk_list) != WEBP_MUX_OK) {
|
||||||
|
goto Fail;
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
goto Fail;
|
goto Fail;
|
||||||
@ -175,6 +182,9 @@ WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data,
|
|||||||
const uint8_t* data;
|
const uint8_t* data;
|
||||||
size_t size;
|
size_t size;
|
||||||
WebPChunk chunk;
|
WebPChunk chunk;
|
||||||
|
// Stores the end of the chunk lists so that it is faster to append data to
|
||||||
|
// their ends.
|
||||||
|
WebPChunk** chunk_list_ends[WEBP_CHUNK_NIL + 1] = { NULL };
|
||||||
ChunkInit(&chunk);
|
ChunkInit(&chunk);
|
||||||
|
|
||||||
// Sanity checks.
|
// Sanity checks.
|
||||||
@ -187,7 +197,7 @@ WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data,
|
|||||||
size = bitstream->size;
|
size = bitstream->size;
|
||||||
|
|
||||||
if (data == NULL) return NULL;
|
if (data == NULL) return NULL;
|
||||||
if (size < RIFF_HEADER_SIZE) return NULL;
|
if (size < RIFF_HEADER_SIZE + CHUNK_HEADER_SIZE) return NULL;
|
||||||
if (GetLE32(data + 0) != MKFOURCC('R', 'I', 'F', 'F') ||
|
if (GetLE32(data + 0) != MKFOURCC('R', 'I', 'F', 'F') ||
|
||||||
GetLE32(data + CHUNK_HEADER_SIZE) != MKFOURCC('W', 'E', 'B', 'P')) {
|
GetLE32(data + CHUNK_HEADER_SIZE) != MKFOURCC('W', 'E', 'B', 'P')) {
|
||||||
return NULL;
|
return NULL;
|
||||||
@ -196,8 +206,6 @@ WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data,
|
|||||||
mux = WebPMuxNew();
|
mux = WebPMuxNew();
|
||||||
if (mux == NULL) return NULL;
|
if (mux == NULL) return NULL;
|
||||||
|
|
||||||
if (size < RIFF_HEADER_SIZE + TAG_SIZE) goto Err;
|
|
||||||
|
|
||||||
tag = GetLE32(data + RIFF_HEADER_SIZE);
|
tag = GetLE32(data + RIFF_HEADER_SIZE);
|
||||||
if (tag != kChunks[IDX_VP8].tag &&
|
if (tag != kChunks[IDX_VP8].tag &&
|
||||||
tag != kChunks[IDX_VP8L].tag &&
|
tag != kChunks[IDX_VP8L].tag &&
|
||||||
@ -205,13 +213,17 @@ WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data,
|
|||||||
goto Err; // First chunk should be VP8, VP8L or VP8X.
|
goto Err; // First chunk should be VP8, VP8L or VP8X.
|
||||||
}
|
}
|
||||||
|
|
||||||
riff_size = SizeWithPadding(GetLE32(data + TAG_SIZE));
|
riff_size = GetLE32(data + TAG_SIZE);
|
||||||
if (riff_size > MAX_CHUNK_PAYLOAD || riff_size > size) {
|
if (riff_size > MAX_CHUNK_PAYLOAD) goto Err;
|
||||||
goto Err;
|
|
||||||
} else {
|
// Note this padding is historical and differs from demux.c which does not
|
||||||
if (riff_size < size) { // Redundant data after last chunk.
|
// pad the file size.
|
||||||
size = riff_size; // To make sure we don't read any data beyond mux_size.
|
riff_size = SizeWithPadding(riff_size);
|
||||||
}
|
if (riff_size < CHUNK_HEADER_SIZE) goto Err;
|
||||||
|
if (riff_size > size) goto Err;
|
||||||
|
// There's no point in reading past the end of the RIFF chunk.
|
||||||
|
if (size > riff_size + CHUNK_HEADER_SIZE) {
|
||||||
|
size = riff_size + CHUNK_HEADER_SIZE;
|
||||||
}
|
}
|
||||||
|
|
||||||
end = data + size;
|
end = data + size;
|
||||||
@ -226,7 +238,6 @@ WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data,
|
|||||||
while (data != end) {
|
while (data != end) {
|
||||||
size_t data_size;
|
size_t data_size;
|
||||||
WebPChunkId id;
|
WebPChunkId id;
|
||||||
WebPChunk** chunk_list;
|
|
||||||
if (ChunkVerifyAndAssign(&chunk, data, size, riff_size,
|
if (ChunkVerifyAndAssign(&chunk, data, size, riff_size,
|
||||||
copy_data) != WEBP_MUX_OK) {
|
copy_data) != WEBP_MUX_OK) {
|
||||||
goto Err;
|
goto Err;
|
||||||
@ -236,11 +247,11 @@ WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data,
|
|||||||
switch (id) {
|
switch (id) {
|
||||||
case WEBP_CHUNK_ALPHA:
|
case WEBP_CHUNK_ALPHA:
|
||||||
if (wpi->alpha_ != NULL) goto Err; // Consecutive ALPH chunks.
|
if (wpi->alpha_ != NULL) goto Err; // Consecutive ALPH chunks.
|
||||||
if (ChunkSetNth(&chunk, &wpi->alpha_, 1) != WEBP_MUX_OK) goto Err;
|
if (ChunkSetHead(&chunk, &wpi->alpha_) != WEBP_MUX_OK) goto Err;
|
||||||
wpi->is_partial_ = 1; // Waiting for a VP8 chunk.
|
wpi->is_partial_ = 1; // Waiting for a VP8 chunk.
|
||||||
break;
|
break;
|
||||||
case WEBP_CHUNK_IMAGE:
|
case WEBP_CHUNK_IMAGE:
|
||||||
if (ChunkSetNth(&chunk, &wpi->img_, 1) != WEBP_MUX_OK) goto Err;
|
if (ChunkSetHead(&chunk, &wpi->img_) != WEBP_MUX_OK) goto Err;
|
||||||
if (!MuxImageFinalize(wpi)) goto Err;
|
if (!MuxImageFinalize(wpi)) goto Err;
|
||||||
wpi->is_partial_ = 0; // wpi is completely filled.
|
wpi->is_partial_ = 0; // wpi is completely filled.
|
||||||
PushImage:
|
PushImage:
|
||||||
@@ -257,9 +268,13 @@ WebPMux* WebPMuxCreateInternal(const WebPData* bitstream, int copy_data,
       default:  // A non-image chunk.
         if (wpi->is_partial_) goto Err;  // Encountered a non-image chunk before
                                          // getting all chunks of an image.
-        chunk_list = MuxGetChunkListFromId(mux, id);  // List to add this chunk.
-        if (ChunkSetNth(&chunk, chunk_list, 0) != WEBP_MUX_OK) goto Err;
+        if (chunk_list_ends[id] == NULL) {
+          chunk_list_ends[id] =
+              MuxGetChunkListFromId(mux, id);  // List to add this chunk.
+        }
+        if (ChunkAppend(&chunk, &chunk_list_ends[id]) != WEBP_MUX_OK) goto Err;
         if (id == WEBP_CHUNK_VP8X) {  // grab global specs
+          if (data_size < CHUNK_HEADER_SIZE + VP8X_CHUNK_SIZE) goto Err;
           mux->canvas_width_ = GetLE24(data + 12) + 1;
           mux->canvas_height_ = GetLE24(data + 15) + 1;
         }
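The new chunk_list_ends[] bookkeeping caches the tail of each per-id chunk list, so every non-image chunk is appended in constant time instead of re-walking its list. A generic sketch of that tail-pointer idiom (a plain singly linked list, not the mux's WebPChunk type):

```c
#include <stddef.h>

typedef struct Node { struct Node* next; int payload; } Node;

// Append in O(1) by carrying a pointer to the slot that currently ends the
// list; returns the slot to use for the next append.
static Node** AppendAtTail(Node** tail_slot, Node* item) {
  item->next = NULL;
  *tail_slot = item;
  return &item->next;
}

// Usage: Node* head = NULL; Node** tail = &head;
//        tail = AppendAtTail(tail, node1);
//        tail = AppendAtTail(tail, node2);  // no traversal from 'head'
```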
@@ -385,6 +400,10 @@ static WebPMuxError SynthesizeBitstream(const WebPMuxImage* const wpi,
   uint8_t* const data = (uint8_t*)WebPSafeMalloc(1ULL, size);
   if (data == NULL) return WEBP_MUX_MEMORY_ERROR;
 
+  // There should be at most one alpha_ chunk and exactly one img_ chunk.
+  assert(wpi->alpha_ == NULL || wpi->alpha_->next_ == NULL);
+  assert(wpi->img_ != NULL && wpi->img_->next_ == NULL);
+
   // Main RIFF header.
   dst = MuxEmitRiffHeader(data, size);
 
|
3rdparty/libwebp/src/utils/bit_reader_utils.h  vendored
@@ -172,4 +172,4 @@ static WEBP_INLINE void VP8LFillBitWindow(VP8LBitReader* const br) {
 } // extern "C"
 #endif
 
-#endif /* WEBP_UTILS_BIT_READER_UTILS_H_ */
+#endif // WEBP_UTILS_BIT_READER_UTILS_H_
|
3rdparty/libwebp/src/utils/bit_writer_utils.c  vendored
@@ -248,6 +248,7 @@ int VP8LBitWriterClone(const VP8LBitWriter* const src,
   dst->bits_ = src->bits_;
   dst->used_ = src->used_;
   dst->error_ = src->error_;
+  dst->cur_ = dst->buf_ + current_size;
   return 1;
 }
 
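The added assignment rebases the clone's write cursor onto its own buffer; without it, dst->cur_ keeps pointing into the source writer's memory. A minimal illustration of that bug class with a made-up struct (not libwebp's VP8LBitWriter):

```c
#include <stdlib.h>
#include <string.h>

typedef struct {
  unsigned char* buf;   // owned storage
  unsigned char* cur;   // write position, always inside 'buf'
  size_t cap;
} Writer;

static int CloneWriter(const Writer* src, Writer* dst) {
  const size_t used = (size_t)(src->cur - src->buf);
  dst->buf = (unsigned char*)malloc(src->cap);
  if (dst->buf == NULL) return 0;
  memcpy(dst->buf, src->buf, used);
  dst->cap = src->cap;
  dst->cur = dst->buf + used;  // rebase the cursor onto the copy
  return 1;
}
```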
|
3rdparty/libwebp/src/utils/bit_writer_utils.h  vendored
@@ -151,4 +151,4 @@ static WEBP_INLINE void VP8LPutBits(VP8LBitWriter* const bw,
 } // extern "C"
 #endif
 
-#endif /* WEBP_UTILS_BIT_WRITER_UTILS_H_ */
+#endif // WEBP_UTILS_BIT_WRITER_UTILS_H_
|
2  3rdparty/libwebp/src/utils/filters_utils.h  vendored
@@ -29,4 +29,4 @@ WEBP_FILTER_TYPE WebPEstimateBestFilter(const uint8_t* data,
 } // extern "C"
 #endif
 
-#endif /* WEBP_UTILS_FILTERS_UTILS_H_ */
+#endif // WEBP_UTILS_FILTERS_UTILS_H_
|
3rdparty/libwebp/src/utils/quant_levels_dec_utils.c  vendored
@@ -261,9 +261,15 @@ static void CleanupParams(SmoothParams* const p) {
 
 int WebPDequantizeLevels(uint8_t* const data, int width, int height, int stride,
                          int strength) {
-  const int radius = 4 * strength / 100;
+  int radius = 4 * strength / 100;
+
   if (strength < 0 || strength > 100) return 0;
   if (data == NULL || width <= 0 || height <= 0) return 0;  // bad params
+
+  // limit the filter size to not exceed the image dimensions
+  if (2 * radius + 1 > width) radius = (width - 1) >> 1;
+  if (2 * radius + 1 > height) radius = (height - 1) >> 1;
+
   if (radius > 0) {
     SmoothParams p;
     memset(&p, 0, sizeof(p));
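The new clamp keeps the smoothing window of 2 * radius + 1 pixels inside the image. Restating just that arithmetic from the hunk above: with strength 100 the nominal radius is 4 (a 9-pixel window), so a 5-pixel-wide image reduces it to (5 - 1) >> 1 = 2, a 5-pixel window that fits exactly.

```c
// Same clamp as in the hunk above, isolated for illustration.
static int ClampedRadius(int strength, int width, int height) {
  int radius = 4 * strength / 100;
  if (2 * radius + 1 > width) radius = (width - 1) >> 1;
  if (2 * radius + 1 > height) radius = (height - 1) >> 1;
  return radius;  // e.g. strength = 100 on a 5x5 image -> radius 2
}
```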
|
3rdparty/libwebp/src/utils/quant_levels_dec_utils.h  vendored
@@ -32,4 +32,4 @@ int WebPDequantizeLevels(uint8_t* const data, int width, int height, int stride,
 } // extern "C"
 #endif
 
-#endif /* WEBP_UTILS_QUANT_LEVELS_DEC_UTILS_H_ */
+#endif // WEBP_UTILS_QUANT_LEVELS_DEC_UTILS_H_
|
3rdparty/libwebp/src/utils/quant_levels_utils.h  vendored
@@ -33,4 +33,4 @@ int QuantizeLevels(uint8_t* const data, int width, int height, int num_levels,
 } // extern "C"
 #endif
 
-#endif /* WEBP_UTILS_QUANT_LEVELS_UTILS_H_ */
+#endif // WEBP_UTILS_QUANT_LEVELS_UTILS_H_
|
2  3rdparty/libwebp/src/utils/random_utils.h  vendored
@@ -60,4 +60,4 @@ static WEBP_INLINE int VP8RandomBits(VP8Random* const rg, int num_bits) {
 } // extern "C"
 #endif
 
-#endif /* WEBP_UTILS_RANDOM_UTILS_H_ */
+#endif // WEBP_UTILS_RANDOM_UTILS_H_
|
2  3rdparty/libwebp/src/utils/rescaler_utils.h  vendored
@@ -98,4 +98,4 @@ int WebPRescalerHasPendingOutput(const WebPRescaler* const rescaler) {
 } // extern "C"
 #endif
 
-#endif /* WEBP_UTILS_RESCALER_UTILS_H_ */
+#endif // WEBP_UTILS_RESCALER_UTILS_H_
|
2  3rdparty/libwebp/src/utils/thread_utils.h  vendored
@@ -87,4 +87,4 @@ WEBP_EXTERN const WebPWorkerInterface* WebPGetWorkerInterface(void);
 } // extern "C"
 #endif
 
-#endif /* WEBP_UTILS_THREAD_UTILS_H_ */
+#endif // WEBP_UTILS_THREAD_UTILS_H_
|
28  3rdparty/libwebp/src/utils/utils.h  vendored
@@ -107,19 +107,6 @@ static WEBP_INLINE void PutLE32(uint8_t* const data, uint32_t val) {
   PutLE16(data + 2, (int)(val >> 16));
 }
 
-// Returns 31 ^ clz(n) = log2(n). This is the default C-implementation, either
-// based on table or not. Can be used as fallback if clz() is not available.
-#define WEBP_NEED_LOG_TABLE_8BIT
-extern const uint8_t WebPLogTable8bit[256];
-static WEBP_INLINE int WebPLog2FloorC(uint32_t n) {
-  int log_value = 0;
-  while (n >= 256) {
-    log_value += 8;
-    n >>= 8;
-  }
-  return log_value + WebPLogTable8bit[n];
-}
-
 // Returns (int)floor(log2(n)). n must be > 0.
 // use GNU builtins where available.
 #if defined(__GNUC__) && \
@@ -138,6 +125,19 @@ static WEBP_INLINE int BitsLog2Floor(uint32_t n) {
   return first_set_bit;
 }
 #else // default: use the C-version.
+// Returns 31 ^ clz(n) = log2(n). This is the default C-implementation, either
+// based on table or not. Can be used as fallback if clz() is not available.
+#define WEBP_NEED_LOG_TABLE_8BIT
+extern const uint8_t WebPLogTable8bit[256];
+static WEBP_INLINE int WebPLog2FloorC(uint32_t n) {
+  int log_value = 0;
+  while (n >= 256) {
+    log_value += 8;
+    n >>= 8;
+  }
+  return log_value + WebPLogTable8bit[n];
+}
+
 static WEBP_INLINE int BitsLog2Floor(uint32_t n) { return WebPLog2FloorC(n); }
 #endif
 
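The relocated fallback computes floor(log2(n)) by shifting n down a byte at a time and finishing with a 256-entry lookup: for n = 0x40000 the loop contributes 16 and the table entry for 4 contributes 2, giving 18. The real WebPLogTable8bit is a precomputed constant defined elsewhere in the library; a sketch of how such a table could be generated:

```c
#include <stdint.h>

static void BuildLogTable8bit(uint8_t table[256]) {
  int n;
  table[0] = 0;  // input 0 is not a valid log argument; keep a defined value
  for (n = 1; n < 256; ++n) {
    int log_value = 0;
    int v = n;
    while (v >>= 1) ++log_value;  // floor(log2(n)) for 1..255
    table[n] = (uint8_t)log_value;
  }
}
```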
@@ -175,4 +175,4 @@ WEBP_EXTERN int WebPGetColorPalette(const struct WebPPicture* const pic,
 } // extern "C"
 #endif
 
-#endif /* WEBP_UTILS_UTILS_H_ */
+#endif // WEBP_UTILS_UTILS_H_
|
14  3rdparty/libwebp/src/webp/decode.h  vendored
@@ -42,6 +42,12 @@ WEBP_EXTERN int WebPGetDecoderVersion(void);
 // This function will also validate the header, returning true on success,
 // false otherwise. '*width' and '*height' are only valid on successful return.
 // Pointers 'width' and 'height' can be passed NULL if deemed irrelevant.
+// Note: The following chunk sequences (before the raw VP8/VP8L data) are
+// considered valid by this function:
+// RIFF + VP8(L)
+// RIFF + VP8X + (optional chunks) + VP8(L)
+// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purpose.
+// VP8(L) <-- Not a valid WebP format: only allowed for internal purpose.
 WEBP_EXTERN int WebPGetInfo(const uint8_t* data, size_t data_size,
                             int* width, int* height);
 
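WebPGetInfo() is the public probe for the header shapes listed in the new comment. A minimal usage sketch, assuming the buffer already holds a candidate WebP file and that the header is included as webp/decode.h:

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include "webp/decode.h"

static void PrintWebPSize(const uint8_t* data, size_t data_size) {
  int width = 0, height = 0;
  if (!WebPGetInfo(data, data_size, &width, &height)) {
    fprintf(stderr, "not a recognizable WebP header\n");
    return;
  }
  printf("WebP canvas: %d x %d\n", width, height);
}
```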
@@ -425,6 +431,12 @@ WEBP_EXTERN VP8StatusCode WebPGetFeaturesInternal(
 // Returns VP8_STATUS_OK when the features are successfully retrieved. Returns
 // VP8_STATUS_NOT_ENOUGH_DATA when more data is needed to retrieve the
 // features from headers. Returns error in other cases.
+// Note: The following chunk sequences (before the raw VP8/VP8L data) are
+// considered valid by this function:
+// RIFF + VP8(L)
+// RIFF + VP8X + (optional chunks) + VP8(L)
+// ALPH + VP8 <-- Not a valid WebP format: only allowed for internal purpose.
+// VP8(L) <-- Not a valid WebP format: only allowed for internal purpose.
 static WEBP_INLINE VP8StatusCode WebPGetFeatures(
     const uint8_t* data, size_t data_size,
     WebPBitstreamFeatures* features) {
@@ -491,4 +503,4 @@ WEBP_EXTERN VP8StatusCode WebPDecode(const uint8_t* data, size_t data_size,
 } // extern "C"
 #endif
 
-#endif /* WEBP_WEBP_DECODE_H_ */
+#endif // WEBP_WEBP_DECODE_H_
|
2  3rdparty/libwebp/src/webp/demux.h  vendored
@@ -360,4 +360,4 @@ WEBP_EXTERN void WebPAnimDecoderDelete(WebPAnimDecoder* dec);
 } // extern "C"
 #endif
 
-#endif /* WEBP_WEBP_DEMUX_H_ */
+#endif // WEBP_WEBP_DEMUX_H_
|
2  3rdparty/libwebp/src/webp/encode.h  vendored
@@ -542,4 +542,4 @@ WEBP_EXTERN int WebPEncode(const WebPConfig* config, WebPPicture* picture);
 } // extern "C"
 #endif
 
-#endif /* WEBP_WEBP_ENCODE_H_ */
+#endif // WEBP_WEBP_ENCODE_H_
|
2  3rdparty/libwebp/src/webp/format_constants.h  vendored
@@ -84,4 +84,4 @@ typedef enum {
 // overflow a uint32_t.
 #define MAX_CHUNK_PAYLOAD (~0U - CHUNK_HEADER_SIZE - 1)
 
-#endif /* WEBP_WEBP_FORMAT_CONSTANTS_H_ */
+#endif // WEBP_WEBP_FORMAT_CONSTANTS_H_
|
2  3rdparty/libwebp/src/webp/mux.h  vendored
@@ -527,4 +527,4 @@ WEBP_EXTERN void WebPAnimEncoderDelete(WebPAnimEncoder* enc);
 } // extern "C"
 #endif
 
-#endif /* WEBP_WEBP_MUX_H_ */
+#endif // WEBP_WEBP_MUX_H_
|
2  3rdparty/libwebp/src/webp/mux_types.h  vendored
@@ -95,4 +95,4 @@ static WEBP_INLINE int WebPDataCopy(const WebPData* src, WebPData* dst) {
 } // extern "C"
 #endif
 
-#endif /* WEBP_WEBP_MUX_TYPES_H_ */
+#endif // WEBP_WEBP_MUX_TYPES_H_
|
2  3rdparty/libwebp/src/webp/types.h  vendored
@@ -49,4 +49,4 @@ typedef long long int int64_t;
 // Macro to check ABI compatibility (same major revision number)
 #define WEBP_ABI_IS_INCOMPATIBLE(a, b) (((a) >> 8) != ((b) >> 8))
 
-#endif /* WEBP_WEBP_TYPES_H_ */
+#endif // WEBP_WEBP_TYPES_H_
|