summaryrefslogtreecommitdiff
path: root/chromium/third_party/blink/renderer/platform/image-decoders/image_decoder.h
blob: 6688d323ec20bd3bd87bab7a5fde7f714f668273 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
/*
 * Copyright (C) 2006 Apple Computer, Inc.  All rights reserved.
 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE COMPUTER, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_IMAGE_DECODERS_IMAGE_DECODER_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_IMAGE_DECODERS_IMAGE_DECODER_H_

#include <memory>

#include "base/memory/scoped_refptr.h"
#include "third_party/blink/renderer/platform/graphics/color_behavior.h"
#include "third_party/blink/renderer/platform/graphics/image_orientation.h"
#include "third_party/blink/renderer/platform/graphics/paint/paint_image.h"
#include "third_party/blink/renderer/platform/image-decoders/image_animation.h"
#include "third_party/blink/renderer/platform/image-decoders/image_frame.h"
#include "third_party/blink/renderer/platform/image-decoders/segment_reader.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/wtf/assertions.h"
#include "third_party/blink/renderer/platform/wtf/shared_buffer.h"
#include "third_party/blink/renderer/platform/wtf/text/wtf_string.h"

#include "third_party/blink/renderer/platform/wtf/vector.h"
#include "third_party/skia/include/core/SkImageInfo.h"
#include "third_party/skia/include/third_party/skcms/skcms.h"

class SkColorSpace;

namespace blink {

// Returns the skcms 8888 pixel format whose channel order matches Skia's
// configured byte order: RGBA when SK_B32_SHIFT is set, BGRA otherwise.
inline skcms_PixelFormat XformColorFormat() {
#if SK_B32_SHIFT
  return skcms_PixelFormat_RGBA_8888;
#else
  return skcms_PixelFormat_BGRA_8888;
#endif
}

// ImagePlanes can be used to decode color components into provided buffers
// instead of using an ImageFrame.
class PLATFORM_EXPORT ImagePlanes final {
  USING_FAST_MALLOC(ImagePlanes);

 public:
  ImagePlanes();

  // |planes| and |row_bytes| give one destination buffer (and its stride)
  // per YUV plane. The buffers are not owned by ImagePlanes: the implicit
  // destructor does not free them, so they must outlive this object.
  //
  // |color_type| is kGray_8_SkColorType if GetYUVBitDepth() == 8 and either
  // kA16_float_SkColorType or kA16_unorm_SkColorType if GetYUVBitDepth() > 8.
  //
  // TODO(crbug/910276): To support YUVA, ImagePlanes needs to support a
  // variable number of planes.
  ImagePlanes(void* planes[cc::kNumYUVPlanes],
              const size_t row_bytes[cc::kNumYUVPlanes],
              SkColorType color_type);

  // Destination buffer for the given plane.
  void* Plane(cc::YUVIndex);
  // Stride (bytes per row) of the given plane's buffer.
  size_t RowBytes(cc::YUVIndex) const;
  SkColorType color_type() const { return color_type_; }
  // Called by decoders once a full scan of the image has been written into
  // the planes; queried via HasCompleteScan().
  void SetHasCompleteScan() { has_complete_scan_ = true; }
  bool HasCompleteScan() const { return has_complete_scan_; }

 private:
  void* planes_[cc::kNumYUVPlanes];      // Non-owning destination buffers.
  size_t row_bytes_[cc::kNumYUVPlanes];  // Bytes per row, per plane.
  SkColorType color_type_;
  bool has_complete_scan_ = false;

  DISALLOW_COPY_AND_ASSIGN(ImagePlanes);
};

// Wraps an skcms_ICCProfile together with an optional heap buffer that backs
// the profile's raw data, keeping that buffer alive for the profile's
// lifetime.
class PLATFORM_EXPORT ColorProfile final {
  USING_FAST_MALLOC(ColorProfile);

 public:
  // |buffer|, when non-null, transfers ownership of the memory the profile
  // refers to.
  ColorProfile(const skcms_ICCProfile&, std::unique_ptr<uint8_t[]> = nullptr);
  // Builds a ColorProfile from raw ICC data of |size| bytes.
  // NOTE(review): presumably returns nullptr when the data cannot be parsed
  // as an ICC profile — confirm against the implementation.
  static std::unique_ptr<ColorProfile> Create(const void* buffer, size_t size);

  const skcms_ICCProfile* GetProfile() const { return &profile_; }

 private:
  skcms_ICCProfile profile_;
  std::unique_ptr<uint8_t[]> buffer_;  // Optional backing storage for
                                       // |profile_|'s raw data.

  DISALLOW_COPY_AND_ASSIGN(ColorProfile);
};

// Holds a source/destination ICC profile pair describing a color space
// conversion. Note the asymmetry in storage: the source profile is kept by
// pointer (so it must outlive this object), while the destination profile
// is copied by value.
class PLATFORM_EXPORT ColorProfileTransform final {
  USING_FAST_MALLOC(ColorProfileTransform);

 public:
  // |src_profile| is retained as a raw pointer; |dst_profile| is copied.
  ColorProfileTransform(const skcms_ICCProfile* src_profile,
                        const skcms_ICCProfile* dst_profile);

  const skcms_ICCProfile* SrcProfile() const;
  const skcms_ICCProfile* DstProfile() const;

 private:
  const skcms_ICCProfile* src_profile_;  // Not owned; must outlive |this|.
  skcms_ICCProfile dst_profile_;         // Owned copy.

  DISALLOW_COPY_AND_ASSIGN(ColorProfileTransform);
};

// ImageDecoder is a base for all format-specific decoders
// (e.g. JPEGImageDecoder). This base manages the ImageFrame cache.
//
class PLATFORM_EXPORT ImageDecoder {
  USING_FAST_MALLOC(ImageDecoder);

 public:
  // Sentinel value meaning there is no limit on the number of decoded image
  // bytes (see |max_decoded_bytes_|).
  static const size_t kNoDecodedImageByteLimit;

  enum AlphaOption { kAlphaPremultiplied, kAlphaNotPremultiplied };
  enum HighBitDepthDecodingOption {
    // Decode everything to uint8 pixel format (kN32 channel order).
    kDefaultBitDepth,
    // Decode high bit depth images to half float pixel format.
    kHighBitDepthToHalfFloat
  };

  // The first three values are as defined in webp/decode.h, the last value
  // specifies WebP animation formats.
  enum CompressionFormat {
    kUndefinedFormat = 0,
    kLossyFormat = 1,
    kLosslessFormat = 2,
    kWebPAnimationFormat = 3,
    kMaxValue = kWebPAnimationFormat,
  };

  // For images which contain both animations and still images, indicates which
  // is preferred. When unspecified the decoder will use hints from the data
  // stream to make a decision.
  //
  // Note: |animation_option| is unused by formats like GIF or APNG since they
  // do not have distinct still and animated tracks. I.e., there is either only
  // an animation or only a still image. If a caller only wants a still image
  // from a GIF or APNG animation, they can choose to only decode the first
  // frame. With a format like AVIF where there are distinct still and animation
  // tracks, callers need a mechanism to choose.
  enum class AnimationOption {
    kUnspecified,
    kPreferAnimation,
    kPreferStillImage,
  };

  virtual ~ImageDecoder() = default;

  // Returns a caller-owned decoder of the appropriate type.  Returns nullptr if
  // we can't sniff a supported type from the provided data (possibly
  // because there isn't enough data yet).
  // Sets |max_decoded_bytes_| to Platform::MaxImageDecodedBytes().
  static std::unique_ptr<ImageDecoder> Create(
      scoped_refptr<SegmentReader> data,
      bool data_complete,
      AlphaOption,
      HighBitDepthDecodingOption,
      const ColorBehavior&,
      const SkISize& desired_size = SkISize::MakeEmpty(),
      AnimationOption animation_option = AnimationOption::kUnspecified);
  // Convenience overload: wraps |data| in a SegmentReader and forwards to the
  // overload above.
  static std::unique_ptr<ImageDecoder> Create(
      scoped_refptr<SharedBuffer> data,
      bool data_complete,
      AlphaOption alpha_option,
      HighBitDepthDecodingOption high_bit_depth_decoding_option,
      const ColorBehavior& color_behavior,
      const SkISize& desired_size = SkISize::MakeEmpty(),
      AnimationOption animation_option = AnimationOption::kUnspecified) {
    return Create(SegmentReader::CreateFromSharedBuffer(std::move(data)),
                  data_complete, alpha_option, high_bit_depth_decoding_option,
                  color_behavior, desired_size, animation_option);
  }

  // Similar to above, but does not allow mime sniffing. Creates explicitly
  // based on the |mime_type| value.
  static std::unique_ptr<ImageDecoder> CreateByMimeType(
      String mime_type,
      scoped_refptr<SegmentReader> data,
      bool data_complete,
      AlphaOption alpha_option,
      HighBitDepthDecodingOption high_bit_depth_decoding_option,
      const ColorBehavior& color_behavior,
      const SkISize& desired_size = SkISize::MakeEmpty(),
      AnimationOption animation_option = AnimationOption::kUnspecified);

  // The filename extension associated with this decoder's image format.
  virtual String FilenameExtension() const = 0;

  // True once SetData() has been called with |all_data_received| == true.
  bool IsAllDataReceived() const { return is_all_data_received_; }

  // Returns true if the decoder supports decoding to high bit depth. The
  // decoded output will be high bit depth (half float backed bitmap) iff
  // encoded image is high bit depth and high_bit_depth_decoding_option_ is set
  // to kHighBitDepthToHalfFloat.
  virtual bool ImageIsHighBitDepth() { return false; }

  // Returns true if the buffer holds enough data to instantiate a decoder.
  // This is useful for callers to determine whether a decoder instantiation
  // failure is due to insufficient or bad data.
  static bool HasSufficientDataToSniffMimeType(const SharedBuffer&);

  // Looks at the image data to determine and return the image MIME type.
  static String SniffMimeType(scoped_refptr<SharedBuffer> image_data);

  // Returns the image data's compression format.
  static CompressionFormat GetCompressionFormat(
      scoped_refptr<SharedBuffer> image_data,
      String mime_type);

  // Provides (possibly partial) encoded data to the decoder and notifies
  // subclasses via OnSetData(). No-op once decoding has failed.
  void SetData(scoped_refptr<SegmentReader> data, bool all_data_received) {
    if (failed_)
      return;
    data_ = std::move(data);
    is_all_data_received_ = all_data_received;
    OnSetData(data_.get());
  }

  // Convenience overload: wraps |data| in a SegmentReader.
  void SetData(scoped_refptr<SharedBuffer> data, bool all_data_received) {
    SetData(SegmentReader::CreateFromSharedBuffer(std::move(data)),
            all_data_received);
  }

  // Hook for subclasses to react to newly received data; called by SetData().
  virtual void OnSetData(SegmentReader* data) {}

  // Returns whether the image size is known yet; may parse the data received
  // so far to determine it (see the implementation).
  bool IsSizeAvailable();

  bool IsDecodedSizeAvailable() const { return !failed_ && size_available_; }

  // The image size, as recorded by SetSize().
  virtual IntSize Size() const { return size_; }
  virtual Vector<SkISize> GetSupportedDecodeSizes() const { return {}; }

  // Decoders which downsample images should override this method to
  // return the actual decoded size.
  virtual IntSize DecodedSize() const { return Size(); }

  // The YUV subsampling of the image.
  virtual cc::YUVSubsampling GetYUVSubsampling() const {
    return cc::YUVSubsampling::kUnknown;
  }

  // Image decoders that support YUV decoding must override this to
  // provide the size of each component.
  virtual IntSize DecodedYUVSize(cc::YUVIndex) const {
    NOTREACHED();
    return IntSize();
  }

  // Image decoders that support YUV decoding must override this to
  // return the width of each row of the memory allocation.
  virtual size_t DecodedYUVWidthBytes(cc::YUVIndex) const {
    NOTREACHED();
    return 0;
  }

  // Image decoders that support YUV decoding must override this to
  // return the SkYUVColorSpace that is used to convert from YUV
  // to RGB.
  virtual SkYUVColorSpace GetYUVColorSpace() const {
    NOTREACHED();
    return SkYUVColorSpace::kIdentity_SkYUVColorSpace;
  }

  // Image decoders that support high bit depth YUV decoding can override this.
  //
  // Note: If an implementation advertises a bit depth > 8 it must support both
  // kA16_unorm_SkColorType and kA16_float_SkColorType ImagePlanes.
  virtual uint8_t GetYUVBitDepth() const { return 8; }

  // Returns the information required to decide whether or not hardware
  // acceleration can be used to decode this image. Callers of this function
  // must ensure the header was successfully parsed prior to calling this
  // method, i.e., IsDecodedSizeAvailable() must return true.
  virtual cc::ImageHeaderMetadata MakeMetadataForDecodeAcceleration() const;

  // This will only differ from Size() for ICO (where each frame is a
  // different icon) or other formats where different frames are different
  // sizes. This does NOT differ from Size() for GIF or WebP, since
  // decoding GIF or WebP composites any smaller frames against previous
  // frames to create full-size frames.
  virtual IntSize FrameSizeAtIndex(size_t) const { return Size(); }

  // Returns whether the size is legal (i.e. not going to result in
  // overflow elsewhere).  If not, marks decoding as failed.
  virtual bool SetSize(unsigned width, unsigned height) {
    // 4 bytes/pixel for 8888 output; 8 for half-float high bit depth output.
    unsigned decoded_bytes_per_pixel = 4;
    if (ImageIsHighBitDepth() &&
        high_bit_depth_decoding_option_ == kHighBitDepthToHalfFloat)
      decoded_bytes_per_pixel = 8;
    if (SizeCalculationMayOverflow(width, height, decoded_bytes_per_pixel))
      return SetFailed();

    size_ = IntSize(width, height);
    size_available_ = true;
    return true;
  }

  // Calls DecodeFrameCount() to get the current frame count (if possible),
  // without decoding the individual frames.  Resizes |frame_buffer_cache_| to
  // the new size and returns that size.
  //
  // Note: FrameCount() returns the return value of DecodeFrameCount(). For more
  // information on the return value, see the comment for DecodeFrameCount().
  size_t FrameCount();

  virtual int RepetitionCount() const { return kAnimationNone; }

  // Decodes as much of the requested frame as possible, and returns an
  // ImageDecoder-owned pointer.
  ImageFrame* DecodeFrameBufferAtIndex(size_t);

  // Whether the requested frame has alpha.
  virtual bool FrameHasAlphaAtIndex(size_t) const;

  // Whether or not the frame is fully received.
  virtual bool FrameIsReceivedAtIndex(size_t) const;

  // Returns true if a cached complete decode is available.
  bool FrameIsDecodedAtIndex(size_t) const;

  // Duration for displaying a frame. This method is only used by animated
  // images.
  virtual base::TimeDelta FrameDurationAtIndex(size_t) const {
    return base::TimeDelta();
  }

  // Number of bytes in the decoded frame. Returns 0 if the decoder doesn't
  // have this frame cached (either because it hasn't been decoded, or because
  // it has been cleared).
  virtual size_t FrameBytesAtIndex(size_t) const;

  ImageOrientation Orientation() const { return orientation_; }
  IntSize DensityCorrectedSize() const { return density_corrected_size_; }

  bool IgnoresColorSpace() const { return color_behavior_.IsIgnore(); }
  const ColorBehavior& GetColorBehavior() const { return color_behavior_; }

  // This returns the color space that will be included in the SkImageInfo of
  // SkImages created from this decoder. This will be nullptr unless the
  // decoder was created with the option ColorSpaceTagged.
  sk_sp<SkColorSpace> ColorSpaceForSkImages();

  // This returns whether or not the image included a not-ignored embedded
  // color profile. This is independent of whether or not that profile's
  // transform has been baked into the pixel values.
  bool HasEmbeddedColorProfile() const { return embedded_color_profile_.get(); }

  void SetEmbeddedColorProfile(std::unique_ptr<ColorProfile> profile);

  // Transformation from embedded color space to target color space.
  ColorProfileTransform* ColorTransform();

  // The AlphaOption this decoder was constructed with.
  AlphaOption GetAlphaOption() const {
    return premultiply_alpha_ ? kAlphaPremultiplied : kAlphaNotPremultiplied;
  }

  // The decoded-memory budget passed at construction; see
  // |max_decoded_bytes_| below.
  size_t GetMaxDecodedBytes() const { return max_decoded_bytes_; }

  // Sets the "decode failure" flag.  For caller convenience (since so
  // many callers want to return false after calling this), returns false
  // to enable easy tailcalling.  Subclasses may override this to also
  // clean up any local data.
  virtual bool SetFailed() {
    failed_ = true;
    return false;
  }

  bool Failed() const { return failed_; }

  // Clears decoded pixel data from all frames except the provided frame. If
  // subsequent frames depend on this frame's required previous frame, then that
  // frame is also kept in cache to prevent re-decoding from the beginning.
  // Callers may pass WTF::kNotFound to clear all frames.
  // Note: If |frame_buffer_cache_| contains only one frame, it won't be
  // cleared. Returns the number of bytes of frame data actually cleared.
  virtual size_t ClearCacheExceptFrame(size_t);

  // If the image has a cursor hot-spot, stores it in the argument
  // and returns true. Otherwise returns false.
  virtual bool HotSpot(IntPoint&) const { return false; }

  virtual void SetMemoryAllocator(SkBitmap::Allocator* allocator) {
    // This currently doesn't work for images with multiple frames.
    // Some animated image formats require extra guarantees:
    // 1. The memory is cheaply readable, which isn't true for GPU memory, and
    // 2. The memory's lifetime will persist long enough to allow reading past
    //   frames, which isn't true for discardable memory.
    // Not all animated image formats share these requirements. Blocking
    // all animated formats is overly aggressive. If a need arises for an
    // external memory allocator for animated images, this should be changed.
    if (frame_buffer_cache_.IsEmpty()) {
      // Ensure that InitializeNewFrame is called, after parsing if
      // necessary.
      if (!FrameCount())
        return;
    }

    frame_buffer_cache_[0].SetMemoryAllocator(allocator);
  }

  bool CanDecodeToYUV() const { return allow_decode_to_yuv_; }
  // Should only be called if CanDecodeToYUV() returns true, in which case
  // the subclass of ImageDecoder must override this method.
  virtual void DecodeToYUV() { NOTREACHED(); }
  // Takes ownership of the destination planes used by DecodeToYUV().
  void SetImagePlanes(std::unique_ptr<ImagePlanes> image_planes) {
    image_planes_ = std::move(image_planes);
  }
  // True once YUV planes are present and hold a complete scan of the image.
  bool HasDisplayableYUVData() const {
    return image_planes_ && image_planes_->HasCompleteScan();
  }

  // Indicates if the data contains both an animation and still image.
  virtual bool ImageHasBothStillAndAnimatedSubImages() const { return false; }

 protected:
  // |max_decoded_bytes| bounds the memory a decoded image should require;
  // see |max_decoded_bytes_| below.
  ImageDecoder(AlphaOption alpha_option,
               HighBitDepthDecodingOption high_bit_depth_decoding_option,
               const ColorBehavior& color_behavior,
               size_t max_decoded_bytes)
      : premultiply_alpha_(alpha_option == kAlphaPremultiplied),
        high_bit_depth_decoding_option_(high_bit_depth_decoding_option),
        color_behavior_(color_behavior),
        max_decoded_bytes_(max_decoded_bytes),
        allow_decode_to_yuv_(false),
        purge_aggressively_(false) {}

  // Calculates the most recent frame whose image data may be needed in
  // order to decode frame |frame_index|, based on frame disposal methods
  // and |frame_rect_is_opaque|, where |frame_rect_is_opaque| signifies whether
  // the rectangle of frame at |frame_index| is known to be opaque.
  // If no previous frame's data is required, returns WTF::kNotFound.
  //
  // This function requires that the previous frame's
  // |required_previous_frame_index_| member has been set correctly. The
  // easiest way to ensure this is for subclasses to call this method and
  // store the result on the frame via SetRequiredPreviousFrameIndex()
  // as soon as the frame has been created and parsed sufficiently to
  // determine the disposal method; assuming this happens for all frames
  // in order, the required invariant will hold.
  //
  // Image formats which do not use more than one frame do not need to
  // worry about this; see comments on
  // ImageFrame::required_previous_frame_index_.
  size_t FindRequiredPreviousFrame(size_t frame_index,
                                   bool frame_rect_is_opaque);

  // This is called by ClearCacheExceptFrame() if that method decides it wants
  // to preserve another frame, to avoid unnecessary redecoding.
  size_t ClearCacheExceptTwoFrames(size_t, size_t);
  virtual void ClearFrameBuffer(size_t frame_index);

  // Decodes the image sufficiently to determine the image size.
  virtual void DecodeSize() = 0;

  // Decodes the image sufficiently to determine the number of frames and
  // returns that number.
  //
  // If an image format supports images with multiple frames, the decoder must
  // override this method. FrameCount() calls this method and resizes
  // |frame_buffer_cache_| to the return value of this method. Therefore, on
  // failure this method should return |frame_buffer_cache_.size()| (the
  // existing number of frames) instead of 0 to leave |frame_buffer_cache_|
  // unchanged.
  //
  // This method may return an increasing frame count as frames are received and
  // parsed. Alternatively, if the total frame count is available in the image
  // header, this method may return the total frame count without checking how
  // many frames are received.
  virtual size_t DecodeFrameCount() { return 1; }

  // Called to initialize the frame buffer with the given index, based on the
  // provided and previous frame's characteristics. Returns true on success.
  // Before calling this method, the caller must verify that the frame exists.
  // On failure, the client should call SetFailed. This method does not call
  // SetFailed itself because that might delete the object directly making this
  // call.
  bool InitFrameBuffer(size_t);

  // Performs any decoder-specific setup of the requested frame after it has
  // been newly created, e.g. setting the frame's duration or disposal method.
  virtual void InitializeNewFrame(size_t) {}

  // Decodes the requested frame.
  virtual void Decode(size_t) = 0;

  // This method is only required for animated images. It returns a vector with
  // all frame indices that need to be decoded in order to successfully decode
  // the provided frame.  The indices are returned in reverse order, so the
  // last frame needs to be decoded first.  Before calling this method, the
  // caller must verify that the frame exists.
  Vector<size_t> FindFramesToDecode(size_t) const;

  // This is called by Decode() after decoding a frame in an animated image.
  // Before calling this method, the caller must verify that the frame exists.
  // @return true  if the frame was fully decoded,
  //         false otherwise.
  bool PostDecodeProcessing(size_t);

  // The GIF and PNG decoders set the default alpha setting of the ImageFrame to
  // true. When the frame rect does not contain any (semi-) transparent pixels,
  // this may need to be changed to false. This depends on whether the required
  // previous frame adds transparency to the image, outside of the frame rect.
  // This methods corrects the alpha setting of the frame buffer to false when
  // the whole frame is opaque.
  //
  // This method should be called by the GIF and PNG decoder when the pixels in
  // the frame rect do *not* contain any transparent pixels. Before calling
  // this method, the caller must verify that the frame exists.
  void CorrectAlphaWhenFrameBufferSawNoAlpha(size_t);

  scoped_refptr<SegmentReader> data_;  // The encoded data.
  // Decoded frame cache; sized by FrameCount() via DecodeFrameCount().
  Vector<ImageFrame, 1> frame_buffer_cache_;
  const bool premultiply_alpha_;
  const HighBitDepthDecodingOption high_bit_depth_decoding_option_;
  const ColorBehavior color_behavior_;
  ImageOrientation orientation_;
  IntSize density_corrected_size_;

  // The maximum amount of memory a decoded image should require. Ideally,
  // image decoders should downsample large images to fit under this limit
  // (and then return the downsampled size from DecodedSize()). Ignoring
  // this limit can cause excessive memory use or even crashes on low-
  // memory devices.
  const size_t max_decoded_bytes_;

  // While decoding, we may learn that there are so many animation frames that
  // we would go beyond our cache budget.
  // If that happens, purge_aggressively_ is set to true. This signals
  // future decodes to purge old frames as it goes.
  void UpdateAggressivePurging(size_t index);

  // The method is only relevant for multi-frame images.
  //
  // This method indicates whether the provided frame has enough data to decode
  // successive frames that depend on it. It is used by ClearCacheExceptFrame
  // to determine which frame to keep in cache when the indicated frame is not
  // yet sufficiently decoded.
  //
  // The default condition is that the frame status needs to be FramePartial or
  // FrameComplete, since the data of previous frames is copied in
  // InitFrameBuffer() before setting the status to FramePartial. For WebP,
  // however, the status needs to be FrameComplete since the complete buffer is
  // used to do alpha blending in WEBPImageDecoder::ApplyPostProcessing().
  //
  // Before calling this, verify that frame |index| exists by checking that
  // |index| is smaller than |frame_buffer_cache_|.size().
  virtual bool FrameStatusSufficientForSuccessors(wtf_size_t index) {
    DCHECK(index < frame_buffer_cache_.size());
    ImageFrame::Status frame_status = frame_buffer_cache_[index].GetStatus();
    return frame_status == ImageFrame::kFramePartial ||
           frame_status == ImageFrame::kFrameComplete;
  }

  // Note that |allow_decode_to_yuv_| being true merely means that the
  // ImageDecoder supports decoding to YUV. Other layers higher in the
  // stack (the PaintImageGenerator, ImageFrameGenerator, or cache) may
  // decline to go down the YUV path.
  bool allow_decode_to_yuv_;
  std::unique_ptr<ImagePlanes> image_planes_;

 private:
  // Some code paths compute the size of the image as "width * height * 4 or 8"
  // and return it as a (signed) int.  Avoid overflow.
  inline bool SizeCalculationMayOverflow(unsigned width,
                                         unsigned height,
                                         unsigned decoded_bytes_per_pixel) {
    base::CheckedNumeric<int32_t> total_size = width;
    total_size *= height;
    total_size *= decoded_bytes_per_pixel;
    return !total_size.IsValid();
  }

  bool purge_aggressively_;

  // This methods gets called at the end of InitFrameBuffer. Subclasses can do
  // format specific initialization, for e.g. alpha settings, here.
  virtual void OnInitFrameBuffer(size_t) {}

  // Called by InitFrameBuffer to determine if it can take the bitmap of the
  // previous frame. This condition is different for GIF and WEBP.
  virtual bool CanReusePreviousFrameBuffer(size_t) const { return false; }

  IntSize size_;                      // Set by SetSize().
  bool size_available_ = false;       // Set by SetSize().
  bool is_all_data_received_ = false; // Set by SetData().
  bool failed_ = false;               // Set by SetFailed(); sticky.

  std::unique_ptr<ColorProfile> embedded_color_profile_;
  sk_sp<SkColorSpace> color_space_for_sk_images_;

  bool source_to_target_color_transform_needs_update_ = false;
  std::unique_ptr<ColorProfileTransform> source_to_target_color_transform_;

  DISALLOW_COPY_AND_ASSIGN(ImageDecoder);
};

}  // namespace blink

#endif