-
Notifications
You must be signed in to change notification settings - Fork 67
Decoder-native resize public implementation #1003
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
dd24dfa
3a2df84
5344ab4
98cf81b
65c4ad7
f300c70
2c3b7f0
80e84b5
5ac60d8
531b40f
cc333ac
238a8ff
55d362c
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -8,7 +8,7 @@ | |
| import json | ||
| import numbers | ||
| from pathlib import Path | ||
| from typing import Literal, Optional, Tuple, Union | ||
| from typing import Literal, Optional, Sequence, Tuple, Union | ||
|
|
||
| import torch | ||
| from torch import device as torch_device, Tensor | ||
|
|
@@ -19,6 +19,7 @@ | |
| create_decoder, | ||
| ERROR_REPORTING_INSTRUCTIONS, | ||
| ) | ||
| from torchcodec.transforms import DecoderNativeTransform, Resize | ||
|
|
||
|
|
||
| class VideoDecoder: | ||
|
|
@@ -103,6 +104,7 @@ def __init__( | |
| dimension_order: Literal["NCHW", "NHWC"] = "NCHW", | ||
| num_ffmpeg_threads: int = 1, | ||
| device: Optional[Union[str, torch_device]] = "cpu", | ||
| transforms: Optional[Sequence[DecoderNativeTransform]] = None, | ||
| seek_mode: Literal["exact", "approximate"] = "exact", | ||
| custom_frame_mappings: Optional[ | ||
| Union[str, bytes, io.RawIOBase, io.BufferedReader] | ||
|
|
@@ -148,13 +150,16 @@ def __init__( | |
|
|
||
| device_variant = _get_cuda_backend() | ||
|
|
||
| transform_specs = _make_transform_specs(transforms) | ||
|
|
||
| core.add_video_stream( | ||
| self._decoder, | ||
| stream_index=stream_index, | ||
| dimension_order=dimension_order, | ||
| num_threads=num_ffmpeg_threads, | ||
| device=device, | ||
| device_variant=device_variant, | ||
| transform_specs=transform_specs, | ||
| custom_frame_mappings=custom_frame_mappings_data, | ||
| ) | ||
|
|
||
|
|
@@ -432,6 +437,60 @@ def _get_and_validate_stream_metadata( | |
| ) | ||
|
|
||
|
|
||
# This function, _make_transform_specs, and the transforms argument to
# VideoDecoder actually accept a union of DecoderNativeTransform and
# TorchVision transforms. We don't put that in our type annotation because
# that would require importing torchvision at module scope which would mean we
# have a hard dependency on torchvision.
# TODO: better explanation of the above.
def _convert_to_decoder_native_transforms(
    transforms: Sequence[DecoderNativeTransform],
) -> Sequence[DecoderNativeTransform]:
    """Normalize a mixed sequence of transforms into DecoderNativeTransforms.

    TorchCodec DecoderNativeTransforms pass through unchanged; supported
    TorchVision v2 transforms are converted to their TorchCodec equivalents.
    TorchVision is imported lazily so TorchCodec keeps no hard dependency on
    it — it is only needed when the caller actually passes a TorchVision
    transform.

    Raises:
        ValueError: if a transform is neither a DecoderNativeTransform nor a
            supported TorchVision v2 transform, if TorchVision is needed but
            not installed, or if a TorchVision Resize does not carry an
            explicit (height, width) size pair.
    """
    try:
        from torchvision.transforms import v2

        tv_available = True
    except ImportError:
        tv_available = False

    converted_transforms = []
    for transform in transforms:
        # Native transforms need no conversion; accept them as-is.
        if isinstance(transform, DecoderNativeTransform):
            converted_transforms.append(transform)
            continue

        if not tv_available:
            raise ValueError(
                f"The supplied transform, {transform}, is not a TorchCodec "
                "DecoderNativeTransform. TorchCodec also accepts TorchVision "
                "v2 transforms, but TorchVision is not installed."
            )
        if isinstance(transform, v2.Resize):
            # v2.Resize also allows int or single-element sizes ("resize the
            # shorter edge") and size=None; decoder-native resize only
            # supports an explicit (height, width) pair. Guard against None
            # so users get a ValueError rather than a confusing TypeError.
            if transform.size is None or len(transform.size) != 2:
                raise ValueError(
                    "TorchVision Resize transform must have a (height, width) "
                    f"pair for the size, got {transform.size}."
                )
            converted_transforms.append(Resize(size=transform.size))
        else:
            raise ValueError(
                f"Unsupported transform: {transform}. Transforms must be "
                "either a TorchCodec DecoderNativeTransform or a TorchVision "
                "v2 transform."
            )

    return converted_transforms
|
|
||
|
|
||
| def _make_transform_specs( | ||
| transforms: Optional[Sequence[DecoderNativeTransform]], | ||
| ) -> str: | ||
| if transforms is None: | ||
| return "" | ||
|
|
||
| transforms = _convert_to_decoder_native_transforms(transforms) | ||
| return ";".join([t.make_params() for t in transforms]) | ||
|
|
||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Discussion point 2: This is what we'll have to do with TorchVision transforms at the moment. We'll need special handling for each transform, looking into its internals to get what we need and enforce decoder-native limitations. In the future, we can change TorchVision transforms to have an API so that we can get what we need in a generic way. But for now, we'll need to do something like this.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm still undecided on whether we should accept TV transforms or not (ironic, I know), but I think this is totally OK. And I think we'll need that level of coupling anyway, even if we were to write our own TC transforms. Echoing what you wrote:
Basically, that coupling between TC and TV will have to exist either in the code (as in this PR), or in our heads as API designers. Side note, slightly related: if we're going to have our own TC transforms, I think we'll want their API to exactly match (or be a strict subset of) the TV transforms. E.g. we'd have
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. @NicolasHug, I came to the same conclusion as:
At which point, I don't think we've really gained anything by having them separate. And users will probably also start asking, hey, can you just accept the TorchVision ones? I also just realized a new counter-point, which I'll put up in the summary as counter point 3. |
||
|
|
||
| def _read_custom_frame_mappings( | ||
| custom_frame_mappings: Union[str, bytes, io.RawIOBase, io.BufferedReader] | ||
| ) -> tuple[Tensor, Tensor, Tensor]: | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,7 @@ | ||
| # Copyright (c) Meta Platforms, Inc. and affiliates. | ||
| # All rights reserved. | ||
| # | ||
| # This source code is licensed under the BSD-style license found in the | ||
| # LICENSE file in the root directory of this source tree. | ||
|
|
||
| from ._decoder_native_transforms import DecoderNativeTransform, Resize # noqa |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,39 @@ | ||
| # Copyright (c) Meta Platforms, Inc. and affiliates. | ||
| # All rights reserved. | ||
| # | ||
| # This source code is licensed under the BSD-style license found in the | ||
| # LICENSE file in the root directory of this source tree. | ||
|
|
||
| from abc import ABC, abstractmethod | ||
| from dataclasses import dataclass | ||
| from typing import Sequence | ||
|
|
||
|
|
||
@dataclass
class DecoderNativeTransform(ABC):
    """Abstract base for transforms applied by the decoder itself.

    Decoder-native transforms are applied during decoding, before frames are
    returned to the user. Each subclass must know how to serialize its
    parameters into the spec string understood by the core decoder API.
    """

    @abstractmethod
    def make_params(self) -> str:
        """Return this transform's parameters as a core-API spec string."""
        pass
|
|
||
|
|
||
@dataclass
class Resize(DecoderNativeTransform):
    """Decoder-native resize of decoded frames to a fixed (height, width).

    Mirrors a subset of ``torchvision.transforms.v2.Resize``. One benefit of
    having parallel definitions is that it gives us a place to document which
    behavior we do and do not support. For example, there are no fields for
    ``interpolation`` and ``antialias`` because users cannot yet control
    those in decoder-native transforms.
    """

    # More restrictive than what TorchVision accepts, but it accurately
    # reflects current decoder-native limitations: a (height, width) pair is
    # required. We express that in the type annotation, not just the docs.
    size: Sequence[int]

    def make_params(self) -> str:
        """Serialize this resize as a core-API spec string.

        Raises:
            ValueError: if ``size`` is not a (height, width) pair.
        """
        # Raise rather than assert so the check survives `python -O` and
        # users constructing Resize directly get a clear error.
        if len(self.size) != 2:
            raise ValueError(
                f"Resize size must be a (height, width) pair, got {self.size}."
            )
        return f"resize, {self.size[0]}, {self.size[1]}"
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I was getting linting errors like: https://github.com/meta-pytorch/torchcodec/actions/runs/19157614790/job/54761644331
Which points to docs which recommend the above change: https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports