tests/pipelines/dance_diffusion/test_dance_diffusion.py (17 additions, 1 deletion)
@@ -21,7 +21,7 @@

 from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
 from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

 from ...test_pipelines_common import PipelineTesterMixin

@@ -87,6 +87,22 @@ def test_dance_diffusion(self):
         expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
         assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
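For reference, skip_mps is a device-conditional skip decorator imported from diffusers.utils.testing_utils. A minimal sketch of how such a decorator can be built on unittest.skipIf, assuming the test device is exposed as a module-level string (the torch_device stand-in below is hypothetical; the real helper may be implemented differently):

    import unittest

    torch_device = "mps"  # hypothetical stand-in; diffusers resolves this from the environment

    def skip_mps(test_case):
        """Skip the decorated test (or test class) when the test device is Apple's MPS backend."""
        return unittest.skipIf(torch_device == "mps", "test doesn't work on MPS backend")(test_case)

Applied to an override that simply delegates to super(), the decorator skips the inherited test on MPS while leaving it active on every other device.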
tests/pipelines/repaint/test_repaint.py (23 additions, 1 deletion)
@@ -20,7 +20,7 @@
 import torch

 from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel
-from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device

 from ...test_pipelines_common import PipelineTesterMixin

@@ -84,6 +84,28 @@ def test_repaint(self):

         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    # RePaint can hardly be made deterministic since the scheduler is currently always
+    # nondeterministic
+    @unittest.skip("non-deterministic pipeline")
+    def test_inference_batch_single_identical(self):
+        return super().test_inference_batch_single_identical()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @nightly
 @require_torch_gpu
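Note that the RePaint overrides mix two mechanisms: skip_mps skips only when the device is MPS, while unittest.skip disables the test unconditionally because the scheduler is nondeterministic. A small self-contained illustration of the difference, using a hypothetical test class:

    import unittest

    on_mps = False  # hypothetical device check, e.g. torch_device == "mps"

    class SkipExamples(unittest.TestCase):
        @unittest.skip("non-deterministic pipeline")
        def test_always_skipped(self):
            self.fail("never executed, regardless of device")

        @unittest.skipIf(on_mps, "test doesn't work on MPS backend")
        def test_conditionally_skipped(self):
            self.assertTrue(True)  # runs whenever on_mps is False

    if __name__ == "__main__":
        unittest.main()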
tests/pipelines/stable_diffusion/test_cycle_diffusion.py (21 additions, 1 deletion)
@@ -23,7 +23,7 @@

 from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
 from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

 from ...test_pipelines_common import PipelineTesterMixin

@@ -149,6 +149,26 @@ def test_stable_diffusion_cycle_fp16(self):

         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    @unittest.skip("non-deterministic pipeline")
+    def test_inference_batch_single_identical(self):
+        return super().test_inference_batch_single_identical()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
@@ -31,7 +31,7 @@
     UNet2DConditionModel,
 )
 from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
-from diffusers.utils.testing_utils import require_torch_gpu
+from diffusers.utils.testing_utils import require_torch_gpu, skip_mps

 from ...test_pipelines_common import PipelineTesterMixin

@@ -213,6 +213,22 @@ def test_stable_diffusion_img2img_num_images_per_prompt(self):

         assert images.shape == (batch_size * num_images_per_prompt, 32, 32, 3)

+    @skip_mps
+    def test_save_load_local(self):
+        return super().test_save_load_local()
+
+    @skip_mps
+    def test_dict_tuple_outputs_equivalent(self):
+        return super().test_dict_tuple_outputs_equivalent()
+
+    @skip_mps
+    def test_save_load_optional_components(self):
+        return super().test_save_load_optional_components()
+
+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py
@@ -223,6 +223,11 @@ def test_stable_diffusion_pix2pix_zero_num_images_per_prompt(self):

         assert images.shape == (batch_size * num_images_per_prompt, 64, 64, 3)

+    # Non-determinism caused by the scheduler optimizing the latent inputs during inference
+    @unittest.skip("non-deterministic pipeline")
+    def test_inference_batch_single_identical(self):
+        return super().test_inference_batch_single_identical()
+

 @slow
 @require_torch_gpu
tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
@@ -382,6 +382,10 @@ def test_stable_diffusion_depth2img_pil(self):

         assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

+    @skip_mps
+    def test_attention_slicing_forward_pass(self):
+        return super().test_attention_slicing_forward_pass()
+

 @slow
 @require_torch_gpu
tests/test_pipelines_common.py (1 addition, 57 deletions)
@@ -11,14 +11,7 @@
 import torch

 import diffusers
-from diffusers import (
-    CycleDiffusionPipeline,
-    DanceDiffusionPipeline,
-    DiffusionPipeline,
-    RePaintPipeline,
-    StableDiffusionDepth2ImgPipeline,
-    StableDiffusionImg2ImgPipeline,
-)
+from diffusers import DiffusionPipeline
 from diffusers.utils import logging
 from diffusers.utils.import_utils import is_accelerate_available, is_xformers_available
 from diffusers.utils.testing_utils import require_torch, torch_device
@@ -83,15 +76,6 @@ def tearDown(self):
             torch.cuda.empty_cache()

     def test_save_load_local(self):
-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
@@ -199,18 +183,6 @@ def test_inference_batch_single_identical(self):
     def _test_inference_batch_single_identical(
         self, test_max_difference=None, test_mean_pixel_difference=None, relax_max_difference=False
     ):
-        if self.pipeline_class.__name__ in [
-            "CycleDiffusionPipeline",
-            "RePaintPipeline",
-            "StableDiffusionPix2PixZeroPipeline",
-        ]:
-            # RePaint can hardly be made deterministic since the scheduler is currently always
-            # nondeterministic
-            # CycleDiffusion is also slightly nondeterministic
-            # There's a training loop inside Pix2PixZero and is guided by edit directions. This is
-            # why the slight non-determinism.
-            return
-
         if test_max_difference is None:
             # TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems
             # make sure that batched and non-batched is identical
@@ -283,15 +255,6 @@ def _test_inference_batch_single_identical(
             assert_mean_pixel_difference(output_batch[0][0], output[0][0])

     def test_dict_tuple_outputs_equivalent(self):
-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
@@ -370,15 +333,6 @@ def test_save_load_optional_components(self):
         if not hasattr(self.pipeline_class, "_optional_components"):
             return

-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
@@ -440,16 +394,6 @@ def _test_attention_slicing_forward_pass(self, test_max_difference=True):
         if not self.test_attention_slicing:
             return

-        if torch_device == "mps" and self.pipeline_class in (
-            DanceDiffusionPipeline,
-            CycleDiffusionPipeline,
-            RePaintPipeline,
-            StableDiffusionImg2ImgPipeline,
-            StableDiffusionDepth2ImgPipeline,
-        ):
-            # FIXME: inconsistent outputs on MPS
-            return
-
         components = self.get_dummy_components()
         pipe = self.pipeline_class(**components)
         pipe.to(torch_device)
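Taken together, the deletions above and the per-file additions implement a single refactoring: pipeline-specific device checks are removed from the shared PipelineTesterMixin, and each affected test class now opts out locally by overriding the inherited test under a decorator. A minimal sketch of the pattern, with hypothetical class names:

    import unittest

    torch_device = "cpu"  # hypothetical; normally resolved from the environment

    def skip_mps(test_case):
        # Same conditional-skip sketch as shown earlier.
        return unittest.skipIf(torch_device == "mps", "test doesn't work on MPS backend")(test_case)

    class PipelineTesterMixin:
        # Shared test, now free of per-pipeline device special cases.
        def test_save_load_local(self):
            assert True  # stand-in for the real save/load round-trip comparison

    class ExamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
        # The override delegates to the mixin, so the shared logic stays in one
        # place while the skip condition lives next to the pipeline it concerns.
        @skip_mps
        def test_save_load_local(self):
            return super().test_save_load_local()

unittest discovers the subclass method, so the decorator wraps the inherited behavior without duplicating it.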