Skip to content

Commit 42beaf1

Browse files
move pipeline-based test skips out of pipeline mixin (#2486)
1 parent 824cb53 commit 42beaf1

File tree

7 files changed

+88
-61
lines changed

7 files changed

+88
-61
lines changed

tests/pipelines/dance_diffusion/test_dance_diffusion.py

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121

2222
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
2323
from diffusers.utils import slow, torch_device
24-
from diffusers.utils.testing_utils import require_torch_gpu
24+
from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
2525

2626
from ...test_pipelines_common import PipelineTesterMixin
2727

@@ -87,6 +87,22 @@ def test_dance_diffusion(self):
8787
expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
8888
assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
8989

90+
@skip_mps
91+
def test_save_load_local(self):
92+
return super().test_save_load_local()
93+
94+
@skip_mps
95+
def test_dict_tuple_outputs_equivalent(self):
96+
return super().test_dict_tuple_outputs_equivalent()
97+
98+
@skip_mps
99+
def test_save_load_optional_components(self):
100+
return super().test_save_load_optional_components()
101+
102+
@skip_mps
103+
def test_attention_slicing_forward_pass(self):
104+
return super().test_attention_slicing_forward_pass()
105+
90106

91107
@slow
92108
@require_torch_gpu

tests/pipelines/repaint/test_repaint.py

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
import torch
2121

2222
from diffusers import RePaintPipeline, RePaintScheduler, UNet2DModel
23-
from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, torch_device
23+
from diffusers.utils.testing_utils import load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device
2424

2525
from ...test_pipelines_common import PipelineTesterMixin
2626

@@ -84,6 +84,28 @@ def test_repaint(self):
8484

8585
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
8686

87+
@skip_mps
88+
def test_save_load_local(self):
89+
return super().test_save_load_local()
90+
91+
# RePaint can hardly be made deterministic since the scheduler is currently always
92+
# nondeterministic
93+
@unittest.skip("non-deterministic pipeline")
94+
def test_inference_batch_single_identical(self):
95+
return super().test_inference_batch_single_identical()
96+
97+
@skip_mps
98+
def test_dict_tuple_outputs_equivalent(self):
99+
return super().test_dict_tuple_outputs_equivalent()
100+
101+
@skip_mps
102+
def test_save_load_optional_components(self):
103+
return super().test_save_load_optional_components()
104+
105+
@skip_mps
106+
def test_attention_slicing_forward_pass(self):
107+
return super().test_attention_slicing_forward_pass()
108+
87109

88110
@nightly
89111
@require_torch_gpu

tests/pipelines/stable_diffusion/test_cycle_diffusion.py

Lines changed: 21 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323

2424
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
2525
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
26-
from diffusers.utils.testing_utils import require_torch_gpu
26+
from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
2727

2828
from ...test_pipelines_common import PipelineTesterMixin
2929

@@ -149,6 +149,26 @@ def test_stable_diffusion_cycle_fp16(self):
149149

150150
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
151151

152+
@skip_mps
153+
def test_save_load_local(self):
154+
return super().test_save_load_local()
155+
156+
@unittest.skip("non-deterministic pipeline")
157+
def test_inference_batch_single_identical(self):
158+
return super().test_inference_batch_single_identical()
159+
160+
@skip_mps
161+
def test_dict_tuple_outputs_equivalent(self):
162+
return super().test_dict_tuple_outputs_equivalent()
163+
164+
@skip_mps
165+
def test_save_load_optional_components(self):
166+
return super().test_save_load_optional_components()
167+
168+
@skip_mps
169+
def test_attention_slicing_forward_pass(self):
170+
return super().test_attention_slicing_forward_pass()
171+
152172

153173
@slow
154174
@require_torch_gpu

tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
UNet2DConditionModel,
3232
)
3333
from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
34-
from diffusers.utils.testing_utils import require_torch_gpu
34+
from diffusers.utils.testing_utils import require_torch_gpu, skip_mps
3535

3636
from ...test_pipelines_common import PipelineTesterMixin
3737

@@ -213,6 +213,22 @@ def test_stable_diffusion_img2img_num_images_per_prompt(self):
213213

214214
assert images.shape == (batch_size * num_images_per_prompt, 32, 32, 3)
215215

216+
@skip_mps
217+
def test_save_load_local(self):
218+
return super().test_save_load_local()
219+
220+
@skip_mps
221+
def test_dict_tuple_outputs_equivalent(self):
222+
return super().test_dict_tuple_outputs_equivalent()
223+
224+
@skip_mps
225+
def test_save_load_optional_components(self):
226+
return super().test_save_load_optional_components()
227+
228+
@skip_mps
229+
def test_attention_slicing_forward_pass(self):
230+
return super().test_attention_slicing_forward_pass()
231+
216232

217233
@slow
218234
@require_torch_gpu

tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -223,6 +223,11 @@ def test_stable_diffusion_pix2pix_zero_num_images_per_prompt(self):
223223

224224
assert images.shape == (batch_size * num_images_per_prompt, 64, 64, 3)
225225

226+
# Non-determinism caused by the scheduler optimizing the latent inputs during inference
227+
@unittest.skip("non-deterministic pipeline")
228+
def test_inference_batch_single_identical(self):
229+
return super().test_inference_batch_single_identical()
230+
226231

227232
@slow
228233
@require_torch_gpu

tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -382,6 +382,10 @@ def test_stable_diffusion_depth2img_pil(self):
382382

383383
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
384384

385+
@skip_mps
386+
def test_attention_slicing_forward_pass(self):
387+
return super().test_attention_slicing_forward_pass()
388+
385389

386390
@slow
387391
@require_torch_gpu

tests/test_pipelines_common.py

Lines changed: 1 addition & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -11,14 +11,7 @@
1111
import torch
1212

1313
import diffusers
14-
from diffusers import (
15-
CycleDiffusionPipeline,
16-
DanceDiffusionPipeline,
17-
DiffusionPipeline,
18-
RePaintPipeline,
19-
StableDiffusionDepth2ImgPipeline,
20-
StableDiffusionImg2ImgPipeline,
21-
)
14+
from diffusers import DiffusionPipeline
2215
from diffusers.utils import logging
2316
from diffusers.utils.import_utils import is_accelerate_available, is_xformers_available
2417
from diffusers.utils.testing_utils import require_torch, torch_device
@@ -83,15 +76,6 @@ def tearDown(self):
8376
torch.cuda.empty_cache()
8477

8578
def test_save_load_local(self):
86-
if torch_device == "mps" and self.pipeline_class in (
87-
DanceDiffusionPipeline,
88-
CycleDiffusionPipeline,
89-
RePaintPipeline,
90-
StableDiffusionImg2ImgPipeline,
91-
):
92-
# FIXME: inconsistent outputs on MPS
93-
return
94-
9579
components = self.get_dummy_components()
9680
pipe = self.pipeline_class(**components)
9781
pipe.to(torch_device)
@@ -199,18 +183,6 @@ def test_inference_batch_single_identical(self):
199183
def _test_inference_batch_single_identical(
200184
self, test_max_difference=None, test_mean_pixel_difference=None, relax_max_difference=False
201185
):
202-
if self.pipeline_class.__name__ in [
203-
"CycleDiffusionPipeline",
204-
"RePaintPipeline",
205-
"StableDiffusionPix2PixZeroPipeline",
206-
]:
207-
# RePaint can hardly be made deterministic since the scheduler is currently always
208-
# nondeterministic
209-
# CycleDiffusion is also slightly nondeterministic
210-
# There's a training loop inside Pix2PixZero and is guided by edit directions. This is
211-
# why the slight non-determinism.
212-
return
213-
214186
if test_max_difference is None:
215187
# TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems
216188
# make sure that batched and non-batched is identical
@@ -283,15 +255,6 @@ def _test_inference_batch_single_identical(
283255
assert_mean_pixel_difference(output_batch[0][0], output[0][0])
284256

285257
def test_dict_tuple_outputs_equivalent(self):
286-
if torch_device == "mps" and self.pipeline_class in (
287-
DanceDiffusionPipeline,
288-
CycleDiffusionPipeline,
289-
RePaintPipeline,
290-
StableDiffusionImg2ImgPipeline,
291-
):
292-
# FIXME: inconsistent outputs on MPS
293-
return
294-
295258
components = self.get_dummy_components()
296259
pipe = self.pipeline_class(**components)
297260
pipe.to(torch_device)
@@ -370,15 +333,6 @@ def test_save_load_optional_components(self):
370333
if not hasattr(self.pipeline_class, "_optional_components"):
371334
return
372335

373-
if torch_device == "mps" and self.pipeline_class in (
374-
DanceDiffusionPipeline,
375-
CycleDiffusionPipeline,
376-
RePaintPipeline,
377-
StableDiffusionImg2ImgPipeline,
378-
):
379-
# FIXME: inconsistent outputs on MPS
380-
return
381-
382336
components = self.get_dummy_components()
383337
pipe = self.pipeline_class(**components)
384338
pipe.to(torch_device)
@@ -440,16 +394,6 @@ def _test_attention_slicing_forward_pass(self, test_max_difference=True):
440394
if not self.test_attention_slicing:
441395
return
442396

443-
if torch_device == "mps" and self.pipeline_class in (
444-
DanceDiffusionPipeline,
445-
CycleDiffusionPipeline,
446-
RePaintPipeline,
447-
StableDiffusionImg2ImgPipeline,
448-
StableDiffusionDepth2ImgPipeline,
449-
):
450-
# FIXME: inconsistent outputs on MPS
451-
return
452-
453397
components = self.get_dummy_components()
454398
pipe = self.pipeline_class(**components)
455399
pipe.to(torch_device)

0 commit comments

Comments (0)