
Commit 839e30e

improve CUDA VRAM monitoring
extra check that device==cuda before getting VRAM stats
1 parent: bfb2781
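
The guard pairs a build/runtime capability check (torch.cuda.is_available()) with a per-instance device check (self.device.type == 'cuda'), since CUDA can be available on the machine while the model itself runs on CPU or MPS, in which case the allocator stats would be misleading. A minimal sketch of the pattern in isolation (report_vram and its device argument are illustrative, not code from the commit):

import torch

def report_vram(device: torch.device) -> None:
    # Query CUDA allocator stats only when the model actually lives on a
    # CUDA device; on a CPU-only build the torch.cuda calls can fail, and
    # on a CUDA-capable machine running CPU/MPS they would report 0.
    if torch.cuda.is_available() and device.type == 'cuda':
        print(
            '>> Max VRAM used for this generation:',
            '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
            'Current VRAM utilization:',
            '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
        )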


2 files changed (12 additions, 7 deletions)


ldm/generate.py

Lines changed: 7 additions & 5 deletions
@@ -357,12 +357,14 @@ def process_image(image,seed):
         print(
             f'>> {len(results)} image(s) generated in', '%4.2fs' % (toc - tic)
         )
-        print(
-            f'>> Max VRAM used for this generation:',
-            '%4.2fG' % (torch.cuda.max_memory_allocated() / 1e9),
-        )
+        if torch.cuda.is_available() and self.device.type == 'cuda':
+            print(
+                f'>> Max VRAM used for this generation:',
+                '%4.2fG.' % (torch.cuda.max_memory_allocated() / 1e9),
+                'Current VRAM utilization:'
+                '%4.2fG' % (torch.cuda.memory_allocated() / 1e9),
+            )

-        if self.session_peakmem:
             self.session_peakmem = max(
                 self.session_peakmem, torch.cuda.max_memory_allocated()
             )
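
For context on the two stats printed: torch.cuda.memory_allocated() returns the bytes currently occupied by live tensors on the device, while torch.cuda.max_memory_allocated() returns the high-water mark since the process started (or since the last torch.cuda.reset_peak_memory_stats()), which is why the session peak above is carried forward with max(). A quick illustration, assuming a CUDA device is present:

import torch

x = torch.empty(256, 1024, 1024, device='cuda')  # ~1.07 GB of float32
print(torch.cuda.memory_allocated() / 1e9)       # current usage: ~1.07
del x
print(torch.cuda.memory_allocated() / 1e9)       # drops back toward 0.00
print(torch.cuda.max_memory_allocated() / 1e9)   # peak persists: ~1.07
torch.cuda.reset_peak_memory_stats()             # clears the high-water mark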

scripts/dream.py

Lines changed: 5 additions & 2 deletions
@@ -130,8 +130,11 @@ def main_loop(t2i, outdir, prompt_as_dir, parser, infile):
            command = get_next_command(infile)
        except EOFError:
            done = True
-            break
-
+            continue
+        except KeyboardInterrupt:
+            done = True
+            continue
+
        # skip empty lines
        if not command.strip():
            continue
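
Replacing break with done = True plus continue routes the EOF exit through the while not done condition, and the new KeyboardInterrupt handler gives Ctrl-C the same clean exit instead of letting it raise a traceback out of the loop. A stripped-down sketch of the loop shape (the real main_loop does much more per command; the prompt string and final print are illustrative):

done = False
while not done:
    try:
        command = input('dream> ')
    except EOFError:             # Ctrl-D ends the session
        done = True
        continue
    except KeyboardInterrupt:    # Ctrl-C now ends it cleanly too
        done = True
        continue

    # skip empty lines
    if not command.strip():
        continue

    print(f'>> would process: {command!r}')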
