Skip to content

Commit

Permalink
Fix a deprecation warning: migrate `torch.cuda.amp.autocast` to `torch.amp.autocast('cuda', ...)`
Browse files Browse the repository at this point in the history
  • Loading branch information
lucidrains committed Sep 5, 2024
1 parent 4019202 commit ef4421a
Show file tree
Hide file tree
Showing 9 changed files with 17 additions and 17 deletions.
4 changes: 2 additions & 2 deletions denoising_diffusion_pytorch/classifier_free_guidance.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.cuda.amp import autocast
from torch.amp import autocast

from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange
Expand Down Expand Up @@ -731,7 +731,7 @@ def interpolate(self, x1, x2, classes, t = None, lam = 0.5):

return img

@autocast(enabled = False)
@autocast('cuda', enabled = False)
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
from torch import sqrt
from torch import nn, einsum
import torch.nn.functional as F
from torch.cuda.amp import autocast
from torch.amp import autocast
from torch.special import expm1

from tqdm import tqdm
Expand Down Expand Up @@ -234,7 +234,7 @@ def sample(self, batch_size = 16):

# training related functions - noise prediction

@autocast(enabled = False)
@autocast('cuda', enabled = False)
def q_sample(self, x_start, times, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))

Expand Down
4 changes: 2 additions & 2 deletions denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from torch import nn, einsum
import torch.nn.functional as F
from torch.nn import Module, ModuleList
from torch.cuda.amp import autocast
from torch.amp import autocast
from torch.utils.data import Dataset, DataLoader

from torch.optim import Adam
Expand Down Expand Up @@ -772,7 +772,7 @@ def noise_assignment(self, x_start, noise):
_, assign = linear_sum_assignment(dist.cpu())
return torch.from_numpy(assign).to(dist.device)

@autocast(enabled = False)
@autocast('cuda', enabled = False)
def q_sample(self, x_start, t, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))

Expand Down
4 changes: 2 additions & 2 deletions denoising_diffusion_pytorch/denoising_diffusion_pytorch_1d.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
from torch import nn, einsum, Tensor
from torch.nn import Module, ModuleList
import torch.nn.functional as F
from torch.cuda.amp import autocast
from torch.amp import autocast
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader

Expand Down Expand Up @@ -660,7 +660,7 @@ def interpolate(self, x1, x2, t = None, lam = 0.5):

return img

@autocast(enabled = False)
@autocast('cuda', enabled = False)
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))

Expand Down
4 changes: 2 additions & 2 deletions denoising_diffusion_pytorch/guided_diffusion.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
import torch
from torch import nn, einsum
import torch.nn.functional as F
from torch.cuda.amp import autocast
from torch.amp import autocast
from torch.utils.data import Dataset, DataLoader

from torch.optim import Adam
Expand Down Expand Up @@ -709,7 +709,7 @@ def interpolate(self, x1, x2, t = None, lam = 0.5):

return img

@autocast(enabled = False)
@autocast('cuda', enabled = False)
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))

Expand Down
4 changes: 2 additions & 2 deletions denoising_diffusion_pytorch/repaint.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from torch import nn, einsum
import torch.nn.functional as F
from torch.nn import Module, ModuleList
from torch.cuda.amp import autocast
from torch.amp import autocast
from torch.utils.data import Dataset, DataLoader

from torch.optim import Adam
Expand Down Expand Up @@ -815,7 +815,7 @@ def interpolate(self, x1, x2, t = None, lam = 0.5):

return img

@autocast(enabled = False)
@autocast('cuda', enabled = False)
def q_sample(self, x_start, t, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))

Expand Down
4 changes: 2 additions & 2 deletions denoising_diffusion_pytorch/simple_diffusion.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from torch import nn, einsum
import torch.nn.functional as F
from torch.special import expm1
from torch.cuda.amp import autocast
from torch.amp import autocast

from tqdm import tqdm
from einops import rearrange, repeat, reduce, pack, unpack
Expand Down Expand Up @@ -651,7 +651,7 @@ def sample(self, batch_size = 16):

# training related functions - noise prediction

@autocast(enabled = False)
@autocast('cuda', enabled = False)
def q_sample(self, x_start, times, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
from torch import nn, einsum
import torch.nn.functional as F
from torch.special import expm1
from torch.cuda.amp import autocast
from torch.amp import autocast

from tqdm import tqdm
from einops import rearrange, repeat, reduce
Expand Down Expand Up @@ -150,7 +150,7 @@ def sample(self, batch_size = 16):

# training related functions - noise prediction

@autocast(enabled = False)
@autocast('cuda', enabled = False)
def q_sample(self, x_start, times, noise = None):
noise = default(noise, lambda: torch.randn_like(x_start))

Expand Down
2 changes: 1 addition & 1 deletion denoising_diffusion_pytorch/version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
__version__ = '2.0.16'
__version__ = '2.0.17'

0 comments on commit ef4421a

Please sign in to comment.