Good morning.
When I try to instantiate the "ConditionalGenerator" class to test it:
CONDITIONAL GAN TRAINING WITH WASSERSTEIN LOSS AND SPECTRAL NORMALIZATION
# -*- coding: utf-8 -*-
import os
os.environ["TF_ENABLE_ONEDNN_OPTS"] = "0"
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.utils import spectral_norm

print(torch.__version__)

# =============================================================================
# 0 Models WGAN-GP
# =============================================================================
class ConditionalGenerator(nn.Module):
    def __init__(self, nz=100, ngf=64, n_classes=10, embedding_dim=50):
        super(ConditionalGenerator, self).__init__()
        # Embedding for the class labels
        self.label_embedding = nn.Embedding(n_classes, embedding_dim)
        # Layer that combines the noise vector with the label embedding
        self.fc_combined = nn.Sequential(
            nn.Linear(nz + embedding_dim, 7 * 7 * ngf * 4),
            nn.BatchNorm1d(7 * 7 * ngf * 4),
            nn.ReLU(True)
        )
        # Transposed convolutional layers
        self.conv1 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
        )
        self.conv2 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
        )
        # Final layer that produces the image
        self.to_img = nn.Sequential(
            nn.Conv2d(ngf, 1, 3, 1, 1, bias=False),
            nn.Tanh()
        )
        # Weight initialization
        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d, nn.Linear)):
            nn.init.normal_(m.weight.data, 0.0, 0.02)
            if m.bias is not None:
                nn.init.constant_(m.bias.data, 0)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0)

    def forward(self, z, labels, ngf=64):
        # Get the label embeddings
        label_emb = self.label_embedding(labels)
        # Concatenate noise and embeddings
        z_combined = torch.cat([z, label_emb], 1)
        # Pass through the fully connected layer
        x = self.fc_combined(z_combined)
        # Reshape for the convolutional layers
        x = x.view(-1, ngf * 4, 7, 7)
        # Pass through the convolutional layers
        x = self.conv1(x)
        x = self.conv2(x)
        # Generate the final image
        return self.to_img(x)

# =============================================================================
# 1 Hyper-parameters
# =============================================================================
noise_dim = 100
ngf = 64            # Size of feature maps in the generator
n_classes = 10      # Number of MNIST digits (0-9)
embedding_dim = 50  # Dimension of the embedding for class labels

device = torch.device("xpu")

# =============================================================================
# 2 Initialization of models and optimizers
# =============================================================================
# models
generator = ConditionalGenerator(
    noise_dim,
    ngf,
    n_classes,
    embedding_dim
).to(device)
The following error occurs when the device is set to "xpu":
AssertionError: Torch not compiled with XPU enabled
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
File c:\ia\generative_adversarial_networks\sin título1.py:104
92 device = torch.device("xpu")
94 # =============================================================================
95 # 2 Initialization of models and optimizers
96 # =============================================================================
97
98 # models
99 generator = ConditionalGenerator(
100 noise_dim,
101 ngf,
102 n_classes,
103 embedding_dim
--> 104 ).to(device)
File C:\IA\Generative_Adversarial_Networks\Lib\site-packages\torch\nn\modules\module.py:1355, in Module.to(self, *args, **kwargs)
1352 else:
1353 raise
-> 1355 return self._apply(convert)
File C:\IA\Generative_Adversarial_Networks\Lib\site-packages\torch\nn\modules\module.py:915, in Module._apply(self, fn, recurse)
913 if recurse:
914 for module in self.children():
--> 915 module._apply(fn)
917 def compute_should_use_set_data(tensor, tensor_applied):
918 if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
919 # If the new tensor has compatible tensor type as the existing tensor,
920 # the current behavior is to change the tensor in-place using `.data =`,
(...)
925 # global flag to let the user control whether they want the future
926 # behavior of overwriting the existing tensor or not.
File C:\IA\Generative_Adversarial_Networks\Lib\site-packages\torch\nn\modules\module.py:942, in Module._apply(self, fn, recurse)
938 # Tensors stored in modules are graph leaves, and we don't want to
939 # track autograd history of `param_applied`, so we have to use
940 # `with torch.no_grad():`
941 with torch.no_grad():
--> 942 param_applied = fn(param)
943 p_should_use_set_data = compute_should_use_set_data(param, param_applied)
945 # subclasses may have multiple child tensors so we need to use swap_tensors
File C:\IA\Generative_Adversarial_Networks\Lib\site-packages\torch\nn\modules\module.py:1341, in Module.to.<locals>.convert(t)
1334 if convert_to_format is not None and t.dim() in (4, 5):
1335 return t.to(
1336 device,
1337 dtype if t.is_floating_point() or t.is_complex() else None,
1338 non_blocking,
1339 memory_format=convert_to_format,
1340 )
-> 1341 return t.to(
1342 device,
1343 dtype if t.is_floating_point() or t.is_complex() else None,
1344 non_blocking,
1345 )
1346 except NotImplementedError as e:
1347 if str(e) == "Cannot copy out of meta tensor; no data!":
File C:\IA\Generative_Adversarial_Networks\Lib\site-packages\torch\xpu\__init__.py:118, in _lazy_init()
113 raise RuntimeError(
114 "Cannot re-initialize XPU in forked subprocess. To use XPU with "
115 "multiprocessing, you must use the 'spawn' start method"
116 )
117 if not _is_compiled():
--> 118 raise AssertionError("Torch not compiled with XPU enabled")
119 # This function inits XPU backend and detects bad fork processing.
120 torch._C._xpu_init()
AssertionError: Torch not compiled with XPU enabled
The drivers and software are updated to the latest version.
Please, can someone tell me where I'm making the mistake? Thank you.
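For reference, guarding the device selection avoids the hard failure while the build question is sorted out. A minimal sketch (not part of the original script), assuming PyTorch 2.4 or newer where the torch.xpu namespace exists; torch.xpu.is_available() should simply return False on builds without XPU support rather than raising:

import torch

# Hypothetical fallback: use "xpu" only when the installed torch build
# actually exposes a working XPU backend, otherwise run on CPU.
if hasattr(torch, "xpu") and torch.xpu.is_available():
    device = torch.device("xpu")
else:
    device = torch.device("cpu")

print(f"Selected device: {device}")

Falling back to CPU at least confirms whether the rest of the script runs.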
Hi Pablo,
Please open an issue on the Intel® Extension for PyTorch* GitHub page: https://github.com/intel/intel-extension-for-pytorch/issues.
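Before opening the issue, it may help to attach the output of a quick check of the installed build; a minimal sketch, assuming PyTorch 2.4 or newer:

import torch

# Quick diagnostic to include in the issue report. The version string often
# carries the build variant (e.g. a "+cpu" or "+xpu" suffix), and
# torch.xpu.is_available() reports whether the XPU backend is usable.
print(torch.__version__)
print(torch.xpu.is_available() if hasattr(torch, "xpu") else "torch.xpu not present")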
Kind regards,
Stef
