loss.backward()
  File "/root/anaconda/envs/pytorch/lib/python3.8/site-packages/torch/_tensor.py", line 363, in backward
    torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
  File "/root/anaconda/envs/pytorch/lib/python3.8/site-packages/torch/autograd/__init__.py", line 173, in backward
    Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass
RuntimeError: Trying to backward through the graph a second time (or directly access saved tensors after they have already been freed). Saved intermediate values of the graph are freed when you call .backward() or autograd.grad(). Specify retain_graph=True if you need to backward through the graph a second time or if you need to access saved tensors after calling backward.
The loss function is made up of two parts. The error above is raised when the loss is backpropagated. Has anyone run into a similar situation?
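As I understand it, autograd frees a graph's saved intermediate tensors after the first backward pass, so the error means some part of my graph is being backpropagated through twice. A minimal standalone sketch (unrelated to my training code) that reproduces the same RuntimeError:

import torch

x = torch.ones(2, requires_grad=True)
y = (x * 2).sum()
y.backward()  # first backward frees the graph's saved intermediate tensors
y.backward()  # second backward over the same graph raises the RuntimeError above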
My code is as follows.
for i, (aug_images_u, target_u) in enumerate(tk0):
    aug_images_l, target_l = next(train_loader_l)  # self.plabels
    mix_y = next(Target_M)
    target_l = target_l.to(args.device)
    target_u = target_u.to(args.device)
    mix_y = mix_y.to(args.device)
    target = torch.cat((target_l, target_u, mix_y), 0)

    # Create the mixup permutation and mixing coefficient
    alpha = args.alpha
    index = torch.randperm(args.batch_size, device=args.device)
    lam = np.random.beta(alpha, alpha)
    target_a, target_b = target, target[index]

    optimizer.zero_grad()
    adjust_learning_rate(optimizer, epoch, i, lr_length, args)

    # Accumulate the loss over all augmented views, then backward once
    count = 0
    for batch_l, batch_u, mix_x in zip(aug_images_l, aug_images_u, aug_images_X):
        batch_l = batch_l.to(args.device)
        batch_u = batch_u.to(args.device)
        mix_x = mix_x.to(args.device)
        batch = torch.cat((batch_l, batch_u, mix_x), 0)
        m_batch = mixup_data(batch, index, lam)
        class_logit = model(m_batch)
        class_logit = class_logit[0]
        if count == 0:
            loss_sum = mixup_criterion(class_logit.double(), target_a, target_b, lam).mean()
        else:
            loss_sum = loss_sum + mixup_criterion(class_logit.double(), target_a, target_b, lam).mean()
        count += 1

    loss = loss_sum / args.aug_num
    loss.backward()
    optimizer.step()

    if args.progress:
        loss_meter.add(loss.item())
        tk0.set_postfix(loss=loss_meter.mean)
    global_step += 1

print('loss:\t{:.4f}'.format(loss_meter.mean))
return global_step, loss_meter.mean
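My suspicion (unconfirmed) is that some tensor reused across iterations still carries autograd history, e.g. if the pseudo-labels from Target_M or the mixed images in aug_images_X were produced by an earlier model forward pass. Purely as a hypothetical illustration of what detaching them would look like:

# Hypothetical sketch, not my actual code: if mix_y is produced by a model,
# detach it where it is generated so later iterations do not backpropagate
# through an already-freed graph.
mix_y = next(Target_M)
mix_y = mix_y.detach().to(args.device)  # drop any autograd history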
Next, I added retain_graph=True to loss.backward(), i.e. loss.backward(retain_graph=True). Another error then appeared.
loss.backward(retain_graph=True)
  File "/root/anaconda/envs/pytorch/lib/python3.8/site-packages/torch/_tensor.py", line 363, in backward