Hey everyone,
I'm facing a problem: I went through the previous solutions but could not resolve it. I am already moving the model to the device, but it is still showing the error.
Here is my full error report:
{
"name": "RuntimeError",
"message": "Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same",
“stack”: "---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Cell In[35], line 25
22 parameter = Parameter(budget=budget)
24 # Run ZOOpt
—> 25 solution = Opt.min(objective, parameter)
26 best_params = solution.get_x() # Extract the best parameters
27 best_learning_rate = best_params[0]
File ~/.local/lib/python3.11/site-packages/zoopt/opt.py:50, in Opt.min(objective, parameter)
48 result = sre.opt()
49 else:
—> 50 result = optimizer.opt(objective, parameter)
51 result.print_solution()
52 return result
File ~/.local/lib/python3.11/site-packages/zoopt/algos/opt_algorithms/racos/racos_optimization.py:57, in RacosOptimization.opt(self, objective, parameter, strategy)
55 else:
56 self.__algorithm = SRacos()
—> 57 self.__best_solution = self.__algorithm.opt(
58 objective, parameter, strategy, ub)
59 else:
60 self.__algorithm = Racos()
File ~/.local/lib/python3.11/site-packages/zoopt/algos/opt_algorithms/racos/sracos.py:47, in SRacos.opt(self, objective, parameter, strategy, ub)
45 self.set_objective(objective)
46 self.set_parameters(parameter)
—> 47 self.init_attribute()
48 stopping_criterion = self._parameter.get_stopping_criterion()
49 i = 0
File ~/.local/lib/python3.11/site-packages/zoopt/algos/opt_algorithms/racos/racos_common.py:92, in RacosCommon.init_attribute(self)
90 break
91 if distinct_flag:
—> 92 self._objective.eval(x)
93 self._data.append(x)
94 i += 1
File ~/.local/lib/python3.11/site-packages/zoopt/objective.py:83, in Objective.eval(self, solution)
81 for i in range(self.__resample_times):
82 if self.__reducedim is False:
—> 83 val = self.__func(solution)
84 else:
85 x = solution.get_x()
Cell In[35], line 9, in objective_function(solution)
6 num_layers = int(params[1])
7 dropout_rate = params[2]
----> 9 train_losses, val_losses, val_accuracies, _ = train_model(num_layers, dropout_rate, learning_rate)
10 return -val_accuracies[-1]
Cell In[33], line 3, in train_model(num_layers, dropout_rate, learning_rate)
2 def train_model(num_layers, dropout_rate, learning_rate):
----> 3 model = CNN(num_layers, dropout_rate).to(device)
5 # Print the model summary
6 print("Model Summary:")
Cell In[30], line 18, in CNN.init(self, num_layers, dropout_rate)
15 out_channels *= 2
17 # Initialize flatten_size dynamically
—> 18 self.flatten_size = self._calculate_flatten_size()
19 self.fc1 = nn.Linear(self.flatten_size, 256)
20 self.fc2 = nn.Linear(256, len(dataset.classes)) # Ensure dataset.classes is defined
Cell In[30], line 28, in CNN._calculate_flatten_size(self)
26 x = dummy_input
27 for layer in self.layers:
—> 28 x = layer(x)
29 return int(torch.prod(torch.tensor(x.size())))
File ~/.local/lib/python3.11/site-packages/torch/nn/modules/module.py:1532, in Module._wrapped_call_impl(self, *args, **kwargs)
1530 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1531 else:
→ 1532 return self._call_impl(*args, **kwargs)
File ~/.local/lib/python3.11/site-packages/torch/nn/modules/module.py:1541, in Module._call_impl(self, *args, **kwargs)
1536 # If we don’t have any hooks, we want to skip the rest of the logic in
1537 # this function, and just call forward.
1538 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1539 or _global_backward_pre_hooks or _global_backward_hooks
1540 or _global_forward_hooks or _global_forward_pre_hooks):
→ 1541 return forward_call(*args, **kwargs)
1543 try:
1544 result = None
File ~/.local/lib/python3.11/site-packages/torch/nn/modules/conv.py:460, in Conv2d.forward(self, input)
459 def forward(self, input: Tensor) → Tensor:
→ 460 return self._conv_forward(input, self.weight, self.bias)
File ~/.local/lib/python3.11/site-packages/torch/nn/modules/conv.py:456, in Conv2d._conv_forward(self, input, weight, bias)
452 if self.padding_mode != ‘zeros’:
453 return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
454 weight, bias, self.stride,
455 _pair(0), self.dilation, self.groups)
→ 456 return F.conv2d(input, weight, bias, self.stride,
457 self.padding, self.dilation, self.groups)
RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same"
}