Model weights are not moved to the GPU

I am using the C++ frontend and am trying to train my model on my GPU. It builds just fine and CUDA is also available.

I use model->to(torch::Device(torch::kCUDA)); to move my model to the GPU.
When I try to forward a CUDA tensor, I get the error:

Input type (CUDAFloatType) and weight type (CPUFloatType) should be the same (_convolution at ../../aten/src/ATen/native/Convolution.cpp:599)

I saw similar topics for Python, but I didn't find the reason why it does not work for me.

I tried model->to(torch::kCUDA) or model->to(torch::Device("cuda:0")) as well.
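
For context, this is roughly how I set up the device and the forward call (simplified sketch; stateSize, numFeatures and actionSize stand in for my own configuration values):

#include <torch/torch.h>

// Simplified sketch, assuming a single CUDA device is available.
torch::Device device(torch::kCUDA);

auto model = std::make_shared<Model>(stateSize, numFeatures, actionSize);
model->to(device);  // should move all registered parameters/buffers to the GPU

// Dummy input created directly on the GPU.
auto input = torch::rand({1, numFeatures, stateSize, stateSize},
                         torch::TensorOptions().device(device));
auto output = model->forward(input);  // this forward call throws the error above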

My model looks like this:

class Model : public torch::nn::Module {
private:
    unsigned int featureSize;
    torch::nn::Conv2d conv1{nullptr};
    torch::nn::Conv2d conv2{nullptr};
    torch::nn::Conv2d conv3{nullptr};

    torch::nn::Linear fc_value{nullptr};
    torch::nn::Linear fc_advantage{nullptr};
    torch::nn::Linear value_stream{nullptr};
    torch::nn::Linear advantage_stream{nullptr};

public:
    Model(unsigned int stateSize, unsigned int numFeatures, unsigned int actionSize);
    torch::Tensor forward(torch::Tensor input);
    size_t feature_size(unsigned int stateSize, unsigned int numFeatures);
};


Model::Model(unsigned int stateSize, unsigned int numFeatures, unsigned int actionSize)
{
    // Convolutional Layers
    this->conv1 = torch::nn::Conv2d(torch::nn::Conv2dOptions(numFeatures, 32, 8).stride(4));
    this->conv2 = torch::nn::Conv2d(torch::nn::Conv2dOptions(32, 64, 4).stride(2));
    this->conv3 = torch::nn::Conv2d(torch::nn::Conv2dOptions(64, 64, 3).stride(1));
    
    featureSize = static_cast<unsigned int>(feature_size(stateSize, numFeatures));
    this->fc_value = torch::nn::Linear(featureSize, 128);
    this->fc_advantage = torch::nn::Linear(featureSize, 128);
    this->value_stream = torch::nn::Linear(128, 1);
    this->advantage_stream = torch::nn::Linear(128, actionSize);
}

torch::Tensor Model::forward(torch::Tensor input)
{
    // Convolutional output
    auto features = torch::relu(conv1->forward(input));
    features = torch::relu(conv2->forward(features));
    features = torch::relu(conv3->forward(features));
    features = features.view({-1, featureSize});
    
    // Value stream output
    auto value = torch::relu(fc_value->forward(features));
    value = value_stream->forward(value);
    
    // Advantage output
    auto advantage = torch::relu(fc_advantage->forward(features));
    advantage = advantage_stream->forward(advantage);
    
    // return qvals
    return value + (advantage - advantage.mean());
}

size_t Model::feature_size(unsigned int stateSize, unsigned int numFeatures)
{
    auto input = torch::zeros({1, numFeatures, stateSize, stateSize});
    auto val = torch::relu(conv1->forward(input));
    val = torch::relu(conv2->forward(val));
    val = torch::relu(conv3->forward(val));
    
    return val.view({1, -1}).size(1);
}

I did find my problem. It was a rather unspectacular error: I forgot to register my layers with register_module(). After adding the registration calls, I got the expected results :hugs:
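
For anyone hitting the same error, each layer needs to be wrapped in register_module() so that Module::to() can find its parameters. A sketch of the fixed constructor (same layer names as above):

Model::Model(unsigned int stateSize, unsigned int numFeatures, unsigned int actionSize)
{
    // register_module() adds each submodule to this module's children,
    // so model->to(device) moves their parameters and buffers along with the model.
    conv1 = register_module("conv1", torch::nn::Conv2d(torch::nn::Conv2dOptions(numFeatures, 32, 8).stride(4)));
    conv2 = register_module("conv2", torch::nn::Conv2d(torch::nn::Conv2dOptions(32, 64, 4).stride(2)));
    conv3 = register_module("conv3", torch::nn::Conv2d(torch::nn::Conv2dOptions(64, 64, 3).stride(1)));

    featureSize = static_cast<unsigned int>(feature_size(stateSize, numFeatures));
    fc_value = register_module("fc_value", torch::nn::Linear(featureSize, 128));
    fc_advantage = register_module("fc_advantage", torch::nn::Linear(featureSize, 128));
    value_stream = register_module("value_stream", torch::nn::Linear(128, 1));
    advantage_stream = register_module("advantage_stream", torch::nn::Linear(128, actionSize));
}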