RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same

Thank you for your response. :pray:

Hello PyTorch forum, I'd like to ask about the same error message.
Here is my code:

from pathlib import Path
import hydra
import numpy as np
import torch
import torchaudio
from omegaconf import DictConfig
from soundfile import write
from torch.utils.data import DataLoader
from tqdm import tqdm

import os
import sys
sys.path.append(os.getcwd())

from clarity.dataset.cec1_dataset_copy import CEC1Dataset
from clarity.enhancer.dnn.mc_conv_tasnet import ConvTasNet
from clarity.enhancer.dsp.filter import AudiometricFIR

@hydra.main(config_path=".", config_name="config")
def run(cfg: DictConfig) -> None:
    exp_folder = Path(cfg.path.exp_folder)
    output_folder = Path(cfg.path.output_folder)

    test_set = CEC1Dataset(**cfg.test_dataset)
    test_loader = DataLoader(dataset=test_set, **cfg.test_loader)
    down_sample = up_sample = None
    if cfg.downsample_factor != 1:
        down_sample = torchaudio.transforms.Resample(
            orig_freq=cfg.sample_rate,
            new_freq=cfg.sample_rate // cfg.downsample_factor,
            resampling_method="sinc_interp_hann",
        )
        up_sample = torchaudio.transforms.Resample(
            orig_freq=cfg.sample_rate // cfg.downsample_factor,
            new_freq=cfg.sample_rate,
            resampling_method="sinc_interp_hann",
        )

    device = "cuda" if torch.cuda.is_available() else None

    with torch.no_grad():
        for batch in tqdm(test_loader, desc="testing"):
            noisy, scene = batch
            out = []
            for ear in ["left", "right"]:
                torch.cuda.empty_cache()
                # load denoising module
                den_model = ConvTasNet(**cfg.mc_conv_tasnet)
                den_model_path = exp_folder / f"{ear}_den/best_model.pth"

                den_model.load_state_dict(
                    torch.load(den_model_path, map_location=device)
                )
                _den_model = torch.nn.parallel.DataParallel(
                    den_model.to(torch.device("cuda"))
                )
                _den_model.eval()

                noisy = noisy.to(device)
                proc = noisy
                if down_sample is not None:
                    proc = down_sample(noisy)
                # enhanced = amp_model(den_model(proc)).squeeze(1)
                enhanced = den_model(proc).squeeze(1)
                if up_sample is not None:
                    enhanced = up_sample(enhanced)
                enhanced = torch.clamp(enhanced, -1, 1)
                out.append(enhanced.detach().cpu().numpy()[0])

            out = np.stack(out, axis=0).transpose()
            write(
                # output_folder / f"{scene[0]}_{cfg.listener.id}_HA-output.wav"
                output_folder / f"{scene}_HA-output.wav",
                out,
                cfg.sample_rate,
            )


if __name__ == "__main__":
    run()

I am a complete beginner in PyTorch, please help me.
This is my full error message:

Traceback (most recent call last):
  File "coba_test.py", line 78, in run
    proc = down_sample(noisy)
  File "C:\Users\nirma\anaconda3\envs\clarity\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "C:\Users\nirma\anaconda3\envs\clarity\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
    return forward_call(*args, **kwargs)
  File "C:\Users\nirma\anaconda3\envs\clarity\lib\site-packages\torchaudio\transforms\_transforms.py", line 979, in forward
    return _apply_sinc_resample_kernel(waveform, self.orig_freq, self.new_freq, self.gcd, self.kernel, self.width)
  File "C:\Users\nirma\anaconda3\envs\clarity\lib\site-packages\torchaudio\functional\functional.py", line 1466, in _apply_sinc_resample_kernel
    resampled = torch.nn.functional.conv1d(waveform[:, None], kernel, stride=orig_freq)
RuntimeError: Input type (torch.cuda.FloatTensor) and weight type (torch.FloatTensor) should be the same

Based on the error message, you might need to move down_sample to the GPU as well.
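
Something like this in your loop should work (just a sketch, assuming device is set to "cuda" as in your script):

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# The Resample transforms are nn.Modules; .to(device) moves their internal
# kernel buffer onto the GPU, the same way it does for a model.
if down_sample is not None:
    down_sample = down_sample.to(device)
if up_sample is not None:
    up_sample = up_sample.to(device)

noisy = noisy.to(device)
proc = down_sample(noisy) if down_sample is not None else noisy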

But isn't down_sample just a function, not data or a model?

down_sample seems to be an nn.Module based on the stack trace, and I would assume it stores internal parameters or buffers (the resampling kernel) that were not moved to the GPU.
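
You can check this directly; a quick standalone example (the sample rates are just placeholders):

import torch
import torchaudio

resample = torchaudio.transforms.Resample(orig_freq=44100, new_freq=22050)
print(isinstance(resample, torch.nn.Module))  # True
print(next(resample.buffers()).device)        # cpu: the sinc kernel buffer lives on the CPU by default

resample = resample.to("cuda")
print(next(resample.buffers()).device)        # cuda:0 after moving the transform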

Thank you, your advice was very helpful.