BindsNet error (Can't pickle local object 'TorchvisionDatasetWrapper')

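This is the training loop from examples/mnist/eth_mnist.py (line 164 in the traceback below is the for loop over the dataloader):
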
for step, batch in enumerate(tqdm(dataloader)):
    # Get next input sample.
    inpts = {"X": batch["encoded_image"].view(time, 1, 1, 28, 28)}
    if gpu:
        inpts = {k: v.cuda() for k, v in inpts.items()}

    if step % update_interval == 0 and step > 0:
        # Convert the array of labels into a tensor
        label_tensor = torch.tensor(labels)

        # Get network predictions.
        all_activity_pred = all_activity(
            spikes=spike_record, assignments=assignments, n_labels=n_classes
        )
        proportion_pred = proportion_weighting(
            spikes=spike_record,
            assignments=assignments,
            proportions=proportions,
            n_labels=n_classes,
        )

        # Compute network accuracy according to available classification strategies.
        accuracy["all"].append(
            100
            * torch.sum(label_tensor.long() == all_activity_pred).item()
            / len(label_tensor)
        )
        accuracy["proportion"].append(
            100
            * torch.sum(label_tensor.long() == proportion_pred).item()
            / len(label_tensor)
        )

        print(
            "\nAll activity accuracy: %.2f (last), %.2f (average), %.2f (best)"
            % (
                accuracy["all"][-1],
                np.mean(accuracy["all"]),
                np.max(accuracy["all"]),
            )
        )
        print(
            "Proportion weighting accuracy: %.2f (last), %.2f (average), %.2f (best)\n"
            % (
                accuracy["proportion"][-1],
                np.mean(accuracy["proportion"]),
                np.max(accuracy["proportion"]),
            )
        )

        # Assign labels to excitatory layer neurons.
        assignments, proportions, rates = assign_labels(
            spikes=spike_record,
            labels=label_tensor,
            n_labels=n_classes,
            rates=rates,
        )

        labels = []

    labels.append(batch["label"])

    # Run the network on the input.
    network.run(inpts=inpts, time=time, input_time_dim=1)

    # Get voltage recording.
    exc_voltages = exc_voltage_monitor.get("v")
    inh_voltages = inh_voltage_monitor.get("v")

    # Add to spikes recording.
    spike_record[step % update_interval] = spikes["Ae"].get("s").squeeze()

    # Optionally plot various simulation information.
    if plot:
        image = batch["image"].view(28, 28)
        inpt = inpts["X"].view(time, 784).sum(0).view(28, 28)
        input_exc_weights = network.connections[("X", "Ae")].w
        square_weights = get_square_weights(
            input_exc_weights.view(784, n_neurons), n_sqrt, 28
        )
        square_assignments = get_square_assignments(assignments, n_sqrt)
        spikes_ = {layer: spikes[layer].get("s") for layer in spikes}
        voltages = {"Ae": exc_voltages, "Ai": inh_voltages}
        #
        inpt_axes, inpt_ims = plot_input(
            image, inpt, label=labels[step], axes=inpt_axes, ims=inpt_ims
        )
        spike_ims, spike_axes = plot_spikes(spikes_, ims=spike_ims, axes=spike_axes)
        weights_im = plot_weights(square_weights, im=weights_im)
        assigns_im = plot_assignments(square_assignments, im=assigns_im)
        perf_ax = plot_performance(accuracy, ax=perf_ax)
        voltage_ims, voltage_axes = plot_voltages(
            voltages, ims=voltage_ims, axes=voltage_axes, plot_type="line"
        )

        plt.pause(1e-8)

    network.reset_()  # Reset state variables.  

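Running it gives the following traceback:
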
File "C:/Users/user/PycharmProjects/bindsnet-master/bindsnet-master/examples/mnist/eth_mnist.py", line 164, in <module>
    for step, batch in enumerate(tqdm(dataloader)):
File "C:\Users\user\Anaconda3\envs\myspace\lib\site-packages\tqdm\_tqdm.py", line 1034, in __iter__
    for obj in iterable:
File "C:\Users\user\Anaconda3\envs\myspace\lib\site-packages\torch\utils\data\dataloader.py", line 278, in __iter__
    return _MultiProcessingDataLoaderIter(self)
File "C:\Users\user\Anaconda3\envs\myspace\lib\site-packages\torch\utils\data\dataloader.py", line 682, in __init__
    w.start()
File "C:\Users\user\Anaconda3\envs\myspace\lib\multiprocessing\process.py", line 112, in start
    self._popen = self._Popen(self)
File "C:\Users\user\Anaconda3\envs\myspace\lib\multiprocessing\context.py", line 223, in _Popen
    return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\user\Anaconda3\envs\myspace\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
File "C:\Users\user\Anaconda3\envs\myspace\lib\multiprocessing\popen_spawn_win32.py", line 89, in __init__
    reduction.dump(process_obj, to_child)
File "C:\Users\user\Anaconda3\envs\myspace\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
AttributeError: Can't pickle local object 'create_torchvision_dataset_wrapper.<locals>.TorchvisionDatasetWrapper'
  0%|          | 0/60000 [00:00<?, ?it/s]

Process finished with exit code 1

I get this error but I can't figure out why. Has anyone run into and solved the same error?

I have run into the same error, and I also don't know how to solve it. Did you manage to fix it?
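A note on what the traceback is saying, in case it helps: the dataset handed to the DataLoader is an instance of TorchvisionDatasetWrapper, a class that create_torchvision_dataset_wrapper defines inside its function body. The traceback shows _MultiProcessingDataLoaderIter, so the DataLoader was created with num_workers > 0; on Windows, worker processes are started with spawn, which pickles the dataset to send it to each worker, and pickle cannot serialize a class defined in a local scope. The sketch below (the names are stand-ins, not BindsNet code) reproduces the same AttributeError without BindsNet:

import pickle


def create_dataset_wrapper():          # stand-in for create_torchvision_dataset_wrapper
    class DatasetWrapper:              # stand-in for TorchvisionDatasetWrapper
        pass

    return DatasetWrapper


# An instance of a class defined inside a function, like the wrapped dataset.
dataset = create_dataset_wrapper()()

try:
    # This is roughly what multiprocessing does when it spawns a DataLoader worker.
    pickle.dumps(dataset)
except AttributeError as err:
    print(err)  # Can't pickle local object 'create_dataset_wrapper.<locals>.DatasetWrapper'

Given that, two things would likely avoid it (I haven't verified either against the current eth_mnist.py, so treat them as guesses): pass num_workers=0 to the DataLoader so loading stays in the main process and the dataset is never pickled, or define the wrapper class at module level instead of inside create_torchvision_dataset_wrapper so that pickle can find it by name.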