I am trying to prune my deep-learning model via global pruning. The original, unpruned model is about 77.5 MB. However, after pruning, when I save the model, its size is the same as the original. Can anyone help me with this issue?

Below is the pruning code:

import torch
import torch.nn.utils.prune as prune

# (module, parameter-name) pairs that are pruned jointly: global pruning
# picks the 20% smallest weights across ALL of these tensors at once,
# instead of 20% per layer.
parameters_to_prune = (
    (model.encoder[0], "weight"),
    (model.up_conv1[0], "weight"),
    (model.up_conv2[0], "weight"),
    (model.up_conv3[0], "weight"),
)

print(parameters_to_prune)

prune.global_unstructured(
    parameters_to_prune,
    pruning_method=prune.L1Unstructured,
    amount=0.2,
)


def _sparsity(weight):
    """Return the percentage of zero-valued entries in `weight`."""
    return 100. * float(torch.sum(weight == 0)) / float(weight.nelement())


print("Sparsity in Encoder.weight: {:.2f}%".format(_sparsity(model.encoder[0].weight)))
print("Sparsity in up_conv1.weight: {:.2f}%".format(_sparsity(model.up_conv1[0].weight)))
print("Sparsity in up_conv2.weight: {:.2f}%".format(_sparsity(model.up_conv2[0].weight)))
print("Sparsity in up_conv3.weight: {:.2f}%".format(_sparsity(model.up_conv3[0].weight)))

# Global sparsity over all pruned tensors combined.
total_zeros = sum(float(torch.sum(module.weight == 0)) for module, _ in parameters_to_prune)
total_elems = sum(float(module.weight.nelement()) for module, _ in parameters_to_prune)
print("Global sparsity: {:.2f}%".format(100. * total_zeros / total_elems))

# Setting pruning to permanent: prune.remove() folds weight_orig * weight_mask
# into a plain `weight` tensor and drops the re-parametrization hooks.
for module, param_name in parameters_to_prune:
    prune.remove(module, param_name)

# Saving the model.
# NOTE: pruning only sets entries of otherwise-dense tensors to zero; the
# state_dict still stores every element, so the saved file stays the same
# size (~77.5 MB). To actually shrink the checkpoint, store the pruned
# tensors in a sparse format (e.g. tensor.to_sparse()) or compress the file.
PATH = r"C:\PrunedNet.pt"  # raw string so the backslash is not treated as an escape
torch.save(model.state_dict(), PATH)