RuntimeError: Error(s) in loading state_dict for VGGModel:

Hello,

I have trained a neural network model for morph attack detection. When I load my trained model, I get the following error:

Traceback (most recent call last):

  File ~\anaconda3\Lib\site-packages\spyder_kernels\py3compat.py:356 in compat_exec
    exec(code, globals, locals)

  File c:\users\micha\desktop\studium\8. semester\bachelor\algorithmen\gui\unbenannt1.py:227
    main()

  File c:\users\micha\desktop\studium\8. semester\bachelor\algorithmen\gui\unbenannt1.py:216 in main
    process_folder()

  File c:\users\micha\desktop\studium\8. semester\bachelor\algorithmen\gui\unbenannt1.py:106 in process_folder
    model.load_state_dict(torch.load(model_file_path, map_location=torch.device('cpu')))

  File ~\anaconda3\Lib\site-packages\torch\nn\modules\module.py:2153 in load_state_dict
    raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(

RuntimeError: Error(s) in loading state_dict for VGGModel:
Missing key(s) in state_dict: "features.0.weight", "features.0.bias", "features.2.weight", "features.2.bias", "features.5.weight", "features.5.bias", "features.7.weight", "features.7.bias", "features.10.weight", "features.10.bias", "features.12.weight", "features.12.bias", "features.14.weight", "features.14.bias", "features.17.weight", "features.17.bias", "features.19.weight", "features.19.bias", "features.21.weight", "features.21.bias", "features.24.weight", "features.24.bias", "features.26.weight", "features.26.bias", "features.28.weight", "features.28.bias", "classifier.0.weight", "classifier.0.bias", "classifier.3.weight", "classifier.3.bias", "classifier.6.weight", "classifier.6.bias".

My main code to load the model is:

import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image, ImageTk
import pandas as pd
import os
from typing import Tuple
import tkinter as tk
from tkinter import filedialog, messagebox

# Function to select a file

def select_file(title, filetypes):
    root = tk.Tk()
    root.withdraw()
    file_path = filedialog.askopenfilename(title=title, filetypes=filetypes)
    return file_path

# Function to select a folder

def select_folder(title):
    root = tk.Tk()
    root.withdraw()
    folder_path = filedialog.askdirectory(title=title)
    return folder_path

# Definition of the transform

transform = transforms.Compose([
    transforms.Resize((413, 531)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# Use of the pretrained VGG model

class VGGModel(nn.Module):
    def __init__(self, num_classes=1000):
        super(VGGModel, self).__init__()
        self.vgg = models.vgg16(pretrained=False)
        # Remove the last layers of the pretrained model
        self.features = self.vgg.features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes)
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

# Function for differential morph attack detection

def detect_morph_differentially(suspected_morph_file_path: str, probe_face_file_path: str) -> Tuple[str, bool, float]:
    return_code = "Success"
    is_morph = False
    score = 0.0

    # Load the suspected morph image
    try:
        suspected_morph_image = Image.open(suspected_morph_file_path).convert('RGB')
        suspected_morph_image_tensor = transform(suspected_morph_image).unsqueeze(0)
    except Exception as e:
        return f"Fehler beim Laden des verdächtigen Morph-Bildes: {e}", is_morph, score

    # Load the original image
    try:
        probe_face_image = Image.open(probe_face_file_path).convert('RGB')
        probe_face_image_tensor = transform(probe_face_image).unsqueeze(0)
    except Exception as e:
        return f"Fehler beim Laden des Original-Bildes: {e}", is_morph, score

    # Detection
    with torch.no_grad():
        suspected_morph_output = model(suspected_morph_image_tensor)
        probe_face_output = model(probe_face_image_tensor)
        score = (suspected_morph_output + probe_face_output) / 2.0
        score = score.item()  # Convert tensor to float

    # Set is_morph=True if the suspected morph image is a morph, otherwise is_morph=False
    is_morph = score > 0.5

    return return_code, is_morph, score

# Main function to scan the folder and run the morph detection

def process_folder():
    global model  # make the loaded model visible to detect_morph_differentially
    model_file_path = select_file("Wählen Sie die Modell-Datei aus", [("Modelldateien", "*.pth *.pt")])
    morph_images_folder = select_folder("Wählen Sie den Ordner mit den Morphbildern aus")
    original_images_folder = select_folder("Wählen Sie den Ordner mit den Originalbildern aus")
    excel_file_path = select_file("Wählen Sie die Excel-Liste aus", [("Excel-Dateien", "*.xlsx")])

    # Load the trained model
    model = VGGModel(num_classes=1000)  # adjust the number of classes accordingly
    model.load_state_dict(torch.load(model_file_path, map_location=torch.device('cpu')))
    model.eval()

    # Load the Excel list
    df = pd.read_excel(excel_file_path)

    # Check and adjust the Excel list
    if 'Morph Detected 1' not in df.columns:
        df['Morph Detected 1'] = ''
    if 'Score 1' not in df.columns:
        df['Score 1'] = 0.0
    if 'Morph Detected 2' not in df.columns:
        df['Morph Detected 2'] = ''
    if 'Score 2' not in df.columns:
        df['Score 2'] = 0.0

    for idx, row in df.iterrows():
        try:
            morph_image_name = row['Dateiname']
            original_image_1_prefix = str(row['Bild1']).zfill(3)
            original_image_2_prefix = str(row['Bild2']).zfill(3)

            morph_image_path = os.path.join(morph_images_folder, morph_image_name)
            original_image_1_path = find_image_by_prefix(original_images_folder, original_image_1_prefix)
            original_image_2_path = find_image_by_prefix(original_images_folder, original_image_2_prefix)

            if original_image_1_path:
                # Morph detection 1
                _, is_morph_1, score_1 = detect_morph_differentially(morph_image_path, original_image_1_path)
                df.at[idx, 'Morph Detected 1'] = 'Wahr' if is_morph_1 else 'Falsch'
                df.at[idx, 'Score 1'] = score_1

            if original_image_2_path:
                # Morph detection 2
                _, is_morph_2, score_2 = detect_morph_differentially(morph_image_path, original_image_2_path)
                df.at[idx, 'Morph Detected 2'] = 'Wahr' if is_morph_2 else 'Falsch'
                df.at[idx, 'Score 2'] = score_2

        except Exception as e:
            print(f"Fehler bei der Verarbeitung der Zeile {idx}: {e}")

    save_path = filedialog.asksaveasfilename(defaultextension=".xlsx", filetypes=[("Excel-Dateien", "*.xlsx")])
    if save_path:
        df.to_excel(save_path, index=False)
        messagebox.showinfo("Erfolg", "Die Excel-Liste wurde erfolgreich gespeichert.")
    else:
        messagebox.showwarning("Abgebrochen", "Speichern der Excel-Liste abgebrochen.")

# Function to find an image by its prefix

def find_image_by_prefix(folder_path, prefix):
    for file_name in os.listdir(folder_path):
        if file_name.startswith(prefix):
            return os.path.join(folder_path, file_name)
    return None

# Single-image investigation

def single_image_investigation():
    model_file_path = select_file("Wählen Sie die Modell-Datei aus", [("Modelldateien", "*.pth *.pt")])
    morph_image_path = select_file("Wählen Sie das Morphbild aus", [("Bilddateien", "*.jpg *.jpeg *.png")])
    probe_face_image_path = select_file("Wählen Sie das Originalbild aus", [("Bilddateien", "*.jpg *.jpeg *.png")])

    # Load the trained model
    global model
    model = VGGModel()
    model.load_state_dict(torch.load(model_file_path, map_location=torch.device('cpu')))
    model.eval()

    # Morph detection
    return_code, is_morph, score = detect_morph_differentially(morph_image_path, probe_face_image_path)

    # Display the results
    if return_code == "Success":
        result_message = f"Is Morph: {is_morph}, Score: {score}"
    else:
        result_message = return_code

    # Load and display the images
    morph_image = Image.open(morph_image_path)
    probe_face_image = Image.open(probe_face_image_path)

    # Create the window
    root = tk.Tk()
    root.title("Morph Detection Result")

    # Show the result
    tk.Label(root, text=result_message, font=("Helvetica", 16)).pack()

    # Function to display the images
    def display_images():
        morph_image_tk = ImageTk.PhotoImage(morph_image)
        probe_face_image_tk = ImageTk.PhotoImage(probe_face_image)
        morph_label = tk.Label(root, image=morph_image_tk)
        probe_label = tk.Label(root, image=probe_face_image_tk)
        morph_label.image = morph_image_tk  # keep a reference so the image stays visible in the window
        probe_label.image = probe_face_image_tk
        morph_label.pack()
        probe_label.pack()

    display_images()
    root.mainloop()

# Main function to run the script

def main():
    root = tk.Tk()
    root.withdraw()

    # Dialog for choosing between folder processing and single-image investigation
    choice = messagebox.askquestion("Auswahl", "Möchten Sie einen Ordner durchsuchen oder eine Einzelbilduntersuchung durchführen?")

    if choice == 'yes':
        process_folder()
    else:
        single_image_investigation()

if __name__ == "__main__":
    main()

And this is my code to train the model:

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
from torch.utils.data import DataLoader
from PIL import Image, ImageTk
import tkinter as tk
from tkinter import filedialog, messagebox
import os

# Parameters

batch_size = 32
learning_rate = 0.001
num_epochs = 10
num_classes = 1000  # number of classes in the dataset

# Data preprocessing

transform = transforms.Compose([
    transforms.Resize((413, 531)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# Function to select a file

def select_file(title, filetypes):
    root = tk.Tk()
    root.withdraw()
    file_path = filedialog.askopenfilename(title=title, filetypes=filetypes)
    return file_path

# Function to select a folder

def select_folder(title):
    root = tk.Tk()
    root.withdraw()
    folder_path = filedialog.askdirectory(title=title)
    return folder_path

# Define the VGG model

class VGGModel(nn.Module):
    def __init__(self, num_classes):
        super(VGGModel, self).__init__()
        self.vgg = models.vgg16(pretrained=True)
        self.vgg.classifier[6] = nn.Linear(4096, num_classes)  # adjust the last FC layer

    def forward(self, x):
        x = self.vgg(x)
        return x

model = VGGModel(num_classes=num_classes)

# Loss and optimizer

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training the model

def train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs):
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        correct = 0
        total = 0

        for inputs, labels in train_loader:
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item() * inputs.size(0)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        epoch_loss = running_loss / len(train_loader.dataset)
        epoch_acc = correct / total
        print(f'Epoch {epoch}/{num_epochs-1}, Loss: {epoch_loss:.4f}, Accuracy: {epoch_acc:.4f}')

        # Validation
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0
        with torch.no_grad():
            for inputs, labels in val_loader:
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                val_loss += loss.item() * inputs.size(0)
                _, predicted = torch.max(outputs, 1)
                val_total += labels.size(0)
                val_correct += (predicted == labels).sum().item()

        val_epoch_loss = val_loss / len(val_loader.dataset)
        val_epoch_acc = val_correct / val_total
        print(f'Validation Loss: {val_epoch_loss:.4f}, Validation Accuracy: {val_epoch_acc:.4f}')

# Main function to select the paths and train the model

def main():
    train_data_path = select_folder("Wählen Sie den Ordner mit den Trainingsdaten aus")
    val_data_path = select_folder("Wählen Sie den Ordner mit den Validierungsdaten aus")
    save_model_path = filedialog.asksaveasfilename(defaultextension=".pth", filetypes=[("PyTorch Model", "*.pth")], title="Wählen Sie den Speicherort für das trainierte Modell aus")

    if not train_data_path or not val_data_path or not save_model_path:
        messagebox.showwarning("Abgebrochen", "Einer der Pfade wurde nicht ausgewählt. Das Training wurde abgebrochen.")
        return

    # Load the data
    train_data = torchvision.datasets.ImageFolder(root=train_data_path, transform=transform)
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)

    val_data = torchvision.datasets.ImageFolder(root=val_data_path, transform=transform)
    val_loader = DataLoader(val_data, batch_size=batch_size, shuffle=False)

    # Train the model
    train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs)

    # Save the model
    torch.save(model.state_dict(), save_model_path)
    messagebox.showinfo("Erfolg", f"Das Modell wurde erfolgreich gespeichert unter: {save_model_path}")

if __name__ == "__main__":
    main()

I would be very grateful for some help, as this problem has been bothering me for a long time.

Your model definitions are not equal.
In the first part of the code you are creating a self.features attribute:

class VGGModel(nn.Module):
    def __init__(self, num_classes=1000):
        super(VGGModel, self).__init__()
        self.vgg = models.vgg16(pretrained=False)
        # Entfernen der letzten Schichten des vortrainierten Modells
        self.features = self.vgg.features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
        nn.Linear(512 * 7 * 7, 4096),
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(4096, 4096),
        nn.ReLU(True),
        nn.Dropout(),
        nn.Linear(4096, num_classes)
        )
...

while it’s missing in the second model definition:

class VGGModel(nn.Module):
    def __init__(self, num_classes):
        super(VGGModel, self).__init__()
        self.vgg = models.vgg16(pretrained=True)
        self.vgg.classifier[6] = nn.Linear(4096, num_classes) # Anpassung der letzten FC-Schicht
...

Reuse the same model definition and it should work.
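
For illustration, a minimal sketch of that approach (the checkpoint path model.pth is only a placeholder): keep a single VGGModel definition, identical to the one used during training, and load the checkpoint with the default strict=True so any remaining mismatch raises immediately. weights=None is fine here, since every parameter is overwritten by the checkpoint.

    import torch
    import torch.nn as nn
    import torchvision.models as models

    class VGGModel(nn.Module):
        # same definition as in the training script
        def __init__(self, num_classes):
            super(VGGModel, self).__init__()
            self.vgg = models.vgg16(weights=None)  # parameters will be restored from the checkpoint
            self.vgg.classifier[6] = nn.Linear(4096, num_classes)

        def forward(self, x):
            return self.vgg(x)

    model = VGGModel(num_classes=1000)
    state_dict = torch.load("model.pth", map_location="cpu")  # placeholder path
    model.load_state_dict(state_dict)  # strict=True by default, so all "vgg.*" keys must match
    model.eval()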

Thank you ptrblck for your answer. It helped me to fix the problem. Thank you very much. But now I have a new problem: when I want to compare two images for their similarity, the model gives a different score each time, although I think it should be the same every time it is checked. I think it is due to the following warnings:

Warning: conv1.weight not found in provided state_dict.
Warning: bn1.weight not found in provided state_dict.
Warning: bn1.bias not found in provided state_dict.
Warning: bn1.running_mean not found in provided state_dict.
Warning: bn1.running_var not found in provided state_dict.
Warning: bn1.num_batches_tracked not found in provided state_dict.
Warning: layer1.0.conv1.weight not found in provided state_dict.
Warning: layer1.0.bn1.weight not found in provided state_dict.
Warning: layer1.0.bn1.bias not found in provided state_dict.
Warning: layer1.0.bn1.running_mean not found in provided state_dict.
Warning: layer1.0.bn1.running_var not found in provided state_dict.
Warning: layer1.0.bn1.num_batches_tracked not found in provided state_dict.

This is my code to load the model:

# Definition of the transform

transform = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# Definition of MorphDetectionResNet as a class

class MorphDetectionResNet(nn.Module):
    def __init__(self):
        super(MorphDetectionResNet, self).__init__()
        self.model = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)
        self.model.fc = nn.Linear(self.model.fc.in_features, 2)  # assuming binary classification: Morph (0) or Bona Fide (1)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        x = self.model(x)
        x = self.softmax(x)
        return x

# Function to load the model state dict with key matching

from collections import OrderedDict  # required for OrderedDict below

def load_state_dict_with_match(model, state_dict):
    model_state_dict = model.state_dict()
    new_state_dict = OrderedDict()

    for key, value in state_dict.items():
        if key in model_state_dict:
            new_state_dict[key] = value
        else:
            print(f"Warning: {key} not found in provided state_dict.")

    # Load the filtered state_dict into the model (non-strict, so missing keys are ignored)
    model.load_state_dict(new_state_dict, strict=False)

Can you help me with this problem? Thank you very much.

Yes, the explanation sounds reasonable. If parameters are ignored as they are missing in the provided state_dict, I would expect to see different results.

I don’t know where your current state_dict comes from, but apparently it does not contain the same keys.
Your code snippet only shows how the MorphDetectionResNet is defined and how you are trying to load a state_dict.
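
As a rough sketch of how you could debug this (assuming the file stores a plain state_dict; checkpoint.pth is just a placeholder path), compare the checkpoint keys with the model's keys before loading and keep the default strict=True. A mismatch will then fail loudly instead of silently leaving parameters at their initial values, which would explain the varying scores:

    import torch

    model = MorphDetectionResNet()
    checkpoint = torch.load("checkpoint.pth", map_location="cpu")  # placeholder path

    model_keys = set(model.state_dict().keys())
    ckpt_keys = set(checkpoint.keys())

    # keys the checkpoint provides but the model does not expect (e.g. a missing or extra "model."/"module." prefix)
    print("only in checkpoint:", sorted(ckpt_keys - model_keys)[:10])
    # keys the model expects but the checkpoint does not provide (these keep their initial values)
    print("only in model:", sorted(model_keys - ckpt_keys)[:10])

    # once the key sets line up, load with the default strict=True so mismatches raise an error
    model.load_state_dict(checkpoint)
    model.eval()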