This is the first part of my model:
# Convolutional neural network, PyTorch version
def Auto_CNN_pytorch(vy,vx,model_list,test_size=0.25,task_mode='regression',if_best_mode='no',modelpath=None,ifrandom_split='yes',cov_padding='same',cov_strides=1,pooling_strides=2,if_print_model='yes',loss_function='default',optimizer='SGD',metrics='default',learning_rate=0.01,epochs=2000,batch_size=20,ifheatmap='yes',ifweight='yes',ifmute='no',ifsave='no',savepath=None,device='cpu'):
from torch.nn import Module,BatchNorm1d,BatchNorm2d,LayerNorm,Conv2d,MaxPool2d,AvgPool2d,Dropout,LeakyReLU,ReLU,PReLU,Sigmoid,Tanh,ELU,Softmax,Linear,Flatten
from torch.optim import Adam,SGD
import torch
from torch import nn
import torchmetrics
import math
from sklearn.model_selection import train_test_split
import numpy as np
from scipy.stats import pearsonr
import os
from sklearn.metrics import accuracy_score,recall_score,precision_score,f1_score
import sklearn
import copy
import shap
import datetime
# ---- device / loss / metric selection --------------------------------------
# Resolve the torch device; anything other than 'gpu' falls back to CPU.
if device == 'gpu':
    devices = torch.device('cuda:0')
else:
    devices = torch.device('cpu')

class _PearsonCorrLoss(nn.Module):
    """Column-wise (1 - Pearson r) ** 1.5 with NaN-tolerant reductions.

    Shared by the 'Pearsonr' loss and metric options below (the original
    defined the identical class twice, once per branch).
    NOTE(review): the result keeps one value per output column rather than
    reducing to a scalar — preserved from the original implementation;
    confirm downstream .backward()/logging handles a non-scalar.
    """

    def forward(self, y, x):
        y_mean = torch.nanmean(y, dim=0, keepdim=True)
        x_mean = torch.nanmean(x, dim=0, keepdim=True)
        cov = torch.nansum((y - y_mean) * (x - x_mean), dim=0, keepdim=True)
        y_std = torch.sqrt(torch.nansum(torch.square(y - y_mean), dim=0, keepdim=True))
        x_std = torch.sqrt(torch.nansum(torch.square(x - x_mean), dim=0, keepdim=True))
        pearson = cov / (y_std * x_std)
        return (1 - pearson) ** 1.5

if task_mode == 'regression':
    # Loss selection; an unrecognized name leaves `loss` unset (as before).
    if loss_function == 'default' or loss_function == 'MSELoss':
        loss = torch.nn.MSELoss()
    elif loss_function == 'L1Loss':
        loss = torch.nn.L1Loss()  # FIX: was the class object, not an instance
    elif loss_function == 'PoissonNLLLoss':
        loss = torch.nn.PoissonNLLLoss()
    elif loss_function == 'GaussianNLLLoss':
        loss = torch.nn.GaussianNLLLoss()
    elif loss_function == 'KLDivLoss':
        loss = torch.nn.KLDivLoss()
    elif loss_function == 'HuberLoss':
        loss = torch.nn.HuberLoss()
    elif loss_function == 'SmoothL1Loss':
        loss = torch.nn.SmoothL1Loss()
    elif loss_function == 'Pearsonr':
        loss = _PearsonCorrLoss()  # FIX: was assigned the class itself
    # Metric selection (mirrors the loss options for regression).
    if metrics == 'default' or metrics == 'MSELoss':
        metric = torch.nn.MSELoss()
    elif metrics == 'L1Loss':
        metric = torch.nn.L1Loss()  # FIX: was the class object, not an instance
    elif metrics == 'PoissonNLLLoss':
        metric = torch.nn.PoissonNLLLoss()
    elif metrics == 'GaussianNLLLoss':
        metric = torch.nn.GaussianNLLLoss()
    elif metrics == 'KLDivLoss':
        metric = torch.nn.KLDivLoss()
    elif metrics == 'HuberLoss':
        metric = torch.nn.HuberLoss()
    elif metrics == 'SmoothL1Loss':
        metric = torch.nn.SmoothL1Loss()
    elif metrics == 'Pearsonr':
        metric = _PearsonCorrLoss()  # FIX: was assigned the class itself
elif task_mode == 'binary_classify':
    if loss_function == 'default' or loss_function == 'BCELoss':
        loss = torch.nn.BCELoss()
    elif loss_function == 'BCEWithLogitsLoss':
        loss = torch.nn.BCEWithLogitsLoss()
    elif loss_function == 'SoftMarginLoss':
        loss = torch.nn.SoftMarginLoss()
    elif loss_function == 'MultiLabelSoftMarginLoss':
        loss = torch.nn.MultiLabelSoftMarginLoss()
    if metrics == 'default' or metrics == 'f1':
        metric = torchmetrics.F1Score(task="binary").to(devices)
    elif metrics == 'accuracy':
        metric = torchmetrics.Accuracy(task="binary").to(devices)
    elif metrics == 'precision':
        metric = torchmetrics.Precision(task="binary").to(devices)
    elif metrics == 'recall':
        metric = torchmetrics.Recall(task="binary").to(devices)
    elif metrics == 'BCELoss':
        metric = torch.nn.BCELoss()
    elif metrics == 'BCEWithLogitsLoss':
        metric = torch.nn.BCEWithLogitsLoss()
    elif metrics == 'SoftMarginLoss':
        metric = torch.nn.SoftMarginLoss()
    elif metrics == 'MultiLabelSoftMarginLoss':
        metric = torch.nn.MultiLabelSoftMarginLoss()
elif task_mode == 'multi_classify':
    if loss_function == 'default' or loss_function == 'CrossEntropyLoss':
        loss = torch.nn.CrossEntropyLoss()
    elif loss_function == 'NLLLoss':
        loss = torch.nn.NLLLoss()
    elif loss_function == 'TripletMarginLoss':
        loss = torch.nn.TripletMarginLoss()
    elif loss_function == 'KLDivergence':
        # FIX: torch has no nn.KLDivergence (that is the Keras name);
        # the PyTorch equivalent is KLDivLoss. Option string kept for
        # backward compatibility with existing callers.
        loss = torch.nn.KLDivLoss()
    elif loss_function == 'HingeEmbeddingLoss':
        loss = torch.nn.HingeEmbeddingLoss()
    elif loss_function == 'MultiLabelMarginLoss':
        loss = torch.nn.MultiLabelMarginLoss()
    elif loss_function == 'TripletMarginWithDistanceLoss':
        loss = torch.nn.TripletMarginWithDistanceLoss()
    if metrics == 'default' or metrics == 'accuracy':
        # NOTE(review): recent torchmetrics versions require num_classes=...
        # for task="multiclass" — confirm against the pinned version.
        metric = torchmetrics.Accuracy(task="multiclass").to(devices)
    elif metrics == 'CrossEntropyLoss':
        metric = torch.nn.CrossEntropyLoss()
    elif metrics == 'NLLLoss':
        metric = torch.nn.NLLLoss()
    elif metrics == 'TripletMarginLoss':
        metric = torch.nn.TripletMarginLoss()
    elif metrics == 'KLDivergence':
        metric = torch.nn.KLDivLoss()  # FIX: see loss branch above
    elif metrics == 'HingeEmbeddingLoss':
        metric = torch.nn.HingeEmbeddingLoss()
    elif metrics == 'MultiLabelMarginLoss':
        metric = torch.nn.MultiLabelMarginLoss()
    elif metrics == 'TripletMarginWithDistanceLoss':
        metric = torch.nn.TripletMarginWithDistanceLoss()
# Placeholders filled/returned later in the (unseen) remainder of the function.
heatmap=0
weights=0
model=0
# Convert images from NHWC to NCHW for torch conv layers.
# NOTE(review): assumes vx is a 4-D numpy array (samples, H, W, channels) —
# confirm with callers; a non-4-D input raises here.
vx=vx.transpose(0,3,1,2)
# Ensure targets are 2-D (n_samples, 1) so loss/output shapes line up.
if vy.ndim==1:
vy=vy.reshape(vy.shape[0],1)
# Train/test split: shuffled split with a fixed seed (25) for reproducibility,
# or a simple head/tail split in original order when ifrandom_split != 'yes'.
if ifrandom_split=='yes':
trainy,testy,trainx,testx = train_test_split(vy,vx,test_size=test_size,random_state=25)
else:
index=int((1-test_size)*vy.shape[0])
trainy=vy[:index]
testy=vy[index:]
trainx=vx[:index,:,:,:]
testx=vx[index:,:,:,:]
if if_best_mode=='no':
class Model(nn.Module):
    """CNN assembled dynamically from `model_list`.

    Each entry of `model_list` is (kind, spec):
      'cov'        -> ('cov', (out_channels, kh, kw))
      'maxpooling' / 'avepooling' -> (kind, (kh, kw))
      'batchnormalization' / 'layernormalization' -> normalization on current width
      'activation' -> ('activation', 'relu'|'elu'|'leakyrelu'|'prelu'|'sigmoid'|'tanh'|'softmax')
      'flatten'    -> flatten to (N, features)
      'fc'         -> ('fc', out_features)
      'dropout'    -> ('dropout', p)

    Layers are created and chained via exec/eval with `self.__dict__` as the
    local namespace: layer i+1 is stored as e.g. self.conv{i+1} and its output
    during forward() as self.__dict__['model_conv{i+1}'].  `trainx` is only
    used for its NCHW shape.
    NOTE(review): if the final entry is 'cov', a pooling layer or 'flatten',
    forward() never assigns `outputs` and raises — preserved from the original.
    """

    def __init__(self, model_list, trainx, cov_strides, cov_padding, pooling_strides):
        super(Model, self).__init__()
        # Import layer constructors into self.__dict__ so the exec'd strings
        # below resolve them as locals.
        exec('from torch import nn', globals(), self.__dict__)
        exec('from torch.nn import Module,BatchNorm1d,BatchNorm2d,LayerNorm,Conv2d,MaxPool2d,AvgPool2d,Dropout,LeakyReLU,ReLU,PReLU,Sigmoid,Tanh,ELU,Softmax,Linear,Flatten', globals(), self.__dict__)
        exec('import numpy as np', globals(), self.__dict__)
        self.cov_strides = cov_strides
        self.cov_padding = cov_padding
        self.pooling_strides = pooling_strides
        self.trainx = trainx
        self.model_list = model_list
        # Spatial size tracked while stacking layers (trainx is NCHW).
        self.hight = self.trainx.shape[2]
        self.weight = self.trainx.shape[3]
        # Make `self` and the loop index `i` visible to the exec'd strings.
        self.__dict__['self'] = self
        self.flattened = False
        for i in range(len(self.model_list)):
            self.__dict__['i'] = i
            if self.model_list[i][0] == 'cov':
                if self.cov_padding == 'valid':
                    # 'valid' padding shrinks the feature map; 'same' keeps it.
                    self.hight = 1 + math.floor((self.hight - self.model_list[i][1][1]) / self.cov_strides)
                    self.weight = 1 + math.floor((self.weight - self.model_list[i][1][2]) / self.cov_strides)
                    if self.hight < 1 or self.weight < 1:
                        # Too many conv layers for the input size: stop stacking.
                        print('卷积层数过多')
                        break
                if i == 0:
                    # First conv reads the input channel count from trainx.
                    exec('self.conv'+str(i+1)+'=Conv2d(self.trainx.shape[1],self.model_list[i][1][0],(self.model_list[i][1][1],self.model_list[i][1][2]),stride=self.cov_strides,padding=self.cov_padding)', globals(), self.__dict__)
                    exec('in_channels=self.model_list[i][1][0]', globals(), self.__dict__)
                else:
                    exec('self.conv'+str(i+1)+'=Conv2d(in_channels,self.model_list[i][1][0],(self.model_list[i][1][1],self.model_list[i][1][2]),stride=self.cov_strides,padding=self.cov_padding)', globals(), self.__dict__)
                    exec('in_channels=self.model_list[i][1][0]', globals(), self.__dict__)
            elif self.model_list[i][0] == 'maxpooling':
                self.hight = 1 + math.floor((self.hight - self.model_list[i][1][0]) / self.pooling_strides)
                self.weight = 1 + math.floor((self.weight - self.model_list[i][1][1]) / self.pooling_strides)
                if self.hight < 1 or self.weight < 1:
                    # Too many pooling layers for the input size: stop stacking.
                    print('池化层数过多')
                    break
                exec('self.pool'+str(i+1)+'=MaxPool2d((self.model_list[i][1][0],self.model_list[i][1][1]),stride=self.pooling_strides)', globals(), self.__dict__)
            elif self.model_list[i][0] == 'avepooling':
                self.hight = 1 + math.floor((self.hight - self.model_list[i][1][0]) / self.pooling_strides)
                self.weight = 1 + math.floor((self.weight - self.model_list[i][1][1]) / self.pooling_strides)
                if self.hight < 1 or self.weight < 1:
                    print('池化层数过多')
                    break
                exec('self.pool'+str(i+1)+'=AvgPool2d((self.model_list[i][1][0],self.model_list[i][1][1]),stride=self.pooling_strides)', globals(), self.__dict__)
            elif self.model_list[i][0] == 'batchnormalization' and not self.flattened:
                # 2-D batch norm before flatten, 1-D after.
                exec('self.norm'+str(i+1)+'=BatchNorm2d(in_channels)', globals(), self.__dict__)
            elif self.model_list[i][0] == 'batchnormalization' and self.flattened:
                exec('self.norm'+str(i+1)+'=BatchNorm1d(in_channels)', globals(), self.__dict__)
            elif self.model_list[i][0] == 'layernormalization':
                # NOTE(review): LayerNorm over the channel count only; for 4-D
                # inputs this normalizes the last dim — confirm intent.
                exec('self.norm'+str(i+1)+'=LayerNorm(in_channels)', globals(), self.__dict__)
            elif self.model_list[i][0] == 'activation':
                if self.model_list[i][1] == 'elu':
                    exec('self.act'+str(i+1)+'=ELU()', globals(), self.__dict__)
                elif self.model_list[i][1] == 'leakyrelu':
                    exec('self.act'+str(i+1)+'=LeakyReLU()', globals(), self.__dict__)
                elif self.model_list[i][1] == 'prelu':
                    exec('self.act'+str(i+1)+'=PReLU()', globals(), self.__dict__)
                elif self.model_list[i][1] == 'relu':
                    exec('self.act'+str(i+1)+'=ReLU()', globals(), self.__dict__)
                elif self.model_list[i][1] == 'sigmoid':
                    exec('self.act'+str(i+1)+'=Sigmoid()', globals(), self.__dict__)
                elif self.model_list[i][1] == 'tanh':
                    exec('self.act'+str(i+1)+'=Tanh()', globals(), self.__dict__)
                elif self.model_list[i][1] == 'softmax':
                    exec('self.act'+str(i+1)+'=Softmax()', globals(), self.__dict__)
            elif self.model_list[i][0] == 'flatten':
                exec('self.fla'+str(i+1)+'=Flatten()', globals(), self.__dict__)
                # Feature width after flatten = H * W * channels.
                exec('in_channels=self.hight*self.weight*in_channels', globals(), self.__dict__)
                self.flattened = True
            elif self.model_list[i][0] == 'fc':
                exec('self.fc'+str(i+1)+'=Linear(in_channels,self.model_list[i][1])', globals(), self.__dict__)
                exec('in_channels=self.model_list[i][1]', globals(), self.__dict__)
            elif self.model_list[i][0] == 'dropout':
                exec('self.drop'+str(i+1)+'=Dropout(self.model_list[i][1])', globals(), self.__dict__)

    def forward(self, x):
        # Expose the input to the exec'd strings via the instance dict.
        self.__dict__['x'] = x
        for i in range(len(self.model_list)):
            if self.model_list[i][0] == 'cov':
                if i == 0:
                    exec('model_conv'+str(i+1)+'=self.conv'+str(i+1)+'(x)', globals(), self.__dict__)
                else:
                    # Pick up the previous layer's output by its kind-specific name.
                    if self.model_list[i-1][0] == 'cov':
                        exec('model_conv'+str(i+1)+'=self.conv'+str(i+1)+'(model_conv'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'maxpooling' or self.model_list[i-1][0] == 'avepooling':
                        exec('model_conv'+str(i+1)+'=self.conv'+str(i+1)+'(model_pool'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'batchnormalization' or self.model_list[i-1][0] == 'layernormalization':
                        exec('model_conv'+str(i+1)+'=self.conv'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'activation':
                        exec('model_conv'+str(i+1)+'=self.conv'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'dropout':
                        exec('model_conv'+str(i+1)+'=self.conv'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
            elif self.model_list[i][0] == 'maxpooling' or self.model_list[i][0] == 'avepooling':
                if self.model_list[i-1][0] == 'cov':
                    exec('model_pool'+str(i+1)+'=self.pool'+str(i+1)+'(model_conv'+str(i)+')', globals(), self.__dict__)
                elif self.model_list[i-1][0] == 'maxpooling' or self.model_list[i-1][0] == 'avepooling':
                    exec('model_pool'+str(i+1)+'=self.pool'+str(i+1)+'(model_pool'+str(i)+')', globals(), self.__dict__)
                elif self.model_list[i-1][0] == 'batchnormalization' or self.model_list[i-1][0] == 'layernormalization':
                    exec('model_pool'+str(i+1)+'=self.pool'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                elif self.model_list[i-1][0] == 'activation':
                    exec('model_pool'+str(i+1)+'=self.pool'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                elif self.model_list[i-1][0] == 'dropout':
                    exec('model_pool'+str(i+1)+'=self.pool'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
            elif self.model_list[i][0] == 'batchnormalization' or self.model_list[i][0] == 'layernormalization':
                if i == len(self.model_list)-1:
                    # Last layer: assign the network output directly.
                    if self.model_list[i-1][0] == 'cov':
                        outputs = eval('self.norm'+str(i+1)+'(model_conv'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'maxpooling' or self.model_list[i-1][0] == 'avepooling':
                        outputs = eval('self.norm'+str(i+1)+'(model_pool'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'batchnormalization' or self.model_list[i-1][0] == 'layernormalization':
                        outputs = eval('self.norm'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'activation':
                        outputs = eval('self.norm'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'dropout':
                        outputs = eval('self.norm'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'fc':
                        outputs = eval('self.norm'+str(i+1)+'(model_fc'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'flatten':
                        outputs = eval('self.norm'+str(i+1)+'(model_fla'+str(i)+')', globals(), self.__dict__)
                else:
                    if self.model_list[i-1][0] == 'cov':
                        exec('model_norm'+str(i+1)+'=self.norm'+str(i+1)+'(model_conv'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'maxpooling' or self.model_list[i-1][0] == 'avepooling':
                        exec('model_norm'+str(i+1)+'=self.norm'+str(i+1)+'(model_pool'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'batchnormalization' or self.model_list[i-1][0] == 'layernormalization':
                        exec('model_norm'+str(i+1)+'=self.norm'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'activation':
                        exec('model_norm'+str(i+1)+'=self.norm'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'dropout':
                        exec('model_norm'+str(i+1)+'=self.norm'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'fc':
                        exec('model_norm'+str(i+1)+'=self.norm'+str(i+1)+'(model_fc'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'flatten':
                        exec('model_norm'+str(i+1)+'=self.norm'+str(i+1)+'(model_fla'+str(i)+')', globals(), self.__dict__)
            elif self.model_list[i][0] == 'activation':
                if i == len(self.model_list)-1:
                    if self.model_list[i-1][0] == 'cov':
                        outputs = eval('self.act'+str(i+1)+'(model_conv'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'maxpooling' or self.model_list[i-1][0] == 'avepooling':
                        outputs = eval('self.act'+str(i+1)+'(model_pool'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'batchnormalization' or self.model_list[i-1][0] == 'layernormalization':
                        outputs = eval('self.act'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'activation':
                        outputs = eval('self.act'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'dropout':
                        outputs = eval('self.act'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'fc':
                        outputs = eval('self.act'+str(i+1)+'(model_fc'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'flatten':
                        outputs = eval('self.act'+str(i+1)+'(model_fla'+str(i)+')', globals(), self.__dict__)
                else:
                    if self.model_list[i-1][0] == 'cov':
                        exec('model_act'+str(i+1)+'=self.act'+str(i+1)+'(model_conv'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'maxpooling' or self.model_list[i-1][0] == 'avepooling':
                        exec('model_act'+str(i+1)+'=self.act'+str(i+1)+'(model_pool'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'batchnormalization' or self.model_list[i-1][0] == 'layernormalization':
                        exec('model_act'+str(i+1)+'=self.act'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'activation':
                        exec('model_act'+str(i+1)+'=self.act'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'dropout':
                        exec('model_act'+str(i+1)+'=self.act'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'fc':
                        exec('model_act'+str(i+1)+'=self.act'+str(i+1)+'(model_fc'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'flatten':
                        exec('model_act'+str(i+1)+'=self.act'+str(i+1)+'(model_fla'+str(i)+')', globals(), self.__dict__)
            elif self.model_list[i][0] == 'flatten':
                if self.model_list[i-1][0] == 'cov':
                    exec('model_fla'+str(i+1)+'=self.fla'+str(i+1)+'(model_conv'+str(i)+')', globals(), self.__dict__)
                elif self.model_list[i-1][0] == 'maxpooling' or self.model_list[i-1][0] == 'avepooling':
                    exec('model_fla'+str(i+1)+'=self.fla'+str(i+1)+'(model_pool'+str(i)+')', globals(), self.__dict__)
                elif self.model_list[i-1][0] == 'batchnormalization' or self.model_list[i-1][0] == 'layernormalization':
                    exec('model_fla'+str(i+1)+'=self.fla'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                elif self.model_list[i-1][0] == 'activation':
                    exec('model_fla'+str(i+1)+'=self.fla'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                elif self.model_list[i-1][0] == 'dropout':
                    # FIX: was 'model_dropout'+str(i) — the dropout branch stores
                    # its output as model_drop{i}, so the old name raised NameError.
                    exec('model_fla'+str(i+1)+'=self.fla'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
            elif self.model_list[i][0] == 'fc':
                if i == len(self.model_list)-1:
                    if self.model_list[i-1][0] == 'batchnormalization':
                        outputs = eval('self.fc'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'activation':
                        outputs = eval('self.fc'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'dropout':
                        outputs = eval('self.fc'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'fc':
                        outputs = eval('self.fc'+str(i+1)+'(model_fc'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'flatten':
                        outputs = eval('self.fc'+str(i+1)+'(model_fla'+str(i)+')', globals(), self.__dict__)
                else:
                    if self.model_list[i-1][0] == 'batchnormalization':
                        exec('model_fc'+str(i+1)+'=self.fc'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'activation':
                        exec('model_fc'+str(i+1)+'=self.fc'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'dropout':
                        exec('model_fc'+str(i+1)+'=self.fc'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'fc':
                        exec('model_fc'+str(i+1)+'=self.fc'+str(i+1)+'(model_fc'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'flatten':
                        exec('model_fc'+str(i+1)+'=self.fc'+str(i+1)+'(model_fla'+str(i)+')', globals(), self.__dict__)
            elif self.model_list[i][0] == 'dropout':
                if i == len(self.model_list)-1:
                    if self.model_list[i-1][0] == 'cov':
                        outputs = eval('self.drop'+str(i+1)+'(model_conv'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'maxpooling' or self.model_list[i-1][0] == 'avepooling':
                        outputs = eval('self.drop'+str(i+1)+'(model_pool'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'batchnormalization' or self.model_list[i-1][0] == 'layernormalization':
                        outputs = eval('self.drop'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'activation':
                        outputs = eval('self.drop'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'dropout':
                        outputs = eval('self.drop'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'fc':
                        outputs = eval('self.drop'+str(i+1)+'(model_fc'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'flatten':
                        outputs = eval('self.drop'+str(i+1)+'(model_fla'+str(i)+')', globals(), self.__dict__)
                else:
                    if self.model_list[i-1][0] == 'cov':
                        exec('model_drop'+str(i+1)+'=self.drop'+str(i+1)+'(model_conv'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'maxpooling' or self.model_list[i-1][0] == 'avepooling':
                        exec('model_drop'+str(i+1)+'=self.drop'+str(i+1)+'(model_pool'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'batchnormalization' or self.model_list[i-1][0] == 'layernormalization':
                        exec('model_drop'+str(i+1)+'=self.drop'+str(i+1)+'(model_norm'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'activation':
                        # FIX: was 'Dself.drop...' — a typo that raised NameError
                        # whenever a dropout layer followed an activation.
                        exec('model_drop'+str(i+1)+'=self.drop'+str(i+1)+'(model_act'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'dropout':
                        exec('model_drop'+str(i+1)+'=self.drop'+str(i+1)+'(model_drop'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'fc':
                        exec('model_drop'+str(i+1)+'=self.drop'+str(i+1)+'(model_fc'+str(i)+')', globals(), self.__dict__)
                    elif self.model_list[i-1][0] == 'flatten':
                        exec('model_drop'+str(i+1)+'=self.drop'+str(i+1)+'(model_fla'+str(i)+')', globals(), self.__dict__)
        return outputs