@FengMu1995 the `if` branch is definitely supported.

Here is an example. Without seeing the entire code it is difficult to pinpoint the issue. I suspect that some elements are being switched between GPU and CPU, and since quantization does not work on the GPU it throws an error.

```
class random_model(nn.Module):
    """Toy two-branch model: the branch is chosen per call in ``forward``.

    Both branches map a (batch, 100) float input to a (batch, 1) output;
    ``model1`` is a deeper 3-layer MLP, ``model2`` a shallower 2-layer one.
    """

    def __init__(self):
        super().__init__()
        # Deeper branch: 100 -> 10 -> 4 -> 1, BatchNorm + ReLU between layers.
        self.model1 = nn.Sequential(
            nn.Linear(100, 10),
            nn.BatchNorm1d(10),
            nn.ReLU(),
            nn.Linear(10, 4),
            nn.BatchNorm1d(4),
            nn.ReLU(),
            nn.Linear(4, 1),
        )
        # Shallower branch: 100 -> 10 -> 1.
        self.model2 = nn.Sequential(
            nn.Linear(100, 10),
            nn.BatchNorm1d(10),
            nn.ReLU(),
            nn.Linear(10, 1),
        )

    def forward(self, x, flag_condition=True):
        """Return ``model1(x)`` when *flag_condition* is truthy, else ``model2(x)``.

        Truthiness test rather than ``== True`` so any truthy flag value
        (not just the ``True`` singleton) selects the first branch.
        """
        if flag_condition:
            return self.model1(x)
        return self.model2(x)
# Synthetic data: 100 samples, 100 features, binary targets as floats.
X = torch.rand(100, 100)
y = torch.randint(2, (100,)).type(torch.FloatTensor)

model = random_model()
criterion = nn.MSELoss()
num_epochs = 100
learning_rate = 1e-3  # was defined but unused; now actually fed to SGD
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

for cur_epoch in range(num_epochs):
    optimizer.zero_grad()
    # Alternate between the two branches every other epoch.
    output = model(X, flag_condition=(cur_epoch % 2 == 0))
    # MSELoss signature is criterion(input, target); squeeze (100, 1) -> (100,)
    # so it matches y's shape — otherwise broadcasting silently produces a
    # (100, 100) element-wise loss and a wrong gradient.
    loss = criterion(output.squeeze(1), y)
    loss.backward()
    optimizer.step()
    print(f"Cur Epoch {cur_epoch} loss is {loss.item()}")
```