How to output the Low-level IR instructions to a file in Glow?

I wish to generate the low-level IR and write it to a file for a specific test such as mnist.
I found IRGen.cpp, but there seems to be no function that can export or save the low-level IR.
Is there any chance for me to do so?

The low-level IR may look like:

declare {
  %input = weight float<8 x 28 x 28 x 1>, broadcast, 0.0
  %filter = weight float<16 x 5 x 5 x 1>, xavier, 25.0
  %filter0 = weight float<16>, broadcast, 0.100
  %weights = weight float<10 x 144>, xavier, 144.0
  %bias = weight float<10>, broadcast, 0.100
  %selected = weight index<8 x 1>
  ...
  %result = weight float<8 x 10>
}

program {
  %allo = alloc float<8 x 28 x 28 x 16>
  %conv = convolution [5 1 2 16] @out %allo, @in %input, @in %filter3, @in %bias0
  %allo0 = alloc float<8 x 28 x 28 x 16>
  %relu = relu @out %allo0, @in %allo
  %allo1 = alloc index<8 x 9 x 9 x 16 x 2>
  %allo2 = alloc float<8 x 9 x 9 x 16>
  %pool = pool max [3 3 0] @out %allo2, @in %allo0, @inout %allo1
  ...
  %deal6 = dealloc @out %allo6
  %deal7 = dealloc @out %allo7
  %deal8 = dealloc @out %allo8
  %deal9 = dealloc @out %allo9
}

Thank you so much!

You can use the command line option -dump-ir, which will dump the low-level IR after all optimizations have been applied.

Should I use this command line option while using cmake to compile?

No. It’s built into the IR optimizer as a command line option when running different binaries. For example you can add it to the command line when running the image-classifier if you’re running mnist.

I got it. Thank you so much!

Hi, @Vyronas I also need to dump the low-level optimized IR to a file. Can you help in this regard? I've tried -dump-ir, but it is not generating any IR.

I have used -dump-ir to generate the low-level IR for resnet34 and got the output below. It looks different from the example given on GitHub, so can you please confirm the correctness of this low-level IR?

declare {
  %A346 = WeightVar float<64> const // size: 256 // Users: @in 3
  %A349 = WeightVar float<64> const // size: 256 // Users: @in 13
  %A352 = WeightVar float<64> const // size: 256 // Users: @in 18
  %A355 = WeightVar float<64> const // size: 256 // Users: @in 24
  %A358 = WeightVar float<64> const // size: 256 // Users: @in 27
  %A361 = WeightVar float<64> const // size: 256 // Users: @in 33
  %A364 = WeightVar float<64> const // size: 256 // Users: @in 36
  %A367 = WeightVar float<128> const // size: 512 // Users: @in 43
  %A370 = WeightVar float<128> const // size: 512 // Users: @in 48
  %A373 = WeightVar float<128> const // size: 512 // Users: @in 51
  %A376 = WeightVar float<128> const // size: 512 // Users: @in 57
  %A379 = WeightVar float<128> const // size: 512 // Users: @in 60
  %A382 = WeightVar float<128> const // size: 512 // Users: @in 66
  %A385 = WeightVar float<128> const // size: 512 // Users: @in 69
  %A388 = WeightVar float<128> const // size: 512 // Users: @in 75
  %A391 = WeightVar float<128> const // size: 512 // Users: @in 78
  %A394 = WeightVar float<256> const // size: 1024 // Users: @in 85
  %A397 = WeightVar float<256> const // size: 1024 // Users: @in 90
  %A400 = WeightVar float<256> const // size: 1024 // Users: @in 93
  %A403 = WeightVar float<256> const // size: 1024 // Users: @in 99
  %A406 = WeightVar float<256> const // size: 1024 // Users: @in 102
  %A409 = WeightVar float<256> const // size: 1024 // Users: @in 108
  %A412 = WeightVar float<256> const // size: 1024 // Users: @in 111
  %A415 = WeightVar float<256> const // size: 1024 // Users: @in 117
  %A418 = WeightVar float<256> const // size: 1024 // Users: @in 120
  %A421 = WeightVar float<256> const // size: 1024 // Users: @in 126
  %A424 = WeightVar float<256> const // size: 1024 // Users: @in 129
  %A427 = WeightVar float<256> const // size: 1024 // Users: @in 135
  %A430 = WeightVar float<256> const // size: 1024 // Users: @in 138
  %A433 = WeightVar float<512> const // size: 2048 // Users: @in 145
  %A436 = WeightVar float<512> const // size: 2048 // Users: @in 150
  %A439 = WeightVar float<512> const // size: 2048 // Users: @in 153
  %A442 = WeightVar float<512> const // size: 2048 // Users: @in 159
  %A445 = WeightVar float<512> const // size: 2048 // Users: @in 162
  %A448 = WeightVar float<512> const // size: 2048 // Users: @in 168
  %A451 = WeightVar float<512> const // size: 2048 // Users: @in 171
  %learned_217 = WeightVar float<1000> const // size: 4000 // Users: @in 181
  %A372__1 = WeightVar float<128 x 1 x 1 x 64> const // size: 32768 // Users: @in 51
  %A399__1 = WeightVar float<256 x 1 x 1 x 128> const // size: 131072 // Users: @in 93
  %A438__1 = WeightVar float<512 x 1 x 1 x 256> const // size: 524288 // Users: @in 153
  %A345__1 = WeightVar float<64 x 7 x 7 x 3> const // size: 37632 // Users: @in 3
  %A348__1 = WeightVar float<64 x 3 x 3 x 64> const // size: 147456 // Users: @in 13
  %A351__1 = WeightVar float<64 x 3 x 3 x 64> const // size: 147456 // Users: @in 18
  %A354__1 = WeightVar float<64 x 3 x 3 x 64> const // size: 147456 // Users: @in 24
  %A357__1 = WeightVar float<64 x 3 x 3 x 64> const // size: 147456 // Users: @in 27
  %A360__1 = WeightVar float<64 x 3 x 3 x 64> const // size: 147456 // Users: @in 33
  %A363__1 = WeightVar float<64 x 3 x 3 x 64> const // size: 147456 // Users: @in 36
  %A366__1 = WeightVar float<128 x 3 x 3 x 64> const // size: 294912 // Users: @in 43
  %A369__1 = WeightVar float<128 x 3 x 3 x 128> const // size: 589824 // Users: @in 48
  %A375__1 = WeightVar float<128 x 3 x 3 x 128> const // size: 589824 // Users: @in 57
  %A378__1 = WeightVar float<128 x 3 x 3 x 128> const // size: 589824 // Users: @in 60
  %A381__1 = WeightVar float<128 x 3 x 3 x 128> const // size: 589824 // Users: @in 66
  %A384__1 = WeightVar float<128 x 3 x 3 x 128> const // size: 589824 // Users: @in 69
  %A387__1 = WeightVar float<128 x 3 x 3 x 128> const // size: 589824 // Users: @in 75
  %A390__1 = WeightVar float<128 x 3 x 3 x 128> const // size: 589824 // Users: @in 78
  %A393__1 = WeightVar float<256 x 3 x 3 x 128> const // size: 1179648 // Users: @in 85
  %A396__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 90
  %A402__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 99
  %A405__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 102
  %A408__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 108
  %A411__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 111
  %A414__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 117
  %A417__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 120
  %A420__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 126
  %A423__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 129
  %A426__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 135
  %A429__1 = WeightVar float<256 x 3 x 3 x 256> const // size: 2359296 // Users: @in 138
  %A432__1 = WeightVar float<512 x 3 x 3 x 256> const // size: 4718592 // Users: @in 145
  %A435__1 = WeightVar float<512 x 3 x 3 x 512> const // size: 9437184 // Users: @in 150
  %A441__1 = WeightVar float<512 x 3 x 3 x 512> const // size: 9437184 // Users: @in 159
  %A444__1 = WeightVar float<512 x 3 x 3 x 512> const // size: 9437184 // Users: @in 162
  %A447__1 = WeightVar float<512 x 3 x 3 x 512> const // size: 9437184 // Users: @in 168
  %A450__1 = WeightVar float<512 x 3 x 3 x 512> const // size: 9437184 // Users: @in 171
  %learned_216__1 = WeightVar float<512 x 1000> const // size: 2048000 // Users: @in 181
  %data = WeightVar float<1 x 3 x 224 x 224> mutable // size: 602112 // Users: @in 1
  %output = WeightVar float<1 x 1000> mutable // size: 4000 // Users: @out 181

  ; size = 87762752 bytes
}

code {
  0 %Conv_0__1_res = allocactivation  { Ty: float<1 x 224 x 224 x 3>} // size: 602112 // Users: @out 4, @in 3, @out 1
  1 %Conv_0__1 = transpose @out %Conv_0__1_res, @in %data { Shuffle: [0, 2, 3, 1]}
  2 %Conv_0__2_res = allocactivation  { Ty: float<1 x 112 x 112 x 64>} // size: 3211264 // Users: @in 10, @out 7, @out 11, @in 7, @out 3
  3 %Conv_0__2 = convolution @out %Conv_0__2_res, @in %Conv_0__1_res, @in %A345__1, @in %A346 { Kernels: [7, 7], Strides: [2, 2], Pads: [3, 3, 3, 3], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  4 %dealloc_Conv_0__1_res = deallocactivation @out %Conv_0__1_res // size: 602112
  5 %zero_res = allocactivation  { Ty: float<1 x 112 x 112 x 64>} // size: 3211264 // Users: @out 8, @in 7, @out 6
  6 %zero = splat @out %zero_res { Value: 0.000000e+00}
  7 %Relu_1__1_max__1 = elementmax @out %Conv_0__2_res, @in %Conv_0__2_res, @in %zero_res
  8 %dealloc_zero_res = deallocactivation @out %zero_res // size: 3211264
  9 %MaxPool_2__1_res = allocactivation  { Ty: float<1 x 56 x 56 x 64>} // size: 802816 // Users: @out 10, @out 21, @in 20, @in 13
  10 %MaxPool_2__2 = maxpool @out %MaxPool_2__1_res, @in %Conv_0__2_res { Kernels: [3, 3], Strides: [2, 2], Pads: [1, 1, 1, 1], Layout: 0}
  11 %dealloc_Conv_0__2_res = deallocactivation @out %Conv_0__2_res // size: 3211264
  12 %Conv_3__2_res = allocactivation  { Ty: float<1 x 56 x 56 x 64>} // size: 802816 // Users: @in 18, @out 16, @out 19, @in 16, @out 13
  13 %Conv_3__2 = convolution @out %Conv_3__2_res, @in %MaxPool_2__1_res, @in %A348__1, @in %A349 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  14 %zero__1_res = allocactivation  { Ty: float<1 x 56 x 56 x 64>} // size: 802816 // Users: @out 41, @in 40, @in 34, @in 31, @in 25, @in 22, @in 16, @out 15
  15 %zero__1 = splat @out %zero__1_res { Value: 0.000000e+00}
  16 %Relu_4__1_max__1 = elementmax @out %Conv_3__2_res, @in %Conv_3__2_res, @in %zero__1_res
  17 %Add_6__1_res = allocactivation  { Ty: float<1 x 56 x 56 x 64>} // size: 802816 // Users: @in 20, @out 18, @in 29, @in 24, @out 22, @out 30, @in 22, @out 20
  18 %Conv_5__2 = convolution @out %Add_6__1_res, @in %Conv_3__2_res, @in %A351__1, @in %A352 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  19 %dealloc_Conv_3__2_res = deallocactivation @out %Conv_3__2_res // size: 802816
  20 %Add_6__1 = elementadd @out %Add_6__1_res, @in %Add_6__1_res, @in %MaxPool_2__1_res
  21 %dealloc_MaxPool_2__1_res = deallocactivation @out %MaxPool_2__1_res // size: 802816
  22 %Relu_7__1_max__1 = elementmax @out %Add_6__1_res, @in %Add_6__1_res, @in %zero__1_res
  23 %Conv_8__2_res = allocactivation  { Ty: float<1 x 56 x 56 x 64>} // size: 802816 // Users: @in 27, @out 25, @out 28, @in 25, @out 24
  24 %Conv_8__2 = convolution @out %Conv_8__2_res, @in %Add_6__1_res, @in %A354__1, @in %A355 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  25 %Relu_9__1_max__1 = elementmax @out %Conv_8__2_res, @in %Conv_8__2_res, @in %zero__1_res
  26 %Add_11__1_res = allocactivation  { Ty: float<1 x 56 x 56 x 64>} // size: 802816 // Users: @in 29, @out 27, @in 38, @in 33, @out 31, @out 39, @in 31, @out 29
  27 %Conv_10__2 = convolution @out %Add_11__1_res, @in %Conv_8__2_res, @in %A357__1, @in %A358 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  28 %dealloc_Conv_8__2_res = deallocactivation @out %Conv_8__2_res // size: 802816
  29 %Add_11__1 = elementadd @out %Add_11__1_res, @in %Add_11__1_res, @in %Add_6__1_res
  30 %dealloc_Add_6__1_res = deallocactivation @out %Add_6__1_res // size: 802816
  31 %Relu_12__1_max__1 = elementmax @out %Add_11__1_res, @in %Add_11__1_res, @in %zero__1_res
  32 %Conv_13__2_res = allocactivation  { Ty: float<1 x 56 x 56 x 64>} // size: 802816 // Users: @in 36, @out 34, @out 37, @in 34, @out 33
  33 %Conv_13__2 = convolution @out %Conv_13__2_res, @in %Add_11__1_res, @in %A360__1, @in %A361 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  34 %Relu_14__1_max__1 = elementmax @out %Conv_13__2_res, @in %Conv_13__2_res, @in %zero__1_res
  35 %Add_16__1_res = allocactivation  { Ty: float<1 x 56 x 56 x 64>} // size: 802816 // Users: @in 38, @out 36, @in 51, @in 43, @out 40, @out 52, @in 40, @out 38
  36 %Conv_15__2 = convolution @out %Add_16__1_res, @in %Conv_13__2_res, @in %A363__1, @in %A364 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  37 %dealloc_Conv_13__2_res = deallocactivation @out %Conv_13__2_res // size: 802816
  38 %Add_16__1 = elementadd @out %Add_16__1_res, @in %Add_16__1_res, @in %Add_11__1_res
  39 %dealloc_Add_11__1_res = deallocactivation @out %Add_11__1_res // size: 802816
  40 %Relu_17__1_max__1 = elementmax @out %Add_16__1_res, @in %Add_16__1_res, @in %zero__1_res
  41 %dealloc_zero__1_res = deallocactivation @out %zero__1_res // size: 802816
  42 %Conv_18__2_res = allocactivation  { Ty: float<1 x 28 x 28 x 128>} // size: 401408 // Users: @in 48, @out 46, @out 49, @in 46, @out 43
  43 %Conv_18__2 = convolution @out %Conv_18__2_res, @in %Add_16__1_res, @in %A366__1, @in %A367 { Kernels: [3, 3], Strides: [2, 2], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  44 %zero__7_res = allocactivation  { Ty: float<1 x 28 x 28 x 128>} // size: 401408 // Users: @out 83, @in 82, @in 76, @in 73, @in 67, @in 64, @in 58, @in 55, @in 46, @out 45
  45 %zero__7 = splat @out %zero__7_res { Value: 0.000000e+00}
  46 %Relu_19__1_max__1 = elementmax @out %Conv_18__2_res, @in %Conv_18__2_res, @in %zero__7_res
  47 %Add_22__1_res = allocactivation  { Ty: float<1 x 28 x 28 x 128>} // size: 401408 // Users: @in 53, @out 48, @in 62, @in 57, @out 55, @out 63, @in 55, @out 53
  48 %Conv_20__2 = convolution @out %Add_22__1_res, @in %Conv_18__2_res, @in %A369__1, @in %A370 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  49 %dealloc_Conv_18__2_res = deallocactivation @out %Conv_18__2_res // size: 401408
  50 %Conv_21__2_res = allocactivation  { Ty: float<1 x 28 x 28 x 128>} // size: 401408 // Users: @out 54, @in 53, @out 51
  51 %Conv_21__2 = convolution @out %Conv_21__2_res, @in %Add_16__1_res, @in %A372__1, @in %A373 { Kernels: [1, 1], Strides: [2, 2], Pads: [0, 0, 0, 0], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  52 %dealloc_Add_16__1_res = deallocactivation @out %Add_16__1_res // size: 802816
  53 %Add_22__1 = elementadd @out %Add_22__1_res, @in %Add_22__1_res, @in %Conv_21__2_res
  54 %dealloc_Conv_21__2_res = deallocactivation @out %Conv_21__2_res // size: 401408
  55 %Relu_23__1_max__1 = elementmax @out %Add_22__1_res, @in %Add_22__1_res, @in %zero__7_res
  56 %Conv_24__2_res = allocactivation  { Ty: float<1 x 28 x 28 x 128>} // size: 401408 // Users: @in 60, @out 58, @out 61, @in 58, @out 57
  57 %Conv_24__2 = convolution @out %Conv_24__2_res, @in %Add_22__1_res, @in %A375__1, @in %A376 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  58 %Relu_25__1_max__1 = elementmax @out %Conv_24__2_res, @in %Conv_24__2_res, @in %zero__7_res
  59 %Add_27__1_res = allocactivation  { Ty: float<1 x 28 x 28 x 128>} // size: 401408 // Users: @in 62, @out 60, @in 71, @in 66, @out 64, @out 72, @in 64, @out 62
  60 %Conv_26__2 = convolution @out %Add_27__1_res, @in %Conv_24__2_res, @in %A378__1, @in %A379 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  61 %dealloc_Conv_24__2_res = deallocactivation @out %Conv_24__2_res // size: 401408
  62 %Add_27__1 = elementadd @out %Add_27__1_res, @in %Add_27__1_res, @in %Add_22__1_res
  63 %dealloc_Add_22__1_res = deallocactivation @out %Add_22__1_res // size: 401408
  64 %Relu_28__1_max__1 = elementmax @out %Add_27__1_res, @in %Add_27__1_res, @in %zero__7_res
  65 %Conv_29__2_res = allocactivation  { Ty: float<1 x 28 x 28 x 128>} // size: 401408 // Users: @in 69, @out 67, @out 70, @in 67, @out 66
  66 %Conv_29__2 = convolution @out %Conv_29__2_res, @in %Add_27__1_res, @in %A381__1, @in %A382 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  67 %Relu_30__1_max__1 = elementmax @out %Conv_29__2_res, @in %Conv_29__2_res, @in %zero__7_res
  68 %Add_32__1_res = allocactivation  { Ty: float<1 x 28 x 28 x 128>} // size: 401408 // Users: @in 71, @out 69, @in 80, @in 75, @out 73, @out 81, @in 73, @out 71
  69 %Conv_31__2 = convolution @out %Add_32__1_res, @in %Conv_29__2_res, @in %A384__1, @in %A385 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  70 %dealloc_Conv_29__2_res = deallocactivation @out %Conv_29__2_res // size: 401408
  71 %Add_32__1 = elementadd @out %Add_32__1_res, @in %Add_32__1_res, @in %Add_27__1_res
  72 %dealloc_Add_27__1_res = deallocactivation @out %Add_27__1_res // size: 401408
  73 %Relu_33__1_max__1 = elementmax @out %Add_32__1_res, @in %Add_32__1_res, @in %zero__7_res
  74 %Conv_34__2_res = allocactivation  { Ty: float<1 x 28 x 28 x 128>} // size: 401408 // Users: @in 78, @out 76, @out 79, @in 76, @out 75
  75 %Conv_34__2 = convolution @out %Conv_34__2_res, @in %Add_32__1_res, @in %A387__1, @in %A388 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  76 %Relu_35__1_max__1 = elementmax @out %Conv_34__2_res, @in %Conv_34__2_res, @in %zero__7_res
  77 %Add_37__1_res = allocactivation  { Ty: float<1 x 28 x 28 x 128>} // size: 401408 // Users: @in 80, @out 78, @in 93, @in 85, @out 82, @out 94, @in 82, @out 80
  78 %Conv_36__2 = convolution @out %Add_37__1_res, @in %Conv_34__2_res, @in %A390__1, @in %A391 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  79 %dealloc_Conv_34__2_res = deallocactivation @out %Conv_34__2_res // size: 401408
  80 %Add_37__1 = elementadd @out %Add_37__1_res, @in %Add_37__1_res, @in %Add_32__1_res
  81 %dealloc_Add_32__1_res = deallocactivation @out %Add_32__1_res // size: 401408
  82 %Relu_38__1_max__1 = elementmax @out %Add_37__1_res, @in %Add_37__1_res, @in %zero__7_res
  83 %dealloc_zero__7_res = deallocactivation @out %zero__7_res // size: 401408
  84 %Conv_39__2_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 90, @out 88, @out 91, @in 88, @out 85
  85 %Conv_39__2 = convolution @out %Conv_39__2_res, @in %Add_37__1_res, @in %A393__1, @in %A394 { Kernels: [3, 3], Strides: [2, 2], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  86 %zero__15_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @out 143, @in 142, @in 136, @in 133, @in 127, @in 124, @in 118, @in 115, @in 109, @in 106, @in 100, @in 97, @in 88, @out 87
  87 %zero__15 = splat @out %zero__15_res { Value: 0.000000e+00}
  88 %Relu_40__1_max__1 = elementmax @out %Conv_39__2_res, @in %Conv_39__2_res, @in %zero__15_res
  89 %Add_43__1_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 95, @out 90, @in 104, @in 99, @out 97, @out 105, @in 97, @out 95
  90 %Conv_41__2 = convolution @out %Add_43__1_res, @in %Conv_39__2_res, @in %A396__1, @in %A397 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  91 %dealloc_Conv_39__2_res = deallocactivation @out %Conv_39__2_res // size: 200704
  92 %Conv_42__2_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @out 96, @in 95, @out 93
  93 %Conv_42__2 = convolution @out %Conv_42__2_res, @in %Add_37__1_res, @in %A399__1, @in %A400 { Kernels: [1, 1], Strides: [2, 2], Pads: [0, 0, 0, 0], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  94 %dealloc_Add_37__1_res = deallocactivation @out %Add_37__1_res // size: 401408
  95 %Add_43__1 = elementadd @out %Add_43__1_res, @in %Add_43__1_res, @in %Conv_42__2_res
  96 %dealloc_Conv_42__2_res = deallocactivation @out %Conv_42__2_res // size: 200704
  97 %Relu_44__1_max__1 = elementmax @out %Add_43__1_res, @in %Add_43__1_res, @in %zero__15_res
  98 %Conv_45__2_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 102, @out 100, @out 103, @in 100, @out 99
  99 %Conv_45__2 = convolution @out %Conv_45__2_res, @in %Add_43__1_res, @in %A402__1, @in %A403 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  100 %Relu_46__1_max__1 = elementmax @out %Conv_45__2_res, @in %Conv_45__2_res, @in %zero__15_res
  101 %Add_48__1_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 104, @out 102, @in 113, @in 108, @out 106, @out 114, @in 106, @out 104
  102 %Conv_47__2 = convolution @out %Add_48__1_res, @in %Conv_45__2_res, @in %A405__1, @in %A406 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  103 %dealloc_Conv_45__2_res = deallocactivation @out %Conv_45__2_res // size: 200704
  104 %Add_48__1 = elementadd @out %Add_48__1_res, @in %Add_48__1_res, @in %Add_43__1_res
  105 %dealloc_Add_43__1_res = deallocactivation @out %Add_43__1_res // size: 200704
  106 %Relu_49__1_max__1 = elementmax @out %Add_48__1_res, @in %Add_48__1_res, @in %zero__15_res
  107 %Conv_50__2_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 111, @out 109, @out 112, @in 109, @out 108
  108 %Conv_50__2 = convolution @out %Conv_50__2_res, @in %Add_48__1_res, @in %A408__1, @in %A409 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  109 %Relu_51__1_max__1 = elementmax @out %Conv_50__2_res, @in %Conv_50__2_res, @in %zero__15_res
  110 %Add_53__1_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 113, @out 111, @in 122, @in 117, @out 115, @out 123, @in 115, @out 113
  111 %Conv_52__2 = convolution @out %Add_53__1_res, @in %Conv_50__2_res, @in %A411__1, @in %A412 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  112 %dealloc_Conv_50__2_res = deallocactivation @out %Conv_50__2_res // size: 200704
  113 %Add_53__1 = elementadd @out %Add_53__1_res, @in %Add_53__1_res, @in %Add_48__1_res
  114 %dealloc_Add_48__1_res = deallocactivation @out %Add_48__1_res // size: 200704
  115 %Relu_54__1_max__1 = elementmax @out %Add_53__1_res, @in %Add_53__1_res, @in %zero__15_res
  116 %Conv_55__2_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 120, @out 118, @out 121, @in 118, @out 117
  117 %Conv_55__2 = convolution @out %Conv_55__2_res, @in %Add_53__1_res, @in %A414__1, @in %A415 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  118 %Relu_56__1_max__1 = elementmax @out %Conv_55__2_res, @in %Conv_55__2_res, @in %zero__15_res
  119 %Add_58__1_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 122, @out 120, @in 131, @in 126, @out 124, @out 132, @in 124, @out 122
  120 %Conv_57__2 = convolution @out %Add_58__1_res, @in %Conv_55__2_res, @in %A417__1, @in %A418 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  121 %dealloc_Conv_55__2_res = deallocactivation @out %Conv_55__2_res // size: 200704
  122 %Add_58__1 = elementadd @out %Add_58__1_res, @in %Add_58__1_res, @in %Add_53__1_res
  123 %dealloc_Add_53__1_res = deallocactivation @out %Add_53__1_res // size: 200704
  124 %Relu_59__1_max__1 = elementmax @out %Add_58__1_res, @in %Add_58__1_res, @in %zero__15_res
  125 %Conv_60__2_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 129, @out 127, @out 130, @in 127, @out 126
  126 %Conv_60__2 = convolution @out %Conv_60__2_res, @in %Add_58__1_res, @in %A420__1, @in %A421 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  127 %Relu_61__1_max__1 = elementmax @out %Conv_60__2_res, @in %Conv_60__2_res, @in %zero__15_res
  128 %Add_63__1_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 131, @out 129, @in 140, @in 135, @out 133, @out 141, @in 133, @out 131
  129 %Conv_62__2 = convolution @out %Add_63__1_res, @in %Conv_60__2_res, @in %A423__1, @in %A424 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  130 %dealloc_Conv_60__2_res = deallocactivation @out %Conv_60__2_res // size: 200704
  131 %Add_63__1 = elementadd @out %Add_63__1_res, @in %Add_63__1_res, @in %Add_58__1_res
  132 %dealloc_Add_58__1_res = deallocactivation @out %Add_58__1_res // size: 200704
  133 %Relu_64__1_max__1 = elementmax @out %Add_63__1_res, @in %Add_63__1_res, @in %zero__15_res
  134 %Conv_65__2_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 138, @out 136, @out 139, @in 136, @out 135
  135 %Conv_65__2 = convolution @out %Conv_65__2_res, @in %Add_63__1_res, @in %A426__1, @in %A427 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  136 %Relu_66__1_max__1 = elementmax @out %Conv_65__2_res, @in %Conv_65__2_res, @in %zero__15_res
  137 %Add_68__1_res = allocactivation  { Ty: float<1 x 14 x 14 x 256>} // size: 200704 // Users: @in 140, @out 138, @in 153, @in 145, @out 142, @out 154, @in 142, @out 140
  138 %Conv_67__2 = convolution @out %Add_68__1_res, @in %Conv_65__2_res, @in %A429__1, @in %A430 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  139 %dealloc_Conv_65__2_res = deallocactivation @out %Conv_65__2_res // size: 200704
  140 %Add_68__1 = elementadd @out %Add_68__1_res, @in %Add_68__1_res, @in %Add_63__1_res
  141 %dealloc_Add_63__1_res = deallocactivation @out %Add_63__1_res // size: 200704
  142 %Relu_69__1_max__1 = elementmax @out %Add_68__1_res, @in %Add_68__1_res, @in %zero__15_res
  143 %dealloc_zero__15_res = deallocactivation @out %zero__15_res // size: 200704
  144 %Conv_70__2_res = allocactivation  { Ty: float<1 x 7 x 7 x 512>} // size: 100352 // Users: @in 150, @out 148, @out 151, @in 148, @out 145
  145 %Conv_70__2 = convolution @out %Conv_70__2_res, @in %Add_68__1_res, @in %A432__1, @in %A433 { Kernels: [3, 3], Strides: [2, 2], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  146 %zero__27_res = allocactivation  { Ty: float<1 x 7 x 7 x 512>} // size: 100352 // Users: @out 176, @in 175, @in 169, @in 166, @in 160, @in 157, @in 148, @out 147
  147 %zero__27 = splat @out %zero__27_res { Value: 0.000000e+00}
  148 %Relu_71__1_max__1 = elementmax @out %Conv_70__2_res, @in %Conv_70__2_res, @in %zero__27_res
  149 %Add_74__1_res = allocactivation  { Ty: float<1 x 7 x 7 x 512>} // size: 100352 // Users: @in 155, @out 150, @in 164, @in 159, @out 157, @out 165, @in 157, @out 155
  150 %Conv_72__2 = convolution @out %Add_74__1_res, @in %Conv_70__2_res, @in %A435__1, @in %A436 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  151 %dealloc_Conv_70__2_res = deallocactivation @out %Conv_70__2_res // size: 100352
  152 %Conv_73__2_res = allocactivation  { Ty: float<1 x 7 x 7 x 512>} // size: 100352 // Users: @out 156, @in 155, @out 153
  153 %Conv_73__2 = convolution @out %Conv_73__2_res, @in %Add_68__1_res, @in %A438__1, @in %A439 { Kernels: [1, 1], Strides: [2, 2], Pads: [0, 0, 0, 0], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  154 %dealloc_Add_68__1_res = deallocactivation @out %Add_68__1_res // size: 200704
  155 %Add_74__1 = elementadd @out %Add_74__1_res, @in %Add_74__1_res, @in %Conv_73__2_res
  156 %dealloc_Conv_73__2_res = deallocactivation @out %Conv_73__2_res // size: 100352
  157 %Relu_75__1_max__1 = elementmax @out %Add_74__1_res, @in %Add_74__1_res, @in %zero__27_res
  158 %Conv_76__2_res = allocactivation  { Ty: float<1 x 7 x 7 x 512>} // size: 100352 // Users: @in 162, @out 160, @out 163, @in 160, @out 159
  159 %Conv_76__2 = convolution @out %Conv_76__2_res, @in %Add_74__1_res, @in %A441__1, @in %A442 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  160 %Relu_77__1_max__1 = elementmax @out %Conv_76__2_res, @in %Conv_76__2_res, @in %zero__27_res
  161 %Add_79__1_res = allocactivation  { Ty: float<1 x 7 x 7 x 512>} // size: 100352 // Users: @in 164, @out 162, @in 173, @in 168, @out 166, @out 174, @in 166, @out 164
  162 %Conv_78__2 = convolution @out %Add_79__1_res, @in %Conv_76__2_res, @in %A444__1, @in %A445 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  163 %dealloc_Conv_76__2_res = deallocactivation @out %Conv_76__2_res // size: 100352
  164 %Add_79__1 = elementadd @out %Add_79__1_res, @in %Add_79__1_res, @in %Add_74__1_res
  165 %dealloc_Add_74__1_res = deallocactivation @out %Add_74__1_res // size: 100352
  166 %Relu_80__1_max__1 = elementmax @out %Add_79__1_res, @in %Add_79__1_res, @in %zero__27_res
  167 %Conv_81__2_res = allocactivation  { Ty: float<1 x 7 x 7 x 512>} // size: 100352 // Users: @in 171, @out 169, @out 172, @in 169, @out 168
  168 %Conv_81__2 = convolution @out %Conv_81__2_res, @in %Add_79__1_res, @in %A447__1, @in %A448 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  169 %Relu_82__1_max__1 = elementmax @out %Conv_81__2_res, @in %Conv_81__2_res, @in %zero__27_res
  170 %Add_84__1_res = allocactivation  { Ty: float<1 x 7 x 7 x 512>} // size: 100352 // Users: @in 173, @out 171, @in 178, @out 175, @out 179, @in 175, @out 173
  171 %Conv_83__2 = convolution @out %Add_84__1_res, @in %Conv_81__2_res, @in %A450__1, @in %A451 { Kernels: [3, 3], Strides: [1, 1], Pads: [1, 1, 1, 1], Group: 1, Dilation: [1, 1], Layout: NHWC, FusedActivation: NONE, FusedActivationArgs: []}
  172 %dealloc_Conv_81__2_res = deallocactivation @out %Conv_81__2_res // size: 100352
  173 %Add_84__1 = elementadd @out %Add_84__1_res, @in %Add_84__1_res, @in %Add_79__1_res
  174 %dealloc_Add_79__1_res = deallocactivation @out %Add_79__1_res // size: 100352
  175 %Relu_85__1_max__1 = elementmax @out %Add_84__1_res, @in %Add_84__1_res, @in %zero__27_res
  176 %dealloc_zero__27_res = deallocactivation @out %zero__27_res // size: 100352
  177 %GlobalAveragePool_86__1_res = allocactivation  { Ty: float<1 x 1 x 1 x 512>} // size: 2048 // Users: @in 180, @out 182, @out 178
  178 %GlobalAveragePool_86__1 = avgpool @out %GlobalAveragePool_86__1_res, @in %Add_84__1_res { Kernels: [7, 7], Strides: [1, 1], Pads: [0, 0, 0, 0], Layout: 0, CountIncludePads: 0}
  179 %dealloc_Add_84__1_res = deallocactivation @out %Add_84__1_res // size: 100352
  180 %GlobalAveragePool_86__1_res__2 = tensorview @in %GlobalAveragePool_86__1_res { Ty: float<1 x 512>, Offsets: [0, 0, 0, 0]} // Users: @in 181
  181 %Gemm_88__1 = fullyconnected @out %output, @in %GlobalAveragePool_86__1_res__2, @in %learned_216__1, @in %learned_217
  182 %dealloc_GlobalAveragePool_86__1_res = deallocactivation @out %GlobalAveragePool_86__1_res // size: 2048
}

Without looking at it in depth it looks fine. What do you see that appears different from the example on GitHub?

Hello, I had understood it in a different way; I believe it is correct. Thank you.

1 Like