Hello, I was following the documentation for NVIDIA TLT (Transfer Learning Toolkit) to train a classification model to be deployed on a Jetson TX2. My development environment is Ubuntu 18.04 with an RTX 3080 and nvidia-docker2.
!tlt-train classification -e $SPECS_DIR/classification_spec.cfg -r $USER_EXPERIMENT_DIR/output -k $KEY
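For context, the -e flag points at the training spec. My classification_spec.cfg follows the standard TLT classification template along these lines (a minimal sketch only: the dataset and pretrained-model paths are placeholders, and the train_config values are essentially the template defaults, not necessarily my exact file):

model_config {
  arch: "resnet"
  n_layers: 18
  use_batch_norm: true
  all_projections: true
  input_image_size: "3,224,224"
}
train_config {
  train_dataset_path: "/workspace/tlt-experiments/data/split/train"
  val_dataset_path: "/workspace/tlt-experiments/data/split/val"
  pretrained_model_path: "/workspace/tlt-experiments/pretrained_resnet18/resnet_18.hdf5"
  optimizer: "sgd"
  batch_size_per_gpu: 64
  n_epochs: 80
  n_workers: 16
  lr_config {
    scheduler: "step"
    learning_rate: 0.006
    step_size: 10
    gamma: 0.1
  }
}
eval_config {
  eval_dataset_path: "/workspace/tlt-experiments/data/split/test"
  model_path: "/workspace/tlt-experiments/output/weights/resnet_080.tlt"
  top_k: 3
  batch_size: 256
  n_workers: 8
}

Training starts and prints the model summary below: a ResNet-18 backbone in channels-first layout. Note that all_projections: true is why every residual block in the summary gets its own conv/bn shortcut pair, not just the downsampling ones.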
Layer (type)                               Output Shape          Param #   Connected to
=======================================================================================================
input_1 (InputLayer)                       (None, 3, 224, 224)   0
conv1 (Conv2D)                             (None, 64, 112, 112)  9408      input_1[0][0]
bn_conv1 (BatchNormalization)              (None, 64, 112, 112)  256       conv1[0][0]
activation_1 (Activation)                  (None, 64, 112, 112)  0         bn_conv1[0][0]
block_1a_conv_1 (Conv2D)                   (None, 64, 56, 56)    36864     activation_1[0][0]
block_1a_bn_1 (BatchNormalization)         (None, 64, 56, 56)    256       block_1a_conv_1[0][0]
block_1a_relu_1 (Activation)               (None, 64, 56, 56)    0         block_1a_bn_1[0][0]
block_1a_conv_2 (Conv2D)                   (None, 64, 56, 56)    36864     block_1a_relu_1[0][0]
block_1a_conv_shortcut (Conv2D)            (None, 64, 56, 56)    4096      activation_1[0][0]
block_1a_bn_2 (BatchNormalization)         (None, 64, 56, 56)    256       block_1a_conv_2[0][0]
block_1a_bn_shortcut (BatchNormalization)  (None, 64, 56, 56)    256       block_1a_conv_shortcut[0][0]
add_1 (Add)                                (None, 64, 56, 56)    0         block_1a_bn_2[0][0]
                                                                           block_1a_bn_shortcut[0][0]
block_1a_relu (Activation)                 (None, 64, 56, 56)    0         add_1[0][0]
block_1b_conv_1 (Conv2D)                   (None, 64, 56, 56)    36864     block_1a_relu[0][0]
block_1b_bn_1 (BatchNormalization)         (None, 64, 56, 56)    256       block_1b_conv_1[0][0]
block_1b_relu_1 (Activation)               (None, 64, 56, 56)    0         block_1b_bn_1[0][0]
block_1b_conv_2 (Conv2D)                   (None, 64, 56, 56)    36864     block_1b_relu_1[0][0]
block_1b_conv_shortcut (Conv2D)            (None, 64, 56, 56)    4096      block_1a_relu[0][0]
block_1b_bn_2 (BatchNormalization)         (None, 64, 56, 56)    256       block_1b_conv_2[0][0]
block_1b_bn_shortcut (BatchNormalization)  (None, 64, 56, 56)    256       block_1b_conv_shortcut[0][0]
add_2 (Add)                                (None, 64, 56, 56)    0         block_1b_bn_2[0][0]
                                                                           block_1b_bn_shortcut[0][0]
block_1b_relu (Activation)                 (None, 64, 56, 56)    0         add_2[0][0]
block_2a_conv_1 (Conv2D)                   (None, 128, 28, 28)   73728     block_1b_relu[0][0]
block_2a_bn_1 (BatchNormalization)         (None, 128, 28, 28)   512       block_2a_conv_1[0][0]
block_2a_relu_1 (Activation)               (None, 128, 28, 28)   0         block_2a_bn_1[0][0]
block_2a_conv_2 (Conv2D)                   (None, 128, 28, 28)   147456    block_2a_relu_1[0][0]
block_2a_conv_shortcut (Conv2D)            (None, 128, 28, 28)   8192      block_1b_relu[0][0]
block_2a_bn_2 (BatchNormalization)         (None, 128, 28, 28)   512       block_2a_conv_2[0][0]
block_2a_bn_shortcut (BatchNormalization)  (None, 128, 28, 28)   512       block_2a_conv_shortcut[0][0]
add_3 (Add)                                (None, 128, 28, 28)   0         block_2a_bn_2[0][0]
                                                                           block_2a_bn_shortcut[0][0]
block_2a_relu (Activation)                 (None, 128, 28, 28)   0         add_3[0][0]
block_2b_conv_1 (Conv2D)                   (None, 128, 28, 28)   147456    block_2a_relu[0][0]
block_2b_bn_1 (BatchNormalization)         (None, 128, 28, 28)   512       block_2b_conv_1[0][0]
block_2b_relu_1 (Activation)               (None, 128, 28, 28)   0         block_2b_bn_1[0][0]
block_2b_conv_2 (Conv2D)                   (None, 128, 28, 28)   147456    block_2b_relu_1[0][0]
block_2b_conv_shortcut (Conv2D)            (None, 128, 28, 28)   16384     block_2a_relu[0][0]
block_2b_bn_2 (BatchNormalization)         (None, 128, 28, 28)   512       block_2b_conv_2[0][0]
block_2b_bn_shortcut (BatchNormalization)  (None, 128, 28, 28)   512       block_2b_conv_shortcut[0][0]
add_4 (Add)                                (None, 128, 28, 28)   0         block_2b_bn_2[0][0]
                                                                           block_2b_bn_shortcut[0][0]
block_2b_relu (Activation)                 (None, 128, 28, 28)   0         add_4[0][0]
block_3a_conv_1 (Conv2D)                   (None, 256, 14, 14)   294912    block_2b_relu[0][0]
block_3a_bn_1 (BatchNormalization)         (None, 256, 14, 14)   1024      block_3a_conv_1[0][0]
block_3a_relu_1 (Activation)               (None, 256, 14, 14)   0         block_3a_bn_1[0][0]
block_3a_conv_2 (Conv2D)                   (None, 256, 14, 14)   589824    block_3a_relu_1[0][0]
block_3a_conv_shortcut (Conv2D)            (None, 256, 14, 14)   32768     block_2b_relu[0][0]
block_3a_bn_2 (BatchNormalization)         (None, 256, 14, 14)   1024      block_3a_conv_2[0][0]
block_3a_bn_shortcut (BatchNormalization)  (None, 256, 14, 14)   1024      block_3a_conv_shortcut[0][0]
add_5 (Add)                                (None, 256, 14, 14)   0         block_3a_bn_2[0][0]
                                                                           block_3a_bn_shortcut[0][0]
block_3a_relu (Activation)                 (None, 256, 14, 14)   0         add_5[0][0]
block_3b_conv_1 (Conv2D)                   (None, 256, 14, 14)   589824    block_3a_relu[0][0]
block_3b_bn_1 (BatchNormalization)         (None, 256, 14, 14)   1024      block_3b_conv_1[0][0]
block_3b_relu_1 (Activation)               (None, 256, 14, 14)   0         block_3b_bn_1[0][0]
block_3b_conv_2 (Conv2D)                   (None, 256, 14, 14)   589824    block_3b_relu_1[0][0]
block_3b_conv_shortcut (Conv2D)            (None, 256, 14, 14)   65536     block_3a_relu[0][0]
block_3b_bn_2 (BatchNormalization)         (None, 256, 14, 14)   1024      block_3b_conv_2[0][0]
block_3b_bn_shortcut (BatchNormalization)  (None, 256, 14, 14)   1024      block_3b_conv_shortcut[0][0]
add_6 (Add)                                (None, 256, 14, 14)   0         block_3b_bn_2[0][0]
                                                                           block_3b_bn_shortcut[0][0]
block_3b_relu (Activation)                 (None, 256, 14, 14)   0         add_6[0][0]
block_4a_conv_1 (Conv2D)                   (None, 512, 14, 14)   1179648   block_3b_relu[0][0]
block_4a_bn_1 (BatchNormalization)         (None, 512, 14, 14)   2048      block_4a_conv_1[0][0]
block_4a_relu_1 (Activation)               (None, 512, 14, 14)   0         block_4a_bn_1[0][0]
block_4a_conv_2 (Conv2D)                   (None, 512, 14, 14)   2359296   block_4a_relu_1[0][0]
block_4a_conv_shortcut (Conv2D)            (None, 512, 14, 14)   131072    block_3b_relu[0][0]
block_4a_bn_2 (BatchNormalization)         (None, 512, 14, 14)   2048      block_4a_conv_2[0][0]
block_4a_bn_shortcut (BatchNormalization)  (None, 512, 14, 14)   2048      block_4a_conv_shortcut[0][0]
add_7 (Add)                                (None, 512, 14, 14)   0         block_4a_bn_2[0][0]
                                                                           block_4a_bn_shortcut[0][0]
block_4a_relu (Activation)                 (None, 512, 14, 14)   0         add_7[0][0]
block_4b_conv_1 (Conv2D)                   (None, 512, 14, 14)   2359296   block_4a_relu[0][0]
block_4b_bn_1 (BatchNormalization)         (None, 512, 14, 14)   2048      block_4b_conv_1[0][0]
block_4b_relu_1 (Activation)               (None, 512, 14, 14)   0         block_4b_bn_1[0][0]
block_4b_conv_2 (Conv2D)                   (None, 512, 14, 14)   2359296   block_4b_relu_1[0][0]
block_4b_conv_shortcut (Conv2D)            (None, 512, 14, 14)   262144    block_4a_relu[0][0]
block_4b_bn_2 (BatchNormalization)         (None, 512, 14, 14)   2048      block_4b_conv_2[0][0]
block_4b_bn_shortcut (BatchNormalization)  (None, 512, 14, 14)   2048      block_4b_conv_shortcut[0][0]
add_8 (Add)                                (None, 512, 14, 14)   0         block_4b_bn_2[0][0]
                                                                           block_4b_bn_shortcut[0][0]
block_4b_relu (Activation)                 (None, 512, 14, 14)   0         add_8[0][0]
avg_pool (AveragePooling2D)                (None, 512, 1, 1)     0         block_4b_relu[0][0]
flatten (Flatten)                          (None, 512)            0         avg_pool[0][0]
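As a quick sanity check, the parameter counts line up with a channels-first (NCHW) ResNet-18: conv1 is a 7x7 convolution from 3 to 64 channels with no bias, i.e. 64 * 3 * 7 * 7 = 9408 parameters, and each BatchNormalization layer carries 4 parameters per channel (4 * 64 = 256 for bn_conv1). The 1x1 projection shortcuts check out the same way, e.g. block_1a_conv_shortcut: 64 * 64 * 1 * 1 = 4096.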