Freeze layers of an existing model in PyTorch

I want to apply additional training to a pre-trained model in PyTorch. Here is the result of print(model):

SingleStageFSDV2(
  (backbone): VirtualVoxelMixer(
    (conv_input): SparseSequential(
      (0): SubMConv3d(128, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
    (encoder_layers): SparseSequential(
      (encoder_layer1): SparseSequential(
        (0): SparseSequential(
          (0): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
          (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
          (2): ReLU(inplace=True)
        )
      )
      (encoder_layer2): SparseSequential(
        (0): SparseSequential(
          (0): SparseConv3d(64, 64, kernel_size=[3, 3, 3], stride=[2, 2, 2], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
          (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
          (2): ReLU(inplace=True)
        )
        (1): SparseSequential(
          (0): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
          (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
          (2): ReLU(inplace=True)
        )
      )
      (encoder_layer3): SparseSequential(
        (0): SparseSequential(
          (0): SparseConv3d(64, 64, kernel_size=[3, 3, 3], stride=[2, 2, 2], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
          (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
          (2): ReLU(inplace=True)
        )
        (1): SparseSequential(
          (0): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
          (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
          (2): ReLU(inplace=True)
        )
      )
    )
    (lateral_layer3): SparseBasicBlock(
      (conv1): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (bn1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (conv2): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (bn2): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (merge_layer3): SparseSequential(
      (0): SubMConv3d(128, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
    (upsample_layer3): SparseSequential(
      (0): SparseInverseConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[0, 0, 0], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
    (lateral_layer2): SparseBasicBlock(
      (conv1): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (bn1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (conv2): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (bn2): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (merge_layer2): SparseSequential(
      (0): SubMConv3d(128, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
    (upsample_layer2): SparseSequential(
      (0): SparseInverseConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[0, 0, 0], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
    (lateral_layer1): SparseBasicBlock(
      (conv1): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (bn1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (conv2): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (bn2): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (relu): ReLU(inplace=True)
    )
    (merge_layer1): SparseSequential(
      (0): SubMConv3d(128, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
    (upsample_layer1): SparseSequential(
      (0): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
    (conv_out): SparseSequential(
      (0): SubMConv3d(64, 128, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[0, 0, 0], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
      (1): NaiveSyncBatchNorm1d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
  )
  (bbox_head): FSDV2Head(
    (loss_center): SmoothL1Loss()
    (loss_size): SmoothL1Loss()
    (loss_rot): SmoothL1Loss()
    (loss_cls): FocalLoss()
    (shared_mlp): Sequential(
      (0): Sequential(
        (0): Linear(in_features=128, out_features=256, bias=False)
        (1): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
        (2): ReLU(inplace=True)
      )
      (1): Sequential(
        (0): Linear(in_features=256, out_features=256, bias=False)
        (1): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
        (2): ReLU(inplace=True)
      )
    )
    (conv_cls): None
    (conv_reg): None
    (task_heads): ModuleList(
      (0): FSDSeparateHead(
        (center): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (dim): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (rot): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=2, bias=True)
        )
        (score): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=1, bias=True)
        )
      )
      (1): FSDSeparateHead(
        (center): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (dim): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (rot): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=2, bias=True)
        )
        (score): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=4, bias=True)
        )
      )
      (2): FSDSeparateHead(
        (center): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (dim): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (rot): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=2, bias=True)
        )
        (score): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=6, bias=True)
        )
      )
      (3): FSDSeparateHead(
        (center): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (dim): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (rot): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=2, bias=True)
        )
        (score): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=9, bias=True)
        )
      )
      (4): FSDSeparateHead(
        (center): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (dim): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (rot): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=2, bias=True)
        )
        (score): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=5, bias=True)
        )
      )
      (5): FSDSeparateHead(
        (center): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (dim): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=3, bias=True)
        )
        (rot): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=2, bias=True)
        )
        (score): Sequential(
          (0): Sequential(
            (0): Linear(in_features=256, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (1): Sequential(
            (0): Linear(in_features=128, out_features=128, bias=False)
            (1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)
            (2): ReLU(inplace=True)
          )
          (2): Linear(in_features=128, out_features=1, bias=True)
        )
      )
    )
  )
  (voxel_encoder): DynamicScatterVFE(
    (scatter): None
    (vfe_layers): ModuleList(
      (0): DynamicVFELayer(
        (norm): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
        (linear): Linear(in_features=73, out_features=64, bias=False)
      )
      (1): DynamicVFELayer(
        (norm): NaiveSyncBatchNorm1d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
        (linear): Linear(in_features=128, out_features=128, bias=False)
      )
    )
    (vfe_scatter): None
    (cluster_scatter): None
  )
  (segmentor): VoteSegmentor(
    (voxel_layer): Voxelization(voxel_size=(0.2, 0.2, 0.2), point_cloud_range=[-204.8, -204.8, -3.2, 204.8, 204.8, 3.2], max_num_points=-1, max_voxels=(-1, -1))
    (voxel_encoder): DynamicScatterVFE(
      (scatter): None
      (vfe_layers): ModuleList(
        (0): DynamicVFELayer(
          (norm): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
          (linear): Linear(in_features=10, out_features=64, bias=False)
        )
        (1): DynamicVFELayer(
          (norm): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
          (linear): Linear(in_features=128, out_features=64, bias=False)
        )
      )
      (vfe_scatter): None
      (cluster_scatter): None
    )
    (middle_encoder): PseudoMiddleEncoderForSpconvFSD()
    (backbone): SimpleSparseUNet(
      (conv_input): SparseSequential(
        (0): SubMConv3d(64, 64, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
        (1): NaiveSyncBatchNorm1d(64, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
        (2): ReLU(inplace=True)
      )
      (encoder_layers): SparseSequential(
        (encoder_layer1): SparseSequential(
          (0): SparseSequential(
            (0): SubMConv3d(64, 128, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
            (1): NaiveSyncBatchNorm1d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
            (2): ReLU(inplace=True)
          )
        )
        (encoder_layer2): SparseSequential(
          (0): SparseSequential(
            (0): SparseConv3d(128, 128, kernel_size=[3, 3, 3], stride=[2, 2, 2], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
            (1): NaiveSyncBatchNorm1d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
            (2): ReLU(inplace=True)
          )
          (1): SparseSequential(
            (0): SubMConv3d(128, 128, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
            (1): NaiveSyncBatchNorm1d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
            (2): ReLU(inplace=True)
          )
        )
        (encoder_layer3): SparseSequential(
          (0): SparseSequential(
            (0): SparseConv3d(128, 128, kernel_size=[3, 3, 3], stride=[2, 2, 2], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
            (1): NaiveSyncBatchNorm1d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
            (2): ReLU(inplace=True)
          )
          (1): SparseSequential(
            (0): SubMConv3d(128, 128, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
            (1): NaiveSyncBatchNorm1d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
            (2): ReLU(inplace=True)
          )
        )
        (encoder_layer4): SparseSequential(
          (0): SparseSequential(
            (0): SparseConv3d(128, 128, kernel_size=[3, 3, 3], stride=[2, 2, 2], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
            (1): NaiveSyncBatchNorm1d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
            (2): ReLU(inplace=True)
          )
          (1): SparseSequential(
            (0): SubMConv3d(128, 128, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
            (1): NaiveSyncBatchNorm1d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
            (2): ReLU(inplace=True)
          )
          (2): SparseSequential(
            (0): SubMConv3d(128, 128, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
            (1): NaiveSyncBatchNorm1d(128, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
            (2): ReLU(inplace=True)
          )
        )
        (encoder_layer5): SparseSequential(
          (0): SparseSequential(
            (0): SparseConv3d(128, 256, kernel_size=[3, 3, 3], stride=[2, 2, 2], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
            (1): NaiveSyncBatchNorm1d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
            (2): ReLU(inplace=True)
          )
          (1): SparseSequential(
            (0): SubMConv3d(256, 256, kernel_size=[3, 3, 3], stride=[1, 1, 1], padding=[1, 1, 1], dilation=[1, 1, 1], output_padding=[0, 0, 0], bias=False, algo=ConvAlgo.MaskImplicitGemm)
            (1): NaiveSyncBatchNorm1d(256, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)
            (2): ReLU(inplace=True)
          )
... (output truncated)

What I want is to freeze all layers from the beginning of the model up to and including model.backbone. But print(model) doesn't seem to reflect the order in which modules are applied during inference: model.backbone is definitely not applied at the beginning.
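
To be concrete, the freezing itself I would do roughly like this (a minimal sketch; I'm assuming ordinary requires_grad semantics, the submodule names from the print above, and a hypothetical learning rate):

import torch

# Freeze every parameter whose qualified name places it in the backbone.
for name, param in model.named_parameters():
    if name.startswith("backbone."):
        param.requires_grad = False

# BatchNorm layers still update their running statistics in train() mode
# even with requires_grad=False, so the frozen part also goes to eval().
model.backbone.eval()

# Only hand the still-trainable parameters to the optimizer
# (lr=1e-4 is just a placeholder value).
optimizer = torch.optim.AdamW(
    (p for p in model.parameters() if p.requires_grad), lr=1e-4
)

So the mechanics are not the problem; the problem is knowing which modules to include.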

The model's source code is huge, so I want a way to automatically determine which layers have to remain unfrozen.
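
The best idea I have so far is to record the actual execution order with forward hooks and freeze everything that runs up to the last backbone call. A sketch of what I mean (sample_input is a placeholder: detection models like this usually take keyword arguments, so the real forward call will look different):

import torch

execution_order = []
handles = []

# Register a forward hook on every leaf module to record the order
# in which modules actually fire during one forward pass.
for name, module in model.named_modules():
    if next(module.children(), None) is None:  # leaf modules only
        handles.append(module.register_forward_hook(
            lambda mod, inp, out, name=name: execution_order.append(name)
        ))

with torch.no_grad():
    model(sample_input)  # placeholder: one real batch, in whatever form forward() expects

for h in handles:
    h.remove()

# Freeze everything that ran up to and including the last backbone.* module.
last = max(i for i, n in enumerate(execution_order) if n.startswith("backbone."))
for name in execution_order[:last + 1]:
    model.get_submodule(name).requires_grad_(False)

Is a hook-based approach like this reliable for determining execution order, or is there a more idiomatic way to decide which layers to leave unfrozen?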
