DeeplabV3X(
  (backbone): SwinTransformer(
    (patch_embed): PatchEmbed(
      (proj): QuantizedConv2d(3, 96, kernel_size=(4, 4), stride=(4, 4), scale=0.06761670857667923, zero_point=67)
      (norm): QuantizedLayerNorm((96,), eps=1e-05, elementwise_affine=True)
    )
    (pos_drop): Dropout(p=0.0, inplace=False)
    (layers): ModuleList(
      (0): BasicLayer(
        (blocks): ModuleList(
          (0): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((96,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=96, out_features=288, scale=0.01362359058111906, zero_point=67, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=96, out_features=96, scale=0.0019612994510680437, zero_point=67, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): Identity()
            (norm2): QuantizedLayerNorm((96,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=96, out_features=384, scale=0.013676497153937817, zero_point=63, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=384, out_features=96, scale=0.0026818912010639906, zero_point=66, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (1): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((96,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=96, out_features=288, scale=0.013088734820485115, zero_point=66, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=96, out_features=96, scale=0.0017343172803521156, zero_point=66, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((96,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=96, out_features=384, scale=0.014216942712664604, zero_point=60, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=384, out_features=96, scale=0.002679420169442892, zero_point=63, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
        )
        (downsample): PatchMerging(
          (reduction): QuantizedLinear(in_features=384, out_features=192, scale=0.026706397533416748, zero_point=64, qscheme=torch.per_tensor_affine)
          (norm): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
        )
      )
      (1): BasicLayer(
        (blocks): ModuleList(
          (0): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((192,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=192, out_features=576, scale=0.023032117635011673, zero_point=64, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=192, out_features=192, scale=0.004315770696848631, zero_point=65, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((192,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=192, out_features=768, scale=0.020925428718328476, zero_point=65, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=768, out_features=192, scale=0.00621451623737812, zero_point=60, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (1): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((192,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=192, out_features=576, scale=0.02201320417225361, zero_point=66, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=192, out_features=192, scale=0.003921349532902241, zero_point=64, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((192,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=192, out_features=768, scale=0.020007258281111717, zero_point=64, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=768, out_features=192, scale=0.005808696616441011, zero_point=63, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
        )
        (downsample): PatchMerging(
          (reduction): QuantizedLinear(in_features=768, out_features=384, scale=0.03915118798613548, zero_point=64, qscheme=torch.per_tensor_affine)
          (norm): QuantizedLayerNorm((768,), eps=1e-05, elementwise_affine=True)
        )
      )
      (2): BasicLayer(
        (blocks): ModuleList(
          (0): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.029468612745404243, zero_point=61, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.006302351597696543, zero_point=60, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.03160104900598526, zero_point=62, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.01298795361071825, zero_point=64, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (1): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.029296405613422394, zero_point=59, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.007326021790504456, zero_point=65, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.02870587259531021, zero_point=63, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.011839921586215496, zero_point=61, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (2): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.030392281711101532, zero_point=59, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.00685079162940383, zero_point=70, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.030345004051923752, zero_point=60, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.012270520441234112, zero_point=60, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (3): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.02849041484296322, zero_point=63, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.0070808944292366505, zero_point=65, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.029457369819283485, zero_point=61, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.01272126380354166, zero_point=68, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (4): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.02990530990064144, zero_point=61, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.008007015101611614, zero_point=63, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.02981744334101677, zero_point=65, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.012656887993216515, zero_point=64, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (5): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.02932538464665413, zero_point=62, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.007566846441477537, zero_point=66, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.029996048659086227, zero_point=67, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.012462695129215717, zero_point=65, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (6): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.029887797310948372, zero_point=63, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.0074864779599010944, zero_point=66, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.030620301142334938, zero_point=61, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.0115205692127347, zero_point=64, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (7): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.030361773446202278, zero_point=62, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.007208541501313448, zero_point=63, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.028441332280635834, zero_point=65, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.012846059165894985, zero_point=57, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (8): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.029588090255856514, zero_point=63, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.006774429231882095, zero_point=68, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.029718732461333275, zero_point=63, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.011388733983039856, zero_point=65, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (9): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.02889113500714302, zero_point=62, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.00810705590993166, zero_point=58, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.028659628704190254, zero_point=63, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.011895324103534222, zero_point=63, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (10): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.030113833025097847, zero_point=62, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.007680691312998533, zero_point=68, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.08890105783939362, zero_point=60, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.012328598648309708, zero_point=64, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (11): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.02870173752307892, zero_point=64, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.008362512104213238, zero_point=57, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.03138143941760063, zero_point=67, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.011300045996904373, zero_point=63, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (12): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.028905702754855156, zero_point=64, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.007498701103031635, zero_point=62, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.02869526855647564, zero_point=65, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.012514174915850163, zero_point=67, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (13): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.02920895628631115, zero_point=65, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.007571683265268803, zero_point=61, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.029414210468530655, zero_point=63, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.012454728595912457, zero_point=57, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (14): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.03000316023826599, zero_point=63, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.007436979562044144, zero_point=61, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.032556112855672836, zero_point=65, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.012291498482227325, zero_point=59, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (15): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.02962823584675789, zero_point=64, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.008334063924849033, zero_point=60, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.02965456433594227, zero_point=59, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.01212706696242094, zero_point=67, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (16): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.028739627450704575, zero_point=66, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.007334803696721792, zero_point=62, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.028169788420200348, zero_point=65, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.011958799324929714, zero_point=64, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (17): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=384, out_features=1152, scale=0.02961859665811062, zero_point=66, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=384, out_features=384, scale=0.008223773911595345, zero_point=66, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=384, out_features=1536, scale=0.029315564781427383, zero_point=65, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=1536, out_features=384, scale=0.012360114604234695, zero_point=62, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
        )
        (downsample): PatchMerging(
          (reduction): QuantizedLinear(in_features=1536, out_features=768, scale=0.05471605435013771, zero_point=61, qscheme=torch.per_tensor_affine)
          (norm): QuantizedLayerNorm((1536,), eps=1e-05, elementwise_affine=True)
        )
      )
      (3): BasicLayer(
        (blocks): ModuleList(
          (0): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((768,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=768, out_features=2304, scale=0.041208866983652115, zero_point=65, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=768, out_features=768, scale=0.015247215516865253, zero_point=63, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((768,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=768, out_features=3072, scale=0.06584074348211288, zero_point=76, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=3072, out_features=768, scale=0.024494048207998276, zero_point=59, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
          (1): SwinTransformerBlock(
            (norm1): QuantizedLayerNorm((768,), eps=1e-05, elementwise_affine=True)
            (attn): WindowAttention(
              (qkv): QuantizedLinear(in_features=768, out_features=2304, scale=0.04043778032064438, zero_point=59, qscheme=torch.per_tensor_affine)
              (attn_drop): Dropout(p=0.0, inplace=False)
              (proj): QuantizedLinear(in_features=768, out_features=768, scale=0.01638011448085308, zero_point=56, qscheme=torch.per_tensor_affine)
              (proj_drop): Dropout(p=0.0, inplace=False)
              (softmax): Softmax(dim=-1)
            )
            (drop_path): DropPath()
            (norm2): QuantizedLayerNorm((768,), eps=1e-05, elementwise_affine=True)
            (mlp): Mlp(
              (fc1): QuantizedLinear(in_features=768, out_features=3072, scale=0.042017657309770584, zero_point=64, qscheme=torch.per_tensor_affine)
              (act): GELU()
              (fc2): QuantizedLinear(in_features=3072, out_features=768, scale=0.025227313861250877, zero_point=64, qscheme=torch.per_tensor_affine)
              (drop): Dropout(p=0.0, inplace=False)
            )
          )
        )
      )
    )
    (norm0): QuantizedLayerNorm((96,), eps=1e-05, elementwise_affine=True)
    (norm1): QuantizedLayerNorm((192,), eps=1e-05, elementwise_affine=True)
    (norm2): QuantizedLayerNorm((384,), eps=1e-05, elementwise_affine=True)
    (norm3): QuantizedLayerNorm((768,), eps=1e-05, elementwise_affine=True)
  )
  (quant): Quantize(scale=tensor([0.0743]), zero_point=tensor([65]), dtype=torch.quint8)
  (dequant): DeQuantize()
  (atrous_pooling_layer): KSAC(
    (_bn_layers): ModuleList(
      (0): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (1): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (3): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (4): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    )
    (global_avg_pool): Sequential(
      (0): AdaptiveAvgPool2d(output_size=(1, 1))
      (1): QuantizedConv2d(768, 256, kernel_size=(1, 1), stride=(1, 1), scale=0.05054910108447075, zero_point=63, bias=False)
      (2): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (3): ReLU()
    )
    (conv_out): QuantizedConv2d(1280, 256, kernel_size=(1, 1), stride=(1, 1), scale=0.06600148975849152, zero_point=65, bias=False)
    (bn_out): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (relu): ReLU()
    (dropout): Dropout(p=0.5, inplace=False)
  )
  (dec_cn_1): ConvModule(
    (layer): Sequential(
      (0): QuantizedConv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), scale=0.037859007716178894, zero_point=63, padding=(1, 1))
      (1): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
  )
  (dec_cn_1_1): ConvModule(
    (layer): Sequential(
      (0): QuantizedConv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), scale=0.02312176674604416, zero_point=58)
      (1): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
  )
  (dec_cn_2): ConvModule(
    (layer): Sequential(
      (0): QuantizedConv2d(192, 256, kernel_size=(3, 3), stride=(1, 1), scale=0.04312286898493767, zero_point=65, padding=(1, 1))
      (1): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
  )
  (dec_cn_2_1): ConvModule(
    (layer): Sequential(
      (0): QuantizedConv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), scale=0.014349674805998802, zero_point=59)
      (1): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
  )
  (dec_cn_3): ConvModule(
    (layer): Sequential(
      (0): QuantizedConv2d(96, 256, kernel_size=(3, 3), stride=(1, 1), scale=0.04244370386004448, zero_point=62, padding=(1, 1))
      (1): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
  )
  (dec_cn_3_1): ConvModule(
    (layer): Sequential(
      (0): QuantizedConv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), scale=0.013204840011894703, zero_point=66)
      (1): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (2): ReLU(inplace=True)
    )
  )
  (head): Sequential(
    (0): ConvModule(
      (layer): Sequential(
        (0): QuantizedConv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), scale=0.005600635427981615, zero_point=58, padding=(1, 1))
        (1): QuantizedBatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (2): ReLU(inplace=True)
      )
    )
    (1): ConvModule(
      (layer): Sequential(
        (0): QuantizedConv2d(256, 64, kernel_size=(3, 3), stride=(1, 1), scale=0.00172130623832345, zero_point=62, padding=(1, 1))
        (1): QuantizedBatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
        (2): ReLU(inplace=True)
      )
    )
    (2): QuantizedConv2d(64, 12, kernel_size=(3, 3), stride=(1, 1), scale=0.0009344882564619184, zero_point=64, padding=(1, 1))
  )
  (dropout): Dropout2d(p=0.5, inplace=False)
)
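
The printout above shows a converted int8 model: QuantStub/DeQuantStub have become Quantize/DeQuantize modules, and Conv2d/Linear/LayerNorm/BatchNorm2d layers have become their Quantized* counterparts with per-tensor scale and zero_point values. A minimal sketch of how such a printout could be produced with PyTorch eager-mode post-training static quantization is given below; it is not the authors' code, and "DeeplabV3X" and "calibration_loader" are assumptions standing in for the actual model class and calibration data, which are not part of the paste.

# Sketch: eager-mode post-training static quantization (assumptions noted inline).
import torch
import torch.ao.quantization as tq

# Assumed constructor; 12 matches the output channels of the final head conv.
model_fp32 = DeeplabV3X(num_classes=12)
model_fp32.eval()

# Attach a quantization config; 'fbgemm' targets x86 backends, 'qnnpack' targets ARM.
model_fp32.qconfig = tq.get_default_qconfig("fbgemm")

# Insert observers, run a few representative batches to calibrate activation
# ranges, then convert the observed modules to their quantized counterparts.
prepared = tq.prepare(model_fp32)
with torch.no_grad():
    for images, _ in calibration_loader:  # assumed DataLoader of calibration images
        prepared(images)

model_int8 = tq.convert(prepared)

# Printing the converted model yields Quantized* modules with per-tensor
# scale / zero_point attributes, as in the dump above.
print(model_int8)

The scale and zero_point values shown for each QuantizedLinear / QuantizedConv2d come from the activation observers collected during calibration, so they will differ from run to run depending on the calibration data.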