Vision Language Models

Vision Language Models and LoRA adapters with quantization
vlm
lora
quantization
Author

Shataxi Dubey

Published

May 23, 2025

import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'  # restrict this notebook to a single GPU

from transformers import BitsAndBytesConfig
import torch
from transformers import Qwen2_5_VLForConditionalGeneration
bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                # bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.bfloat16,)

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-72B-Instruct", torch_dtype="auto", device_map="auto", quantization_config=bnb_config
)

model.get_memory_footprint()
40448783184
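get_memory_footprint() reports bytes, so converting to GiB makes the effect of NF4 quantization easier to see. A minimal sketch (the bf16 comparison is a back-of-the-envelope estimate, not measured here):

footprint_gib = model.get_memory_footprint() / 2**30
print(f"{footprint_gib:.1f} GiB")  # ~37.7 GiB for the NF4-quantized weights
# for comparison, bf16 stores ~2 bytes per parameter, so a ~72B-parameter model
# would need roughly 145 GB before quantization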
model
Qwen2_5_VLForConditionalGeneration(
  (visual): Qwen2_5_VisionTransformerPretrainedModel(
    (patch_embed): Qwen2_5_VisionPatchEmbed(
      (proj): Conv3d(3, 1280, kernel_size=(2, 14, 14), stride=(2, 14, 14), bias=False)
    )
    (rotary_pos_emb): Qwen2_5_VisionRotaryEmbedding()
    (blocks): ModuleList(
      (0-31): 32 x Qwen2_5_VLVisionBlock(
        (norm1): Qwen2RMSNorm((1280,), eps=1e-06)
        (norm2): Qwen2RMSNorm((1280,), eps=1e-06)
        (attn): Qwen2_5_VLVisionSdpaAttention(
          (qkv): Linear4bit(in_features=1280, out_features=3840, bias=True)
          (proj): Linear4bit(in_features=1280, out_features=1280, bias=True)
        )
        (mlp): Qwen2_5_VLMLP(
          (gate_proj): Linear4bit(in_features=1280, out_features=3456, bias=True)
          (up_proj): Linear4bit(in_features=1280, out_features=3456, bias=True)
          (down_proj): Linear4bit(in_features=3456, out_features=1280, bias=True)
          (act_fn): SiLU()
        )
      )
    )
    (merger): Qwen2_5_VLPatchMerger(
      (ln_q): Qwen2RMSNorm((1280,), eps=1e-06)
      (mlp): Sequential(
        (0): Linear4bit(in_features=5120, out_features=5120, bias=True)
        (1): GELU(approximate='none')
        (2): Linear4bit(in_features=5120, out_features=8192, bias=True)
      )
    )
  )
  (model): Qwen2_5_VLModel(
    (embed_tokens): Embedding(152064, 8192)
    (layers): ModuleList(
      (0-79): 80 x Qwen2_5_VLDecoderLayer(
        (self_attn): Qwen2_5_VLSdpaAttention(
          (q_proj): Linear4bit(in_features=8192, out_features=8192, bias=True)
          (k_proj): Linear4bit(in_features=8192, out_features=1024, bias=True)
          (v_proj): Linear4bit(in_features=8192, out_features=1024, bias=True)
          (o_proj): Linear4bit(in_features=8192, out_features=8192, bias=False)
          (rotary_emb): Qwen2_5_VLRotaryEmbedding()
        )
        (mlp): Qwen2MLP(
          (gate_proj): Linear4bit(in_features=8192, out_features=29568, bias=False)
          (up_proj): Linear4bit(in_features=8192, out_features=29568, bias=False)
          (down_proj): Linear4bit(in_features=29568, out_features=8192, bias=False)
          (act_fn): SiLU()
        )
        (input_layernorm): Qwen2RMSNorm((8192,), eps=1e-06)
        (post_attention_layernorm): Qwen2RMSNorm((8192,), eps=1e-06)
      )
    )
    (norm): Qwen2RMSNorm((8192,), eps=1e-06)
    (rotary_emb): Qwen2_5_VLRotaryEmbedding()
  )
  (lm_head): Linear(in_features=8192, out_features=152064, bias=False)
)
from peft import LoraConfig, get_peft_model
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "v_proj", "o_proj", "k_proj", "gate_proj", "up_proj", "down_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

peft_model = get_peft_model(model, lora_config)
peft_model.print_trainable_parameters()
print(peft_model.get_memory_footprint())
peft_model
trainable params: 108,904,448 || all params: 73,519,681,792 || trainable%: 0.1481
40884400976
PeftModelForCausalLM(
  (base_model): LoraModel(
    (model): Qwen2_5_VLForConditionalGeneration(
      (visual): Qwen2_5_VisionTransformerPretrainedModel(
        (patch_embed): Qwen2_5_VisionPatchEmbed(
          (proj): Conv3d(3, 1280, kernel_size=(2, 14, 14), stride=(2, 14, 14), bias=False)
        )
        (rotary_pos_emb): Qwen2_5_VisionRotaryEmbedding()
        (blocks): ModuleList(
          (0-31): 32 x Qwen2_5_VLVisionBlock(
            (norm1): Qwen2RMSNorm((1280,), eps=1e-06)
            (norm2): Qwen2RMSNorm((1280,), eps=1e-06)
            (attn): Qwen2_5_VLVisionSdpaAttention(
              (qkv): Linear4bit(in_features=1280, out_features=3840, bias=True)
              (proj): Linear4bit(in_features=1280, out_features=1280, bias=True)
            )
            (mlp): Qwen2_5_VLMLP(
              (gate_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=1280, out_features=3456, bias=True)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=1280, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=3456, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (up_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=1280, out_features=3456, bias=True)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=1280, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=3456, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (down_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=3456, out_features=1280, bias=True)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=3456, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=1280, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (act_fn): SiLU()
            )
          )
        )
        (merger): Qwen2_5_VLPatchMerger(
          (ln_q): Qwen2RMSNorm((1280,), eps=1e-06)
          (mlp): Sequential(
            (0): Linear4bit(in_features=5120, out_features=5120, bias=True)
            (1): GELU(approximate='none')
            (2): Linear4bit(in_features=5120, out_features=8192, bias=True)
          )
        )
      )
      (model): Qwen2_5_VLModel(
        (embed_tokens): Embedding(152064, 8192)
        (layers): ModuleList(
          (0-79): 80 x Qwen2_5_VLDecoderLayer(
            (self_attn): Qwen2_5_VLSdpaAttention(
              (q_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=8192, out_features=8192, bias=True)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=8192, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=8192, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (k_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=8192, out_features=1024, bias=True)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=8192, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=1024, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (v_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=8192, out_features=1024, bias=True)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=8192, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=1024, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (o_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=8192, out_features=8192, bias=False)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=8192, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=8192, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (rotary_emb): Qwen2_5_VLRotaryEmbedding()
            )
            (mlp): Qwen2MLP(
              (gate_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=8192, out_features=29568, bias=False)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=8192, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=29568, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (up_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=8192, out_features=29568, bias=False)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=8192, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=29568, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (down_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=29568, out_features=8192, bias=False)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=29568, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=8192, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (act_fn): SiLU()
            )
            (input_layernorm): Qwen2RMSNorm((8192,), eps=1e-06)
            (post_attention_layernorm): Qwen2RMSNorm((8192,), eps=1e-06)
          )
        )
        (norm): Qwen2RMSNorm((8192,), eps=1e-06)
        (rotary_emb): Qwen2_5_VLRotaryEmbedding()
      )
      (lm_head): Linear(in_features=8192, out_features=152064, bias=False)
    )
  )
)
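The trainable-parameter count can be reproduced by hand: each LoRA pair adds r × (in_features + out_features) parameters per targeted projection. With this target list, that covers all seven projections in the 80 decoder layers plus the gate/up/down projections of the 32 vision blocks (the vision attention uses a fused qkv projection, so its attention layers are not matched). A sketch of the arithmetic, using the dimensions from the module print above:

r = 8

# language model: 80 decoder layers (hidden 8192, kv dim 1024, MLP 29568)
per_decoder_layer = (
    r * (8192 + 8192)         # q_proj
    + r * (8192 + 1024)       # k_proj
    + r * (8192 + 1024)       # v_proj
    + r * (8192 + 8192)       # o_proj
    + 3 * r * (8192 + 29568)  # gate_proj, up_proj, down_proj
)

# vision tower: 32 blocks (hidden 1280, MLP 3456); only gate/up/down match the target names
per_vision_block = 3 * r * (1280 + 3456)

print(80 * per_decoder_layer + 32 * per_vision_block)  # 108,904,448

The footprint grows from 40,448,783,184 to 40,884,400,976 bytes, a difference of 435,617,792 = 108,904,448 × 4, consistent with the adapter weights being stored as 32-bit floats on top of the 4-bit base weights.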
from peft import LoraConfig, get_peft_model
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["q_proj", "v_proj",],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

peft_model = get_peft_model(model, lora_config)
peft_model.print_trainable_parameters()
print(peft_model.get_memory_footprint())
peft_model
trainable params: 16,384,000 || all params: 73,427,161,344 || trainable%: 0.0223
40514319184
PeftModelForCausalLM(
  (base_model): LoraModel(
    (model): Qwen2_5_VLForConditionalGeneration(
      (visual): Qwen2_5_VisionTransformerPretrainedModel(
        (patch_embed): Qwen2_5_VisionPatchEmbed(
          (proj): Conv3d(3, 1280, kernel_size=(2, 14, 14), stride=(2, 14, 14), bias=False)
        )
        (rotary_pos_emb): Qwen2_5_VisionRotaryEmbedding()
        (blocks): ModuleList(
          (0-31): 32 x Qwen2_5_VLVisionBlock(
            (norm1): Qwen2RMSNorm((1280,), eps=1e-06)
            (norm2): Qwen2RMSNorm((1280,), eps=1e-06)
            (attn): Qwen2_5_VLVisionSdpaAttention(
              (qkv): Linear4bit(in_features=1280, out_features=3840, bias=True)
              (proj): Linear4bit(in_features=1280, out_features=1280, bias=True)
            )
            (mlp): Qwen2_5_VLMLP(
              (gate_proj): Linear4bit(in_features=1280, out_features=3456, bias=True)
              (up_proj): Linear4bit(in_features=1280, out_features=3456, bias=True)
              (down_proj): Linear4bit(in_features=3456, out_features=1280, bias=True)
              (act_fn): SiLU()
            )
          )
        )
        (merger): Qwen2_5_VLPatchMerger(
          (ln_q): Qwen2RMSNorm((1280,), eps=1e-06)
          (mlp): Sequential(
            (0): Linear4bit(in_features=5120, out_features=5120, bias=True)
            (1): GELU(approximate='none')
            (2): Linear4bit(in_features=5120, out_features=8192, bias=True)
          )
        )
      )
      (model): Qwen2_5_VLModel(
        (embed_tokens): Embedding(152064, 8192)
        (layers): ModuleList(
          (0-79): 80 x Qwen2_5_VLDecoderLayer(
            (self_attn): Qwen2_5_VLSdpaAttention(
              (q_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=8192, out_features=8192, bias=True)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=8192, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=8192, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (k_proj): Linear4bit(in_features=8192, out_features=1024, bias=True)
              (v_proj): lora.Linear4bit(
                (base_layer): Linear4bit(in_features=8192, out_features=1024, bias=True)
                (lora_dropout): ModuleDict(
                  (default): Dropout(p=0.05, inplace=False)
                )
                (lora_A): ModuleDict(
                  (default): Linear(in_features=8192, out_features=8, bias=False)
                )
                (lora_B): ModuleDict(
                  (default): Linear(in_features=8, out_features=1024, bias=False)
                )
                (lora_embedding_A): ParameterDict()
                (lora_embedding_B): ParameterDict()
                (lora_magnitude_vector): ModuleDict()
              )
              (o_proj): Linear4bit(in_features=8192, out_features=8192, bias=False)
              (rotary_emb): Qwen2_5_VLRotaryEmbedding()
            )
            (mlp): Qwen2MLP(
              (gate_proj): Linear4bit(in_features=8192, out_features=29568, bias=False)
              (up_proj): Linear4bit(in_features=8192, out_features=29568, bias=False)
              (down_proj): Linear4bit(in_features=29568, out_features=8192, bias=False)
              (act_fn): SiLU()
            )
            (input_layernorm): Qwen2RMSNorm((8192,), eps=1e-06)
            (post_attention_layernorm): Qwen2RMSNorm((8192,), eps=1e-06)
          )
        )
        (norm): Qwen2RMSNorm((8192,), eps=1e-06)
        (rotary_emb): Qwen2_5_VLRotaryEmbedding()
      )
      (lm_head): Linear(in_features=8192, out_features=152064, bias=False)
    )
  )
)
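Restricting the targets to q_proj and v_proj leaves the vision tower untouched (its attention uses a fused qkv projection, so neither name matches), and only the 80 decoder layers pick up adapters. The same arithmetic as before:

r = 8
per_layer = r * (8192 + 8192) + r * (8192 + 1024)  # q_proj + v_proj
print(80 * per_layer)  # 16,384,000

The footprint increase of 40,514,319,184 - 40,448,783,184 = 65,536,000 bytes is again 4 bytes per adapter parameter.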
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'

from transformers import BitsAndBytesConfig
import torch
from transformers import PaliGemmaForConditionalGeneration

bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                # bnb_4bit_use_double_quant=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.bfloat16,)

# note: quantization_config is not passed here, so PaliGemma loads in plain bfloat16
model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma2-3b-pt-448", torch_dtype=torch.bfloat16, device_map="auto")
model.get_memory_footprint()
6066263008
model
PaliGemmaForConditionalGeneration(
  (vision_tower): SiglipVisionModel(
    (vision_model): SiglipVisionTransformer(
      (embeddings): SiglipVisionEmbeddings(
        (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
        (position_embedding): Embedding(1024, 1152)
      )
      (encoder): SiglipEncoder(
        (layers): ModuleList(
          (0-26): 27 x SiglipEncoderLayer(
            (self_attn): SiglipSdpaAttention(
              (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
              (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
              (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
              (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
            )
            (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
            (mlp): SiglipMLP(
              (activation_fn): PytorchGELUTanh()
              (fc1): Linear(in_features=1152, out_features=4304, bias=True)
              (fc2): Linear(in_features=4304, out_features=1152, bias=True)
            )
            (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
          )
        )
      )
      (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
    )
  )
  (multi_modal_projector): PaliGemmaMultiModalProjector(
    (linear): Linear(in_features=1152, out_features=2304, bias=True)
  )
  (language_model): Gemma2ForCausalLM(
    (model): Gemma2Model(
      (embed_tokens): Embedding(257216, 2304, padding_idx=0)
      (layers): ModuleList(
        (0-25): 26 x Gemma2DecoderLayer(
          (self_attn): Gemma2Attention(
            (q_proj): Linear(in_features=2304, out_features=2048, bias=False)
            (k_proj): Linear(in_features=2304, out_features=1024, bias=False)
            (v_proj): Linear(in_features=2304, out_features=1024, bias=False)
            (o_proj): Linear(in_features=2048, out_features=2304, bias=False)
          )
          (mlp): Gemma2MLP(
            (gate_proj): Linear(in_features=2304, out_features=9216, bias=False)
            (up_proj): Linear(in_features=2304, out_features=9216, bias=False)
            (down_proj): Linear(in_features=9216, out_features=2304, bias=False)
            (act_fn): PytorchGELUTanh()
          )
          (input_layernorm): Gemma2RMSNorm((2304,), eps=1e-06)
          (post_attention_layernorm): Gemma2RMSNorm((2304,), eps=1e-06)
          (pre_feedforward_layernorm): Gemma2RMSNorm((2304,), eps=1e-06)
          (post_feedforward_layernorm): Gemma2RMSNorm((2304,), eps=1e-06)
        )
      )
      (norm): Gemma2RMSNorm((2304,), eps=1e-06)
      (rotary_emb): Gemma2RotaryEmbedding()
    )
    (lm_head): Linear(in_features=2304, out_features=257216, bias=False)
  )
)
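As noted in the load cell, bnb_config is defined but never passed to from_pretrained, so this checkpoint keeps ordinary Linear layers in bfloat16 (hence the ~6 GB footprint). If the same 4-bit treatment were wanted for PaliGemma, the config would have to be passed explicitly; a sketch, not run in this post:

# sketch: load PaliGemma with the same NF4 config
model_4bit = PaliGemmaForConditionalGeneration.from_pretrained(
    "google/paligemma2-3b-pt-448",
    torch_dtype=torch.bfloat16,
    device_map="auto",
    quantization_config=bnb_config,
)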
from peft import get_peft_model, LoraConfig
import peft

lora_config = LoraConfig(
        r=8,
        lora_alpha=8,
        target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "gate_proj", "up_proj", "down_proj"],
        task_type="CAUSAL_LM",
    )

peft_model = get_peft_model(model, lora_config)
peft_model.print_trainable_parameters()
print(peft_model.get_memory_footprint())
peft_model
trainable params: 11,876,352 || all params: 3,045,003,504 || trainable%: 0.3900
6113768416
PeftModelForCausalLM(
  (base_model): LoraModel(
    (model): PaliGemmaForConditionalGeneration(
      (vision_tower): SiglipVisionModel(
        (vision_model): SiglipVisionTransformer(
          (embeddings): SiglipVisionEmbeddings(
            (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
            (position_embedding): Embedding(1024, 1152)
          )
          (encoder): SiglipEncoder(
            (layers): ModuleList(
              (0-26): 27 x SiglipEncoderLayer(
                (self_attn): SiglipSdpaAttention(
                  (k_proj): lora.Linear(
                    (base_layer): Linear(in_features=1152, out_features=1152, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Identity()
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1152, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1152, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (v_proj): lora.Linear(
                    (base_layer): Linear(in_features=1152, out_features=1152, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Identity()
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1152, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1152, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (q_proj): lora.Linear(
                    (base_layer): Linear(in_features=1152, out_features=1152, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Identity()
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1152, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1152, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
                )
                (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
                (mlp): SiglipMLP(
                  (activation_fn): PytorchGELUTanh()
                  (fc1): Linear(in_features=1152, out_features=4304, bias=True)
                  (fc2): Linear(in_features=4304, out_features=1152, bias=True)
                )
                (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
              )
            )
          )
          (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
        )
      )
      (multi_modal_projector): PaliGemmaMultiModalProjector(
        (linear): Linear(in_features=1152, out_features=2304, bias=True)
      )
      (language_model): Gemma2ForCausalLM(
        (model): Gemma2Model(
          (embed_tokens): Embedding(257216, 2304, padding_idx=0)
          (layers): ModuleList(
            (0-25): 26 x Gemma2DecoderLayer(
              (self_attn): Gemma2Attention(
                (q_proj): lora.Linear(
                  (base_layer): Linear(in_features=2304, out_features=2048, bias=False)
                  (lora_dropout): ModuleDict(
                    (default): Identity()
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=2304, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=2048, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                  (lora_magnitude_vector): ModuleDict()
                )
                (k_proj): lora.Linear(
                  (base_layer): Linear(in_features=2304, out_features=1024, bias=False)
                  (lora_dropout): ModuleDict(
                    (default): Identity()
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=2304, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=1024, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                  (lora_magnitude_vector): ModuleDict()
                )
                (v_proj): lora.Linear(
                  (base_layer): Linear(in_features=2304, out_features=1024, bias=False)
                  (lora_dropout): ModuleDict(
                    (default): Identity()
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=2304, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=1024, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                  (lora_magnitude_vector): ModuleDict()
                )
                (o_proj): lora.Linear(
                  (base_layer): Linear(in_features=2048, out_features=2304, bias=False)
                  (lora_dropout): ModuleDict(
                    (default): Identity()
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=2048, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=2304, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                  (lora_magnitude_vector): ModuleDict()
                )
              )
              (mlp): Gemma2MLP(
                (gate_proj): lora.Linear(
                  (base_layer): Linear(in_features=2304, out_features=9216, bias=False)
                  (lora_dropout): ModuleDict(
                    (default): Identity()
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=2304, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=9216, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                  (lora_magnitude_vector): ModuleDict()
                )
                (up_proj): lora.Linear(
                  (base_layer): Linear(in_features=2304, out_features=9216, bias=False)
                  (lora_dropout): ModuleDict(
                    (default): Identity()
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=2304, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=9216, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                  (lora_magnitude_vector): ModuleDict()
                )
                (down_proj): lora.Linear(
                  (base_layer): Linear(in_features=9216, out_features=2304, bias=False)
                  (lora_dropout): ModuleDict(
                    (default): Identity()
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=9216, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=2304, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                  (lora_magnitude_vector): ModuleDict()
                )
                (act_fn): PytorchGELUTanh()
              )
              (input_layernorm): Gemma2RMSNorm((2304,), eps=1e-06)
              (post_attention_layernorm): Gemma2RMSNorm((2304,), eps=1e-06)
              (pre_feedforward_layernorm): Gemma2RMSNorm((2304,), eps=1e-06)
              (post_feedforward_layernorm): Gemma2RMSNorm((2304,), eps=1e-06)
            )
          )
          (norm): Gemma2RMSNorm((2304,), eps=1e-06)
          (rotary_emb): Gemma2RotaryEmbedding()
        )
        (lm_head): Linear(in_features=2304, out_features=257216, bias=False)
      )
    )
  )
)
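Here LoRA lands in both towers: SigLIP's attention uses separate q_proj/k_proj/v_proj modules whose names match the target list (its out_proj and MLP fc1/fc2 do not), and the Gemma2 decoder matches all seven names. Reproducing the count from the printed dimensions:

r = 8

# SigLIP vision tower: 27 encoder layers, width 1152; q/k/v match, out_proj does not
vision = 27 * 3 * r * (1152 + 1152)

# Gemma2 language model: 26 decoder layers
per_lm_layer = (
    r * (2304 + 2048)        # q_proj
    + r * (2304 + 1024)      # k_proj
    + r * (2304 + 1024)      # v_proj
    + r * (2048 + 2304)      # o_proj
    + 3 * r * (2304 + 9216)  # gate_proj, up_proj, down_proj
)

print(vision + 26 * per_lm_layer)  # 11,876,352

If adapting the vision encoder is not desired, target_modules also accepts a single regex string, which is one way to restrict matching to the language model only (an untested sketch: r"language_model.*(q_proj|v_proj)").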
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '3'

import torch
from transformers import AutoModelForCausalLM 

model = AutoModelForCausalLM.from_pretrained("microsoft/Florence-2-large-ft", torch_dtype="auto", trust_remote_code=True)
model.get_memory_footprint()
Florence2LanguageForConditionalGeneration has generative capabilities, as `prepare_inputs_for_generation` is explicitly overwritten. However, it doesn't directly inherit from `GenerationMixin`. From 👉v4.50👈 onwards, `PreTrainedModel` will NOT inherit from `GenerationMixin`, and this model will lose the ability to call `generate` and other related functions.
  - If you're using `trust_remote_code=True`, you can get rid of this warning by loading the model with an auto class. See https://huggingface.co/docs/transformers/en/model_doc/auto#auto-classes
  - If you are the owner of the model architecture code, please modify your model class such that it inherits from `GenerationMixin` (after `PreTrainedModel`, otherwise you'll get an exception).
  - If you are not the owner of the model architecture class, please contact the model code owner to update it.
1645899954
model.dtype
torch.float16
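Florence-2-large-ft loads in float16 here (torch_dtype="auto" picks up the checkpoint dtype), so the ~1.6 GB footprint corresponds to roughly 2 bytes per parameter plus buffers. A quick sketch relating the two:

n_params = sum(p.numel() for p in model.parameters())
print(f"{n_params / 1e9:.2f}B parameters")
print(f"{model.get_memory_footprint() / 2**30:.2f} GiB")  # ~1.53 GiB in float16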
model
Florence2ForConditionalGeneration(
  (vision_tower): DaViT(
    (convs): ModuleList(
      (0): ConvEmbed(
        (proj): Conv2d(3, 256, kernel_size=(7, 7), stride=(4, 4), padding=(3, 3))
        (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
      )
      (1): ConvEmbed(
        (proj): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
      )
      (2): ConvEmbed(
        (proj): Conv2d(512, 1024, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
      )
      (3): ConvEmbed(
        (proj): Conv2d(1024, 2048, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
        (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
      )
    )
    (blocks): ModuleList(
      (0): MySequential(
        (0): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=256, out_features=768, bias=True)
                (proj): Linear(in_features=256, out_features=256, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): Identity()
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=256, out_features=1024, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=1024, out_features=256, bias=True)
                )
              )
              (drop_path): Identity()
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=256, out_features=768, bias=True)
                (proj): Linear(in_features=256, out_features=256, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.004)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=256, out_features=1024, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=1024, out_features=256, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.004)
            )
          )
        )
      )
      (1): MySequential(
        (0): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=512)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=512, out_features=1536, bias=True)
                (proj): Linear(in_features=512, out_features=512, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.009)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=512)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=512, out_features=2048, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=2048, out_features=512, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.009)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=512)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=512, out_features=1536, bias=True)
                (proj): Linear(in_features=512, out_features=512, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.013)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=512)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=512, out_features=2048, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=2048, out_features=512, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.013)
            )
          )
        )
      )
      (2): MySequential(
        (0): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.017)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.017)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.022)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.022)
            )
          )
        )
        (1): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.026)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.026)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.030)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.030)
            )
          )
        )
        (2): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.035)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.035)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.039)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.039)
            )
          )
        )
        (3): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.043)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.043)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.048)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.048)
            )
          )
        )
        (4): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.052)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.052)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.057)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.057)
            )
          )
        )
        (5): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.061)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.061)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.065)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.065)
            )
          )
        )
        (6): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.070)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.070)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.074)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.074)
            )
          )
        )
        (7): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.078)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.078)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.083)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.083)
            )
          )
        )
        (8): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.087)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.087)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                (proj): Linear(in_features=1024, out_features=1024, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.091)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=4096, out_features=1024, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.091)
            )
          )
        )
      )
      (3): MySequential(
        (0): MySequential(
          (spatial_block): SpatialBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2048)
              )
            )
            (window_attn): PreNorm(
              (norm): LayerNorm((2048,), eps=1e-05, elementwise_affine=True)
              (fn): WindowAttention(
                (qkv): Linear(in_features=2048, out_features=6144, bias=True)
                (proj): Linear(in_features=2048, out_features=2048, bias=True)
                (softmax): Softmax(dim=-1)
              )
              (drop_path): DropPath(drop_prob=0.096)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2048)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((2048,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=2048, out_features=8192, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=8192, out_features=2048, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.096)
            )
          )
          (channel_block): ChannelBlock(
            (conv1): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2048)
              )
            )
            (channel_attn): PreNorm(
              (norm): LayerNorm((2048,), eps=1e-05, elementwise_affine=True)
              (fn): ChannelAttention(
                (qkv): Linear(in_features=2048, out_features=6144, bias=True)
                (proj): Linear(in_features=2048, out_features=2048, bias=True)
              )
              (drop_path): DropPath(drop_prob=0.100)
            )
            (conv2): PreNorm(
              (fn): DepthWiseConv2d(
                (dw): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2048)
              )
            )
            (ffn): PreNorm(
              (norm): LayerNorm((2048,), eps=1e-05, elementwise_affine=True)
              (fn): Mlp(
                (net): Sequential(
                  (fc1): Linear(in_features=2048, out_features=8192, bias=True)
                  (act): GELU(approximate='none')
                  (fc2): Linear(in_features=8192, out_features=2048, bias=True)
                )
              )
              (drop_path): DropPath(drop_prob=0.100)
            )
          )
        )
      )
    )
    (avgpool): AdaptiveAvgPool1d(output_size=1)
  )
  (image_proj_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
  (image_pos_embed): LearnedAbsolutePositionEmbedding2D(
    (row_embeddings): Embedding(50, 1024)
    (column_embeddings): Embedding(50, 1024)
  )
  (visual_temporal_embed): PositionalEmbeddingCosine1D()
  (language_model): Florence2LanguageForConditionalGeneration(
    (model): Florence2LanguageModel(
      (shared): Embedding(51289, 1024, padding_idx=1)
      (encoder): Florence2Encoder(
        (embed_tokens): Florence2ScaledWordEmbedding(51289, 1024, padding_idx=1)
        (embed_positions): Florence2LearnedPositionalEmbedding(1026, 1024)
        (layers): ModuleList(
          (0-11): 12 x Florence2EncoderLayer(
            (self_attn): Florence2SdpaAttention(
              (k_proj): Linear(in_features=1024, out_features=1024, bias=True)
              (v_proj): Linear(in_features=1024, out_features=1024, bias=True)
              (q_proj): Linear(in_features=1024, out_features=1024, bias=True)
              (out_proj): Linear(in_features=1024, out_features=1024, bias=True)
            )
            (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
            (activation_fn): GELUActivation()
            (fc1): Linear(in_features=1024, out_features=4096, bias=True)
            (fc2): Linear(in_features=4096, out_features=1024, bias=True)
            (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
          )
        )
        (layernorm_embedding): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
      )
      (decoder): Florence2Decoder(
        (embed_tokens): Florence2ScaledWordEmbedding(51289, 1024, padding_idx=1)
        (embed_positions): Florence2LearnedPositionalEmbedding(1026, 1024)
        (layers): ModuleList(
          (0-11): 12 x Florence2DecoderLayer(
            (self_attn): Florence2SdpaAttention(
              (k_proj): Linear(in_features=1024, out_features=1024, bias=True)
              (v_proj): Linear(in_features=1024, out_features=1024, bias=True)
              (q_proj): Linear(in_features=1024, out_features=1024, bias=True)
              (out_proj): Linear(in_features=1024, out_features=1024, bias=True)
            )
            (activation_fn): GELUActivation()
            (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
            (encoder_attn): Florence2SdpaAttention(
              (k_proj): Linear(in_features=1024, out_features=1024, bias=True)
              (v_proj): Linear(in_features=1024, out_features=1024, bias=True)
              (q_proj): Linear(in_features=1024, out_features=1024, bias=True)
              (out_proj): Linear(in_features=1024, out_features=1024, bias=True)
            )
            (encoder_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
            (fc1): Linear(in_features=1024, out_features=4096, bias=True)
            (fc2): Linear(in_features=4096, out_features=1024, bias=True)
            (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
          )
        )
        (layernorm_embedding): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
      )
    )
    (lm_head): Linear(in_features=1024, out_features=51289, bias=False)
  )
)
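The printout above is the Florence-2 module tree. Before wiring up LoRA it can help to list the distinct `nn.Linear` suffixes that actually occur in that tree and pick `target_modules` from them, since PEFT matches entries against module-name suffixes. A minimal sketch, assuming the Florence-2 checkpoint displayed above is still available as `model`:

import torch.nn as nn

# Collect the distinct name suffixes of every nn.Linear submodule so we can
# choose sensible LoRA target_modules. `model` is assumed to be the
# Florence2ForConditionalGeneration instance shown above.
linear_suffixes = sorted({name.split(".")[-1]
                          for name, module in model.named_modules()
                          if isinstance(module, nn.Linear)})
print(linear_suffixes)
# e.g. ['fc1', 'fc2', 'k_proj', 'lm_head', 'out_proj', 'proj', 'q_proj', 'qkv', 'v_proj']
# (the exact list depends on the checkpoint)

Any suffix printed here can go straight into `target_modules`; entries that do not match any module name are simply skipped, as long as at least one entry matches.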
from peft import LoraConfig, get_peft_model

# LoRA configuration for Florence-2: rank-8 adapters with alpha 8 and 5% dropout,
# targeting the attention projections, the lm_head and selected MLP layers.
lora_config = LoraConfig(
    r=8,
    lora_alpha=8,
    lora_dropout=0.05,
    target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "linear", "Conv2d", "lm_head", "fc2"],
    task_type="CAUSAL_LM",
)

# Wrap the base model with the adapters, then report trainable parameters and memory footprint.
peft_model = get_peft_model(model, lora_config)
peft_model.print_trainable_parameters()
print(peft_model.get_memory_footprint())
peft_model
trainable params: 4,133,576 || all params: 826,827,464 || trainable%: 0.4999
1662434258
PeftModelForCausalLM(
  (base_model): LoraModel(
    (model): Florence2ForConditionalGeneration(
      (vision_tower): DaViT(
        (convs): ModuleList(
          (0): ConvEmbed(
            (proj): Conv2d(3, 256, kernel_size=(7, 7), stride=(4, 4), padding=(3, 3))
            (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
          )
          (1): ConvEmbed(
            (proj): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
            (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
          )
          (2): ConvEmbed(
            (proj): Conv2d(512, 1024, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
            (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
          )
          (3): ConvEmbed(
            (proj): Conv2d(1024, 2048, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
            (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
          )
        )
        (blocks): ModuleList(
          (0): MySequential(
            (0): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=256, out_features=768, bias=True)
                    (proj): Linear(in_features=256, out_features=256, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): Identity()
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=256, out_features=1024, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=1024, out_features=256, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=1024, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=256, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): Identity()
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=256, out_features=768, bias=True)
                    (proj): Linear(in_features=256, out_features=256, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.004)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=256, out_features=1024, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=1024, out_features=256, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=1024, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=256, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.004)
                )
              )
            )
          )
          (1): MySequential(
            (0): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=512)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=512, out_features=1536, bias=True)
                    (proj): Linear(in_features=512, out_features=512, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.009)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=512)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=512, out_features=2048, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=2048, out_features=512, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=2048, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=512, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.009)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=512)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=512, out_features=1536, bias=True)
                    (proj): Linear(in_features=512, out_features=512, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.013)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=512)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((512,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=512, out_features=2048, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=2048, out_features=512, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=2048, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=512, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.013)
                )
              )
            )
          )
          (2): MySequential(
            (0): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.017)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.017)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.022)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.022)
                )
              )
            )
            (1): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.026)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.026)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.030)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.030)
                )
              )
            )
            (2): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.035)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.035)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.039)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.039)
                )
              )
            )
            (3): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.043)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.043)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.048)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.048)
                )
              )
            )
            (4): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.052)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.052)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.057)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.057)
                )
              )
            )
            (5): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.061)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.061)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.065)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.065)
                )
              )
            )
            (6): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.070)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.070)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.074)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.074)
                )
              )
            )
            (7): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.078)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.078)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.083)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.083)
                )
              )
            )
            (8): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.087)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.087)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=1024, out_features=3072, bias=True)
                    (proj): Linear(in_features=1024, out_features=1024, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.091)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(1024, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=1024)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=4096, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=1024, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.091)
                )
              )
            )
          )
          (3): MySequential(
            (0): MySequential(
              (spatial_block): SpatialBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2048)
                  )
                )
                (window_attn): PreNorm(
                  (norm): LayerNorm((2048,), eps=1e-05, elementwise_affine=True)
                  (fn): WindowAttention(
                    (qkv): Linear(in_features=2048, out_features=6144, bias=True)
                    (proj): Linear(in_features=2048, out_features=2048, bias=True)
                    (softmax): Softmax(dim=-1)
                  )
                  (drop_path): DropPath(drop_prob=0.096)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2048)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((2048,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=2048, out_features=8192, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=8192, out_features=2048, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=8192, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=2048, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.096)
                )
              )
              (channel_block): ChannelBlock(
                (conv1): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2048)
                  )
                )
                (channel_attn): PreNorm(
                  (norm): LayerNorm((2048,), eps=1e-05, elementwise_affine=True)
                  (fn): ChannelAttention(
                    (qkv): Linear(in_features=2048, out_features=6144, bias=True)
                    (proj): Linear(in_features=2048, out_features=2048, bias=True)
                  )
                  (drop_path): DropPath(drop_prob=0.100)
                )
                (conv2): PreNorm(
                  (fn): DepthWiseConv2d(
                    (dw): Conv2d(2048, 2048, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=2048)
                  )
                )
                (ffn): PreNorm(
                  (norm): LayerNorm((2048,), eps=1e-05, elementwise_affine=True)
                  (fn): Mlp(
                    (net): Sequential(
                      (fc1): Linear(in_features=2048, out_features=8192, bias=True)
                      (act): GELU(approximate='none')
                      (fc2): lora.Linear(
                        (base_layer): Linear(in_features=8192, out_features=2048, bias=True)
                        (lora_dropout): ModuleDict(
                          (default): Dropout(p=0.05, inplace=False)
                        )
                        (lora_A): ModuleDict(
                          (default): Linear(in_features=8192, out_features=8, bias=False)
                        )
                        (lora_B): ModuleDict(
                          (default): Linear(in_features=8, out_features=2048, bias=False)
                        )
                        (lora_embedding_A): ParameterDict()
                        (lora_embedding_B): ParameterDict()
                        (lora_magnitude_vector): ModuleDict()
                      )
                    )
                  )
                  (drop_path): DropPath(drop_prob=0.100)
                )
              )
            )
          )
        )
        (avgpool): AdaptiveAvgPool1d(output_size=1)
      )
      (image_proj_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
      (image_pos_embed): LearnedAbsolutePositionEmbedding2D(
        (row_embeddings): Embedding(50, 1024)
        (column_embeddings): Embedding(50, 1024)
      )
      (visual_temporal_embed): PositionalEmbeddingCosine1D()
      (language_model): Florence2LanguageForConditionalGeneration(
        (model): Florence2LanguageModel(
          (shared): Embedding(51289, 1024, padding_idx=1)
          (encoder): Florence2Encoder(
            (embed_tokens): Florence2ScaledWordEmbedding(51289, 1024, padding_idx=1)
            (embed_positions): Florence2LearnedPositionalEmbedding(1026, 1024)
            (layers): ModuleList(
              (0-11): 12 x Florence2EncoderLayer(
                (self_attn): Florence2SdpaAttention(
                  (k_proj): lora.Linear(
                    (base_layer): Linear(in_features=1024, out_features=1024, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.05, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1024, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1024, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (v_proj): lora.Linear(
                    (base_layer): Linear(in_features=1024, out_features=1024, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.05, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1024, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1024, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (q_proj): lora.Linear(
                    (base_layer): Linear(in_features=1024, out_features=1024, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.05, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1024, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1024, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (out_proj): Linear(in_features=1024, out_features=1024, bias=True)
                )
                (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                (activation_fn): GELUActivation()
                (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                (fc2): lora.Linear(
                  (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                  (lora_dropout): ModuleDict(
                    (default): Dropout(p=0.05, inplace=False)
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=4096, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=1024, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                  (lora_magnitude_vector): ModuleDict()
                )
                (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              )
            )
            (layernorm_embedding): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
          )
          (decoder): Florence2Decoder(
            (embed_tokens): Florence2ScaledWordEmbedding(51289, 1024, padding_idx=1)
            (embed_positions): Florence2LearnedPositionalEmbedding(1026, 1024)
            (layers): ModuleList(
              (0-11): 12 x Florence2DecoderLayer(
                (self_attn): Florence2SdpaAttention(
                  (k_proj): lora.Linear(
                    (base_layer): Linear(in_features=1024, out_features=1024, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.05, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1024, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1024, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (v_proj): lora.Linear(
                    (base_layer): Linear(in_features=1024, out_features=1024, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.05, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1024, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1024, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (q_proj): lora.Linear(
                    (base_layer): Linear(in_features=1024, out_features=1024, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.05, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1024, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1024, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (out_proj): Linear(in_features=1024, out_features=1024, bias=True)
                )
                (activation_fn): GELUActivation()
                (self_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                (encoder_attn): Florence2SdpaAttention(
                  (k_proj): lora.Linear(
                    (base_layer): Linear(in_features=1024, out_features=1024, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.05, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1024, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1024, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (v_proj): lora.Linear(
                    (base_layer): Linear(in_features=1024, out_features=1024, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.05, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1024, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1024, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (q_proj): lora.Linear(
                    (base_layer): Linear(in_features=1024, out_features=1024, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.05, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=1024, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=1024, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                    (lora_magnitude_vector): ModuleDict()
                  )
                  (out_proj): Linear(in_features=1024, out_features=1024, bias=True)
                )
                (encoder_attn_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
                (fc1): Linear(in_features=1024, out_features=4096, bias=True)
                (fc2): lora.Linear(
                  (base_layer): Linear(in_features=4096, out_features=1024, bias=True)
                  (lora_dropout): ModuleDict(
                    (default): Dropout(p=0.05, inplace=False)
                  )
                  (lora_A): ModuleDict(
                    (default): Linear(in_features=4096, out_features=8, bias=False)
                  )
                  (lora_B): ModuleDict(
                    (default): Linear(in_features=8, out_features=1024, bias=False)
                  )
                  (lora_embedding_A): ParameterDict()
                  (lora_embedding_B): ParameterDict()
                  (lora_magnitude_vector): ModuleDict()
                )
                (final_layer_norm): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
              )
            )
            (layernorm_embedding): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
          )
        )
        (lm_head): lora.Linear(
          (base_layer): Linear(in_features=1024, out_features=51289, bias=False)
          (lora_dropout): ModuleDict(
            (default): Dropout(p=0.05, inplace=False)
          )
          (lora_A): ModuleDict(
            (default): Linear(in_features=1024, out_features=8, bias=False)
          )
          (lora_B): ModuleDict(
            (default): Linear(in_features=8, out_features=51289, bias=False)
          )
          (lora_embedding_A): ParameterDict()
          (lora_embedding_B): ParameterDict()
          (lora_magnitude_vector): ModuleDict()
        )
      )
    )
  )
)
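
The printout above shows where PEFT injected adapters into the Florence-2 model: each targeted projection (`q_proj`, `k_proj`, `v_proj`, the `fc2` MLP projections, and `lm_head`) now appears as a `lora.Linear` that wraps the frozen `base_layer` together with a rank-8 `lora_A`/`lora_B` pair. Rather than scrolling through the dump, we can verify this programmatically by walking `named_modules()` and counting the wrapped layers. This is a minimal sketch, assuming the PEFT-wrapped Florence-2 model is bound to a variable named `peft_model` (as in the Qwen example earlier); rename it to match your own cell.

from collections import Counter

# Assumption: `peft_model` is the PEFT-wrapped Florence-2 model from the cells above.
lora_targets = Counter()
for name, module in peft_model.named_modules():
    # peft's lora.Linear wrappers carry a non-empty `lora_A` ModuleDict,
    # so this duck-typed check picks out exactly the adapted layers.
    if hasattr(module, "lora_A") and len(module.lora_A) > 0:
        lora_targets[name.split(".")[-1]] += 1  # keep the leaf name, e.g. "q_proj", "fc2"

print(dict(lora_targets))

# Re-derive the ratio that print_trainable_parameters() reports:
# only the LoRA A/B matrices should require gradients.
trainable = sum(p.numel() for p in peft_model.parameters() if p.requires_grad)
total = sum(p.numel() for p in peft_model.parameters())
print(f"trainable params: {trainable:,} || all params: {total:,} "
      f"|| trainable%: {100 * trainable / total:.4f}")

If the counts line up with the architecture (for example, 12 encoder layers contributing one `q_proj`, `k_proj`, `v_proj`, and `fc2` each, plus the decoder layers and `lm_head`), the adapter configuration matched the intended `target_modules`, and the trainable fraction should be a small percentage of the full model.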