#include "llama-arch.h"

#include "llama-impl.h"

#include <map>
#include <set>
#include <vector>
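
// used only by the illustrative sketches added below
#include <cstdio>
#include <string>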

static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_CLIP,             "clip"             }, // dummy, only used by llama-quantize
    { LLM_ARCH_LLAMA,            "llama"            },
    { LLM_ARCH_LLAMA4,           "llama4"           },
    { LLM_ARCH_DECI,             "deci"             },
    { LLM_ARCH_FALCON,           "falcon"           },
    { LLM_ARCH_GROK,             "grok"             },
    { LLM_ARCH_GPT2,             "gpt2"             },
    { LLM_ARCH_GPTJ,             "gptj"             },
    { LLM_ARCH_GPTNEOX,          "gptneox"          },
    { LLM_ARCH_MPT,              "mpt"              },
    { LLM_ARCH_BAICHUAN,         "baichuan"         },
    { LLM_ARCH_STARCODER,        "starcoder"        },
    { LLM_ARCH_REFACT,           "refact"           },
    { LLM_ARCH_BERT,             "bert"             },
    { LLM_ARCH_MODERN_BERT,      "modern-bert"      },
    { LLM_ARCH_NOMIC_BERT,       "nomic-bert"       },
    { LLM_ARCH_NOMIC_BERT_MOE,   "nomic-bert-moe"   },
    { LLM_ARCH_NEO_BERT,         "neo-bert"         },
    { LLM_ARCH_JINA_BERT_V2,     "jina-bert-v2"     },
    { LLM_ARCH_JINA_BERT_V3,     "jina-bert-v3"     },
    { LLM_ARCH_EUROBERT,         "eurobert"         },
    { LLM_ARCH_BLOOM,            "bloom"            },
    { LLM_ARCH_STABLELM,         "stablelm"         },
    { LLM_ARCH_QWEN,             "qwen"             },
    { LLM_ARCH_QWEN2,            "qwen2"            },
    { LLM_ARCH_QWEN2MOE,         "qwen2moe"         },
    { LLM_ARCH_QWEN2VL,          "qwen2vl"          },
    { LLM_ARCH_QWEN3,            "qwen3"            },
    { LLM_ARCH_QWEN3MOE,         "qwen3moe"         },
    { LLM_ARCH_QWEN3NEXT,        "qwen3next"        },
    { LLM_ARCH_QWEN3VL,          "qwen3vl"          },
    { LLM_ARCH_QWEN3VLMOE,       "qwen3vlmoe"       },
    { LLM_ARCH_QWEN35,           "qwen35"           },
    { LLM_ARCH_QWEN35MOE,        "qwen35moe"        },
    { LLM_ARCH_PHI2,             "phi2"             },
    { LLM_ARCH_PHI3,             "phi3"             },
    { LLM_ARCH_PHIMOE,           "phimoe"           },
    { LLM_ARCH_PLAMO,            "plamo"            },
    { LLM_ARCH_PLAMO2,           "plamo2"           },
    { LLM_ARCH_PLAMO3,           "plamo3"           },
    { LLM_ARCH_CODESHELL,        "codeshell"        },
    { LLM_ARCH_ORION,            "orion"            },
    { LLM_ARCH_INTERNLM2,        "internlm2"        },
    { LLM_ARCH_MINICPM,          "minicpm"          },
    { LLM_ARCH_MINICPM3,         "minicpm3"         },
    { LLM_ARCH_GEMMA,            "gemma"            },
    { LLM_ARCH_GEMMA2,           "gemma2"           },
    { LLM_ARCH_GEMMA3,           "gemma3"           },
    { LLM_ARCH_GEMMA3N,          "gemma3n"          },
    { LLM_ARCH_GEMMA4,           "gemma4"           },
    { LLM_ARCH_GEMMA_EMBEDDING,  "gemma-embedding"  },
    { LLM_ARCH_STARCODER2,       "starcoder2"       },
    { LLM_ARCH_MAMBA,            "mamba"            },
    { LLM_ARCH_MAMBA2,           "mamba2"           },
    { LLM_ARCH_JAMBA,            "jamba"            },
    { LLM_ARCH_FALCON_H1,        "falcon-h1"        },
    { LLM_ARCH_XVERSE,           "xverse"           },
    { LLM_ARCH_COMMAND_R,        "command-r"        },
    { LLM_ARCH_COHERE2,          "cohere2"          },
    { LLM_ARCH_DBRX,             "dbrx"             },
    { LLM_ARCH_OLMO,             "olmo"             },
    { LLM_ARCH_OLMO2,            "olmo2"            },
    { LLM_ARCH_OLMOE,            "olmoe"            },
    { LLM_ARCH_OPENELM,          "openelm"          },
    { LLM_ARCH_ARCTIC,           "arctic"           },
    { LLM_ARCH_DEEPSEEK,         "deepseek"         },
    { LLM_ARCH_DEEPSEEK2,        "deepseek2"        },
    { LLM_ARCH_DEEPSEEK2OCR,     "deepseek2-ocr"    },
    { LLM_ARCH_CHATGLM,          "chatglm"          },
    { LLM_ARCH_GLM4,             "glm4"             },
    { LLM_ARCH_GLM4_MOE,         "glm4moe"          },
    { LLM_ARCH_GLM_DSA,          "glm-dsa"          },
    { LLM_ARCH_BITNET,           "bitnet"           },
    { LLM_ARCH_T5,               "t5"               },
    { LLM_ARCH_T5ENCODER,        "t5encoder"        },
    { LLM_ARCH_JAIS,             "jais"             },
    { LLM_ARCH_JAIS2,            "jais2"            },
    { LLM_ARCH_NEMOTRON,         "nemotron"         },
    { LLM_ARCH_NEMOTRON_H,       "nemotron_h"       },
    { LLM_ARCH_NEMOTRON_H_MOE,   "nemotron_h_moe"   },
    { LLM_ARCH_EXAONE,           "exaone"           },
    { LLM_ARCH_EXAONE4,          "exaone4"          },
    { LLM_ARCH_EXAONE_MOE,       "exaone-moe"       },
    { LLM_ARCH_RWKV6,            "rwkv6"            },
    { LLM_ARCH_RWKV6QWEN2,       "rwkv6qwen2"       },
    { LLM_ARCH_RWKV7,            "rwkv7"            },
    { LLM_ARCH_ARWKV7,           "arwkv7"           },
    { LLM_ARCH_GRANITE,          "granite"          },
    { LLM_ARCH_GRANITE_MOE,      "granitemoe"       },
    { LLM_ARCH_GRANITE_HYBRID,   "granitehybrid"    },
    { LLM_ARCH_CHAMELEON,        "chameleon"        },
    { LLM_ARCH_WAVTOKENIZER_DEC, "wavtokenizer-dec" },
    { LLM_ARCH_PLM,              "plm"              },
    { LLM_ARCH_BAILINGMOE,       "bailingmoe"       },
    { LLM_ARCH_BAILINGMOE2,      "bailingmoe2"      },
    { LLM_ARCH_DOTS1,            "dots1"            },
    { LLM_ARCH_ARCEE,            "arcee"            },
    { LLM_ARCH_AFMOE,            "afmoe"            },
    { LLM_ARCH_ERNIE4_5,         "ernie4_5"         },
    { LLM_ARCH_ERNIE4_5_MOE,     "ernie4_5-moe"     },
    { LLM_ARCH_HUNYUAN_MOE,      "hunyuan-moe"      },
    { LLM_ARCH_HUNYUAN_DENSE,    "hunyuan-dense"    },
    { LLM_ARCH_SMOLLM3,          "smollm3"          },
    { LLM_ARCH_OPENAI_MOE,       "gpt-oss"          },
    { LLM_ARCH_LFM2,             "lfm2"             },
    { LLM_ARCH_LFM2MOE,          "lfm2moe"          },
    { LLM_ARCH_DREAM,            "dream"            },
    { LLM_ARCH_SMALLTHINKER,     "smallthinker"     },
    { LLM_ARCH_LLADA,            "llada"            },
    { LLM_ARCH_LLADA_MOE,        "llada-moe"        },
    { LLM_ARCH_SEED_OSS,         "seed_oss"         },
    { LLM_ARCH_GROVEMOE,         "grovemoe"         },
    { LLM_ARCH_APERTUS,          "apertus"          },
    { LLM_ARCH_MINIMAX_M2,       "minimax-m2"       },
    { LLM_ARCH_COGVLM,           "cogvlm"           },
    { LLM_ARCH_RND1,             "rnd1"             },
    { LLM_ARCH_PANGU_EMBED,      "pangu-embedded"   },
    { LLM_ARCH_MISTRAL3,         "mistral3"         },
    { LLM_ARCH_MISTRAL4,         "mistral4"         },
    { LLM_ARCH_PADDLEOCR,        "paddleocr"        },
    { LLM_ARCH_MIMO2,            "mimo2"            },
    { LLM_ARCH_STEP35,           "step35"           },
    { LLM_ARCH_LLAMA_EMBED,      "llama-embed"      },
    { LLM_ARCH_MAINCODER,        "maincoder"        },
    { LLM_ARCH_KIMI_LINEAR,      "kimi-linear"      },
    { LLM_ARCH_UNKNOWN,          "(unknown)"        },
};
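
// Resolving a GGUF "general.architecture" string back to its enum is a reverse
// scan of LLM_ARCH_NAMES. The helper below is a minimal illustrative sketch
// (upstream exposes a similar llm_arch_from_string); the name is hypothetical.
[[maybe_unused]] static llm_arch llm_arch_from_string_sketch(const std::string & name) {
    for (const auto & kv : LLM_ARCH_NAMES) {
        if (name == kv.second) {
            return kv.first; // e.g. "qwen3" -> LLM_ARCH_QWEN3
        }
    }
    return LLM_ARCH_UNKNOWN;
}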

static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
    { LLM_KV_GENERAL_TYPE,                     "general.type"                          },
    { LLM_KV_GENERAL_ARCHITECTURE,             "general.architecture"                  },
    { LLM_KV_GENERAL_QUANTIZATION_VERSION,     "general.quantization_version"          },
    { LLM_KV_GENERAL_ALIGNMENT,                "general.alignment"                     },
    { LLM_KV_GENERAL_FILE_TYPE,                "general.file_type"                     },
    { LLM_KV_GENERAL_SAMPLING_SEQUENCE,        "general.sampling.sequence"             },
    { LLM_KV_GENERAL_SAMPLING_TOP_K,           "general.sampling.top_k"                },
    { LLM_KV_GENERAL_SAMPLING_TOP_P,           "general.sampling.top_p"                },
    { LLM_KV_GENERAL_SAMPLING_MIN_P,           "general.sampling.min_p"                },
    { LLM_KV_GENERAL_SAMPLING_XTC_PROBABILITY, "general.sampling.xtc_probability"      },
    { LLM_KV_GENERAL_SAMPLING_XTC_THRESHOLD,   "general.sampling.xtc_threshold"        },
    { LLM_KV_GENERAL_SAMPLING_TEMP,            "general.sampling.temp"                 },
    { LLM_KV_GENERAL_SAMPLING_PENALTY_LAST_N,  "general.sampling.penalty_last_n"       },
    { LLM_KV_GENERAL_SAMPLING_PENALTY_REPEAT,  "general.sampling.penalty_repeat"       },
    { LLM_KV_GENERAL_SAMPLING_MIROSTAT,        "general.sampling.mirostat"             },
    { LLM_KV_GENERAL_SAMPLING_MIROSTAT_TAU,    "general.sampling.mirostat_tau"         },
    { LLM_KV_GENERAL_SAMPLING_MIROSTAT_ETA,    "general.sampling.mirostat_eta"         },
    { LLM_KV_GENERAL_NAME,                     "general.name"                          },
    { LLM_KV_GENERAL_AUTHOR,                   "general.author"                        },
    { LLM_KV_GENERAL_VERSION,                  "general.version"                       },
    { LLM_KV_GENERAL_URL,                      "general.url"                           },
    { LLM_KV_GENERAL_DESCRIPTION,              "general.description"                   },
    { LLM_KV_GENERAL_LICENSE,                  "general.license"                       },
    { LLM_KV_GENERAL_SOURCE_URL,               "general.source.url"                    },
    { LLM_KV_GENERAL_SOURCE_HF_REPO,           "general.source.huggingface.repository" },

    { LLM_KV_VOCAB_SIZE,                        "%s.vocab_size"                        },
    { LLM_KV_CONTEXT_LENGTH,                    "%s.context_length"                    },
    { LLM_KV_EMBEDDING_LENGTH,                  "%s.embedding_length"                  },
    { LLM_KV_EMBEDDING_LENGTH_OUT,              "%s.embedding_length_out"              },
    { LLM_KV_EMBEDDING_LENGTH_PER_LAYER,        "%s.embedding_length_per_layer_input"  },
    { LLM_KV_FEATURES_LENGTH,                   "%s.features_length"                   },
    { LLM_KV_BLOCK_COUNT,                       "%s.block_count"                       },
    { LLM_KV_LEADING_DENSE_BLOCK_COUNT,         "%s.leading_dense_block_count"         },
    { LLM_KV_FEED_FORWARD_LENGTH,               "%s.feed_forward_length"               },
    { LLM_KV_EXPERT_FEED_FORWARD_LENGTH,        "%s.expert_feed_forward_length"        },
    { LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, "%s.expert_shared_feed_forward_length" },
    { LLM_KV_EXPERT_CHUNK_FEED_FORWARD_LENGTH,  "%s.expert_chunk_feed_forward_length"  },
    { LLM_KV_SWIGLU_CLAMP_EXP,                  "%s.swiglu_clamp_exp"                  },
    { LLM_KV_SWIGLU_CLAMP_SHEXP,                "%s.swiglu_clamp_shexp"                },
    { LLM_KV_USE_PARALLEL_RESIDUAL,             "%s.use_parallel_residual"             },
    { LLM_KV_TENSOR_DATA_LAYOUT,                "%s.tensor_data_layout"                },
    { LLM_KV_EXPERT_COUNT,                      "%s.expert_count"                      },
    { LLM_KV_EXPERT_USED_COUNT,                 "%s.expert_used_count"                 },
    { LLM_KV_EXPERT_SHARED_COUNT,               "%s.expert_shared_count"               },
    { LLM_KV_EXPERT_GROUP_COUNT,                "%s.expert_group_count"                },
    { LLM_KV_EXPERT_GROUP_USED_COUNT,           "%s.expert_group_used_count"           },
    { LLM_KV_EXPERT_WEIGHTS_SCALE,              "%s.expert_weights_scale"              },
    { LLM_KV_EXPERT_WEIGHTS_NORM,               "%s.expert_weights_norm"               },
    { LLM_KV_EXPERT_GATING_FUNC,                "%s.expert_gating_func"                },
    { LLM_KV_EXPERT_GROUP_SCALE,                "%s.expert_group_scale"                },
    { LLM_KV_EXPERTS_PER_GROUP,                 "%s.experts_per_group"                 },
    { LLM_KV_MOE_EVERY_N_LAYERS,                "%s.moe_every_n_layers"                },
    { LLM_KV_MOE_LATENT_SIZE,                   "%s.moe_latent_size"                   },
    { LLM_KV_NEXTN_PREDICT_LAYERS,              "%s.nextn_predict_layers"              },
    { LLM_KV_NUM_DEEPSTACK_LAYERS,              "%s.n_deepstack_layers"                },
    { LLM_KV_POOLING_TYPE,                      "%s.pooling_type"                      },
    { LLM_KV_LOGIT_SCALE,                       "%s.logit_scale"                       },
    { LLM_KV_DECODER_START_TOKEN_ID,            "%s.decoder_start_token_id"            },
    { LLM_KV_DECODER_BLOCK_COUNT,               "%s.decoder_block_count"               },
    { LLM_KV_ATTN_LOGIT_SOFTCAPPING,            "%s.attn_logit_softcapping"            },
    { LLM_KV_ROUTER_LOGIT_SOFTCAPPING,          "%s.router_logit_softcapping"          },
    { LLM_KV_FINAL_LOGIT_SOFTCAPPING,           "%s.final_logit_softcapping"           },
    { LLM_KV_SWIN_NORM,                         "%s.swin_norm"                         },
    { LLM_KV_RESCALE_EVERY_N_LAYERS,            "%s.rescale_every_n_layers"            },
    { LLM_KV_TIME_MIX_EXTRA_DIM,                "%s.time_mix_extra_dim"                },
    { LLM_KV_TIME_DECAY_EXTRA_DIM,              "%s.time_decay_extra_dim"              },
    { LLM_KV_RESIDUAL_SCALE,                    "%s.residual_scale"                    },
    { LLM_KV_EMBEDDING_SCALE,                   "%s.embedding_scale"                   },
    { LLM_KV_TOKEN_SHIFT_COUNT,                 "%s.token_shift_count"                 },
    { LLM_KV_INTERLEAVE_MOE_LAYER_STEP,         "%s.interleave_moe_layer_step"         },
    { LLM_KV_FULL_ATTENTION_INTERVAL,           "%s.full_attention_interval"           },

    { LLM_KV_ATTENTION_HEAD_COUNT,                   "%s.attention.head_count"                   },
    { LLM_KV_ATTENTION_HEAD_COUNT_KV,                "%s.attention.head_count_kv"                },
    { LLM_KV_ATTENTION_MAX_ALIBI_BIAS,               "%s.attention.max_alibi_bias"               },
    { LLM_KV_ATTENTION_CLAMP_KQV,                    "%s.attention.clamp_kqv"                    },
    { LLM_KV_ATTENTION_KEY_LENGTH,                   "%s.attention.key_length"                   },
    { LLM_KV_ATTENTION_VALUE_LENGTH,                 "%s.attention.value_length"                 },
    { LLM_KV_ATTENTION_LAYERNORM_EPS,                "%s.attention.layer_norm_epsilon"           },
    { LLM_KV_ATTENTION_LAYERNORM_RMS_EPS,            "%s.attention.layer_norm_rms_epsilon"       },
    { LLM_KV_ATTENTION_GROUPNORM_EPS,                "%s.attention.group_norm_epsilon"           },
    { LLM_KV_ATTENTION_GROUPNORM_GROUPS,             "%s.attention.group_norm_groups"            },
    { LLM_KV_ATTENTION_CAUSAL,                       "%s.attention.causal"                       },
    { LLM_KV_ATTENTION_Q_LORA_RANK,                  "%s.attention.q_lora_rank"                  },
    { LLM_KV_ATTENTION_KV_LORA_RANK,                 "%s.attention.kv_lora_rank"                 },
    { LLM_KV_ATTENTION_DECAY_LORA_RANK,              "%s.attention.decay_lora_rank"              },
    { LLM_KV_ATTENTION_ICLR_LORA_RANK,               "%s.attention.iclr_lora_rank"               },
    { LLM_KV_ATTENTION_VALUE_RESIDUAL_MIX_LORA_RANK, "%s.attention.value_residual_mix_lora_rank" },
    { LLM_KV_ATTENTION_GATE_LORA_RANK,               "%s.attention.gate_lora_rank"               },
    { LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT,       "%s.attention.relative_buckets_count"       },
    { LLM_KV_ATTENTION_SLIDING_WINDOW,               "%s.attention.sliding_window"               },
    { LLM_KV_ATTENTION_SLIDING_WINDOW_PATTERN,       "%s.attention.sliding_window_pattern"       },
    { LLM_KV_ATTENTION_SCALE,                        "%s.attention.scale"                        },
    { LLM_KV_ATTENTION_OUTPUT_SCALE,                 "%s.attention.output_scale"                 },
    { LLM_KV_ATTENTION_TEMPERATURE_LENGTH,           "%s.attention.temperature_length"           },
    { LLM_KV_ATTENTION_TEMPERATURE_SCALE,            "%s.attention.temperature_scale"            },
    { LLM_KV_ATTENTION_KEY_LENGTH_MLA,               "%s.attention.key_length_mla"               },
    { LLM_KV_ATTENTION_VALUE_LENGTH_MLA,             "%s.attention.value_length_mla"             },
    { LLM_KV_ATTENTION_KEY_LENGTH_SWA,               "%s.attention.key_length_swa"               },
    { LLM_KV_ATTENTION_VALUE_LENGTH_SWA,             "%s.attention.value_length_swa"             },
    { LLM_KV_ATTENTION_INDEXER_HEAD_COUNT,           "%s.attention.indexer.head_count"           },
    { LLM_KV_ATTENTION_INDEXER_KEY_LENGTH,           "%s.attention.indexer.key_length"           },
    { LLM_KV_ATTENTION_INDEXER_TOP_K,                "%s.attention.indexer.top_k"                },
    { LLM_KV_ATTENTION_SHARED_KV_LAYERS,             "%s.attention.shared_kv_layers"             },

    { LLM_KV_ROPE_DIMENSION_COUNT,           "%s.rope.dimension_count"                 },
    { LLM_KV_ROPE_DIMENSION_COUNT_SWA,       "%s.rope.dimension_count_swa"             },
    { LLM_KV_ROPE_DIMENSION_SECTIONS,        "%s.rope.dimension_sections"              },
    { LLM_KV_ROPE_FREQ_BASE,                 "%s.rope.freq_base"                       },
    { LLM_KV_ROPE_FREQ_BASE_SWA,             "%s.rope.freq_base_swa"                   },
    { LLM_KV_ROPE_SCALE_LINEAR,              "%s.rope.scale_linear"                    },
    { LLM_KV_ROPE_SCALING_TYPE,              "%s.rope.scaling.type"                    },
    { LLM_KV_ROPE_SCALING_FACTOR,            "%s.rope.scaling.factor"                  },
    { LLM_KV_ROPE_SCALING_ATTN_FACTOR,       "%s.rope.scaling.attn_factor"             },
    { LLM_KV_ROPE_SCALING_ORIG_CTX_LEN,      "%s.rope.scaling.original_context_length" },
    { LLM_KV_ROPE_SCALING_FINETUNED,         "%s.rope.scaling.finetuned"               },
    { LLM_KV_ROPE_SCALING_YARN_LOG_MUL,      "%s.rope.scaling.yarn_log_multiplier"     },
    { LLM_KV_ROPE_SCALING_YARN_EXT_FACTOR,   "%s.rope.scaling.yarn_ext_factor"         },
    { LLM_KV_ROPE_SCALING_YARN_ATTN_FACTOR,  "%s.rope.scaling.yarn_attn_factor"        },
    { LLM_KV_ROPE_SCALING_YARN_BETA_FAST,    "%s.rope.scaling.yarn_beta_fast"          },
    { LLM_KV_ROPE_SCALING_YARN_BETA_SLOW,    "%s.rope.scaling.yarn_beta_slow"          },

    { LLM_KV_SPLIT_NO,            "split.no"            },
    { LLM_KV_SPLIT_COUNT,         "split.count"         },
    { LLM_KV_SPLIT_TENSORS_COUNT, "split.tensors.count" },

    { LLM_KV_SSM_CONV_KERNEL,    "%s.ssm.conv_kernel"    },
    { LLM_KV_SSM_INNER_SIZE,     "%s.ssm.inner_size"     },
    { LLM_KV_SSM_STATE_SIZE,     "%s.ssm.state_size"     },
    { LLM_KV_SSM_TIME_STEP_RANK, "%s.ssm.time_step_rank" },
    { LLM_KV_SSM_GROUP_COUNT,    "%s.ssm.group_count"    },
    { LLM_KV_SSM_DT_B_C_RMS,     "%s.ssm.dt_b_c_rms"     },

    { LLM_KV_KDA_HEAD_DIM, "%s.kda.head_dim" },

    { LLM_KV_WKV_HEAD_SIZE, "%s.wkv.head_size" },

    { LLM_KV_POSNET_EMBEDDING_LENGTH, "%s.posnet.embedding_length" },
    { LLM_KV_POSNET_BLOCK_COUNT,      "%s.posnet.block_count"      },

    { LLM_KV_CONVNEXT_EMBEDDING_LENGTH, "%s.convnext.embedding_length" },
    { LLM_KV_CONVNEXT_BLOCK_COUNT,      "%s.convnext.block_count"      },

    { LLM_KV_CLASSIFIER_OUTPUT_LABELS, "%s.classifier.output_labels" },

    { LLM_KV_SHORTCONV_L_CACHE, "%s.shortconv.l_cache" },

    // sentence-transformers dense modules feature dims
    { LLM_KV_DENSE_2_FEAT_IN,        "%s.dense_2_feat_in"   },
    { LLM_KV_DENSE_2_FEAT_OUT,       "%s.dense_2_feat_out"  },
    { LLM_KV_DENSE_3_FEAT_IN,        "%s.dense_3_feat_in"   },
    { LLM_KV_DENSE_3_FEAT_OUT,       "%s.dense_3_feat_out"  },

    { LLM_KV_TOKENIZER_MODEL,                "tokenizer.ggml.model"                    },
    { LLM_KV_TOKENIZER_PRE,                  "tokenizer.ggml.pre"                      },
    { LLM_KV_TOKENIZER_LIST,                 "tokenizer.ggml.tokens"                   },
    { LLM_KV_TOKENIZER_TOKEN_TYPE,           "tokenizer.ggml.token_type"               },
    { LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT,     "tokenizer.ggml.token_type_count"         },
    { LLM_KV_TOKENIZER_SCORES,               "tokenizer.ggml.scores"                   },
    { LLM_KV_TOKENIZER_MERGES,               "tokenizer.ggml.merges"                   },
    { LLM_KV_TOKENIZER_BOS_ID,               "tokenizer.ggml.bos_token_id"             },
    { LLM_KV_TOKENIZER_EOS_ID,               "tokenizer.ggml.eos_token_id"             },
    { LLM_KV_TOKENIZER_EOT_ID,               "tokenizer.ggml.eot_token_id"             },
    { LLM_KV_TOKENIZER_EOM_ID,               "tokenizer.ggml.eom_token_id"             },
    { LLM_KV_TOKENIZER_UNK_ID,               "tokenizer.ggml.unknown_token_id"         },
    { LLM_KV_TOKENIZER_SEP_ID,               "tokenizer.ggml.seperator_token_id"       }, // "seperator" misspelling is the established key name; kept for compatibility
    { LLM_KV_TOKENIZER_PAD_ID,               "tokenizer.ggml.padding_token_id"         },
    { LLM_KV_TOKENIZER_CLS_ID,               "tokenizer.ggml.cls_token_id"             },
    { LLM_KV_TOKENIZER_MASK_ID,              "tokenizer.ggml.mask_token_id"            },
    { LLM_KV_TOKENIZER_ADD_BOS,              "tokenizer.ggml.add_bos_token"            },
    { LLM_KV_TOKENIZER_ADD_EOS,              "tokenizer.ggml.add_eos_token"            },
    { LLM_KV_TOKENIZER_ADD_SEP,              "tokenizer.ggml.add_sep_token"            },
    { LLM_KV_TOKENIZER_ADD_PREFIX,           "tokenizer.ggml.add_space_prefix"         },
    { LLM_KV_TOKENIZER_REMOVE_EXTRA_WS,      "tokenizer.ggml.remove_extra_whitespaces" },
    { LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, "tokenizer.ggml.precompiled_charsmap"     },
    { LLM_KV_TOKENIZER_HF_JSON,              "tokenizer.huggingface.json"              },
    { LLM_KV_TOKENIZER_RWKV,                 "tokenizer.rwkv.world"                    },
    { LLM_KV_TOKENIZER_CHAT_TEMPLATE,        "tokenizer.chat_template"                 },
    { LLM_KV_TOKENIZER_FIM_PRE_ID,           "tokenizer.ggml.fim_pre_token_id"         },
    { LLM_KV_TOKENIZER_FIM_SUF_ID,           "tokenizer.ggml.fim_suf_token_id"         },
    { LLM_KV_TOKENIZER_FIM_MID_ID,           "tokenizer.ggml.fim_mid_token_id"         },
    { LLM_KV_TOKENIZER_FIM_PAD_ID,           "tokenizer.ggml.fim_pad_token_id"         },
    { LLM_KV_TOKENIZER_FIM_REP_ID,           "tokenizer.ggml.fim_rep_token_id"         },
    { LLM_KV_TOKENIZER_FIM_SEP_ID,           "tokenizer.ggml.fim_sep_token_id"         },

    { LLM_KV_ADAPTER_TYPE,                    "adapter.type"               },
    { LLM_KV_ADAPTER_LORA_ALPHA,              "adapter.lora.alpha"         },
    { LLM_KV_ADAPTER_LORA_TASK_NAME,          "adapter.lora.task_name"     },
    { LLM_KV_ADAPTER_LORA_PROMPT_PREFIX,      "adapter.lora.prompt_prefix" },
    { LLM_KV_ADAPTER_ALORA_INVOCATION_TOKENS, "adapter.alora.invocation_tokens" },

    { LLM_KV_XIELU_ALPHA_N,         "xielu.alpha_n"         },
    { LLM_KV_XIELU_ALPHA_P,         "xielu.alpha_p"         },
    { LLM_KV_XIELU_BETA,            "xielu.beta"            },
    { LLM_KV_XIELU_EPS,             "xielu.eps"             },

    // deprecated
    { LLM_KV_TOKENIZER_PREFIX_ID, "tokenizer.ggml.prefix_token_id" },
    { LLM_KV_TOKENIZER_SUFFIX_ID, "tokenizer.ggml.suffix_token_id" },
    { LLM_KV_TOKENIZER_MIDDLE_ID, "tokenizer.ggml.middle_token_id" },
};
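
// Most keys above embed a "%s" placeholder that is substituted with the
// architecture name, e.g. LLM_KV_CONTEXT_LENGTH under LLM_ARCH_LLAMA yields
// "llama.context_length". Keys without a placeholder (general.*, tokenizer.*,
// split.*) pass through unchanged because printf-style formatting ignores the
// surplus argument. A minimal sketch, assuming a 256-byte buffer is enough;
// upstream wraps this in the LLM_KV functor and the name here is hypothetical.
[[maybe_unused]] static std::string llm_kv_name_sketch(llm_kv kv, llm_arch arch) {
    char buf[256];
    snprintf(buf, sizeof(buf), LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
    return buf;
}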

static const std::map<llm_tensor, const char *> LLM_TENSOR_NAMES = {
    { LLM_TENSOR_TOKEN_EMBD,                             "token_embd" },
    { LLM_TENSOR_OUTPUT_NORM,                            "output_norm" },
    { LLM_TENSOR_OUTPUT_NORM_LFM2,                       "token_embd_norm" }, // LFM2 GGUFs store the output norm under this (wrong) name; kept for compatibility
    { LLM_TENSOR_OUTPUT,                                 "output" },
    { LLM_TENSOR_ROPE_FREQS,                             "rope_freqs" },
    { LLM_TENSOR_ATTN_NORM,                              "blk.%d.attn_norm" },
    { LLM_TENSOR_ATTN_Q,                                 "blk.%d.attn_q" },
    { LLM_TENSOR_ATTN_K,                                 "blk.%d.attn_k" },
    { LLM_TENSOR_ATTN_V,                                 "blk.%d.attn_v" },
    { LLM_TENSOR_ATTN_OUT,                               "blk.%d.attn_output" },
    { LLM_TENSOR_ATTN_ROT_EMBD,                          "blk.%d.attn_rot_embd" },
    { LLM_TENSOR_FFN_GATE_INP,                           "blk.%d.ffn_gate_inp" },
    { LLM_TENSOR_FFN_NORM,                               "blk.%d.ffn_norm" },
    { LLM_TENSOR_FFN_GATE,                               "blk.%d.ffn_gate" },
    { LLM_TENSOR_FFN_DOWN,                               "blk.%d.ffn_down" },
    { LLM_TENSOR_FFN_UP,                                 "blk.%d.ffn_up" },
    { LLM_TENSOR_FFN_GATE_EXP,                           "blk.%d.ffn_gate.%d" },
    { LLM_TENSOR_FFN_DOWN_EXP,                           "blk.%d.ffn_down.%d" },
    { LLM_TENSOR_FFN_UP_EXP,                             "blk.%d.ffn_up.%d" },
    { LLM_TENSOR_FFN_GATE_EXPS,                          "blk.%d.ffn_gate_exps" },
    { LLM_TENSOR_FFN_GATE_UP_EXPS,                       "blk.%d.ffn_gate_up_exps" },
    { LLM_TENSOR_FFN_DOWN_EXPS,                          "blk.%d.ffn_down_exps" },
    { LLM_TENSOR_FFN_UP_EXPS,                            "blk.%d.ffn_up_exps" },
    { LLM_TENSOR_ATTN_POST_NORM,                         "blk.%d.post_attention_norm" },
    { LLM_TENSOR_ATTN_Q_NORM,                            "blk.%d.attn_q_norm" },
    { LLM_TENSOR_ATTN_K_NORM,                            "blk.%d.attn_k_norm" },
    { LLM_TENSOR_ATTN_GATE,                              "blk.%d.attn_gate" },
    { LLM_TENSOR_FFN_POST_NORM,                          "blk.%d.post_ffw_norm" },
    { LLM_TENSOR_FFN_POST_NORM_1,                        "blk.%d.post_ffw_norm_1" },
    { LLM_TENSOR_FFN_POST_NORM_2,                        "blk.%d.post_ffw_norm_2" },
    { LLM_TENSOR_FFN_PRE_NORM_2,                         "blk.%d.pre_ffw_norm_2" },
    { LLM_TENSOR_FFN_GATE_SHEXP,                         "blk.%d.ffn_gate_shexp" },
    { LLM_TENSOR_FFN_UP_SHEXP,                           "blk.%d.ffn_up_shexp" },
    { LLM_TENSOR_FFN_DOWN_SHEXP,                         "blk.%d.ffn_down_shexp" },
    { LLM_TENSOR_FFN_EXP_PROBS_B,                        "blk.%d.exp_probs_b" },
    { LLM_TENSOR_FFN_LATENT_DOWN,                        "blk.%d.ffn_latent_down" },
    { LLM_TENSOR_FFN_LATENT_UP,                          "blk.%d.ffn_latent_up" },
    { LLM_TENSOR_ATTN_NORM_2,                            "blk.%d.attn_norm_2" },
    { LLM_TENSOR_ATTN_QKV,                               "blk.%d.attn_qkv" },
    { LLM_TENSOR_LAYER_OUT_NORM,                         "blk.%d.layer_output_norm" },
    { LLM_TENSOR_LAYER_OUT_SCALE,                        "blk.%d.layer_output_scale" },
    { LLM_TENSOR_ATTN_OUT_NORM,                          "blk.%d.attn_output_norm" },
    { LLM_TENSOR_POS_EMBD,                               "position_embd" },
    { LLM_TENSOR_FFN_ACT,                                "blk.%d.ffn.act" },
    { LLM_TENSOR_TOKEN_EMBD_NORM,                        "token_embd_norm" },
    { LLM_TENSOR_TOKEN_TYPES,                            "token_types" },
    { LLM_TENSOR_CLS,                                    "cls" },
    { LLM_TENSOR_CLS_OUT,                                "cls.output" },
    { LLM_TENSOR_CLS_NORM,                               "cls.norm" },
    { LLM_TENSOR_ENC_OUTPUT_NORM,                        "enc.output_norm" },
    { LLM_TENSOR_FFN_GATE_INP_SHEXP,                     "blk.%d.ffn_gate_inp_shexp" },
    { LLM_TENSOR_SSM_A_NOSCAN,                           "blk.%d.ssm_a" },
    { LLM_TENSOR_SSM_CONV1D,                             "blk.%d.ssm_conv1d" },
    { LLM_TENSOR_SSM_DT,                                 "blk.%d.ssm_dt" },
    { LLM_TENSOR_SSM_BETA_ALPHA,                         "blk.%d.ssm_ba" },
    { LLM_TENSOR_SSM_ALPHA,                              "blk.%d.ssm_alpha" },
    { LLM_TENSOR_SSM_IN,                                 "blk.%d.ssm_in" },
    { LLM_TENSOR_SSM_NORM,                               "blk.%d.ssm_norm" },
    { LLM_TENSOR_SSM_OUT,                                "blk.%d.ssm_out" },
    { LLM_TENSOR_ROPE_FACTORS_LONG,                      "rope_factors_long" },
    { LLM_TENSOR_ROPE_FACTORS_SHORT,                     "rope_factors_short" },
    { LLM_TENSOR_SSM_X,                                  "blk.%d.ssm_x" },
    { LLM_TENSOR_SSM_A,                                  "blk.%d.ssm_a" },
    { LLM_TENSOR_SSM_D,                                  "blk.%d.ssm_d" },
    { LLM_TENSOR_SSM_DT_NORM,                            "blk.%d.ssm_dt_norm" },
    { LLM_TENSOR_SSM_B_NORM,                             "blk.%d.ssm_b_norm" },
    { LLM_TENSOR_SSM_C_NORM,                             "blk.%d.ssm_c_norm" },
    { LLM_TENSOR_SSM_CONV1D_Q,                           "blk.%d.ssm_conv1d_q" },
    { LLM_TENSOR_SSM_CONV1D_K,                           "blk.%d.ssm_conv1d_k" },
    { LLM_TENSOR_SSM_CONV1D_V,                           "blk.%d.ssm_conv1d_v" },
    { LLM_TENSOR_SSM_F_A,                                "blk.%d.ssm_f_a" },
    { LLM_TENSOR_SSM_F_B,                                "blk.%d.ssm_f_b" },
    { LLM_TENSOR_SSM_BETA,                               "blk.%d.ssm_beta" },
    { LLM_TENSOR_SSM_G_A,                                "blk.%d.ssm_g_a" },
    { LLM_TENSOR_SSM_G_B,                                "blk.%d.ssm_g_b" },
    { LLM_TENSOR_ATTN_Q_A_NORM,                          "blk.%d.attn_q_a_norm" },
    { LLM_TENSOR_ATTN_KV_A_NORM,                         "blk.%d.attn_kv_a_norm" },
    { LLM_TENSOR_ATTN_Q_A,                               "blk.%d.attn_q_a" },
    { LLM_TENSOR_ATTN_Q_B,                               "blk.%d.attn_q_b" },
    { LLM_TENSOR_ATTN_KV_A_MQA,                          "blk.%d.attn_kv_a_mqa" },
    { LLM_TENSOR_ATTN_KV_B,                              "blk.%d.attn_kv_b" },
    { LLM_TENSOR_PER_LAYER_TOKEN_EMBD,                   "per_layer_token_embd" },
    { LLM_TENSOR_PER_LAYER_MODEL_PROJ,                   "per_layer_model_proj" },
    { LLM_TENSOR_PER_LAYER_PROJ_NORM,                    "per_layer_proj_norm" },
    { LLM_TENSOR_ALTUP_UNEMBD_PROJ,                      "altup_unembd_proj" },
    { LLM_TENSOR_ALTUP_PROJ,                             "altup_proj" },
    { LLM_TENSOR_PER_LAYER_INP_GATE,                     "blk.%d.inp_gate" },
    { LLM_TENSOR_PER_LAYER_PROJ,                         "blk.%d.proj" },
    { LLM_TENSOR_PER_LAYER_POST_NORM,                    "blk.%d.post_norm" },
    { LLM_TENSOR_ALTUP_CORRECT_COEF,                     "blk.%d.altup_correct_coef" },
    { LLM_TENSOR_ALTUP_CORRECT_SCALE,                    "blk.%d.altup_correct_scale" },
    { LLM_TENSOR_ALTUP_PREDICT_COEF,                     "blk.%d.altup_predict_coef" },
    { LLM_TENSOR_ALTUP_ROUTER,                           "blk.%d.altup_router" },
    { LLM_TENSOR_ALTUP_ROUTER_NORM,                      "blk.%d.altup_router_norm" },
    { LLM_TENSOR_LAUREL_L,                               "blk.%d.laurel_l" },
    { LLM_TENSOR_LAUREL_R,                               "blk.%d.laurel_r" },
    { LLM_TENSOR_LAUREL_POST_NORM,                       "blk.%d.laurel_post_norm" },
    { LLM_TENSOR_DENSE_2_OUT,                            "dense_2" },
    { LLM_TENSOR_DENSE_3_OUT,                            "dense_3" },
    { LLM_TENSOR_FFN_NORM_EXPS,                          "blk.%d.ffn_norm_exps" },
    { LLM_TENSOR_ATTN_K_B,                               "blk.%d.attn_k_b" },
    { LLM_TENSOR_ATTN_V_B,                               "blk.%d.attn_v_b" },
    { LLM_TENSOR_NEXTN_EH_PROJ,                          "blk.%d.nextn.eh_proj" },
    { LLM_TENSOR_NEXTN_EMBED_TOKENS,                     "blk.%d.nextn.embed_tokens" },
    { LLM_TENSOR_NEXTN_ENORM,                            "blk.%d.nextn.enorm" },
    { LLM_TENSOR_NEXTN_HNORM,                            "blk.%d.nextn.hnorm" },
    { LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,                 "blk.%d.nextn.shared_head_head" },
    { LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,                 "blk.%d.nextn.shared_head_norm" },
    { LLM_TENSOR_ATTN_SUB_NORM,                          "blk.%d.attn_sub_norm" },
    { LLM_TENSOR_FFN_SUB_NORM,                           "blk.%d.ffn_sub_norm" },
    { LLM_TENSOR_DEC_OUTPUT_NORM,                        "dec.output_norm" },
    { LLM_TENSOR_DEC_ATTN_NORM,                          "dec.blk.%d.attn_norm" },
    { LLM_TENSOR_DEC_ATTN_Q,                             "dec.blk.%d.attn_q" },
    { LLM_TENSOR_DEC_ATTN_K,                             "dec.blk.%d.attn_k" },
    { LLM_TENSOR_DEC_ATTN_V,                             "dec.blk.%d.attn_v" },
    { LLM_TENSOR_DEC_ATTN_OUT,                           "dec.blk.%d.attn_o" },
    { LLM_TENSOR_DEC_ATTN_REL_B,                         "dec.blk.%d.attn_rel_b" },
    { LLM_TENSOR_DEC_CROSS_ATTN_NORM,                    "dec.blk.%d.cross_attn_norm" },
    { LLM_TENSOR_DEC_CROSS_ATTN_Q,                       "dec.blk.%d.cross_attn_q" },
    { LLM_TENSOR_DEC_CROSS_ATTN_K,                       "dec.blk.%d.cross_attn_k" },
    { LLM_TENSOR_DEC_CROSS_ATTN_V,                       "dec.blk.%d.cross_attn_v" },
    { LLM_TENSOR_DEC_CROSS_ATTN_OUT,                     "dec.blk.%d.cross_attn_o" },
    { LLM_TENSOR_DEC_CROSS_ATTN_REL_B,                   "dec.blk.%d.cross_attn_rel_b" },
    { LLM_TENSOR_DEC_FFN_NORM,                           "dec.blk.%d.ffn_norm" },
    { LLM_TENSOR_DEC_FFN_GATE,                           "dec.blk.%d.ffn_gate" },
    { LLM_TENSOR_DEC_FFN_DOWN,                           "dec.blk.%d.ffn_down" },
    { LLM_TENSOR_DEC_FFN_UP,                             "dec.blk.%d.ffn_up" },
    { LLM_TENSOR_ENC_ATTN_NORM,                          "enc.blk.%d.attn_norm" },
    { LLM_TENSOR_ENC_ATTN_Q,                             "enc.blk.%d.attn_q" },
    { LLM_TENSOR_ENC_ATTN_K,                             "enc.blk.%d.attn_k" },
    { LLM_TENSOR_ENC_ATTN_V,                             "enc.blk.%d.attn_v" },
    { LLM_TENSOR_ENC_ATTN_OUT,                           "enc.blk.%d.attn_o" },
    { LLM_TENSOR_ENC_ATTN_REL_B,                         "enc.blk.%d.attn_rel_b" },
    { LLM_TENSOR_ENC_FFN_NORM,                           "enc.blk.%d.ffn_norm" },
    { LLM_TENSOR_ENC_FFN_GATE,                           "enc.blk.%d.ffn_gate" },
    { LLM_TENSOR_ENC_FFN_DOWN,                           "enc.blk.%d.ffn_down" },
    { LLM_TENSOR_ENC_FFN_UP,                             "enc.blk.%d.ffn_up" },
    { LLM_TENSOR_TIME_MIX_W1,                            "blk.%d.time_mix_w1" },
    { LLM_TENSOR_TIME_MIX_W2,                            "blk.%d.time_mix_w2" },
    { LLM_TENSOR_TIME_MIX_LERP_X,                        "blk.%d.time_mix_lerp_x" },
    { LLM_TENSOR_TIME_MIX_LERP_W,                        "blk.%d.time_mix_lerp_w" },
    { LLM_TENSOR_TIME_MIX_LERP_K,                        "blk.%d.time_mix_lerp_k" },
    { LLM_TENSOR_TIME_MIX_LERP_V,                        "blk.%d.time_mix_lerp_v" },
    { LLM_TENSOR_TIME_MIX_LERP_R,                        "blk.%d.time_mix_lerp_r" },
    { LLM_TENSOR_TIME_MIX_LERP_G,                        "blk.%d.time_mix_lerp_g" },
    { LLM_TENSOR_TIME_MIX_LERP_FUSED,                    "blk.%d.time_mix_lerp_fused" },
    { LLM_TENSOR_TIME_MIX_FIRST,                         "blk.%d.time_mix_first" },
    { LLM_TENSOR_TIME_MIX_DECAY,                         "blk.%d.time_mix_decay" },
    { LLM_TENSOR_TIME_MIX_DECAY_W1,                      "blk.%d.time_mix_decay_w1" },
    { LLM_TENSOR_TIME_MIX_DECAY_W2,                      "blk.%d.time_mix_decay_w2" },
    { LLM_TENSOR_TIME_MIX_KEY,                           "blk.%d.time_mix_key" },
    { LLM_TENSOR_TIME_MIX_VALUE,                         "blk.%d.time_mix_value" },
    { LLM_TENSOR_TIME_MIX_RECEPTANCE,                    "blk.%d.time_mix_receptance" },
    { LLM_TENSOR_TIME_MIX_GATE,                          "blk.%d.time_mix_gate" },
    { LLM_TENSOR_TIME_MIX_LN,                            "blk.%d.time_mix_ln" },
    { LLM_TENSOR_TIME_MIX_OUTPUT,                        "blk.%d.time_mix_output" },
    { LLM_TENSOR_CHANNEL_MIX_LERP_K,                     "blk.%d.channel_mix_lerp_k" },
    { LLM_TENSOR_CHANNEL_MIX_LERP_R,                     "blk.%d.channel_mix_lerp_r" },
    { LLM_TENSOR_CHANNEL_MIX_KEY,                        "blk.%d.channel_mix_key" },
    { LLM_TENSOR_CHANNEL_MIX_VALUE,                      "blk.%d.channel_mix_value" },
    { LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,                 "blk.%d.channel_mix_receptance" },
    { LLM_TENSOR_TIME_MIX_W0,                            "blk.%d.time_mix_w0" },
    { LLM_TENSOR_TIME_MIX_A0,                            "blk.%d.time_mix_a0" },
    { LLM_TENSOR_TIME_MIX_A1,                            "blk.%d.time_mix_a1" },
    { LLM_TENSOR_TIME_MIX_A2,                            "blk.%d.time_mix_a2" },
    { LLM_TENSOR_TIME_MIX_V0,                            "blk.%d.time_mix_v0" },
    { LLM_TENSOR_TIME_MIX_V1,                            "blk.%d.time_mix_v1" },
    { LLM_TENSOR_TIME_MIX_V2,                            "blk.%d.time_mix_v2" },
    { LLM_TENSOR_TIME_MIX_G1,                            "blk.%d.time_mix_g1" },
    { LLM_TENSOR_TIME_MIX_G2,                            "blk.%d.time_mix_g2" },
    { LLM_TENSOR_TIME_MIX_K_K,                           "blk.%d.time_mix_k_k" },
    { LLM_TENSOR_TIME_MIX_K_A,                           "blk.%d.time_mix_k_a" },
    { LLM_TENSOR_TIME_MIX_R_K,                           "blk.%d.time_mix_r_k" },
    { LLM_TENSOR_CONV1D,                                 "conv1d" },
    { LLM_TENSOR_CONVNEXT_DW,                            "convnext.%d.dw" },
    { LLM_TENSOR_CONVNEXT_NORM,                          "convnext.%d.norm" },
    { LLM_TENSOR_CONVNEXT_PW1,                           "convnext.%d.pw1" },
    { LLM_TENSOR_CONVNEXT_PW2,                           "convnext.%d.pw2" },
    { LLM_TENSOR_CONVNEXT_GAMMA,                         "convnext.%d.gamma" },
    { LLM_TENSOR_POS_NET_CONV1,                          "posnet.%d.conv1" },
    { LLM_TENSOR_POS_NET_CONV2,                          "posnet.%d.conv2" },
    { LLM_TENSOR_POS_NET_NORM,                           "posnet.%d.norm" },
    { LLM_TENSOR_POS_NET_NORM1,                          "posnet.%d.norm1" },
    { LLM_TENSOR_POS_NET_NORM2,                          "posnet.%d.norm2" },
    { LLM_TENSOR_POS_NET_ATTN_NORM,                      "posnet.%d.attn_norm" },
    { LLM_TENSOR_POS_NET_ATTN_Q,                         "posnet.%d.attn_q" },
    { LLM_TENSOR_POS_NET_ATTN_K,                         "posnet.%d.attn_k" },
    { LLM_TENSOR_POS_NET_ATTN_V,                         "posnet.%d.attn_v" },
    { LLM_TENSOR_POS_NET_ATTN_OUT,                       "posnet.%d.attn_output" },
    { LLM_TENSOR_ATTN_SINKS,                             "blk.%d.attn_sinks" },
    { LLM_TENSOR_SHORTCONV_CONV,                         "blk.%d.shortconv.conv" },
    { LLM_TENSOR_SHORTCONV_INPROJ,                       "blk.%d.shortconv.in_proj" },
    { LLM_TENSOR_SHORTCONV_OUTPROJ,                      "blk.%d.shortconv.out_proj" },
    { LLM_TENSOR_FFN_GATE_CHEXPS,                        "blk.%d.ffn_gate_chexps" },
    { LLM_TENSOR_FFN_DOWN_CHEXPS,                        "blk.%d.ffn_down_chexps" },
    { LLM_TENSOR_FFN_UP_CHEXPS,                          "blk.%d.ffn_up_chexps" },
    { LLM_TENSOR_VISEXP_ATTN_QKV,                        "blk.%d.vis_attn_qkv" },
    { LLM_TENSOR_VISEXP_ATTN_OUT,                        "blk.%d.vis_attn_output" },
    { LLM_TENSOR_VISEXP_FFN_GATE,                        "blk.%d.vis_gate" },
    { LLM_TENSOR_VISEXP_FFN_DOWN,                        "blk.%d.vis_down" },
    { LLM_TENSOR_VISEXP_FFN_UP,                          "blk.%d.vis_up" },
    { LLM_TENSOR_INDEXER_K_NORM,                         "blk.%d.indexer.k_norm" },
    { LLM_TENSOR_INDEXER_PROJ,                           "blk.%d.indexer.proj" },
    { LLM_TENSOR_INDEXER_ATTN_K,                         "blk.%d.indexer.attn_k" },
    { LLM_TENSOR_INDEXER_ATTN_Q_B,                       "blk.%d.indexer.attn_q_b" },
};
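
// Per-layer tensor names embed "blk.%d", filled in with the block index, so
// LLM_TENSOR_ATTN_Q at layer 3 becomes "blk.3.attn_q". A minimal illustrative
// sketch (upstream routes this through the LLM_TN helpers; the name below is
// hypothetical). The per-expert variants carrying a second "%d", such as
// "blk.%d.ffn_gate.%d", would need an additional expert-index argument.
[[maybe_unused]] static std::string llm_tensor_name_sketch(llm_tensor tensor, int bid) {
    char buf[256];
    snprintf(buf, sizeof(buf), LLM_TENSOR_NAMES.at(tensor), bid);
    return buf;
}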

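// Returns the set of tensor types that a model of the given architecture may
// contain, so callers can map GGUF tensor names onto the tables above.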
static std::set<llm_tensor> llm_get_tensor_names(llm_arch arch) {
    switch (arch) {
        case LLM_ARCH_CLIP:
            return {};
        case LLM_ARCH_LLAMA:
        case LLM_ARCH_REFACT:
        case LLM_ARCH_MINICPM:
        case LLM_ARCH_GRANITE:
        case LLM_ARCH_GRANITE_MOE:
        case LLM_ARCH_DECI:
        case LLM_ARCH_MISTRAL3:
        case LLM_ARCH_LLAMA_EMBED:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ROPE_FACTORS_LONG,
                LLM_TENSOR_ROPE_FACTORS_SHORT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_ROT_EMBD,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_EXP,
                LLM_TENSOR_FFN_DOWN_EXP,
                LLM_TENSOR_FFN_UP_EXP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
            };
        case LLM_ARCH_ARCEE:
        case LLM_ARCH_STARCODER2:
        case LLM_ARCH_NEMOTRON:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_ROT_EMBD,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_AFMOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_GATE,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_POST_NORM,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_EXP_PROBS_B,
            };
        case LLM_ARCH_LLAMA4:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_ROT_EMBD,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_EXP,
                LLM_TENSOR_FFN_DOWN_EXP,
                LLM_TENSOR_FFN_UP_EXP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
            };
        case LLM_ARCH_BAICHUAN:
        case LLM_ARCH_ORION:
        case LLM_ARCH_XVERSE:
        case LLM_ARCH_EXAONE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_ROT_EMBD,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_FALCON:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_NORM_2,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_GROK:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_ROT_EMBD,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_EXP,
                LLM_TENSOR_FFN_DOWN_EXP,
                LLM_TENSOR_FFN_UP_EXP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_POST_NORM,
                LLM_TENSOR_LAYER_OUT_NORM,
                LLM_TENSOR_ATTN_OUT_NORM,
            };
        case LLM_ARCH_GPT2:
        case LLM_ARCH_STARCODER:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_POS_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_DOWN,
            };
        case LLM_ARCH_GPTNEOX:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_MPT:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_ACT,
                LLM_TENSOR_POS_EMBD,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
            };
        case LLM_ARCH_QWEN2:
        case LLM_ARCH_QWEN2VL:
        case LLM_ARCH_INTERNLM2:
        case LLM_ARCH_ERNIE4_5:
        case LLM_ARCH_PADDLEOCR:
        case LLM_ARCH_SMOLLM3:
        case LLM_ARCH_DREAM:
        case LLM_ARCH_LLADA:
        case LLM_ARCH_PANGU_EMBED:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_BERT:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_TOKEN_TYPES,
                LLM_TENSOR_POS_EMBD,
                LLM_TENSOR_ATTN_OUT_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_LAYER_OUT_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_CLS,
                LLM_TENSOR_CLS_OUT,
            };
        case LLM_ARCH_NOMIC_BERT:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_TOKEN_TYPES,
                LLM_TENSOR_ATTN_OUT_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_LAYER_OUT_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_NOMIC_BERT_MOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_TOKEN_TYPES,
                LLM_TENSOR_ATTN_OUT_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_LAYER_OUT_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
            };
        case LLM_ARCH_NEO_BERT:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_ENC_OUTPUT_NORM,
                LLM_TENSOR_CLS,
                LLM_TENSOR_CLS_OUT,
            };
        case LLM_ARCH_EUROBERT:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_DOWN,
            };
        case LLM_ARCH_MODERN_BERT:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_CLS,
                LLM_TENSOR_CLS_OUT,
                LLM_TENSOR_CLS_NORM,
            };
        case LLM_ARCH_JINA_BERT_V2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_TOKEN_TYPES,
                LLM_TENSOR_ATTN_NORM_2,
                LLM_TENSOR_ATTN_OUT_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_LAYER_OUT_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_CLS,
            };
        case LLM_ARCH_JINA_BERT_V3:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_TOKEN_TYPES,
                LLM_TENSOR_ATTN_OUT_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_LAYER_OUT_NORM,
            };
        case LLM_ARCH_BLOOM:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_DOWN,
            };
        case LLM_ARCH_STABLELM:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
            };
        case LLM_ARCH_QWEN:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_QWEN2MOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_INP_SHEXP,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
            };
        case LLM_ARCH_QWEN3:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_CLS_OUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_QWEN3MOE:
        case LLM_ARCH_QWEN3VLMOE:
        case LLM_ARCH_OLMOE:
        case LLM_ARCH_LLADA_MOE:
        case LLM_ARCH_RND1:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
            };
        case LLM_ARCH_QWEN3NEXT:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_GATE,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_UP_EXPS,
                LLM_TENSOR_FFN_GATE_INP_SHEXP,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
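                // linear-attention (gated delta net) layers, reusing the SSM_ tensor prefix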
                LLM_TENSOR_SSM_A_NOSCAN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_BETA_ALPHA,
                LLM_TENSOR_SSM_IN,
                LLM_TENSOR_SSM_NORM,
                LLM_TENSOR_SSM_OUT,
            };
        case LLM_ARCH_QWEN35:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_GATE,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_SSM_A_NOSCAN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_BETA,
                LLM_TENSOR_SSM_ALPHA,
                LLM_TENSOR_SSM_NORM,
                LLM_TENSOR_SSM_OUT,
            };
        case LLM_ARCH_QWEN35MOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_GATE,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_UP_EXPS,
                LLM_TENSOR_FFN_GATE_INP_SHEXP,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                LLM_TENSOR_SSM_A_NOSCAN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_BETA,
                LLM_TENSOR_SSM_ALPHA,
                LLM_TENSOR_SSM_NORM,
                LLM_TENSOR_SSM_OUT,
            };
        case LLM_ARCH_QWEN3VL:
        case LLM_ARCH_CHAMELEON:
        case LLM_ARCH_HUNYUAN_DENSE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_CLS_OUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_PHI2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_PHI3:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FACTORS_LONG,
                LLM_TENSOR_ROPE_FACTORS_SHORT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_PHIMOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FACTORS_LONG,
                LLM_TENSOR_ROPE_FACTORS_SHORT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
            };
        case LLM_ARCH_PLAMO:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_ROT_EMBD,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_PLAMO2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_ROT_EMBD,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_SSM_IN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_X,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_A,
                LLM_TENSOR_SSM_D,
                LLM_TENSOR_SSM_OUT,
                LLM_TENSOR_SSM_DT_NORM,
                LLM_TENSOR_SSM_B_NORM,
                LLM_TENSOR_SSM_C_NORM,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_FFN_POST_NORM,
            };
        case LLM_ARCH_PLAMO3:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_POST_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_CODESHELL:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_ROT_EMBD,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_MINICPM3:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FACTORS_LONG,
                LLM_TENSOR_ROPE_FACTORS_SHORT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q_A_NORM,
                LLM_TENSOR_ATTN_KV_A_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_A,
                LLM_TENSOR_ATTN_Q_B,
                LLM_TENSOR_ATTN_KV_A_MQA,
                LLM_TENSOR_ATTN_KV_B,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_DOWN,
            };
        case LLM_ARCH_GEMMA:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_GEMMA2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_POST_NORM,
            };
        case LLM_ARCH_GEMMA3:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_POST_NORM,
            };
        case LLM_ARCH_GEMMA3N:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_POST_NORM,
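                // per-layer embeddings, AltUp and LAUREL tensors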
                LLM_TENSOR_PER_LAYER_TOKEN_EMBD,
                LLM_TENSOR_PER_LAYER_MODEL_PROJ,
                LLM_TENSOR_PER_LAYER_PROJ_NORM,
                LLM_TENSOR_ALTUP_UNEMBD_PROJ,
                LLM_TENSOR_ALTUP_PROJ,
                LLM_TENSOR_PER_LAYER_INP_GATE,
                LLM_TENSOR_PER_LAYER_PROJ,
                LLM_TENSOR_PER_LAYER_POST_NORM,
                LLM_TENSOR_ALTUP_CORRECT_COEF,
                LLM_TENSOR_ALTUP_CORRECT_SCALE,
                LLM_TENSOR_ALTUP_PREDICT_COEF,
                LLM_TENSOR_ALTUP_ROUTER,
                LLM_TENSOR_ALTUP_ROUTER_NORM,
                LLM_TENSOR_LAUREL_L,
                LLM_TENSOR_LAUREL_R,
                LLM_TENSOR_LAUREL_POST_NORM,
            };
        case LLM_ARCH_GEMMA4:
            return {
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_UP_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_POST_NORM,
                LLM_TENSOR_FFN_POST_NORM_1,
                LLM_TENSOR_FFN_POST_NORM_2,
                LLM_TENSOR_FFN_PRE_NORM_2,
                LLM_TENSOR_LAYER_OUT_SCALE,
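                // per-layer embedding tensors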
                LLM_TENSOR_PER_LAYER_TOKEN_EMBD,
                LLM_TENSOR_PER_LAYER_MODEL_PROJ,
                LLM_TENSOR_PER_LAYER_PROJ_NORM,
                LLM_TENSOR_PER_LAYER_INP_GATE,
                LLM_TENSOR_PER_LAYER_PROJ,
                LLM_TENSOR_PER_LAYER_POST_NORM,
            };
        case LLM_ARCH_GEMMA_EMBEDDING:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_DENSE_2_OUT,
                LLM_TENSOR_DENSE_3_OUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_POST_NORM,
            };
        case LLM_ARCH_MAMBA:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_SSM_IN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_X,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_A,
                LLM_TENSOR_SSM_D,
                LLM_TENSOR_SSM_OUT,
            };
        case LLM_ARCH_MAMBA2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_SSM_IN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_A,
                LLM_TENSOR_SSM_D,
                LLM_TENSOR_SSM_NORM,
                LLM_TENSOR_SSM_OUT,
            };
        case LLM_ARCH_JAMBA:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_SSM_IN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_X,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_DT_NORM,
                LLM_TENSOR_SSM_A,
                LLM_TENSOR_SSM_B_NORM,
                LLM_TENSOR_SSM_C_NORM,
                LLM_TENSOR_SSM_D,
                LLM_TENSOR_SSM_OUT,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
            };
        case LLM_ARCH_FALCON_H1:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_SSM_IN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_A,
                LLM_TENSOR_SSM_D,
                LLM_TENSOR_SSM_NORM,
                LLM_TENSOR_SSM_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_COMMAND_R:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
            };
        case LLM_ARCH_COHERE2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_DBRX:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_OUT_NORM,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
            };
        case LLM_ARCH_OLMO:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_OLMO2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_FFN_POST_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_OPENELM:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_ARCTIC:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_NORM_EXPS,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
            };
        case LLM_ARCH_DEEPSEEK:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_ROT_EMBD,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_INP_SHEXP,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
            };
        case LLM_ARCH_DEEPSEEK2:
        case LLM_ARCH_DEEPSEEK2OCR:
        case LLM_ARCH_MISTRAL4:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
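                // MLA (multi-head latent attention) tensors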
                LLM_TENSOR_ATTN_Q_A_NORM,
                LLM_TENSOR_ATTN_KV_A_NORM,
                LLM_TENSOR_ATTN_K, // deepseek-ocr
                LLM_TENSOR_ATTN_V, // deepseek-ocr
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_A,
                LLM_TENSOR_ATTN_Q_B,
                LLM_TENSOR_ATTN_KV_A_MQA,
                LLM_TENSOR_ATTN_KV_B,
                LLM_TENSOR_ATTN_K_B,
                LLM_TENSOR_ATTN_V_B,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_UP_EXPS,
                LLM_TENSOR_FFN_GATE_INP_SHEXP,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                LLM_TENSOR_FFN_EXP_PROBS_B,
            };
        case LLM_ARCH_PLM:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_KV_A_MQA,
                LLM_TENSOR_ATTN_KV_A_NORM,
                LLM_TENSOR_ATTN_KV_B,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_CHATGLM:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_DOWN,
            };
        case LLM_ARCH_GLM4:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_FFN_POST_NORM,
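                // NextN / MTP (multi-token prediction) tensors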
                LLM_TENSOR_NEXTN_EH_PROJ,
                LLM_TENSOR_NEXTN_EMBED_TOKENS,
                LLM_TENSOR_NEXTN_ENORM,
                LLM_TENSOR_NEXTN_HNORM,
                LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,
                LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,
            };
        case LLM_ARCH_GLM4_MOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                LLM_TENSOR_FFN_EXP_PROBS_B,
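                // NextN / MTP (multi-token prediction) tensors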
                LLM_TENSOR_NEXTN_EH_PROJ,
                LLM_TENSOR_NEXTN_EMBED_TOKENS,
                LLM_TENSOR_NEXTN_ENORM,
                LLM_TENSOR_NEXTN_HNORM,
                LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,
                LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,
            };
        case LLM_ARCH_GLM_DSA:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q_A_NORM,
                LLM_TENSOR_ATTN_KV_A_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_A,
                LLM_TENSOR_ATTN_Q_B,
                LLM_TENSOR_ATTN_KV_A_MQA,
                LLM_TENSOR_ATTN_KV_B,
                LLM_TENSOR_ATTN_K_B,
                LLM_TENSOR_ATTN_V_B,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_INP_SHEXP,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                LLM_TENSOR_FFN_EXP_PROBS_B,
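                // sparse-attention indexer tensors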
                LLM_TENSOR_INDEXER_K_NORM,
                LLM_TENSOR_INDEXER_PROJ,
                LLM_TENSOR_INDEXER_ATTN_K,
                LLM_TENSOR_INDEXER_ATTN_Q_B,
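                // NextN / MTP (multi-token prediction) tensors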
                LLM_TENSOR_NEXTN_EH_PROJ,
                LLM_TENSOR_NEXTN_EMBED_TOKENS,
                LLM_TENSOR_NEXTN_ENORM,
                LLM_TENSOR_NEXTN_HNORM,
                LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,
                LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,
            };
        case LLM_ARCH_BITNET:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_SUB_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_SUB_NORM,
            };
        case LLM_ARCH_T5:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_DEC_OUTPUT_NORM,
                LLM_TENSOR_DEC_ATTN_NORM,
                LLM_TENSOR_DEC_ATTN_Q,
                LLM_TENSOR_DEC_ATTN_K,
                LLM_TENSOR_DEC_ATTN_V,
                LLM_TENSOR_DEC_ATTN_OUT,
                LLM_TENSOR_DEC_ATTN_REL_B,
                LLM_TENSOR_DEC_CROSS_ATTN_NORM,
                LLM_TENSOR_DEC_CROSS_ATTN_Q,
                LLM_TENSOR_DEC_CROSS_ATTN_K,
                LLM_TENSOR_DEC_CROSS_ATTN_V,
                LLM_TENSOR_DEC_CROSS_ATTN_OUT,
                LLM_TENSOR_DEC_CROSS_ATTN_REL_B,
                LLM_TENSOR_DEC_FFN_NORM,
                LLM_TENSOR_DEC_FFN_GATE,
                LLM_TENSOR_DEC_FFN_DOWN,
                LLM_TENSOR_DEC_FFN_UP,
                LLM_TENSOR_ENC_OUTPUT_NORM,
                LLM_TENSOR_ENC_ATTN_NORM,
                LLM_TENSOR_ENC_ATTN_Q,
                LLM_TENSOR_ENC_ATTN_K,
                LLM_TENSOR_ENC_ATTN_V,
                LLM_TENSOR_ENC_ATTN_OUT,
                LLM_TENSOR_ENC_ATTN_REL_B,
                LLM_TENSOR_ENC_FFN_NORM,
                LLM_TENSOR_ENC_FFN_GATE,
                LLM_TENSOR_ENC_FFN_DOWN,
                LLM_TENSOR_ENC_FFN_UP,
            };
        case LLM_ARCH_T5ENCODER:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ENC_OUTPUT_NORM,
                LLM_TENSOR_ENC_ATTN_NORM,
                LLM_TENSOR_ENC_ATTN_Q,
                LLM_TENSOR_ENC_ATTN_K,
                LLM_TENSOR_ENC_ATTN_V,
                LLM_TENSOR_ENC_ATTN_OUT,
                LLM_TENSOR_ENC_ATTN_REL_B,
                LLM_TENSOR_ENC_FFN_NORM,
                LLM_TENSOR_ENC_FFN_GATE,
                LLM_TENSOR_ENC_FFN_DOWN,
                LLM_TENSOR_ENC_FFN_UP,
            };
        case LLM_ARCH_JAIS:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
            };
        case LLM_ARCH_JAIS2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_DOWN,
            };
        case LLM_ARCH_NEMOTRON_H:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_SSM_IN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_A,
                LLM_TENSOR_SSM_D,
                LLM_TENSOR_SSM_NORM,
                LLM_TENSOR_SSM_OUT,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_NEMOTRON_H_MOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                // mamba(2) ssm layers
                LLM_TENSOR_SSM_IN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_A,
                LLM_TENSOR_SSM_D,
                LLM_TENSOR_SSM_NORM,
                LLM_TENSOR_SSM_OUT,
                // attention layers
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                // dense FFN
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                // MoE FFN layers
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_EXP_PROBS_B,
                LLM_TENSOR_FFN_LATENT_DOWN,
                LLM_TENSOR_FFN_LATENT_UP,
                // MoE shared expert layer
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
            };
        case LLM_ARCH_EXAONE4:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_POST_NORM,
            };
        case LLM_ARCH_EXAONE_MOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_EXP_PROBS_B,
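                // NextN / MTP (multi-token prediction) tensors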
                LLM_TENSOR_NEXTN_EH_PROJ,
                LLM_TENSOR_NEXTN_EMBED_TOKENS,
                LLM_TENSOR_NEXTN_ENORM,
                LLM_TENSOR_NEXTN_HNORM,
                LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,
                LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,
            };
        case LLM_ARCH_RWKV6:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_NORM_2,
                LLM_TENSOR_TIME_MIX_W1,
                LLM_TENSOR_TIME_MIX_W2,
                LLM_TENSOR_TIME_MIX_LERP_X,
                LLM_TENSOR_TIME_MIX_LERP_W,
                LLM_TENSOR_TIME_MIX_LERP_K,
                LLM_TENSOR_TIME_MIX_LERP_V,
                LLM_TENSOR_TIME_MIX_LERP_R,
                LLM_TENSOR_TIME_MIX_LERP_G,
                LLM_TENSOR_TIME_MIX_LERP_FUSED,
                LLM_TENSOR_TIME_MIX_FIRST,
                LLM_TENSOR_TIME_MIX_DECAY,
                LLM_TENSOR_TIME_MIX_DECAY_W1,
                LLM_TENSOR_TIME_MIX_DECAY_W2,
                LLM_TENSOR_TIME_MIX_KEY,
                LLM_TENSOR_TIME_MIX_VALUE,
                LLM_TENSOR_TIME_MIX_RECEPTANCE,
                LLM_TENSOR_TIME_MIX_GATE,
                LLM_TENSOR_TIME_MIX_LN,
                LLM_TENSOR_TIME_MIX_OUTPUT,
                LLM_TENSOR_CHANNEL_MIX_LERP_K,
                LLM_TENSOR_CHANNEL_MIX_LERP_R,
                LLM_TENSOR_CHANNEL_MIX_KEY,
                LLM_TENSOR_CHANNEL_MIX_VALUE,
                LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,
            };
        case LLM_ARCH_RWKV6QWEN2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_TIME_MIX_W1,
                LLM_TENSOR_TIME_MIX_W2,
                LLM_TENSOR_TIME_MIX_LERP_X,
                LLM_TENSOR_TIME_MIX_LERP_FUSED,
                LLM_TENSOR_TIME_MIX_FIRST,
                LLM_TENSOR_TIME_MIX_DECAY,
                LLM_TENSOR_TIME_MIX_DECAY_W1,
                LLM_TENSOR_TIME_MIX_DECAY_W2,
                LLM_TENSOR_TIME_MIX_KEY,
                LLM_TENSOR_TIME_MIX_VALUE,
                LLM_TENSOR_TIME_MIX_RECEPTANCE,
                LLM_TENSOR_TIME_MIX_GATE,
                LLM_TENSOR_TIME_MIX_OUTPUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_RWKV7:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_NORM_2,
                LLM_TENSOR_TIME_MIX_W0,
                LLM_TENSOR_TIME_MIX_W1,
                LLM_TENSOR_TIME_MIX_W2,
                LLM_TENSOR_TIME_MIX_A0,
                LLM_TENSOR_TIME_MIX_A1,
                LLM_TENSOR_TIME_MIX_A2,
                LLM_TENSOR_TIME_MIX_V0,
                LLM_TENSOR_TIME_MIX_V1,
                LLM_TENSOR_TIME_MIX_V2,
                LLM_TENSOR_TIME_MIX_G1,
                LLM_TENSOR_TIME_MIX_G2,
                LLM_TENSOR_TIME_MIX_K_K,
                LLM_TENSOR_TIME_MIX_K_A,
                LLM_TENSOR_TIME_MIX_R_K,
                LLM_TENSOR_TIME_MIX_LERP_FUSED,
                LLM_TENSOR_TIME_MIX_KEY,
                LLM_TENSOR_TIME_MIX_VALUE,
                LLM_TENSOR_TIME_MIX_RECEPTANCE,
                LLM_TENSOR_TIME_MIX_LN,
                LLM_TENSOR_TIME_MIX_OUTPUT,
                LLM_TENSOR_CHANNEL_MIX_LERP_K,
                LLM_TENSOR_CHANNEL_MIX_KEY,
                LLM_TENSOR_CHANNEL_MIX_VALUE,
            };
        case LLM_ARCH_ARWKV7:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_TIME_MIX_W0,
                LLM_TENSOR_TIME_MIX_W1,
                LLM_TENSOR_TIME_MIX_W2,
                LLM_TENSOR_TIME_MIX_A0,
                LLM_TENSOR_TIME_MIX_A1,
                LLM_TENSOR_TIME_MIX_A2,
                LLM_TENSOR_TIME_MIX_V0,
                LLM_TENSOR_TIME_MIX_V1,
                LLM_TENSOR_TIME_MIX_V2,
                LLM_TENSOR_TIME_MIX_G1,
                LLM_TENSOR_TIME_MIX_G2,
                LLM_TENSOR_TIME_MIX_K_K,
                LLM_TENSOR_TIME_MIX_K_A,
                LLM_TENSOR_TIME_MIX_R_K,
                LLM_TENSOR_TIME_MIX_LERP_FUSED,
                LLM_TENSOR_TIME_MIX_KEY,
                LLM_TENSOR_TIME_MIX_VALUE,
                LLM_TENSOR_TIME_MIX_RECEPTANCE,
                LLM_TENSOR_TIME_MIX_LN,
                LLM_TENSOR_TIME_MIX_OUTPUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_GRANITE_HYBRID:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_SSM_IN,
                LLM_TENSOR_SSM_CONV1D,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_A,
                LLM_TENSOR_SSM_D,
                LLM_TENSOR_SSM_NORM,
                LLM_TENSOR_SSM_OUT,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
            };
        case LLM_ARCH_WAVTOKENIZER_DEC:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_TOKEN_EMBD_NORM,
                LLM_TENSOR_CONV1D,
                LLM_TENSOR_CONVNEXT_DW,
                LLM_TENSOR_CONVNEXT_NORM,
                LLM_TENSOR_CONVNEXT_PW1,
                LLM_TENSOR_CONVNEXT_PW2,
                LLM_TENSOR_CONVNEXT_GAMMA,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_POS_NET_CONV1,
                LLM_TENSOR_POS_NET_CONV2,
                LLM_TENSOR_POS_NET_NORM,
                LLM_TENSOR_POS_NET_NORM1,
                LLM_TENSOR_POS_NET_NORM2,
                LLM_TENSOR_POS_NET_ATTN_NORM,
                LLM_TENSOR_POS_NET_ATTN_Q,
                LLM_TENSOR_POS_NET_ATTN_K,
                LLM_TENSOR_POS_NET_ATTN_V,
                LLM_TENSOR_POS_NET_ATTN_OUT,
            };
        case LLM_ARCH_BAILINGMOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_INP_SHEXP,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
            };
        case LLM_ARCH_BAILINGMOE2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_EXP_PROBS_B,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
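                // NextN / MTP (multi-token prediction) tensors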
                LLM_TENSOR_NEXTN_EH_PROJ,
                LLM_TENSOR_NEXTN_EMBED_TOKENS,
                LLM_TENSOR_NEXTN_ENORM,
                LLM_TENSOR_NEXTN_HNORM,
                LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,
                LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,
                LLM_TENSOR_LAYER_OUT_NORM,
            };
        case LLM_ARCH_DOTS1:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_INP_SHEXP,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                LLM_TENSOR_FFN_EXP_PROBS_B,
            };
        case LLM_ARCH_ERNIE4_5_MOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_EXP_PROBS_B,
            };
        case LLM_ARCH_HUNYUAN_MOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
            };
        case LLM_ARCH_OPENAI_MOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_SINKS,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
            };
        case LLM_ARCH_LFM2:
            return {
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_SHORTCONV_CONV,
                LLM_TENSOR_SHORTCONV_INPROJ,
                LLM_TENSOR_SHORTCONV_OUTPROJ,
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM_LFM2,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_DENSE_2_OUT,
            };
        case LLM_ARCH_LFM2MOE:
            return {
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_SHORTCONV_CONV,
                LLM_TENSOR_SHORTCONV_INPROJ,
                LLM_TENSOR_SHORTCONV_OUTPROJ,
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM_LFM2,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_EXP_PROBS_B,
            };
        case LLM_ARCH_SMALLTHINKER:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
            };
        case LLM_ARCH_APERTUS:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_SEED_OSS:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_POST_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_GROVEMOE:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_CHEXPS,
                LLM_TENSOR_FFN_DOWN_CHEXPS,
                LLM_TENSOR_FFN_UP_CHEXPS,
            };
        case LLM_ARCH_MINIMAX_M2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_EXP_PROBS_B,
            };
        case LLM_ARCH_COGVLM:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_QKV,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_VISEXP_ATTN_QKV,
                LLM_TENSOR_VISEXP_ATTN_OUT,
                LLM_TENSOR_VISEXP_FFN_GATE,
                LLM_TENSOR_VISEXP_FFN_DOWN,
                LLM_TENSOR_VISEXP_FFN_UP,
            };
        case LLM_ARCH_MIMO2:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_SINKS,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_EXP_PROBS_B,
            };
        case LLM_ARCH_STEP35:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ROPE_FACTORS_LONG,
                LLM_TENSOR_ROPE_FACTORS_SHORT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_GATE,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_EXP_PROBS_B,
            };
        case LLM_ARCH_GPTJ:
        case LLM_ARCH_UNKNOWN:
            return {
                LLM_TENSOR_TOKEN_EMBD,
            };
        case LLM_ARCH_MAINCODER:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_Q_NORM,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_K_NORM,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
            };
        case LLM_ARCH_KIMI_LINEAR:
            return {
                LLM_TENSOR_TOKEN_EMBD,
                LLM_TENSOR_OUTPUT_NORM,
                LLM_TENSOR_OUTPUT,
                LLM_TENSOR_ROPE_FREQS,
                LLM_TENSOR_ATTN_NORM,
                LLM_TENSOR_ATTN_Q,
                LLM_TENSOR_ATTN_K,
                LLM_TENSOR_ATTN_V,
                LLM_TENSOR_ATTN_OUT,
                LLM_TENSOR_FFN_NORM,
                // Dense FFN (layer 0 only)
                LLM_TENSOR_FFN_GATE,
                LLM_TENSOR_FFN_DOWN,
                LLM_TENSOR_FFN_UP,
                // MoE FFN (layers 1+)
                LLM_TENSOR_FFN_GATE_INP,
                LLM_TENSOR_FFN_GATE_EXPS,
                LLM_TENSOR_FFN_DOWN_EXPS,
                LLM_TENSOR_FFN_UP_EXPS,
                LLM_TENSOR_FFN_EXP_PROBS_B,
                // Shared experts
                LLM_TENSOR_FFN_GATE_SHEXP,
                LLM_TENSOR_FFN_DOWN_SHEXP,
                LLM_TENSOR_FFN_UP_SHEXP,
                // KDA (Kimi Delta Attention; uses the SSM_ enum prefix, keeping GGUF names for backward compat)
                LLM_TENSOR_SSM_CONV1D_Q,
                LLM_TENSOR_SSM_CONV1D_K,
                LLM_TENSOR_SSM_CONV1D_V,
                LLM_TENSOR_SSM_F_A,
                LLM_TENSOR_SSM_F_B,
                LLM_TENSOR_SSM_BETA,
                LLM_TENSOR_SSM_A,
                LLM_TENSOR_SSM_G_A,
                LLM_TENSOR_SSM_G_B,
                LLM_TENSOR_SSM_DT,
                LLM_TENSOR_SSM_NORM,
                // MLA
                LLM_TENSOR_ATTN_Q_A,
                LLM_TENSOR_ATTN_Q_B,
                LLM_TENSOR_ATTN_Q_A_NORM,
                LLM_TENSOR_ATTN_KV_A_MQA,
                LLM_TENSOR_ATTN_KV_B,
                LLM_TENSOR_ATTN_K_B,
                LLM_TENSOR_ATTN_V_B,
                LLM_TENSOR_ATTN_KV_A_NORM,
            };
        default:
            GGML_ABORT("unknown architecture for tensor mapping");
    }
}

// declares information about the model weight tensors:
// - the layer in which the tensor is used. this is needed to assign the correct buffer type for the weight
// - the operator which consumes the weight. this is needed to determine whether the respective backend supports the operator
//
// for example, tensors of the input layer are usually assigned to CPU/host buffer types
//
// a mismatch between the declared information and the actual layer/op in which the tensor is used can lead to
// sub-optimal buffer type assignment and extra overhead during computation
// example: https://github.com/ggml-org/llama.cpp/pull/17548
//
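// usage sketch (the `layer` and `op` member names are assumed from llm_tensor_info in llama-arch.h):
//
//     const llm_tensor_info & info = llm_tensor_info_for(LLM_TENSOR_TOKEN_EMBD);
//     // info.layer == LLM_TENSOR_LAYER_INPUT -> typically placed in a CPU/host buffer type
//     // info.op    == GGML_OP_GET_ROWS      -> the backend owning the buffer must support GGML_OP_GET_ROWS
//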
static const std::map<llm_tensor, llm_tensor_info> LLM_TENSOR_INFOS = {
    {LLM_TENSOR_TOKEN_EMBD,                 {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
    {LLM_TENSOR_POS_EMBD,                   {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
    {LLM_TENSOR_TOKEN_TYPES,                {LLM_TENSOR_LAYER_INPUT, GGML_OP_GET_ROWS}},
    {LLM_TENSOR_TOKEN_EMBD_NORM,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},  // apply the norms on the first repeating layer (not the input layer)
    {LLM_TENSOR_OUTPUT,                     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_CLS,                        {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_CLS_OUT,                    {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_CLS_NORM,                   {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
    {LLM_TENSOR_DENSE_2_OUT,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, // Dense layer output
    {LLM_TENSOR_DENSE_3_OUT,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}}, // Dense layer output
    {LLM_TENSOR_OUTPUT_NORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
    {LLM_TENSOR_OUTPUT_NORM_LFM2,           {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
    {LLM_TENSOR_DEC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
    {LLM_TENSOR_ENC_OUTPUT_NORM,            {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
    {LLM_TENSOR_ROPE_FREQS,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
    {LLM_TENSOR_ROPE_FACTORS_LONG,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
    {LLM_TENSOR_ROPE_FACTORS_SHORT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ROPE}},
    {LLM_TENSOR_ATTN_Q,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_K,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_V,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_QKV,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_OUT,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_GATE,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_FFN_GATE,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_FFN_DOWN,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_FFN_UP,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_FFN_DOWN_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_FFN_GATE_SHEXP,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_FFN_UP_SHEXP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_Q_A,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_Q_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_KV_A_MQA,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_KV_B,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_K_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_V_B,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ATTN_SINKS,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SCALE}},
    {LLM_TENSOR_DEC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_DEC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_DEC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_DEC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_DEC_CROSS_ATTN_Q,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_DEC_CROSS_ATTN_K,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_DEC_CROSS_ATTN_V,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_DEC_CROSS_ATTN_OUT,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_DEC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_DEC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_DEC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ENC_ATTN_Q,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ENC_ATTN_K,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ENC_ATTN_V,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ENC_ATTN_OUT,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ENC_FFN_GATE,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ENC_FFN_DOWN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ENC_FFN_UP,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_FFN_GATE_INP_SHEXP,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_FFN_GATE_INP,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SSM_IN,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SSM_X,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SSM_DT,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SSM_OUT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SSM_ALPHA,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SSM_BETA_ALPHA,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_W1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_W2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_A1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_A2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_V1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_V2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_G1,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_G2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_DECAY_W1,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_DECAY_W2,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_KEY,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_VALUE,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_RECEPTANCE,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_GATE,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_OUTPUT,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_CHANNEL_MIX_KEY,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_CHANNEL_MIX_RECEPTANCE,     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_CHANNEL_MIX_VALUE,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_FFN_ACT,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_DIV}},
    {LLM_TENSOR_SSM_CONV1D,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
    {LLM_TENSOR_SSM_A,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_SCAN}},
    {LLM_TENSOR_SSM_A_NOSCAN,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}}, // a version of SSM_A used for MUL instead of SSM_SCAN
    {LLM_TENSOR_SSM_DT_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_SSM_B_NORM,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_SSM_C_NORM,                 {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_SSM_D,                      {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_SSM_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    // Kimi KDA - Conv tensors are 4D [d_conv, 1, d_inner, 1], reshaped to 2D at runtime
    {LLM_TENSOR_SSM_CONV1D_Q,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_SSM_CONV1D_K,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_SSM_CONV1D_V,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_SSM_F_A,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SSM_F_B,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SSM_BETA,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SSM_G_A,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SSM_G_B,                    {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_TIME_MIX_LERP_X,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_TIME_MIX_LN,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_CHANNEL_MIX_LERP_K,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_CHANNEL_MIX_LERP_R,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_TIME_MIX_K_K,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_TIME_MIX_K_A,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_TIME_MIX_R_K,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_TIME_MIX_LERP_W,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    {LLM_TENSOR_TIME_MIX_LERP_K,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    {LLM_TENSOR_TIME_MIX_LERP_V,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    {LLM_TENSOR_TIME_MIX_LERP_R,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    {LLM_TENSOR_TIME_MIX_LERP_G,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    {LLM_TENSOR_TIME_MIX_LERP_FUSED,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    {LLM_TENSOR_TIME_MIX_DECAY,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    {LLM_TENSOR_TIME_MIX_W0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    {LLM_TENSOR_TIME_MIX_A0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    {LLM_TENSOR_TIME_MIX_V0,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    {LLM_TENSOR_TIME_MIX_FIRST,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_RWKV_WKV6}},
    {LLM_TENSOR_ATTN_NORM,                  {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ATTN_NORM_2,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ATTN_OUT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ATTN_POST_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_FFN_NORM,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_FFN_PRE_NORM_2,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_FFN_POST_NORM_1,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_FFN_POST_NORM_2,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_FFN_POST_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_FFN_NORM_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ATTN_Q_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ATTN_K_NORM,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_LAYER_OUT_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_LAYER_OUT_SCALE,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ATTN_Q_A_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ATTN_KV_A_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ATTN_SUB_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_FFN_SUB_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_DEC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_DEC_CROSS_ATTN_NORM,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_DEC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ENC_ATTN_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ENC_FFN_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_DEC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
    {LLM_TENSOR_ENC_ATTN_REL_B,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_GET_ROWS}},
    {LLM_TENSOR_FFN_DOWN_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
    {LLM_TENSOR_FFN_GATE_EXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
    {LLM_TENSOR_FFN_UP_EXPS,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
    {LLM_TENSOR_FFN_GATE_UP_EXPS,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
    {LLM_TENSOR_FFN_DOWN_CHEXPS,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
    {LLM_TENSOR_FFN_GATE_CHEXPS,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
    {LLM_TENSOR_FFN_UP_CHEXPS,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT_ID}},
    {LLM_TENSOR_FFN_EXP_PROBS_B,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_ADD}},
    // altup / laurel (gemma 3n)
    {LLM_TENSOR_PER_LAYER_TOKEN_EMBD,       {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_GET_ROWS}},
    {LLM_TENSOR_PER_LAYER_MODEL_PROJ,       {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
    {LLM_TENSOR_PER_LAYER_PROJ_NORM,        {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL}},
    {LLM_TENSOR_ALTUP_PROJ,                 {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ALTUP_UNEMBD_PROJ,          {LLM_TENSOR_LAYER_OUTPUT,    GGML_OP_MUL_MAT}},
    {LLM_TENSOR_PER_LAYER_INP_GATE,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_PER_LAYER_PROJ,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_PER_LAYER_POST_NORM,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ALTUP_CORRECT_COEF,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ALTUP_CORRECT_SCALE,        {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_ALTUP_PREDICT_COEF,         {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ALTUP_ROUTER,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_ALTUP_ROUTER_NORM,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_LAUREL_L,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_LAUREL_R,                   {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_LAUREL_POST_NORM,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    // this tensor is loaded for T5, but never used
    {LLM_TENSOR_DEC_CROSS_ATTN_REL_B,       {LLM_TENSOR_LAYER_REPEATING, GGML_OP_NONE}},
    {LLM_TENSOR_CONV1D,                     {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
    {LLM_TENSOR_POS_NET_NORM,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_POS_NET_NORM1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_POS_NET_NORM2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_POS_NET_CONV1,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
    {LLM_TENSOR_POS_NET_CONV2,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
    {LLM_TENSOR_POS_NET_ATTN_NORM,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_POS_NET_ATTN_Q,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_POS_NET_ATTN_K,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_POS_NET_ATTN_V,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_POS_NET_ATTN_OUT,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_CONVNEXT_DW,                {LLM_TENSOR_LAYER_REPEATING, GGML_OP_IM2COL}},
    {LLM_TENSOR_CONVNEXT_NORM,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_CONVNEXT_PW1,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_CONVNEXT_PW2,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_CONVNEXT_GAMMA,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_SHORTCONV_CONV,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_SSM_CONV}},
    {LLM_TENSOR_SHORTCONV_INPROJ,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_SHORTCONV_OUTPROJ,          {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_VISEXP_ATTN_QKV,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_VISEXP_ATTN_OUT,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_VISEXP_FFN_GATE,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_VISEXP_FFN_DOWN,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_VISEXP_FFN_UP,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_INDEXER_K_NORM,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_INDEXER_PROJ,               {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_INDEXER_ATTN_K,             {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_INDEXER_ATTN_Q_B,           {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL_MAT}},
    // NextN/MTP tensors are currently ignored (reserved for future MTP support)
    // These tensors only exist in the last layer(s) and are treated as output tensors
    {LLM_TENSOR_NEXTN_EH_PROJ,              {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_NEXTN_EMBED_TOKENS,         {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}},
    {LLM_TENSOR_NEXTN_ENORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_GET_ROWS}},
    {LLM_TENSOR_NEXTN_HNORM,                {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
    {LLM_TENSOR_NEXTN_SHARED_HEAD_HEAD,     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL_MAT}},
    {LLM_TENSOR_NEXTN_SHARED_HEAD_NORM,     {LLM_TENSOR_LAYER_OUTPUT, GGML_OP_MUL}},
    // Nemotron 3 Super
    {LLM_TENSOR_FFN_LATENT_DOWN,            {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
    {LLM_TENSOR_FFN_LATENT_UP,              {LLM_TENSOR_LAYER_REPEATING, GGML_OP_MUL}},
};

LLM_KV::LLM_KV(llm_arch arch, const char * suffix) : arch(arch), suffix(suffix) {}

std::string LLM_KV::operator()(llm_kv kv) const {
    std::string name = ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));

    if (suffix != nullptr) {
        name += ".";
        name += suffix;
    }

    return name;
}
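
// example (assuming LLM_KV_NAMES uses the usual "%s.<key>" patterns and the header defaults suffix to nullptr):
//   LLM_KV(LLM_ARCH_LLAMA)(LLM_KV_CONTEXT_LENGTH)          -> "llama.context_length"
//   LLM_KV(LLM_ARCH_LLAMA, "scale")(LLM_KV_CONTEXT_LENGTH) -> "llama.context_length.scale"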

LLM_TN_IMPL::LLM_TN_IMPL(llm_arch arch, llm_tensor tensor, const char * suffix, int bid, int xid)
    : arch(arch), tensor(tensor), suffix(suffix), bid(bid), xid(xid),
      model_tensors(llm_get_tensor_names(arch)) {}

std::string LLM_TN_IMPL::str() const {
    if (LLM_TENSOR_NAMES.find(tensor) == LLM_TENSOR_NAMES.end()) {
        GGML_ABORT("unknown tensor name for tensor id %d", static_cast<int>(tensor));
    }

    if (model_tensors.find(tensor) == model_tensors.end()) {
        const char * name = LLM_TENSOR_NAMES.at(tensor);
        if (suffix != nullptr || bid != -1 || xid != -1) {
            // guard against passing a null suffix to the %s specifier (undefined behavior),
            // reachable here whenever bid or xid is set but suffix is not
            LLAMA_LOG_WARN("%s: cannot properly format tensor name %s with suffix=%s bid=%d xid=%d\n",
                __func__, name, suffix ? suffix : "(null)", bid, xid);
        }
        return name;
    }

    std::string name = ::format(LLM_TENSOR_NAMES.at(tensor), bid, xid);
    if (suffix != nullptr) {
        name += ".";
        name += suffix;
    }

    return name;
}
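
// example (assuming the usual "blk.%d"-prefixed patterns in LLM_TENSOR_NAMES and the LLM_TN
// wrapper from llama-arch.h, whose operator() constructs an LLM_TN_IMPL):
//   LLM_TN(arch)(LLM_TENSOR_TOKEN_EMBD, "weight") -> "token_embd.weight"
//   LLM_TN(arch)(LLM_TENSOR_ATTN_Q, "weight", 0)  -> "blk.0.attn_q.weight"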

std::vector<llm_arch> llm_arch_all() {
    std::vector<llm_arch> ret;
    ret.reserve(LLM_ARCH_NAMES.size());
    for (const auto & [arch, _] : LLM_ARCH_NAMES) {
        ret.push_back(arch);
    }
    return ret;
}

const char * llm_arch_name(llm_arch arch) {
    auto it = LLM_ARCH_NAMES.find(arch);
    if (it == LLM_ARCH_NAMES.end()) {
        return "unknown";
    }
    return it->second;
}

llm_arch llm_arch_from_string(const std::string & name) {
    for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
        if (kv.second == name) {
            return kv.first;
        }
    }

    return LLM_ARCH_UNKNOWN;
}
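
// note: the two functions above round-trip for every entry of LLM_ARCH_NAMES, i.e.
//   llm_arch_from_string(llm_arch_name(arch)) == arch
// unknown strings map to LLM_ARCH_UNKNOWN and unknown enum values to "unknown"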

const llm_tensor_info & llm_tensor_info_for(llm_tensor tensor) {
    return LLM_TENSOR_INFOS.at(tensor);
}

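// recurrent architectures keep their sequence state in a fixed-size recurrent cache
// (SSM/RWKV-style states) instead of a growing KV cache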
bool llm_arch_is_recurrent(const llm_arch & arch) {
    switch (arch) {
        case LLM_ARCH_MAMBA:
        case LLM_ARCH_MAMBA2:
        case LLM_ARCH_RWKV6:
        case LLM_ARCH_RWKV6QWEN2:
        case LLM_ARCH_RWKV7:
        case LLM_ARCH_ARWKV7:
            return true;
        default:
            return false;
    }
}

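// hybrid architectures interleave recurrent/SSM layers with regular attention layers,
// so they require both a recurrent state cache and a KV cache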
bool llm_arch_is_hybrid(const llm_arch & arch) {
    switch (arch) {
        case LLM_ARCH_JAMBA:
        case LLM_ARCH_FALCON_H1:
        case LLM_ARCH_PLAMO2:
        case LLM_ARCH_GRANITE_HYBRID:
        case LLM_ARCH_LFM2:
        case LLM_ARCH_LFM2MOE:
        case LLM_ARCH_NEMOTRON_H:
        case LLM_ARCH_NEMOTRON_H_MOE:
        case LLM_ARCH_QWEN3NEXT:
        case LLM_ARCH_KIMI_LINEAR:
        case LLM_ARCH_QWEN35:
        case LLM_ARCH_QWEN35MOE:
            return true;
        default:
            return false;
    }
}

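// diffusion architectures decode by iteratively denoising masked tokens
// instead of left-to-right autoregressive sampling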
bool llm_arch_is_diffusion(const llm_arch & arch) {
    switch (arch) {
        case LLM_ARCH_DREAM:
        case LLM_ARCH_LLADA:
        case LLM_ARCH_LLADA_MOE:
        case LLM_ARCH_RND1:
            return true;
        default:
            return false;
    }
}
