from __future__ import annotations

from typing import Sequence

from .constants import MODEL_ARCH, MODEL_TENSOR, MODEL_TENSORS, TENSOR_NAMES


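# Translation table from the tensor names used by the various source model
# checkpoints (Hugging Face layouts, original PyTorch releases, etc.) to the
# standardized GGUF tensor names defined in .constants. The trailing comment
# on each entry names the architecture(s) that use that key.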
class TensorNameMap:
    mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Token embeddings
        MODEL_TENSOR.TOKEN_EMBD: (
            "gpt_neox.embed_in",  # gptneox
            "transformer.wte",  # gpt2 gpt-j mpt refact qwen
            "transformer.word_embeddings",  # falcon
            "word_embeddings",  # bloom
            "model.embed_tokens",  # llama-hf
            "tok_embeddings",  # llama-pth
            "embeddings.word_embeddings",  # bert
            "language_model.embedding.word_embeddings",  # persimmon
            "wte",  # gpt2
            "transformer.embd.wte",  # phi2
            "model.tok_embeddings",  # internlm2
        ),

        # Token type embeddings
        MODEL_TENSOR.TOKEN_TYPES: (
            "embeddings.token_type_embeddings",  # bert
        ),

        # Normalization of token embeddings
        MODEL_TENSOR.TOKEN_EMBD_NORM: (
            "word_embeddings_layernorm",  # bloom
        ),

        # Position embeddings
        MODEL_TENSOR.POS_EMBD: (
            "transformer.wpe",  # gpt2
            "embeddings.position_embeddings",  # bert
            "wpe",  # gpt2
        ),

        # Output
        MODEL_TENSOR.OUTPUT: (
            "embed_out",  # gptneox
            "lm_head",  # gpt2 mpt falcon llama-hf baichuan qwen
            "output",  # llama-pth bloom internlm2
            "word_embeddings_for_head",  # persimmon
            "lm_head.linear",  # phi2
        ),

        # Output norm
        MODEL_TENSOR.OUTPUT_NORM: (
            "gpt_neox.final_layer_norm",  # gptneox
            "transformer.ln_f",  # gpt2 gpt-j falcon
            "model.norm",  # llama-hf baichuan internlm2
            "norm",  # llama-pth
            "embeddings.LayerNorm",  # bert
            "transformer.norm_f",  # mpt
            "ln_f",  # refact bloom qwen gpt2
            "language_model.encoder.final_layernorm",  # persimmon
            "model.final_layernorm",  # persimmon
            "lm_head.ln",  # phi2
        ),

        # Rope frequencies
        MODEL_TENSOR.ROPE_FREQS: (
            "rope.freqs",  # llama-pth
        ),
    }

    block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
        # Attention norm
        MODEL_TENSOR.ATTN_NORM: (
            "gpt_neox.layers.{bid}.input_layernorm",  # gptneox
            "transformer.h.{bid}.ln_1",  # gpt2 gpt-j refact qwen
            "transformer.blocks.{bid}.norm_1",  # mpt
            "transformer.h.{bid}.input_layernorm",  # falcon7b
            "h.{bid}.input_layernorm",  # bloom
            "transformer.h.{bid}.ln_mlp",  # falcon40b
            "model.layers.{bid}.input_layernorm",  # llama-hf
            "layers.{bid}.attention_norm",  # llama-pth
            "encoder.layer.{bid}.attention.output.LayerNorm",  # bert
            "language_model.encoder.layers.{bid}.input_layernorm",  # persimmon
            "model.layers.{bid}.ln1",  # yi
            "h.{bid}.ln_1",  # gpt2
            "transformer.h.{bid}.ln",  # phi2
            "model.layers.layers.{bid}.norm",  # plamo
            "model.layers.{bid}.attention_norm",  # internlm2
        ),

        # Attention norm 2
        MODEL_TENSOR.ATTN_NORM_2: (
            "transformer.h.{bid}.ln_attn",  # falcon40b
        ),

        # Attention query-key-value
        MODEL_TENSOR.ATTN_QKV: (
            "gpt_neox.layers.{bid}.attention.query_key_value",  # gptneox
            "transformer.h.{bid}.attn.c_attn",  # gpt2 qwen
            "transformer.blocks.{bid}.attn.Wqkv",  # mpt
            "transformer.h.{bid}.self_attention.query_key_value",  # falcon
            "h.{bid}.self_attention.query_key_value",  # bloom
            "language_model.encoder.layers.{bid}.self_attention.query_key_value",  # persimmon
            "model.layers.{bid}.self_attn.query_key_value",  # persimmon
            "h.{bid}.attn.c_attn",  # gpt2
            "transformer.h.{bid}.mixer.Wqkv",  # phi2
        ),

        # Attention query
        MODEL_TENSOR.ATTN_Q: (
            "model.layers.{bid}.self_attn.q_proj",  # llama-hf
            "layers.{bid}.attention.wq",  # llama-pth
            "encoder.layer.{bid}.attention.self.query",  # bert
            "transformer.h.{bid}.attn.q_proj",  # gpt-j
            "model.layers.layers.{bid}.self_attn.q_proj",  # plamo
            "model.layers.{bid}.attention.wq",  # internlm2
        ),

        # Attention key
        MODEL_TENSOR.ATTN_K: (
            "model.layers.{bid}.self_attn.k_proj",  # llama-hf
            "layers.{bid}.attention.wk",  # llama-pth
            "encoder.layer.{bid}.attention.self.key",  # bert
            "transformer.h.{bid}.attn.k_proj",  # gpt-j
            "model.layers.layers.{bid}.self_attn.k_proj",  # plamo
            "model.layers.{bid}.attention.wk",  # internlm2
        ),

        # Attention value
        MODEL_TENSOR.ATTN_V: (
            "model.layers.{bid}.self_attn.v_proj",  # llama-hf
            "layers.{bid}.attention.wv",  # llama-pth
            "encoder.layer.{bid}.attention.self.value",  # bert
            "transformer.h.{bid}.attn.v_proj",  # gpt-j
            "model.layers.layers.{bid}.self_attn.v_proj",  # plamo
            "model.layers.{bid}.attention.wv",  # internlm2
        ),

        # Attention output
        MODEL_TENSOR.ATTN_OUT: (
            "gpt_neox.layers.{bid}.attention.dense",  # gptneox
            "transformer.h.{bid}.attn.c_proj",  # gpt2 refact qwen
            "transformer.blocks.{bid}.attn.out_proj",  # mpt
            "transformer.h.{bid}.self_attention.dense",  # falcon
            "h.{bid}.self_attention.dense",  # bloom
            "model.layers.{bid}.self_attn.o_proj",  # llama-hf
            "layers.{bid}.attention.wo",  # llama-pth
            "encoder.layer.{bid}.attention.output.dense",  # bert
            "transformer.h.{bid}.attn.out_proj",  # gpt-j
            "language_model.encoder.layers.{bid}.self_attention.dense",  # persimmon
            "model.layers.{bid}.self_attn.dense",  # persimmon
            "h.{bid}.attn.c_proj",  # gpt2
            "transformer.h.{bid}.mixer.out_proj",  # phi2
            "model.layers.layers.{bid}.self_attn.o_proj",  # plamo
            "model.layers.{bid}.attention.wo",  # internlm2
        ),

        # Rotary embeddings
        MODEL_TENSOR.ATTN_ROT_EMBD: (
            "model.layers.{bid}.self_attn.rotary_emb.inv_freq",  # llama-hf
            "layers.{bid}.attention.inner_attention.rope.freqs",  # llama-pth
            "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq",  # plamo
            "transformer.h.{bid}.attn.rotary_emb.inv_freq",  # codeshell
        ),

        # Feed-forward norm
        MODEL_TENSOR.FFN_NORM: (
            "gpt_neox.layers.{bid}.post_attention_layernorm",  # gptneox
            "transformer.h.{bid}.ln_2",  # gpt2 refact qwen
            "h.{bid}.post_attention_layernorm",  # bloom
            "transformer.blocks.{bid}.norm_2",  # mpt
            "model.layers.{bid}.post_attention_layernorm",  # llama-hf
            "layers.{bid}.ffn_norm",  # llama-pth
            "encoder.layer.{bid}.output.LayerNorm",  # bert
            "language_model.encoder.layers.{bid}.post_attention_layernorm",  # persimmon
            "model.layers.{bid}.ln2",  # yi
            "h.{bid}.ln_2",  # gpt2
            "model.layers.{bid}.ffn_norm",  # internlm2
        ),

        MODEL_TENSOR.FFN_GATE_INP: (
            "layers.{bid}.feed_forward.gate",  # mixtral
            "model.layers.{bid}.block_sparse_moe.gate",  # mixtral
        ),

        # Feed-forward up
        MODEL_TENSOR.FFN_UP: (
            "gpt_neox.layers.{bid}.mlp.dense_h_to_4h",  # gptneox
            "transformer.h.{bid}.mlp.c_fc",  # gpt2
            "transformer.blocks.{bid}.ffn.up_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_h_to_4h",  # falcon
            "h.{bid}.mlp.dense_h_to_4h",  # bloom
            "model.layers.{bid}.mlp.up_proj",  # llama-hf refact
            "layers.{bid}.feed_forward.w3",  # llama-pth
            "encoder.layer.{bid}.intermediate.dense",  # bert
            "transformer.h.{bid}.mlp.fc_in",  # gpt-j
            "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "model.layers.{bid}.mlp.dense_h_to_4h",  # persimmon
            "transformer.h.{bid}.mlp.w1",  # qwen
            "h.{bid}.mlp.c_fc",  # gpt2
            "transformer.h.{bid}.mlp.fc1",  # phi2
            "model.layers.{bid}.mlp.fc1",  # phi2
            "model.layers.layers.{bid}.mlp.up_proj",  # plamo
            "model.layers.{bid}.feed_forward.w3",  # internlm2
        ),

        MODEL_TENSOR.FFN_UP_EXP: (
            "layers.{bid}.feed_forward.experts.{xid}.w3",  # mixtral
            "model.layers.{bid}.block_sparse_moe.experts.{xid}.w3",  # mixtral
        ),

        # AWQ-activation gate
        MODEL_TENSOR.FFN_ACT: (
            "transformer.blocks.{bid}.ffn.act",  # mpt
        ),

        # Feed-forward gate
        MODEL_TENSOR.FFN_GATE: (
            "model.layers.{bid}.mlp.gate_proj",  # llama-hf refact
            "layers.{bid}.feed_forward.w1",  # llama-pth
            "transformer.h.{bid}.mlp.w2",  # qwen
            "model.layers.layers.{bid}.mlp.gate_proj",  # plamo
            "model.layers.{bid}.feed_forward.w1",  # internlm2
        ),

        MODEL_TENSOR.FFN_GATE_EXP: (
            "layers.{bid}.feed_forward.experts.{xid}.w1",  # mixtral
            "model.layers.{bid}.block_sparse_moe.experts.{xid}.w1",  # mixtral
        ),

        # Feed-forward down
        MODEL_TENSOR.FFN_DOWN: (
            "gpt_neox.layers.{bid}.mlp.dense_4h_to_h",  # gptneox
            "transformer.h.{bid}.mlp.c_proj",  # gpt2 refact qwen
            "transformer.blocks.{bid}.ffn.down_proj",  # mpt
            "transformer.h.{bid}.mlp.dense_4h_to_h",  # falcon
            "h.{bid}.mlp.dense_4h_to_h",  # bloom
            "model.layers.{bid}.mlp.down_proj",  # llama-hf
            "layers.{bid}.feed_forward.w2",  # llama-pth
            "encoder.layer.{bid}.output.dense",  # bert
            "transformer.h.{bid}.mlp.fc_out",  # gpt-j
            "language_model.encoder.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "model.layers.{bid}.mlp.dense_4h_to_h",  # persimmon
            "h.{bid}.mlp.c_proj",  # gpt2
            "transformer.h.{bid}.mlp.fc2",  # phi2
            "model.layers.{bid}.mlp.fc2",  # phi2
            "model.layers.layers.{bid}.mlp.down_proj",  # plamo
            "model.layers.{bid}.feed_forward.w2",  # internlm2
        ),

        MODEL_TENSOR.FFN_DOWN_EXP: (
            "layers.{bid}.feed_forward.experts.{xid}.w2",  # mixtral
            "model.layers.{bid}.block_sparse_moe.experts.{xid}.w2",  # mixtral
        ),

        MODEL_TENSOR.ATTN_Q_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.q_layernorm",
            "model.layers.{bid}.self_attn.q_layernorm",  # persimmon
        ),

        MODEL_TENSOR.ATTN_K_NORM: (
            "language_model.encoder.layers.{bid}.self_attention.k_layernorm",
            "model.layers.{bid}.self_attn.k_layernorm",  # persimmon
        ),

        MODEL_TENSOR.ROPE_FREQS: (
            "language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq",  # persimmon
        ),
    }

    mapping: dict[str, tuple[MODEL_TENSOR, str]]

    def __init__(self, arch: MODEL_ARCH, n_blocks: int):
        self.mapping = {}
        for tensor, keys in self.mappings_cfg.items():
            if tensor not in MODEL_TENSORS[arch]:
                continue
            tensor_name = TENSOR_NAMES[tensor]
            self.mapping[tensor_name] = (tensor, tensor_name)
            for key in keys:
                self.mapping[key] = (tensor, tensor_name)
        for bid in range(n_blocks):
            for tensor, keys in self.block_mappings_cfg.items():
                if tensor not in MODEL_TENSORS[arch]:
                    continue
                # TODO: make this configurable
                n_experts = 8
                for xid in range(n_experts):
                    tensor_name = TENSOR_NAMES[tensor].format(bid = bid, xid = xid)
                    self.mapping[tensor_name] = (tensor, tensor_name)
                    for key in keys:
                        key = key.format(bid = bid, xid = xid)
                        self.mapping[key] = (tensor, tensor_name)

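    # Lookup helpers: try `key` as-is first; on a miss, retry with each entry
    # of `try_suffixes` (e.g. ".weight", ".bias") stripped from the end, and
    # re-append the matched suffix to the standardized name that comes back.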
    def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None:
        result = self.mapping.get(key)
        if result is not None:
            return result
        for suffix in try_suffixes:
            if key.endswith(suffix):
                result = self.mapping.get(key[:-len(suffix)])
                if result is not None:
                    return result[0], result[1] + suffix
        return None

    def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
        result = self.get_type_and_name(key, try_suffixes = try_suffixes)
        if result is None:
            return None
        return result[1]

    def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None:
        result = self.get_type_and_name(key, try_suffixes = try_suffixes)
        if result is None:
            return None
        return result[0]

    def __getitem__(self, key: str) -> str:
        try:
            return self.mapping[key][1]
        except KeyError:
            raise KeyError(key)

    def __contains__(self, key: str) -> bool:
        return key in self.mapping

    def __repr__(self) -> str:
        return repr(self.mapping)


def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> TensorNameMap:
    return TensorNameMap(arch, n_blocks)
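
# Illustrative usage sketch (assumes an architecture such as MODEL_ARCH.LLAMA
# from .constants; any architecture listed in MODEL_TENSORS works the same way):
#
#   tmap = get_tensor_name_map(MODEL_ARCH.LLAMA, n_blocks = 32)
#   tmap.get_name("model.layers.0.self_attn.q_proj.weight",
#                 try_suffixes = (".weight", ".bias"))
#   # returns the GGUF-standard name for block 0's attention query projection,
#   # with the matched ".weight" suffix re-attached, or None if unmapped.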