[ { "sources": [ { "filename": "config.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/config.json" }, { "filename": "mistral_float16_tp1_rank0.engine", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/mistral_float16_tp1_rank0.engine" }, { "filename": "tokenizer.model", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/tokenizer.model" }, { "filename": "special_tokens_map.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/special_tokens_map.json" }, { "filename": "tokenizer.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/tokenizer.json" }, { "filename": "tokenizer_config.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/tokenizer_config.json" }, { "filename": "model.cache", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/LlamaCorn-1.1B-Chat-fp16/model.cache" } ], "id": "llamacorn-1.1b-chat-fp16", "object": "model", "name": "LlamaCorn 1.1B Chat FP16", "version": "1.0", "description": "LlamaCorn is a refined version of TinyLlama-1.1B, optimized for conversational quality, running on consumer devices through TensorRT-LLM", "format": "TensorRT-LLM", "settings": { "ctx_len": 2048, "text_model": false }, "parameters": { "max_tokens": 4096 }, "metadata": { "author": "LLama", "tags": ["TensorRT-LLM", "1B", "Finetuned"], "size": 2151000000 }, "engine": "nitro-tensorrt-llm" }, { "sources": [ { "filename": "config.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/config.json" }, { "filename": "mistral_float16_tp1_rank0.engine", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/mistral_float16_tp1_rank0.engine" }, { "filename": "tokenizer.model", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer.model" }, { "filename": "special_tokens_map.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/special_tokens_map.json" }, { "filename": "tokenizer.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer.json" }, { "filename": "tokenizer_config.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/tokenizer_config.json" }, { "filename": "model.cache", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/TinyJensen-1.1B-Chat-fp16/model.cache" } ], "id": "tinyjensen-1.1b-chat-fp16", "object": "model", "name": "TinyJensen 1.1B Chat FP16", "version": "1.0", "description": "Do you want to chat with Jensen Huan? 
Here you are", "format": "TensorRT-LLM", "settings": { "ctx_len": 2048, "text_model": false }, "parameters": { "max_tokens": 4096 }, "metadata": { "author": "LLama", "tags": ["TensorRT-LLM", "1B", "Finetuned"], "size": 2151000000 }, "engine": "nitro-tensorrt-llm" }, { "sources": [ { "filename": "config.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/config.json" }, { "filename": "mistral_float16_tp1_rank0.engine", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/mistral_float16_tp1_rank0.engine" }, { "filename": "tokenizer.model", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/tokenizer.model" }, { "filename": "special_tokens_map.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/special_tokens_map.json" }, { "filename": "tokenizer.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/tokenizer.json" }, { "filename": "tokenizer_config.json", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/tokenizer_config.json" }, { "filename": "model.cache", "url": "https://catalog.jan.ai/dist/models///tensorrt-llm-v0.7.1/Mistral-7B-Instruct-v0.1-int4/model.cache" } ], "id": "mistral-7b-instruct-int4", "object": "model", "name": "Mistral 7B Instruct v0.1 INT4", "version": "1.0", "description": "Mistral 7B Instruct v0.1 INT4", "format": "TensorRT-LLM", "settings": { "ctx_len": 2048, "text_model": false, "prompt_template": "[INST] {prompt} [/INST]" }, "parameters": { "max_tokens": 4096 }, "metadata": { "author": "MistralAI", "tags": ["TensorRT-LLM", "7B", "Finetuned"], "size": 3840000000 }, "engine": "nitro-tensorrt-llm" } ]