[{"owner":"google","name":"gemma-4-26b-a4b","revisionNumber":1,"createdAt":1775146154775,"updatedAt":1775146157859,"staffPickedAt":1775146457549,"description":"Gemma 4 is the latest on-device model family from Google. This is the 26B (active 4B) MoE version. Supports vision and reasoning.","likeCount":55,"forkCount":2,"downloads":298473,"isPrivate":false,"metadata":{"type":"llm","architectures":["gemma4"],"compatibilityTypes":["gguf"],"paramsStrings":["26B"],"minMemoryUsageBytes":17000000000,"trainedForToolUse":true,"vision":true,"reasoning":true,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/google/gemma-4-26b-a4b"},{"owner":"nvidia","name":"nemotron-3-nano-4b","revisionNumber":1,"createdAt":1773670750415,"updatedAt":1773670752858,"staffPickedAt":1773670963288,"description":"General purpose reasoning and chat model trained by NVIDIA","likeCount":7,"forkCount":2,"downloads":84495,"isPrivate":false,"metadata":{"type":"llm","architectures":["nemotron_h"],"compatibilityTypes":["gguf"],"paramsStrings":["4B"],"minMemoryUsageBytes":4500000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/nvidia/nemotron-3-nano-4b"},{"owner":"nvidia","name":"nemotron-3-super","revisionNumber":1,"createdAt":1773244928989,"updatedAt":1773244931805,"staffPickedAt":1773245044308,"description":"NVIDIA Nemotron 3 Super, a 120B open hybrid MoE model (12B active), supporting up to 1M tokens context 
window","likeCount":31,"forkCount":1,"downloads":110157,"isPrivate":false,"metadata":{"type":"llm","architectures":["nemotron_h_moe"],"compatibilityTypes":["gguf"],"paramsStrings":["120B"],"minMemoryUsageBytes":83000000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[1048576]},"url":"https://lmstudio.ai/models/nvidia/nemotron-3-super"},{"owner":"qwen","name":"qwen3.5-9b","revisionNumber":2,"createdAt":1772470628805,"updatedAt":1772857537663,"staffPickedAt":1772503746959,"description":"Qwen3.5 represents a significant leap forward, integrating breakthroughs in multimodal learning, architectural efficiency, reinforcement learning scale, and global accessibility. This is a 9B parameter dense model, supporting a native context length of 262,144 tokens.","likeCount":61,"forkCount":3,"downloads":1075753,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen35"],"compatibilityTypes":["gguf"],"paramsStrings":["9B"],"minMemoryUsageBytes":7000000000,"trainedForToolUse":true,"vision":true,"reasoning":true,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/qwen/qwen3.5-9b"},{"owner":"qwen","name":"qwen3.5-35b-a3b","revisionNumber":1,"createdAt":1771967161403,"updatedAt":1771967164423,"staffPickedAt":1771967254121,"description":"Qwen3.5 is a reasoning vision-language model that supports tool use. 
With 35B total parameters and 3B activated, it outperforms previous generation models more than 6x its size.","likeCount":50,"forkCount":1,"downloads":647680,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen35moe"],"compatibilityTypes":["gguf"],"paramsStrings":["35B"],"minMemoryUsageBytes":21000000000,"trainedForToolUse":true,"vision":true,"reasoning":true,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/qwen/qwen3.5-35b-a3b"},{"owner":"liquid","name":"lfm2-24b-a2b","revisionNumber":1,"createdAt":1771942116236,"updatedAt":1771942118499,"staffPickedAt":1771946503261,"description":"LFM2 is a family of hybrid models designed for on-device deployment. LFM2-24B-A2B is the largest model in the family, a 24B MoE model with only 2B active parameters per token, fitting in 32 GB of RAM for deployment on consumer laptops and desktops.","likeCount":12,"forkCount":0,"downloads":57297,"isPrivate":false,"metadata":{"type":"llm","architectures":["lfm2_moe"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["24B"],"minMemoryUsageBytes":14000000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[32768]},"url":"https://lmstudio.ai/models/liquid/lfm2-24b-a2b"},{"owner":"minimax","name":"minimax-m2.5","revisionNumber":1,"createdAt":1771020724769,"updatedAt":1771020728278,"staffPickedAt":1771020984143,"description":"MiniMax-M2.5 is an MoE model (230B total) extensively trained with reinforcement learning in hundreds of thousands of real-world environments, delivering SOTA results in coding, agentic tool use, search, and office 
work.","likeCount":3,"forkCount":0,"downloads":7266,"isPrivate":false,"metadata":{"type":"llm","architectures":["minimax-m2"],"compatibilityTypes":["safetensors","gguf"],"paramsStrings":["230B"],"minMemoryUsageBytes":121000000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[196608]},"url":"https://lmstudio.ai/models/minimax/minimax-m2.5"},{"owner":"qwen","name":"qwen3-coder-next","revisionNumber":2,"createdAt":1770136296052,"updatedAt":1770136818279,"staffPickedAt":1770136336735,"description":"Qwen Coder Next is an 80B MoE with 3B active parameters designed for coding agents and local development. Excels at long-horizon reasoning, complex tool usage, and recovery from execution failures.","likeCount":71,"forkCount":2,"downloads":216200,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3_next"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["80B"],"minMemoryUsageBytes":42000000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/qwen/qwen3-coder-next"},{"owner":"zai-org","name":"glm-4.7-flash","revisionNumber":5,"createdAt":1768851284222,"updatedAt":1768930994293,"staffPickedAt":1768851770905,"description":"GLM 4.7 Flash is a 30B A3B MoE model from Z.ai. 
It supports a context length of 128k tokens and achieves strong performance on coding benchmarks among models of similar scale.","likeCount":85,"forkCount":4,"downloads":281543,"isPrivate":false,"metadata":{"type":"llm","architectures":["glm4_moe_lite"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["30B"],"minMemoryUsageBytes":16000000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/zai-org/glm-4.7-flash"},{"owner":"liquid","name":"lfm2.5-1.2b","revisionNumber":2,"createdAt":1767821759966,"updatedAt":1767823197323,"staffPickedAt":1767823537857,"description":"LFM2.5 is a new family of hybrid models designed for on-device deployment. It builds on the LFM2 device-optimized architecture and represents a significant leap forward in building reliable agents on the edge.","likeCount":10,"forkCount":1,"downloads":76862,"isPrivate":false,"metadata":{"type":"llm","architectures":["lfm2"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["1.2B"],"minMemoryUsageBytes":950000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[32768]},"url":"https://lmstudio.ai/models/liquid/lfm2.5-1.2b"},{"owner":"nvidia","name":"nemotron-3-nano","revisionNumber":5,"createdAt":1765808078949,"updatedAt":1765911138908,"staffPickedAt":1765808298275,"description":"General purpose reasoning and chat model trained from scratch by NVIDIA. 
Contains 30B total parameters with only 3.5B active at a time for low-latency MoE inference","likeCount":55,"forkCount":3,"downloads":150740,"isPrivate":false,"metadata":{"type":"llm","architectures":["nemotron_h_moe"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["30B"],"minMemoryUsageBytes":24620000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[1048576]},"url":"https://lmstudio.ai/models/nvidia/nemotron-3-nano"},{"owner":"zai-org","name":"glm-4.6v-flash","revisionNumber":2,"createdAt":1765587973184,"updatedAt":1766178903007,"staffPickedAt":1765590428134,"description":"GLM 4.6V Flash is a 9B vision-language model optimized for local deployment and low-latency applications. It supports a context length of 128k tokens and achieves strong performance in visual understanding among models of similar scale.","likeCount":52,"forkCount":2,"downloads":282118,"isPrivate":false,"metadata":{"type":"llm","architectures":["glm4v"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["9B"],"minMemoryUsageBytes":8000000000,"trainedForToolUse":true,"vision":true,"reasoning":true,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/zai-org/glm-4.6v-flash"},{"owner":"mistralai","name":"devstral-small-2-2512","revisionNumber":4,"createdAt":1765493797670,"updatedAt":1765636840498,"staffPickedAt":1765494342109,"description":"Second-generation Devstral Small for agentic coding. 
Built for tool use to explore codebases, edit multiple files, and power software engineering agents with newly added vision support.","likeCount":44,"forkCount":0,"downloads":167946,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral3"],"compatibilityTypes":["safetensors","gguf"],"paramsStrings":["24B"],"minMemoryUsageBytes":16300000000,"trainedForToolUse":true,"vision":true,"reasoning":false,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/mistralai/devstral-small-2-2512"},{"owner":"essentialai","name":"rnj-1","revisionNumber":4,"createdAt":1765301346744,"updatedAt":1765328118141,"staffPickedAt":1765327621741,"description":"Rnj-1 is an 8B parameter open-weight, dense model trained from scratch by Essential AI, optimized for code and STEM with capabilities on par with SOTA open-weight models.","likeCount":16,"forkCount":1,"downloads":57501,"isPrivate":false,"metadata":{"type":"llm","architectures":["gemma3"],"compatibilityTypes":["gguf"],"paramsStrings":["8B"],"minMemoryUsageBytes":5500000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[32768]},"url":"https://lmstudio.ai/models/essentialai/rnj-1"},{"owner":"mistralai","name":"ministral-3-14b-reasoning","revisionNumber":1,"createdAt":1764698611134,"updatedAt":1764698614813,"staffPickedAt":1764701507482,"description":"The reasoning post-trained version of Ministral 3 14B, optimized for complex reasoning 
tasks.","likeCount":29,"forkCount":0,"downloads":305741,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral3"],"compatibilityTypes":["gguf"],"paramsStrings":["14B"],"minMemoryUsageBytes":9500000000,"trainedForToolUse":true,"vision":true,"reasoning":true,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/mistralai/ministral-3-14b-reasoning"},{"owner":"mistralai","name":"ministral-3-3b","revisionNumber":1,"createdAt":1764694604545,"updatedAt":1764694608713,"staffPickedAt":1764696790428,"description":"The smallest model in the Ministral 3 family, combining a 3.4B language model with a 0.4B vision encoder for efficient edge deployment.","likeCount":24,"forkCount":1,"downloads":188531,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral3"],"compatibilityTypes":["gguf"],"paramsStrings":["3B"],"minMemoryUsageBytes":2000000000,"trainedForToolUse":true,"vision":true,"reasoning":false,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/mistralai/ministral-3-3b"},{"owner":"allenai","name":"olmo-3-32b-think","revisionNumber":3,"createdAt":1763699783066,"updatedAt":1764712685222,"staffPickedAt":1763700050069,"description":"AllenAI's flagship post-trained reasoning model built on Olmo 3-Base","likeCount":12,"forkCount":0,"downloads":35021,"isPrivate":false,"metadata":{"type":"llm","architectures":["olmo3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["32B"],"minMemoryUsageBytes":19000000000,"trainedForToolUse":false,"vision":false,"reasoning":true,"fim":false,"contextLengths":[65536]},"url":"https://lmstudio.ai/models/allenai/olmo-3-32b-think"},{"owner":"allenai","name":"olmocr-2-7b","revisionNumber":1,"createdAt":1763612148779,"updatedAt":1763612153617,"staffPickedAt":1763612337780,"description":"The olmOCR 2 model is a Vision Language Model (VLM) from Allen 
AI.","likeCount":18,"forkCount":0,"downloads":65020,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen2vl"],"compatibilityTypes":["gguf"],"paramsStrings":["7B"],"minMemoryUsageBytes":4700000000,"trainedForToolUse":false,"vision":true,"reasoning":false,"fim":false,"contextLengths":[128000]},"url":"https://lmstudio.ai/models/allenai/olmocr-2-7b"},{"owner":"minimax","name":"minimax-m2","revisionNumber":1,"createdAt":1762383689473,"updatedAt":1762383694602,"staffPickedAt":1762383824889,"description":"MiniMax M2 is a 230B MoE (10 active) LLM, built for coding and agentic workflows.","likeCount":34,"forkCount":3,"downloads":19464,"isPrivate":false,"metadata":{"type":"llm","architectures":["minimax-m2"],"compatibilityTypes":["safetensors","gguf"],"paramsStrings":["230B"],"minMemoryUsageBytes":121000000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[128000]},"url":"https://lmstudio.ai/models/minimax/minimax-m2"},{"owner":"qwen","name":"qwen3-vl-4b","revisionNumber":4,"createdAt":1760473635064,"updatedAt":1762261222835,"staffPickedAt":1760473894838,"description":"The 4B version of Qwen's latest vision-language model. Includes comprehensive upgrades to visual perception, spatial reasoning, and image understanding.","likeCount":27,"forkCount":2,"downloads":246673,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3_vl"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["4B"],"minMemoryUsageBytes":3000000000,"trainedForToolUse":true,"vision":true,"reasoning":false,"fim":false,"contextLengths":[256000]},"url":"https://lmstudio.ai/models/qwen/qwen3-vl-4b"},{"owner":"qwen","name":"qwen3-vl-8b","revisionNumber":4,"createdAt":1760473641201,"updatedAt":1762261234160,"staffPickedAt":1760473886444,"description":"The 8B version of Qwen's latest vision-language model. 
Includes comprehensive upgrades to visual perception, spatial reasoning, and image understanding.","likeCount":38,"forkCount":0,"downloads":327008,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3_vl"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["8B"],"minMemoryUsageBytes":6000000000,"trainedForToolUse":true,"vision":true,"reasoning":false,"fim":false,"contextLengths":[256000]},"url":"https://lmstudio.ai/models/qwen/qwen3-vl-8b"},{"owner":"qwen","name":"qwen3-vl-30b","revisionNumber":5,"createdAt":1760473628950,"updatedAt":1762261203685,"staffPickedAt":1760473860805,"description":"The latest generation vision-language MoE model in the Qwen series with comprehensive upgrades to visual perception, spatial reasoning, and image understanding.","likeCount":26,"forkCount":0,"downloads":173458,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3_vl_moe"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["30B"],"minMemoryUsageBytes":18400000000,"trainedForToolUse":true,"vision":true,"reasoning":false,"fim":false,"contextLengths":[256000]},"url":"https://lmstudio.ai/models/qwen/qwen3-vl-30b"},{"owner":"ibm","name":"granite-4-h-tiny","revisionNumber":2,"createdAt":1759413923498,"updatedAt":1763393580371,"staffPickedAt":1759414036605,"description":"A hybrid MoE model trained for tool use from IBM.","likeCount":24,"forkCount":1,"downloads":63215,"isPrivate":false,"metadata":{"type":"llm","architectures":["granitehybrid"],"compatibilityTypes":["gguf"],"paramsStrings":["7B"],"minMemoryUsageBytes":4541927916,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/ibm/granite-4-h-tiny"},{"owner":"mistralai","name":"magistral-small-2509","revisionNumber":2,"createdAt":1758206112706,"updatedAt":1758207019740,"staffPickedAt":1758206303871,"description":"Reasoning model that supports image input and tool calling. By MistralAI. 
","likeCount":35,"forkCount":0,"downloads":97094,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["24B"],"minMemoryUsageBytes":15354508083,"trainedForToolUse":true,"vision":true,"reasoning":true,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/mistralai/magistral-small-2509"},{"owner":"qwen","name":"qwen3-next-80b","revisionNumber":2,"createdAt":1757979796678,"updatedAt":1764634498906,"staffPickedAt":1757980908493,"description":"Hybrid attention architecture, high-sparsity Mixture-of-Experts 80B model (active 3B). Currently supported for Mac only with MLX.","likeCount":29,"forkCount":1,"downloads":60009,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3_next"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["80B"],"minMemoryUsageBytes":42000000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/qwen/qwen3-next-80b"},{"owner":"bytedance","name":"seed-oss-36b","revisionNumber":1,"createdAt":1756408276612,"updatedAt":1756408280062,"staffPickedAt":1756409070746,"description":"Advanced reasoning model from ByteDance with flexible \"thinking budget\" control and ability to reflect on the length of its own reasoning","likeCount":23,"forkCount":0,"downloads":51535,"isPrivate":false,"metadata":{"type":"llm","architectures":["seed_oss"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["36B"],"minMemoryUsageBytes":21000000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[524288]},"url":"https://lmstudio.ai/models/bytedance/seed-oss-36b"},{"owner":"nousresearch","name":"hermes-4-70b","revisionNumber":5,"createdAt":1756235816243,"updatedAt":1756236012040,"staffPickedAt":1756236059369,"description":"Hybrid-mode reasoning model based on Llama-3.1-70B by Nous 
Research","likeCount":15,"forkCount":1,"downloads":27429,"isPrivate":false,"metadata":{"type":"llm","architectures":["llama"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["70B"],"minMemoryUsageBytes":39835821670,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/nousresearch/hermes-4-70b"},{"owner":"qwen","name":"qwen3-4b-thinking-2507","revisionNumber":3,"createdAt":1754495984308,"updatedAt":1758291375527,"staffPickedAt":1754510390000,"description":"Updated thinking version of Qwen3 4B featuring continued scaling of thinking capability, improving both the quality and depth of reasoning","likeCount":56,"forkCount":1,"downloads":206058,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["4B"],"minMemoryUsageBytes":2300000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/qwen/qwen3-4b-thinking-2507"},{"owner":"qwen","name":"qwen3-4b-2507","revisionNumber":3,"createdAt":1754495980403,"updatedAt":1758291370457,"staffPickedAt":1754510386000,"description":"Updated version of Qwen3 4B non-thinking mode featuring significant improvements in general capabilities including instruction following, logical reasoning, text comprehension, mathematics, science, coding and tool usage.","likeCount":30,"forkCount":1,"downloads":120371,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["4B"],"minMemoryUsageBytes":2300000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/qwen/qwen3-4b-2507"},{"owner":"openai","name":"gpt-oss-120b","revisionNumber":3,"createdAt":1754415724272,"updatedAt":1755578015321,"staffPickedAt":1754421401293,"description":"The 120B variant of OpenAI's open 
source model. Apache 2.0 licensed.","likeCount":79,"forkCount":0,"downloads":190582,"isPrivate":false,"metadata":{"type":"llm","architectures":["gpt-oss"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["120B"],"minMemoryUsageBytes":65000000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/openai/gpt-oss-120b"},{"owner":"openai","name":"gpt-oss-20b","revisionNumber":7,"createdAt":1754414649426,"updatedAt":1756503346925,"staffPickedAt":1754415462732,"staffPickedDirectives":"[]","description":"The 20B variant of OpenAI's open source model. Apache 2.0 licensed.","likeCount":247,"forkCount":0,"downloads":1448394,"isPrivate":false,"metadata":{"type":"llm","architectures":["gpt-oss"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["20B"],"minMemoryUsageBytes":12000000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/openai/gpt-oss-20b"},{"owner":"qwen","name":"qwen3-coder-30b","revisionNumber":2,"createdAt":1753975287385,"updatedAt":1754147147590,"staffPickedAt":1753975535603,"description":"A powerful 30B MoE coding model from Alibaba Qwen, joining its larger 480B counterpart","likeCount":104,"forkCount":1,"downloads":343455,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3moe"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["30B"],"minMemoryUsageBytes":15000000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/qwen/qwen3-coder-30b"},{"owner":"qwen","name":"qwen3-30b-a3b-2507","revisionNumber":1,"createdAt":1753806570735,"updatedAt":1753806573169,"staffPickedAt":1753806634043,"description":"Updated version of Qwen3-30B-A3B featuring significant improvements in general capabilities including instruction following, logical reasoning, text comprehension, mathematics, 
science, coding and tool usage.","likeCount":41,"forkCount":0,"downloads":105693,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3moe"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["30B"],"minMemoryUsageBytes":17400000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/qwen/qwen3-30b-a3b-2507"},{"owner":"qwen","name":"qwen3-coder-480b","revisionNumber":3,"createdAt":1753303753247,"updatedAt":1753975292963,"staffPickedAt":1753304561201,"description":"Qwen's most powerful code model, featuring 480B total parameters with 35B activated through Mixture of Experts (MoE) architecture.","likeCount":33,"forkCount":1,"downloads":18675,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3_moe"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["480B"],"minMemoryUsageBytes":250000000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/qwen/qwen3-coder-480b"},{"owner":"qwen","name":"qwen3-235b-a22b-2507","revisionNumber":3,"createdAt":1753134432419,"updatedAt":1753303751912,"staffPickedAt":1753140342326,"description":"Updated version of Qwen3-235B-A22B featuring significant improvements in general capabilities including instruction following, logical reasoning, text comprehension, mathematics, science, coding and tool usage.","likeCount":14,"forkCount":0,"downloads":19910,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3moe"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["235B"],"minMemoryUsageBytes":134300000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[262144]},"url":"https://lmstudio.ai/models/qwen/qwen3-235b-a22b-2507"},{"owner":"liquid","name":"lfm2-1.2b","revisionNumber":1,"createdAt":1752678477325,"updatedAt":1752678480235,"staffPickedAt":1752678903718,"description":"Hybrid 
architecture model intended for local use, by Liquid AI","likeCount":39,"forkCount":0,"downloads":63066,"isPrivate":false,"metadata":{"type":"llm","architectures":["lfm2"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["1.2B"],"minMemoryUsageBytes":700000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[32768]},"url":"https://lmstudio.ai/models/liquid/lfm2-1.2b"},{"owner":"baidu","name":"ernie-4.5-21b-a3b","revisionNumber":3,"createdAt":1753458873879,"updatedAt":1753719741173,"staffPickedAt":1752159328000,"description":"Medium-size mixture-of-experts model from Baidu's new Ernie 4.5 line of foundation models","likeCount":11,"forkCount":0,"downloads":19080,"isPrivate":false,"metadata":{"type":"llm","architectures":["ernie4_5"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["21B"],"minMemoryUsageBytes":12000000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/baidu/ernie-4.5-21b-a3b"},{"owner":"mistralai","name":"devstral-small-2507","revisionNumber":4,"createdAt":1752155835796,"updatedAt":1752172317196,"staffPickedAt":1752157784182,"description":"Devstral excels at using tools to explore codebases and editing multiple files to power software engineering agents.","likeCount":15,"forkCount":0,"downloads":45445,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral"],"compatibilityTypes":["safetensors","gguf"],"paramsStrings":["24B"],"minMemoryUsageBytes":14500000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/mistralai/devstral-small-2507"},{"owner":"google","name":"gemma-3n-e4b","revisionNumber":5,"createdAt":1750955645556,"updatedAt":1751997890567,"staffPickedAt":1752001664749,"staffPickedDirectives":"[]","description":"Gemma 3n is a multimodal generative AI model optimized for use in everyday devices, such as 
phones, laptops, and tablets.","likeCount":76,"forkCount":1,"downloads":232139,"isPrivate":false,"metadata":{"type":"llm","architectures":["gemma3n"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["6.9B"],"minMemoryUsageBytes":4240000000,"trainedForToolUse":false,"vision":"mixed","reasoning":false,"fim":false,"contextLengths":[32768]},"url":"https://lmstudio.ai/models/google/gemma-3n-e4b"},{"owner":"mistralai","name":"mistral-small-3.2","revisionNumber":4,"createdAt":1750460298518,"updatedAt":1752701348997,"staffPickedAt":1750461008548,"description":"Update to Mistral Small 3.1 with better instruction following, fewer infinite generation issues, and an improved tone.","likeCount":20,"forkCount":0,"downloads":77829,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["24B"],"minMemoryUsageBytes":14300000000,"trainedForToolUse":false,"vision":true,"reasoning":false,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/mistralai/mistral-small-3.2"},{"owner":"mistralai","name":"magistral-small","revisionNumber":2,"createdAt":1749564639092,"updatedAt":1749590216186,"staffPickedAt":1749565228069,"description":"MistralAI's first reasoning model, based on Mistral Small 3.1","likeCount":14,"forkCount":0,"downloads":32574,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["23.6B"],"minMemoryUsageBytes":19150000000,"trainedForToolUse":false,"vision":false,"reasoning":true,"fim":false,"contextLengths":[49152]},"url":"https://lmstudio.ai/models/mistralai/magistral-small"},{"owner":"deepseek","name":"deepseek-r1-0528-qwen3-8b","revisionNumber":5,"createdAt":1748527091381,"updatedAt":1748530177680,"staffPickedAt":1748527253578,"description":"Distilled version of the DeepSeek-R1-0528 model, created by continuing the post-training process on the Qwen3 8B Base model using Chain-of-Thought 
(CoT) from DeepSeek-R1-0528.","likeCount":136,"forkCount":4,"downloads":675638,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["8B"],"minMemoryUsageBytes":4300000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/deepseek/deepseek-r1-0528-qwen3-8b"},{"owner":"mistralai","name":"devstral-small-2505","revisionNumber":2,"createdAt":1747707102714,"updatedAt":1747842773669,"staffPickedAt":1747836708393,"description":"Devstral by MistralAI is based on Mistral Small 3.1. Debuts as the #1 open source model on SWE-bench.","likeCount":23,"forkCount":0,"downloads":31109,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral"],"compatibilityTypes":["safetensors","gguf"],"paramsStrings":["23.6B"],"minMemoryUsageBytes":19150000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/mistralai/devstral-small-2505"},{"owner":"microsoft","name":"phi-4-mini-reasoning","revisionNumber":2,"createdAt":1747516828964,"updatedAt":1747517414960,"staffPickedAt":1746066961000,"description":"Lightweight open model from the Phi-4 family","likeCount":18,"forkCount":0,"downloads":86390,"isPrivate":false,"metadata":{"type":"llm","architectures":["phi-4"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["3.8B"],"minMemoryUsageBytes":3000000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/microsoft/phi-4-mini-reasoning"},{"owner":"microsoft","name":"phi-4-reasoning-plus","revisionNumber":2,"createdAt":1747516856323,"updatedAt":1747517474542,"staffPickedAt":1746066961000,"description":"Advanced open-weight reasoning model, finetuned from Phi-4 with additional reinforcement learning for higher 
accuracy","likeCount":20,"forkCount":0,"downloads":71272,"isPrivate":false,"metadata":{"type":"llm","architectures":["phi-4"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["14.7B"],"minMemoryUsageBytes":8000000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/microsoft/phi-4-reasoning-plus"},{"owner":"qwen","name":"qwen3-235b-a22b","revisionNumber":11,"createdAt":1746068788704,"updatedAt":1748119450751,"staffPickedAt":1745886380000,"description":"The 235B parameter (MoE) version of the Qwen3 model family.","likeCount":7,"forkCount":1,"downloads":13290,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3moe"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["235B"],"minMemoryUsageBytes":134300000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[40960]},"url":"https://lmstudio.ai/models/qwen/qwen3-235b-a22b"},{"owner":"qwen","name":"qwen3-32b","revisionNumber":11,"createdAt":1746068794190,"updatedAt":1748119456058,"staffPickedAt":1745870060000,"description":"The 32B parameter version of the Qwen3 model family.","likeCount":7,"forkCount":0,"downloads":67713,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["32B"],"minMemoryUsageBytes":18700000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[40960]},"url":"https://lmstudio.ai/models/qwen/qwen3-32b"},{"owner":"qwen","name":"qwen3-30b-a3b","revisionNumber":9,"createdAt":1746068791405,"updatedAt":1748119453432,"staffPickedAt":1745861657000,"description":"The 30B parameter (MoE) version of the Qwen3 model 
family.","likeCount":7,"forkCount":0,"downloads":37587,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3moe"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["30B"],"minMemoryUsageBytes":17400000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[40960]},"url":"https://lmstudio.ai/models/qwen/qwen3-30b-a3b"},{"owner":"qwen","name":"qwen3-1.7b","revisionNumber":8,"createdAt":1746068783317,"updatedAt":1748119445402,"staffPickedAt":1745860113000,"description":"The 1.7B parameter version of the Qwen3 model family.","likeCount":8,"forkCount":0,"downloads":63286,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["2B"],"minMemoryUsageBytes":1100000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[40960]},"url":"https://lmstudio.ai/models/qwen/qwen3-1.7b"},{"owner":"qwen","name":"qwen3-4b","revisionNumber":8,"createdAt":1746068796996,"updatedAt":1748119458713,"staffPickedAt":1745859364000,"staffPickedDirectives":"[{\"type\":\"onboarding\",\"message\":\"\"}]","description":"The 4B parameter version of the Qwen3 model family.","likeCount":8,"forkCount":2,"downloads":64071,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["4B"],"minMemoryUsageBytes":2200000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[40960]},"url":"https://lmstudio.ai/models/qwen/qwen3-4b"},{"owner":"qwen","name":"qwen3-14b","revisionNumber":15,"createdAt":1746068786158,"updatedAt":1748119448181,"staffPickedAt":1745857288000,"description":"The 14B parameter version of the Qwen3 model 
family.","likeCount":9,"forkCount":0,"downloads":117722,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["14B"],"minMemoryUsageBytes":8400000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[40960]},"url":"https://lmstudio.ai/models/qwen/qwen3-14b"},{"owner":"qwen","name":"qwen3-8b","revisionNumber":9,"createdAt":1746068800040,"updatedAt":1748119461362,"staffPickedAt":1745855836000,"staffPickedDirectives":"[]","description":"The 8B parameter version of the Qwen3 model family.","likeCount":11,"forkCount":0,"downloads":202521,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["8B"],"minMemoryUsageBytes":4600000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[40960]},"url":"https://lmstudio.ai/models/qwen/qwen3-8b"},{"owner":"google","name":"gemma-3-27b","revisionNumber":3,"createdAt":1747167406799,"updatedAt":1747516825533,"staffPickedAt":1741804395000,"description":"State-of-the-art image + text input models from Google, built from the same research and tech used to create the Gemini models","likeCount":58,"forkCount":2,"downloads":263919,"isPrivate":false,"metadata":{"type":"llm","architectures":["gemma3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["27B"],"minMemoryUsageBytes":15600000000,"trainedForToolUse":true,"vision":true,"reasoning":false,"fim":false,"contextLengths":[128000]},"url":"https://lmstudio.ai/models/google/gemma-3-27b"},{"owner":"google","name":"gemma-3-12b","revisionNumber":7,"createdAt":1746848444627,"updatedAt":1751565663247,"staffPickedAt":1741804380000,"staffPickedDirectives":"[]","description":"State-of-the-art image + text input models from Google, built from the same research and tech used to create the Gemini 
models","likeCount":51,"forkCount":3,"downloads":579311,"isPrivate":false,"metadata":{"type":"llm","architectures":["gemma3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["12B"],"minMemoryUsageBytes":10737418240,"trainedForToolUse":true,"vision":true,"reasoning":false,"fim":false,"contextLengths":[4096]},"url":"https://lmstudio.ai/models/google/gemma-3-12b"},{"owner":"google","name":"gemma-3-4b","revisionNumber":4,"createdAt":1747167416277,"updatedAt":1748119362121,"staffPickedAt":1741804361000,"staffPickedDirectives":"[{\"type\":\"onboarding\",\"message\":\"\"}]","description":"State-of-the-art image + text input models from Google, built from the same research and tech used to create the Gemini models","likeCount":52,"forkCount":3,"downloads":1021463,"isPrivate":false,"metadata":{"type":"llm","architectures":["gemma3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["4B"],"minMemoryUsageBytes":2400000000,"trainedForToolUse":false,"vision":true,"reasoning":false,"fim":false,"contextLengths":[4096]},"url":"https://lmstudio.ai/models/google/gemma-3-4b"},{"owner":"google","name":"gemma-3-1b","revisionNumber":4,"createdAt":1747167398608,"updatedAt":1747516818140,"staffPickedAt":1741804336000,"description":"Tiny text-only variant of Gemma 3: Google's latest open-weight model family","likeCount":21,"forkCount":1,"downloads":82244,"isPrivate":false,"metadata":{"type":"llm","architectures":["gemma3"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["1B"],"minMemoryUsageBytes":754974720,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[]},"url":"https://lmstudio.ai/models/google/gemma-3-1b"},{"owner":"qwen","name":"qwq-32b","revisionNumber":8,"createdAt":1745556607556,"updatedAt":1748119464043,"staffPickedAt":1741203802000,"description":"Reasoning model from the Qwen family, rivaling DeepSeek R1 on 
benchmarks.","likeCount":7,"forkCount":0,"downloads":37623,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen2"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["32B"],"minMemoryUsageBytes":18700000000,"trainedForToolUse":true,"vision":false,"reasoning":true,"fim":false,"contextLengths":[40960]},"url":"https://lmstudio.ai/models/qwen/qwq-32b"},{"owner":"ibm","name":"granite-3.2-8b","revisionNumber":4,"createdAt":1746848462417,"updatedAt":1748119367222,"staffPickedAt":1740606179000,"description":"A small and capable LLM from IBM","likeCount":5,"forkCount":0,"downloads":17194,"isPrivate":false,"metadata":{"type":"llm","architectures":["granite"],"compatibilityTypes":["gguf"],"paramsStrings":["8B"],"minMemoryUsageBytes":4600000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/ibm/granite-3.2-8b"},{"owner":"qwen","name":"qwen2.5-vl-7b","revisionNumber":2,"createdAt":1747516937011,"updatedAt":1747517631517,"staffPickedAt":1740520183000,"description":"a 7B Vision Language Model (VLM) from the Qwen2.5 family","likeCount":18,"forkCount":2,"downloads":140472,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen2vl"],"compatibilityTypes":["gguf"],"paramsStrings":["7B"],"minMemoryUsageBytes":5368709120,"trainedForToolUse":false,"vision":true,"reasoning":false,"fim":false,"contextLengths":[128000]},"url":"https://lmstudio.ai/models/qwen/qwen2.5-vl-7b"},{"owner":"microsoft","name":"phi-4","revisionNumber":5,"createdAt":1746019242377,"updatedAt":1748119392746,"staffPickedAt":1736350723000,"description":"The latest in the Phi model series: suitable for chats with a context of up to 16K 
tokens","likeCount":9,"forkCount":0,"downloads":30964,"isPrivate":false,"metadata":{"type":"llm","architectures":["phi"],"compatibilityTypes":["gguf"],"paramsStrings":["14B"],"minMemoryUsageBytes":8300000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[16384]},"url":"https://lmstudio.ai/models/microsoft/phi-4"},{"owner":"ibm","name":"granite-3.1-8b","revisionNumber":4,"createdAt":1746848454459,"updatedAt":1748119364706,"staffPickedAt":1734534666000,"description":"Dense LLM from IBM supporting up to 128K context length, trained on 12T tokens. Suitable for general instructions following and can be used to build AI assistants","likeCount":5,"forkCount":0,"downloads":12532,"isPrivate":false,"metadata":{"type":"llm","architectures":["granite"],"compatibilityTypes":["gguf"],"paramsStrings":["8B"],"minMemoryUsageBytes":4600000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/ibm/granite-3.1-8b"},{"owner":"meta","name":"llama-3.3-70b","revisionNumber":4,"createdAt":1746848470383,"updatedAt":1748119377603,"staffPickedAt":1733507881000,"description":"Meta's latest Llama 70B model, matches the performance of Llama 3.2 405B","likeCount":19,"forkCount":0,"downloads":57638,"isPrivate":false,"metadata":{"type":"llm","architectures":["llama"],"compatibilityTypes":["gguf"],"paramsStrings":["70B"],"minMemoryUsageBytes":40300000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/meta/llama-3.3-70b"},{"owner":"qwen","name":"qwen2.5-coder-32b","revisionNumber":6,"createdAt":1746019271370,"updatedAt":1748119440132,"staffPickedAt":1731259197000,"description":"32B version of the code-specific Qwen 2.5 for code generation, code reasoning and code 
fixing.","likeCount":6,"forkCount":0,"downloads":34611,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen2"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["32B"],"minMemoryUsageBytes":18700000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[32768]},"url":"https://lmstudio.ai/models/qwen/qwen2.5-coder-32b"},{"owner":"qwen","name":"qwen2.5-coder-14b","revisionNumber":5,"createdAt":1746848478064,"updatedAt":1748119437414,"staffPickedAt":1731259197000,"description":"14B version of the code-specific Qwen 2.5 for code generation, code reasoning and code fixing.","likeCount":13,"forkCount":1,"downloads":109304,"isPrivate":false,"metadata":{"type":"llm","architectures":["qwen2"],"compatibilityTypes":["gguf","safetensors"],"paramsStrings":["14B"],"minMemoryUsageBytes":8400000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[32768]},"url":"https://lmstudio.ai/models/qwen/qwen2.5-coder-14b"},{"owner":"mistralai","name":"mistral-nemo-instruct-2407","revisionNumber":5,"createdAt":1746019227997,"updatedAt":1748119408074,"staffPickedAt":1721340348000,"description":"A slightly larger 12B parameter model from Mistral AI, NeMo offers a long 128k token context length, advanced world knowledge, and function calling for developers.","likeCount":7,"forkCount":0,"downloads":41988,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral"],"compatibilityTypes":["gguf"],"paramsStrings":["12B"],"minMemoryUsageBytes":6900000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[131072]},"url":"https://lmstudio.ai/models/mistralai/mistral-nemo-instruct-2407"},{"owner":"google","name":"gemma-2-9b","revisionNumber":5,"createdAt":1746848436133,"updatedAt":1748119356771,"staffPickedAt":1721151367000,"description":"The mid-sized option of the Gemma 2 model family. 
Built by Google, using the same research and technology used to create the Gemini models","likeCount":3,"forkCount":0,"downloads":26909,"isPrivate":false,"metadata":{"type":"llm","architectures":["gemma2"],"compatibilityTypes":["gguf"],"paramsStrings":["9B"],"minMemoryUsageBytes":5200000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[8192]},"url":"https://lmstudio.ai/models/google/gemma-2-9b"},{"owner":"google","name":"gemma-2-27b","revisionNumber":5,"createdAt":1746848418362,"updatedAt":1748119351161,"staffPickedAt":1719945813000,"description":"The large option of the Gemma 2 model family. Built by Google, using the same research and technology used to create the Gemini models","likeCount":3,"forkCount":1,"downloads":13486,"isPrivate":false,"metadata":{"type":"llm","architectures":["gemma2"],"compatibilityTypes":["gguf"],"paramsStrings":["27B"],"minMemoryUsageBytes":15500000000,"trainedForToolUse":true,"vision":false,"reasoning":false,"fim":false,"contextLengths":[8192]},"url":"https://lmstudio.ai/models/google/gemma-2-27b"},{"owner":"mistralai","name":"codestral-22b-v0.1","revisionNumber":7,"createdAt":1746018926653,"updatedAt":1748119397779,"staffPickedAt":1719871548000,"description":"Mistral AI's latest coding model, Codestral can handle both instructions and code completions with ease in over 80 programming languages.","likeCount":28,"forkCount":0,"downloads":50124,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral"],"compatibilityTypes":["gguf"],"paramsStrings":["22B"],"minMemoryUsageBytes":12700000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[32768]},"url":"https://lmstudio.ai/models/mistralai/codestral-22b-v0.1"},{"owner":"mistralai","name":"mistral-7b-instruct-v0.3","revisionNumber":5,"createdAt":1746019225157,"updatedAt":1748119403008,"staffPickedAt":1716415548000,"description":"One of the most popular open-source LLMs, Mistral's 7B 
Instruct model's balance of speed, size, and performance makes it a great general-purpose daily driver. ","likeCount":44,"forkCount":1,"downloads":122792,"isPrivate":false,"metadata":{"type":"llm","architectures":["mistral"],"compatibilityTypes":["gguf"],"paramsStrings":["7B"],"minMemoryUsageBytes":4100000000,"trainedForToolUse":false,"vision":false,"reasoning":false,"fim":false,"contextLengths":[32768]},"url":"https://lmstudio.ai/models/mistralai/mistral-7b-instruct-v0.3"}]