From 168db055b18f537202e301785e957fd5cba97901 Mon Sep 17 00:00:00 2001 From: mudler <2420543+mudler@users.noreply.github.com> Date: Sat, 25 Apr 2026 15:21:07 +0000 Subject: [PATCH] chore(model gallery): :robot: add new models via gallery agent Signed-off-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- gallery/index.yaml | 66 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/gallery/index.yaml b/gallery/index.yaml index 380bc83e8228..e29a25690e59 100644 --- a/gallery/index.yaml +++ b/gallery/index.yaml @@ -1,4 +1,70 @@ --- +- name: "qwen3.6-35b-a3b-uncensored-wasserstein" + url: "github:mudler/LocalAI/gallery/virtual.yaml@master" + urls: + - https://huggingface.co/LuffyTheFox/Qwen3.6-35B-A3B-Uncensored-Wasserstein-GGUF + description: | + # Qwen3.6-35B-A3B-Uncensored-HauhauCS-Aggressive + + > **Join the Discord** for updates, roadmaps, projects, or just to chat. + + Qwen3.6-35B-A3B uncensored by HauhauCS. **0/465 Refusals.** + + > **HuggingFace's "Hardware Compatibility" widget doesn't recognize K_P quants** — it may show fewer files than actually exist. Click **"View +X variants"** or go to **Files and versions** to see all available downloads. + + ## About + + No changes to datasets or capabilities. Fully functional, 100% of what the original authors intended - just without the refusals. + + These are meant to be the best lossless uncensored models out there. + + ## Aggressive Variant + + Stronger uncensoring — model is fully unlocked and won't refuse prompts. May occasionally append short disclaimers (baked into base model training, not refusals) but full content is always generated. + + For a more conservative uncensor that keeps some safety guardrails, check the Balanced variant when it's available. + + ## Downloads + + All quants generated with importance matrix (imatrix) for optimal quality preservation on abliterated weights. + + ## What are K_P quants? + + ...
+ license: "apache-2.0" + tags: + - llm + - gguf + - vision + - multimodal + overrides: + backend: llama-cpp + function: + automatic_tool_parsing_fallback: true + grammar: + disable: true + known_usecases: + - chat + mmproj: llama-cpp/mmproj/Qwen3.6-35B-A3B-Uncensored-Wasserstein-GGUF/mmproj-Qwen3.6-35B-A3B-Uncensored.f16.gguf + options: + - use_jinja:true + parameters: + min_p: 0 + model: llama-cpp/models/Qwen3.6-35B-A3B-Uncensored-Wasserstein-GGUF/Qwen3.6-35B-A3B-Uncensored.Q8_K_P.gguf + presence_penalty: 1.5 + repeat_penalty: 1 + temperature: 0.7 + top_k: 20 + top_p: 0.8 + template: + use_tokenizer_template: true + files: + - filename: llama-cpp/models/Qwen3.6-35B-A3B-Uncensored-Wasserstein-GGUF/Qwen3.6-35B-A3B-Uncensored.Q8_K_P.gguf + sha256: 4f8c5b468d456bae2f53ae655591c3fd8901ea41cb9b7050606bd216a6e05992 + uri: https://huggingface.co/LuffyTheFox/Qwen3.6-35B-A3B-Uncensored-Wasserstein-GGUF/resolve/main/Qwen3.6-35B-A3B-Uncensored.Q8_K_P.gguf + - filename: llama-cpp/mmproj/Qwen3.6-35B-A3B-Uncensored-Wasserstein-GGUF/mmproj-Qwen3.6-35B-A3B-Uncensored.f16.gguf + sha256: c8e702344a81f8c226a914aa980ed6e1f604bce9374f1fed8e65c896908af414 + uri: https://huggingface.co/LuffyTheFox/Qwen3.6-35B-A3B-Uncensored-Wasserstein-GGUF/resolve/main/mmproj-Qwen3.6-35B-A3B-Uncensored.f16.gguf - name: "kimi-k2.6" url: "github:mudler/LocalAI/gallery/virtual.yaml@master" urls: