llama-cpp: maybe use vulkan?
All checks were successful
Build and Deploy / deploy (push) Successful in 8m30s

This commit is contained in:
2026-04-06 02:12:46 -04:00
parent 3e46c5bfa5
commit 0a927ea893
3 changed files with 11 additions and 10 deletions

12
flake.lock generated
View File

@@ -325,16 +325,16 @@
]
},
"locked": {
- "lastModified": 1775236905,
- "narHash": "sha256-tHshzR/k6D/r5UhJCfJ9b/mJgsbn7ODtnZrDlimhOOI=",
- "owner": "TheTom",
+ "lastModified": 1774922513,
+ "narHash": "sha256-TKk1i8AZzxy4/z0MkqKxoGf/CQDvoL+jo8JDtZeCRy8=",
+ "owner": "apollosenvy",
"repo": "llama-cpp-turboquant",
- "rev": "bc05a6803e48f17e0f2c7a99fce9b50d03882de7",
+ "rev": "9e80e93ceb115bc5055997c373d8c09bfa47a565",
"type": "github"
},
"original": {
- "owner": "TheTom",
- "ref": "feature/turboquant-kv-cache",
+ "owner": "apollosenvy",
+ "ref": "pr/vulkan-turbo3",
"repo": "llama-cpp-turboquant",
"type": "github"
}