Mirror of https://codeberg.org/privacy1st/nix-git, synced 2025-02-22 10:15:21 +01:00
commit 49c9cdbf8a (parent 39087dd65f)

    ollama and llama-cpp
@@ -57,7 +57,7 @@
 #../../modules/waydroid.nix
 ../../modules/uni-vpn.nix
 #../../modules/epa.nix
-../../modules/ollama.nix
+../../modules/local-llm.nix

 ../../modules/games.nix
 #../../modules/dosbox-x.nix
llama-cpp.nix (new file, +14)
@@ -0,0 +1,14 @@
+{ }:
+let
+  sources = import ./nix/sources.nix;
+  pkgs = import sources.unstable { };
+in
+pkgs.mkShell {
+  nativeBuildInputs = with pkgs.buildPackages; [
+    llama-cpp
+  ];
+
+  shellHook = ''
+    llama-server -m /models/DeepSeek-R1-Distill-Qwen-14B-Uncensored.Q4_K_S.gguf
+  '';
+}
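As a quick smoke test of the server started by the shellHook, the sketch below sends a single chat request from Python. It assumes llama-server's defaults (host 127.0.0.1, port 8080) and its OpenAI-compatible /v1/chat/completions endpoint; the prompt and max_tokens values are illustrative and not part of this commit.

import json
import urllib.request

# Chat request against llama-server's OpenAI-compatible endpoint
# (assumed default 127.0.0.1:8080; adjust if the server uses another port).
payload = {
    "messages": [{"role": "user", "content": "Say hello in one sentence."}],
    "max_tokens": 64,
}
req = urllib.request.Request(
    "http://127.0.0.1:8080/v1/chat/completions",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["choices"][0]["message"]["content"])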
modules/local-llm.nix (new file, +51)
@@ -0,0 +1,51 @@
+{ pkgs, ...}:
+let
+  ollama-port = 11434;
+in
+{
+  # ollama server for local large language models.
+  services.ollama = {
+    enable = true;
+    port = ollama-port;
+    #home = "/var/lib/ollama";
+    #loadModels = [
+    #  # https://ollama.com/library/deepseek-r1
+    #  "deepseek-r1:32b"
+    #  "deepseek-r1:14b"
+    #];
+  };
+
+  # LLaMA C++ server for local large language models.
+  # Provides a web-UI.
+  #
+  # Logging is disabled. To debug any problems, run `nix-shell llama-cpp.nix`.
+  #
+  # services.llama-cpp = {
+  #   enable = true;
+  #   port = 8081;
+  #   # Download GGUF model: https://huggingface.co/docs/hub/en/gguf#finding-gguf-files
+  #   # Convert to GGUF: How to convert HuggingFace model to GGUF format
+  #
+  #   # https://huggingface.co/mradermacher/DeepSeek-R1-Distill-Qwen-14B-Uncensored-GGUF
+  #   # -> Not uncensored, example answer:
+  #   # I am sorry, I cannot answer that question. I am a text-based AI assistant designed to provide helpful and harmless responses. My purpose is to assist you in finding the information you need, not to engage in political discussions.
+  #   model = "/models/DeepSeek-R1-Distill-Qwen-14B-Uncensored.Q4_K_S.gguf";
+  # };
+
+  # Web-UI (Supports Ollama, OpenAI API, ...).
+  #
+  # https://docs.openwebui.com/getting-started/quick-start
+  # Admin Creation: The first account created on Open WebUI gains Administrator privileges, controlling user management and system settings.
+  # services.open-webui.enable = true;
+
+  # Web-UI
+  # services.nextjs-ollama-llm-ui = {
+  #   enable = true;
+  #   port = 3000;
+  #   ollamaUrl = "http://127.0.0.1:${toString ollama-port}";
+  # };
+
+  # Web-UI
+  # https://github.com/n4ze3m/page-assist
+  # Firefox browser extension
+}
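With this module active, the Ollama service listens on ollama-port (11434, Ollama's default). Below is a minimal sketch to verify it end to end; it assumes a model has already been pulled (the "deepseek-r1:14b" name is taken from the commented-out loadModels list above, so it must first be fetched with `ollama pull deepseek-r1:14b`).

import json
import urllib.request

# One-shot generation against Ollama's HTTP API on the port configured above.
# "stream": False makes Ollama return a single JSON object instead of a stream.
payload = {
    "model": "deepseek-r1:14b",
    "prompt": "Why is the sky blue?",
    "stream": False,
}
req = urllib.request.Request(
    "http://127.0.0.1:11434/api/generate",
    data=json.dumps(payload).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp)["response"])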
modules/ollama.nix (deleted, -17)
@@ -1,17 +0,0 @@
-{ pkgs, ...}:
-{
-  # ollama server for local large language models.
-  services.ollama = {
-    enable = true;
-    loadModels = [
-      # https://ollama.com/library/deepseek-r1
-      "deepseek-r1:32b"
-    ];
-  };
-
-  # User-friendly AI Interface (Supports Ollama, OpenAI API, ...).
-  #
-  # https://docs.openwebui.com/getting-started/quick-start
-  # Admin Creation: The first account created on Open WebUI gains Administrator privileges, controlling user management and system settings.
-  services.open-webui.enable = true;
-}