diff --git a/jan-v1-4b/Readme.md b/jan-v1-4b/Readme.md
new file mode 100644
index 0000000..33e629f
--- /dev/null
+++ b/jan-v1-4b/Readme.md
@@ -0,0 +1,31 @@
+# Jan-v1-4B Node
+
+This configuration sets up a GaiaNet node to run the `Jan-v1-4B` model, a versatile and efficient model suitable for a wide range of conversational tasks.
+
+## Step 1: Install GaiaNet node
+
+The GaiaNet node software version should be 0.5.4 or higher.
+
+```bash
+curl -sSfL 'https://github.com/GaiaNet-AI/gaianet-node/releases/latest/download/install.sh' | bash
+```
+
+## Step 2: Init with the Jan-v1-4B model
+
+```bash
+gaianet init --config https://raw.githubusercontent.com/GaiaNet-AI/node-configs/main/jan-v1-4b/config.json
+```
+
+## Step 3: Start the node
+
+```bash
+gaianet start
+```
+
+Now you can use the node as a web-based chatbot or as an OpenAI API drop-in replacement.
+
+## References
+
+* **Official Base Model:** [janhq/jan-v1-4b](https://huggingface.co/janhq/jan-v1-4b)
+* **GGUF Formatted Model:** [Tobivictor/Jan-v1-4B-GGUF](https://huggingface.co/Tobivictor/Jan-v1-4B-GGUF)
+* **GaiaNet Node Quick Start Guide:** https://github.com/GaiaNet-AI/gaianet-node
\ No newline at end of file
diff --git a/jan-v1-4b/config.json b/jan-v1-4b/config.json
new file mode 100644
index 0000000..e271f2b
--- /dev/null
+++ b/jan-v1-4b/config.json
@@ -0,0 +1,28 @@
+{
+  "address": "",
+  "chat": "https://huggingface.co/Tobivictor/Jan-v1-4B-GGUF/resolve/main/Jan-v1-4B-Q4_K_M.gguf",
+  "chat_batch_size": "128",
+  "chat_ctx_size": "8192",
+  "chat_name": "Jan-v1-4B",
+  "chat_ubatch_size": "128",
+  "context_window": "1",
+  "description": "GaiaNet node config with the Jan-v1-4B model for a wide range of conversational tasks.",
+  "domain": "gaia.domains",
+  "embedding": "https://huggingface.co/gaianet/Nomic-embed-text-v1.5-Embedding-GGUF/resolve/main/nomic-embed-text-v1.5.f16.gguf",
+  "embedding_batch_size": "8192",
+  "embedding_collection_name": "default",
+  "embedding_ctx_size": "8192",
+  "embedding_name": "Nomic-embed-text-v1.5",
+  "embedding_ubatch_size": "8192",
+  "llamaedge_chat_port": "9068",
+  "llamaedge_embedding_port": "9069",
+  "llamaedge_port": "8080",
+  "prompt_template": "chatml",
+  "qdrant_limit": "1",
+  "qdrant_score_threshold": "0.5",
+  "rag_policy": "system-message",
+  "rag_prompt": "Use the following information to answer the question.\n----------------\n",
+  "reverse_prompt": "",
+  "snapshot": "",
+  "system_prompt": "You are a helpful AI assistant. Please answer questions as clearly and concisely as possible."
+}
\ No newline at end of file
diff --git a/qwen2.5-omni-7b/Readme.md b/qwen2.5-omni-7b/Readme.md
new file mode 100644
index 0000000..880e6a7
--- /dev/null
+++ b/qwen2.5-omni-7b/Readme.md
@@ -0,0 +1,31 @@
+# Qwen2.5-Omni-7B Node
+
+This configuration sets up a GaiaNet node to run the `Qwen2.5-Omni-7B` model, a powerful and versatile language model suitable for general chat and specialized tasks.
+
+## Step 1: Install GaiaNet node
+
+The GaiaNet node software version should be 0.5.4 or higher.
+
+```bash
+curl -sSfL 'https://github.com/GaiaNet-AI/gaianet-node/releases/latest/download/install.sh' | bash
+```
+
+## Step 2: Init with the Qwen2.5-Omni-7B model
+
+```bash
+gaianet init --config https://raw.githubusercontent.com/GaiaNet-AI/node-configs/main/qwen2.5-omni-7b/config.json
+```
+
+## Step 3: Start the node
+
+```bash
+gaianet start
+```
+
+Now you can use the node as a web-based chatbot or as an OpenAI API drop-in replacement.
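+
+For a quick local test of the OpenAI-compatible API, you can send a chat completion request with `curl`. The example below assumes the node is running on this machine and uses the default `llamaedge_port` of `8080` from `config.json`; adjust the port and model name if you have changed them.
+
+```bash
+# Send a test request to the node's OpenAI-compatible chat completions endpoint.
+curl -X POST http://localhost:8080/v1/chat/completions \
+  -H 'Content-Type: application/json' \
+  -d '{
+    "model": "Qwen2.5-Omni-7B",
+    "messages": [
+      {"role": "system", "content": "You are a helpful AI assistant."},
+      {"role": "user", "content": "Introduce yourself in one sentence."}
+    ]
+  }'
+```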
+
+## References
+
+* **Official Base Model:** [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B)
+* **GGUF Formatted Model:** [Tobivictor/Qwen2.5-Omni-7B-GGUF](https://huggingface.co/Tobivictor/Qwen2.5-Omni-7B-GGUF)
+* **GaiaNet Node Quick Start Guide:** https://github.com/GaiaNet-AI/gaianet-node
\ No newline at end of file
diff --git a/qwen2.5-omni-7b/config.json b/qwen2.5-omni-7b/config.json
new file mode 100644
index 0000000..0b616e9
--- /dev/null
+++ b/qwen2.5-omni-7b/config.json
@@ -0,0 +1,28 @@
+{
+  "address": "",
+  "chat": "https://huggingface.co/Tobivictor/Qwen2.5-Omni-7B-GGUF/resolve/main/Qwen2.5-Omni-7B-Q4_K_M.gguf",
+  "chat_batch_size": "128",
+  "chat_ctx_size": "8192",
+  "chat_name": "Qwen2.5-Omni-7B",
+  "chat_ubatch_size": "128",
+  "context_window": "1",
+  "description": "GaiaNet node config with the Qwen2.5-Omni-7B model for general chat and specialized tasks.",
+  "domain": "gaia.domains",
+  "embedding": "https://huggingface.co/gaianet/Nomic-embed-text-v1.5-Embedding-GGUF/resolve/main/nomic-embed-text-v1.5.f16.gguf",
+  "embedding_batch_size": "8192",
+  "embedding_collection_name": "default",
+  "embedding_ctx_size": "8192",
+  "embedding_name": "Nomic-embed-text-v1.5",
+  "embedding_ubatch_size": "8192",
+  "llamaedge_chat_port": "9068",
+  "llamaedge_embedding_port": "9069",
+  "llamaedge_port": "8080",
+  "prompt_template": "chatml",
+  "qdrant_limit": "1",
+  "qdrant_score_threshold": "0.5",
+  "rag_policy": "system-message",
+  "rag_prompt": "Use the following information to answer the question.\n----------------\n",
+  "reverse_prompt": "",
+  "snapshot": "",
+  "system_prompt": "You are a helpful AI assistant. Please answer questions as clearly and concisely as possible."
+}
\ No newline at end of file
diff --git a/qwen3-coder-30b/Readme.md b/qwen3-coder-30b/Readme.md
new file mode 100644
index 0000000..4301ef6
--- /dev/null
+++ b/qwen3-coder-30b/Readme.md
@@ -0,0 +1,31 @@
+# Qwen3-Coder-30B-A3B-Instruct Node
+
+This configuration sets up a GaiaNet node to run `Qwen3-Coder-30B-A3B-Instruct`, a powerful model from the Qwen3 family specifically fine-tuned for coding tasks and general conversation.
+
+## Step 1: Install GaiaNet node
+
+The GaiaNet node software version should be 0.5.4 or higher.
+
+```bash
+curl -sSfL 'https://github.com/GaiaNet-AI/gaianet-node/releases/latest/download/install.sh' | bash
+```
+
+## Step 2: Init with the Qwen3-Coder-30B-A3B-Instruct model
+
+```bash
+gaianet init --config https://raw.githubusercontent.com/GaiaNet-AI/node-configs/main/qwen3-coder-30b/config.json
+```
+
+## Step 3: Start the node
+
+```bash
+gaianet start
+```
+
+Now you can use the node as a web-based chatbot or as an OpenAI API drop-in replacement for your development tasks.
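+
+Because the node exposes an OpenAI-compatible API, most coding tools and SDKs that talk to OpenAI can be pointed at it by overriding the base URL. The variable names below are the ones read by the official OpenAI SDKs; other clients may use different settings. The port assumes the default `llamaedge_port` of `8080` from `config.json`.
+
+```bash
+# Point OpenAI SDK based tools at the local GaiaNet node instead of api.openai.com.
+export OPENAI_BASE_URL="http://localhost:8080/v1"
+# Placeholder value; supply a real key only if your node or Gaia domain requires one.
+export OPENAI_API_KEY="gaia"
+```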
+
+## References
+
+* **Official Base Model:** [Qwen/Qwen3-Coder-30B-A3B-Instruct](https://huggingface.co/Qwen/Qwen3-Coder-30B-A3B-Instruct)
+* **GGUF Formatted Model:** [Tobivictor/Qwen3-Coder-30B-A3B-Instruct-GGUF](https://huggingface.co/Tobivictor/Qwen3-Coder-30B-A3B-Instruct-GGUF)
+* **GaiaNet Node Quick Start Guide:** [https://github.com/GaiaNet-AI/gaianet-node](https://github.com/GaiaNet-AI/gaianet-node)
\ No newline at end of file
diff --git a/qwen3-coder-30b/config.json b/qwen3-coder-30b/config.json
new file mode 100644
index 0000000..944a094
--- /dev/null
+++ b/qwen3-coder-30b/config.json
@@ -0,0 +1,28 @@
+{
+  "address": "",
+  "chat": "https://huggingface.co/Tobivictor/Qwen3-Coder-30B-A3B-Instruct-GGUF/resolve/main/Qwen3-Coder-30B-A3B-Instruct-Q2_K.gguf",
+  "chat_batch_size": "128",
+  "chat_ctx_size": "8192",
+  "chat_name": "Qwen3-Coder-30B-A3B-Instruct",
+  "chat_ubatch_size": "128",
+  "context_window": "1",
+  "description": "GaiaNet node config with the Qwen3-Coder-30B-A3B-Instruct model for coding tasks and general conversation.",
+  "domain": "gaia.domains",
+  "embedding": "https://huggingface.co/gaianet/Nomic-embed-text-v1.5-Embedding-GGUF/resolve/main/nomic-embed-text-v1.5.f16.gguf",
+  "embedding_batch_size": "8192",
+  "embedding_collection_name": "default",
+  "embedding_ctx_size": "8192",
+  "embedding_name": "Nomic-embed-text-v1.5",
+  "embedding_ubatch_size": "8192",
+  "llamaedge_chat_port": "9068",
+  "llamaedge_embedding_port": "9069",
+  "llamaedge_port": "8080",
+  "prompt_template": "chatml",
+  "qdrant_limit": "1",
+  "qdrant_score_threshold": "0.5",
+  "rag_policy": "system-message",
+  "rag_prompt": "Use the following information to answer the question.\n----------------\n",
+  "reverse_prompt": "",
+  "snapshot": "",
+  "system_prompt": "You are a helpful AI assistant. Please answer questions as clearly and concisely as possible."
+}
\ No newline at end of file
diff --git a/silly-v0.2/Readme.md b/silly-v0.2/Readme.md
new file mode 100644
index 0000000..392b850
--- /dev/null
+++ b/silly-v0.2/Readme.md
@@ -0,0 +1,31 @@
+# silly-v0.2 Node
+
+This configuration sets up a GaiaNet node to run `silly-v0.2`, a highly capable fine-tuned model designed for creative, conversational, and role-playing interactions.
+
+## Step 1: Install GaiaNet node
+
+The GaiaNet node software version should be 0.5.4 or higher.
+
+```bash
+curl -sSfL 'https://github.com/GaiaNet-AI/gaianet-node/releases/latest/download/install.sh' | bash
+```
+
+## Step 2: Init with the silly-v0.2 model
+
+```bash
+gaianet init --config https://raw.githubusercontent.com/GaiaNet-AI/node-configs/main/silly-v0.2/config.json
+```
+
+## Step 3: Start the node
+
+```bash
+gaianet start
+```
+
+Now you can use the node as a web-based chatbot or as an OpenAI API drop-in replacement.
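+
+The `gaianet` CLI installed in Step 1 also provides commands for managing the node after it is running, for example:
+
+```bash
+# Show the node ID and device ID registered for this installation.
+gaianet info
+
+# Stop the node when you are done.
+gaianet stop
+```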
+
+## References
+
+* **Official Base Model:** [wave-on-discord/silly-v0.2](https://huggingface.co/wave-on-discord/silly-v0.2)
+* **GGUF Formatted Model:** [Tobivictor/silly-v0.2-GGUF](https://huggingface.co/Tobivictor/silly-v0.2-GGUF)
+* **GaiaNet Node Quick Start Guide:** https://github.com/GaiaNet-AI/gaianet-node
\ No newline at end of file
diff --git a/silly-v0.2/config.json b/silly-v0.2/config.json
new file mode 100644
index 0000000..06e8cf0
--- /dev/null
+++ b/silly-v0.2/config.json
@@ -0,0 +1,28 @@
+{
+  "address": "0x3d221e1350edabdf023148a1be678617e148f35c",
+  "chat": "https://huggingface.co/Tobivictor/silly-v0.2-GGUF/resolve/main/silly-v0.2-Q4_K_M.gguf",
+  "chat_batch_size": "128",
+  "chat_ctx_size": "8192",
+  "chat_name": "silly-v0.2",
+  "chat_ubatch_size": "128",
+  "context_window": "1",
+  "description": "GaiaNet node config with the silly-v0.2 model for creative and conversational tasks.",
+  "domain": "gaia.domains",
+  "embedding": "https://huggingface.co/gaianet/Nomic-embed-text-v1.5-Embedding-GGUF/resolve/main/nomic-embed-text-v1.5.f16.gguf",
+  "embedding_batch_size": "8192",
+  "embedding_collection_name": "default",
+  "embedding_ctx_size": "8192",
+  "embedding_name": "Nomic-embed-text-v1.5",
+  "embedding_ubatch_size": "8192",
+  "llamaedge_chat_port": "9068",
+  "llamaedge_embedding_port": "9069",
+  "llamaedge_port": "8080",
+  "prompt_template": "chatml",
+  "qdrant_limit": "1",
+  "qdrant_score_threshold": "0.5",
+  "rag_policy": "system-message",
+  "rag_prompt": "Use the following information to answer the question.\n----------------\n",
+  "reverse_prompt": "",
+  "snapshot": "",
+  "system_prompt": "You are a helpful AI assistant. Please answer questions as clearly and concisely as possible."
+}
\ No newline at end of file