diff --git a/.claude/settings.local.json b/.claude/settings.local.json deleted file mode 100644 index 07d387ba9..000000000 --- a/.claude/settings.local.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "permissions": { - "allow": [ - "Bash(where bun)", - "Bash(bun:*)", - "Bash(echo $ANDROID_HOME)", - "Bash(echo $JAVA_HOME)", - "Bash(copy:*)", - "WebSearch", - "Bash(.gradlew.bat:*)", - "Bash(powershell:*)", - "Bash(git add:*)", - "Bash(git commit:*)", - "Bash(git status:*)", - "Bash(git diff:*)", - "mcp__context7__resolve-library-id", - "mcp__context7__get-library-docs", - "WebFetch(domain:v2.tauri.app)", - "WebFetch(domain:docs.rs)", - "Bash(cargo check:*)", - "Bash(npm run dev:*)", - "WebFetch(domain:github.com)", - "Bash(gh issue list:*)", - "Bash(gh issue view:*)", - "Bash(gh issue comment:*)", - "Bash(gh repo view:*)", - "Bash(gh api:*)", - "Bash(rg:*)", - "Bash(del \"E:\\code\\blinko\\app\\src-tauri\\src\\desktop\\autostart.rs\")", - "Bash(del \"E:\\code\\blinko\\app\\src\\pages\\test-ai-settings.tsx\")", - "Bash(del \"E:\\code\\blinko\\app\\src\\components\\BlinkoSettings\\AiSetting\\ProviderModal.tsx\")", - "WebFetch(domain:icons.lobehub.com)", - "mcp__ide__getDiagnostics", - "WebFetch(domain:raw.githubusercontent.com)", - "Read(//e/code/mastra/packages/mcp/src/server/**)", - "Bash(find:*)", - "WebFetch(domain:crates.io)", - "WebFetch(domain:lib.rs)", - "Bash(cat:*)" - ], - "deny": [], - "ask": [] - } -} \ No newline at end of file diff --git a/.github/workflows/app-release.yml b/.github/workflows/app-release.yml index 120245fb9..0136a408a 100644 --- a/.github/workflows/app-release.yml +++ b/.github/workflows/app-release.yml @@ -117,8 +117,14 @@ jobs: args: '--target x86_64-apple-darwin' - platform: 'ubuntu-22.04' # Linux Platform args: '' - - platform: 'windows-latest' # Windows Platform + - platform: 'windows-latest' # Windows Platform (CPU) args: '' + features: '--features whisper-cpu' + variant: 'cpu' + - platform: 'windows-latest' # Windows Platform (CUDA) + args: '' + features: '--features whisper-cuda' + variant: 'cuda' runs-on: ${{ matrix.platform }} steps: @@ -132,19 +138,61 @@ jobs: name: tauri-config path: app/src-tauri/ - - name: Install CUDA Toolkit (Windows) - if: matrix.platform == 'windows-latest' - uses: Jimver/cuda-toolkit@v0.2.15 + - name: Install CUDA Toolkit (Windows CUDA) + if: matrix.platform == 'windows-latest' && matrix.variant == 'cuda' + uses: Jimver/cuda-toolkit@v0.2.24 + id: cuda-toolkit with: - cuda: '12.1.0' + cuda: '12.5.0' method: 'network' - sub-packages: '[ "nvcc", "cudart", "cublas", "cublas_dev", "curand", "curand_dev" ]' + sub-packages: '[ "nvcc", "cudart", "cublas", "cublas_dev", "curand", "curand_dev", "visual_studio_integration", "thrust" ]' - - name: Set CUDA environment variables (Windows) - if: matrix.platform == 'windows-latest' + - name: Set CUDA environment variables (Windows CUDA) + if: matrix.platform == 'windows-latest' && matrix.variant == 'cuda' + run: | + echo "Installed cuda version is: ${{steps.cuda-toolkit.outputs.cuda}}" + echo "Cuda install location: ${{steps.cuda-toolkit.outputs.CUDA_PATH}}" + echo "CUDA_PATH=${{steps.cuda-toolkit.outputs.CUDA_PATH}}" >> $GITHUB_ENV + echo "${{steps.cuda-toolkit.outputs.CUDA_PATH}}\bin" >> $GITHUB_PATH + nvcc -V + + + - name: Fix CUDA Visual Studio Integration (Windows CUDA) + if: matrix.platform == 'windows-latest' && matrix.variant == 'cuda' run: | - echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.1" >> $GITHUB_ENV - echo "C:\Program Files\NVIDIA GPU Computing 
Toolkit\CUDA\v12.1\bin" >> $GITHUB_PATH + $cudaPath = "${{steps.cuda-toolkit.outputs.CUDA_PATH}}" + echo "CUDA Path: $cudaPath" + + # Source: CUDA Visual Studio integration files + $sourceDir = "$cudaPath\extras\visual_studio_integration\MSBuildExtensions" + echo "Source: $sourceDir" + + # Find Visual Studio installation + $vsPaths = @( + "C:\Program Files\Microsoft Visual Studio\2022\Enterprise", + "C:\Program Files\Microsoft Visual Studio\2022\Community", + "C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools" + ) + + foreach ($vsPath in $vsPaths) { + $destDir = "$vsPath\MSBuild\Microsoft\VC\v170\BuildCustomizations" + if (Test-Path $destDir) { + echo "Found VS at: $vsPath" + echo "Destination: $destDir" + + if (Test-Path $sourceDir) { + echo "Copying CUDA integration files..." + Copy-Item "$sourceDir\*" $destDir -Force -Verbose + echo "Successfully copied CUDA integration files to $destDir" + } else { + echo "ERROR: Source directory not found: $sourceDir" + } + break + } + } + + # Set environment variable for CMake to use CUDA toolset + echo "CMAKE_GENERATOR_TOOLSET=cuda=$cudaPath" >> $GITHUB_ENV - name: Fix version format for Windows MSI if: matrix.platform == 'windows-latest' @@ -190,6 +238,7 @@ jobs: uses: dtolnay/rust-toolchain@stable with: targets: ${{ matrix.platform == 'macos-latest' && 'aarch64-apple-darwin,x86_64-apple-darwin' || '' }} + components: ${{ matrix.platform == 'windows-latest' && 'rustfmt' || '' }} - name: Rust Cache uses: Swatinem/rust-cache@v2 @@ -211,6 +260,14 @@ jobs: bun install cd app && bun install + + # Copy CUDA config for CUDA builds (with installer checks) + - name: Use CUDA config (Windows CUDA) + if: matrix.platform == 'windows-latest' && matrix.variant == 'cuda' + run: | + Copy-Item "app\src-tauri\tauri.cuda.conf.json" "app\src-tauri\tauri.conf.json" -Force + echo "Using CUDA configuration with installer CUDA detection" + # Using official Tauri Action to build and publish - name: Build and Publish Desktop App uses: tauri-apps/tauri-action@v0 @@ -221,9 +278,9 @@ jobs: with: projectPath: 'app' tauriScript: '../node_modules/.bin/tauri' - args: ${{ matrix.args }} + args: ${{ matrix.args }} ${{ matrix.features || '' }} tagName: ${{ needs.set-version.outputs.version }} - releaseName: Blinko ${{ needs.set-version.outputs.version }} + releaseName: Blinko ${{ needs.set-version.outputs.version }}${{ matrix.variant && format(' ({0})', matrix.variant) || '' }} releaseBody: "Under construction, full changelog will be updated after build completion..." 
releaseDraft: false prerelease: false diff --git a/.github/workflows/windows-test-release.yml b/.github/workflows/windows-test-release.yml index a30a836c3..8a1f8dc64 100644 --- a/.github/workflows/windows-test-release.yml +++ b/.github/workflows/windows-test-release.yml @@ -96,6 +96,14 @@ jobs: needs: [set-version, update-version] permissions: contents: write + strategy: + fail-fast: false + matrix: + include: + - variant: 'cpu' + features: '--features whisper-cpu' + - variant: 'cuda' + features: '--features whisper-cuda' runs-on: windows-latest steps: - uses: actions/checkout@v4 @@ -108,17 +116,61 @@ jobs: name: tauri-config path: app/src-tauri/ - - name: Install CUDA Toolkit (Windows) - uses: Jimver/cuda-toolkit@v0.2.15 + - name: Install CUDA Toolkit (Windows CUDA) + if: matrix.variant == 'cuda' + uses: Jimver/cuda-toolkit@v0.2.24 + id: cuda-toolkit with: - cuda: '12.1.0' + cuda: '12.5.0' method: 'network' - sub-packages: '[ "nvcc", "cudart", "cublas", "cublas_dev", "curand", "curand_dev" ]' + sub-packages: '[ "nvcc", "cudart", "cublas", "cublas_dev", "curand", "curand_dev", "nvrtc", "nvrtc_dev", "visual_studio_integration", "thrust" ]' - - name: Set CUDA environment variables (Windows) + - name: Set CUDA environment variables (Windows CUDA) + if: matrix.variant == 'cuda' run: | - echo "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.1" >> $GITHUB_ENV - echo "C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.1\bin" >> $GITHUB_PATH + echo "Installed cuda version is: ${{steps.cuda-toolkit.outputs.cuda}}" + echo "Cuda install location: ${{steps.cuda-toolkit.outputs.CUDA_PATH}}" + echo "CUDA_PATH=${{steps.cuda-toolkit.outputs.CUDA_PATH}}" >> $GITHUB_ENV + echo "${{steps.cuda-toolkit.outputs.CUDA_PATH}}\bin" >> $GITHUB_PATH + nvcc -V + + + - name: Fix CUDA Visual Studio Integration + if: matrix.variant == 'cuda' + run: | + $cudaPath = "${{steps.cuda-toolkit.outputs.CUDA_PATH}}" + echo "CUDA Path: $cudaPath" + + # Source: CUDA Visual Studio integration files + $sourceDir = "$cudaPath\extras\visual_studio_integration\MSBuildExtensions" + echo "Source: $sourceDir" + + # Find Visual Studio installation + $vsPaths = @( + "C:\Program Files\Microsoft Visual Studio\2022\Enterprise", + "C:\Program Files\Microsoft Visual Studio\2022\Community", + "C:\Program Files (x86)\Microsoft Visual Studio\2022\BuildTools" + ) + + foreach ($vsPath in $vsPaths) { + $destDir = "$vsPath\MSBuild\Microsoft\VC\v170\BuildCustomizations" + if (Test-Path $destDir) { + echo "Found VS at: $vsPath" + echo "Destination: $destDir" + + if (Test-Path $sourceDir) { + echo "Copying CUDA integration files..." 
+ Copy-Item "$sourceDir\*" $destDir -Force -Verbose + echo "Successfully copied CUDA integration files to $destDir" + } else { + echo "ERROR: Source directory not found: $sourceDir" + } + break + } + } + + # Set environment variable for CMake to use CUDA toolset + echo "CMAKE_GENERATOR_TOOLSET=cuda=$cudaPath" >> $GITHUB_ENV - name: Fix version format for Windows MSI run: | @@ -155,6 +207,8 @@ jobs: - name: Install Rust Stable uses: dtolnay/rust-toolchain@stable + with: + components: rustfmt - name: Rust Cache uses: Swatinem/rust-cache@v2 @@ -176,12 +230,20 @@ jobs: bun install cd app && bun install + + # Copy CUDA config for CUDA builds (with installer checks) + - name: Use CUDA config (Windows CUDA) + if: matrix.variant == 'cuda' + run: | + Copy-Item "app\src-tauri\tauri.cuda.conf.json" "app\src-tauri\tauri.conf.json" -Force + echo "Using CUDA configuration with installer CUDA detection" + # Build Windows App (without publishing) - - name: Build Windows App + - name: Build Windows App (${{ matrix.variant }}) run: | cd app - echo "Starting Windows build..." - ../node_modules/.bin/tauri build --no-bundle - echo "Windows build completed successfully!" + echo "Starting Windows ${{ matrix.variant }} build..." + ../node_modules/.bin/tauri build --no-bundle ${{ matrix.features }} + echo "Windows ${{ matrix.variant }} build completed successfully!" echo "Build artifacts location: src-tauri/target/release/" - ls -la src-tauri/target/release/ \ No newline at end of file + Get-ChildItem src-tauri/target/release/ \ No newline at end of file diff --git a/.gitignore b/.gitignore index 802d025df..c47ad3b98 100644 --- a/.gitignore +++ b/.gitignore @@ -30,4 +30,5 @@ keystore.properties dev-dist .claudeconfig .claude/* +.claude/settings.local.json target \ No newline at end of file diff --git a/README.md b/README.md index 1ef848e53..94c260627 100644 --- a/README.md +++ b/README.md @@ -51,6 +51,17 @@ Blinko is an AI-powered card note-taking project. Designed for individuals who w - 🔓**Open for Collaboration** :As an open-source project, Blinko invites contributions from the community. All code is transparent and available on GitHub, fostering a spirit of collaboration and constant improvement. +## 🎤 Offline Voice Recognition (Windows) + +The Windows desktop version supports offline voice recognition powered by Whisper, allowing you to convert speech to text without internet connectivity. 
+ +### Available Versions +- **Blinko.exe** - CPU-only version for all systems +- **Blinko(CUDA).exe** - GPU-accelerated version for NVIDIA graphics cards + - **Requires [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads) to be installed, otherwise installation will fail due to missing runtime environment** + - Provides significantly faster transcription performance + - Requires manual download of Whisper models from [Hugging Face](https://huggingface.co/ggerganov/whisper.cpp/tree/main) + ## 📦Start with Docker in seconds ```bash diff --git a/README.zh-CN.md b/README.zh-CN.md index 64405ac93..6eb8ca49d 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -48,6 +48,17 @@ Blinko 是一个AI驱动的卡片笔记项目,专为那些想要快速捕捉 - 🔓**开放协作**:作为开源项目,Blinko 欢迎社区贡献。所有代码都在 GitHub 上公开透明,培养协作和持续改进的精神。 +## 🎤 离线语音识别 (Windows) + +Windows 桌面版支持基于 Whisper 的离线语音识别功能,让您无需网络连接即可将语音转换为文字。 + +### 可用版本 +- **Blinko.exe** - CPU版本,适用于所有系统 +- **Blinko(CUDA).exe** - GPU加速版本,专为NVIDIA显卡优化 + - **必须先安装 [CUDA工具包](https://developer.nvidia.com/cuda-downloads),否则会因缺少运行环境导致安装报错** + - 提供显著更快的转录性能 + - 需要手动从 [Hugging Face](https://huggingface.co/ggerganov/whisper.cpp/tree/main) 下载 Whisper 模型 + ## 🤖 AI 模型支持 ### OpenAI - 支持 OpenAI API diff --git a/app/public/locales/ar/translation.json b/app/public/locales/ar/translation.json index c83013dc4..d4b518b4a 100644 --- a/app/public/locales/ar/translation.json +++ b/app/public/locales/ar/translation.json @@ -734,5 +734,6 @@ "voice-recognition-hotkey": "مفتاح الإدخال الصوتي السريع", "local-voice-recognition": "التفريغ الصوتي المحلي", "cuda-acceleration": "تسريع CUDA", - "voice-tip": "اضغط مع الاستمرار على مفتاح الاختصار للتحدث، لإجراء التحويل الصوتي إلى نص، وعند تحريره سيتم إدراج المحتوى المُحول في صندوق النص." + "voice-tip": "اضغط مع الاستمرار على مفتاح الاختصار للتحدث، لإجراء التحويل الصوتي إلى نص، وعند تحريره سيتم إدراج المحتوى المُحول في صندوق النص.", + "dragging": "جاري السحب..." } diff --git a/app/public/locales/de/translation.json b/app/public/locales/de/translation.json index 41ab93458..46cee2003 100644 --- a/app/public/locales/de/translation.json +++ b/app/public/locales/de/translation.json @@ -734,5 +734,6 @@ "voice-recognition-hotkey": "Spracheingabe-Hotkey", "local-voice-recognition": "Lokale Sprachtranskription", "cuda-acceleration": "CUDA-Beschleunigung", - "voice-tip": "Halten Sie die Schnelltaste gedrückt, um zu sprechen und eine Sprachtranskription durchzuführen. Wenn Sie loslassen, wird der transkribierte Inhalt in das Textfeld eingefügt." + "voice-tip": "Halten Sie die Schnelltaste gedrückt, um zu sprechen und eine Sprachtranskription durchzuführen. Wenn Sie loslassen, wird der transkribierte Inhalt in das Textfeld eingefügt.", + "dragging": "Wird gerade gezogen..." } diff --git a/app/public/locales/en/translation.json b/app/public/locales/en/translation.json index f7f89d12c..873365434 100644 --- a/app/public/locales/en/translation.json +++ b/app/public/locales/en/translation.json @@ -791,5 +791,22 @@ "audio-tips": "Press and hold the shortcut key to enter, release to insert into the text box.", "local-voice-recognition": "Local Voice Recognition", "cuda-acceleration": "CUDA Acceleration", - "voice-tip": "Press and hold the shortcut key to speak for voice transcription, and release it to insert the transcribed content into the text box." 
+ "voice-tip": "Press and hold the shortcut key to speak for voice transcription, and release it to insert the transcribed content into the text box.", + "dragging": "Dragging...", + "enhanced-authentication": "Enhanced Authentication", + "recommended": "Recommended", + "authentication-type": "Authentication Type", + "no-authentication-required": "No Authentication Required", + "no-auth-description": "This endpoint doesn't require authentication. Suitable for local or public APIs.", + "header-name": "Header Name", + "enter-your-api-key": "Enter your API key", + "header-name-placeholder": "e.g., Authorization, X-API-Key", + "bearer-token-header-description": "The header name for the Bearer token. Default: Authorization", + "api-key-header-description": "The header name for the API key. Examples: X-API-Key, api-key, Authorization", + "custom-headers": "Custom Headers", + "custom-headers-description": "Add custom HTTP headers for authentication. You can add multiple headers.", + "header-value": "Header Value", + "example-configurations": "Example Configurations", + "legacy-api-key-description": "For backward compatibility. It's recommended to use Enhanced Authentication above." } diff --git a/app/public/locales/es/translation.json b/app/public/locales/es/translation.json index abcef41ec..5eb8a3448 100644 --- a/app/public/locales/es/translation.json +++ b/app/public/locales/es/translation.json @@ -736,5 +736,6 @@ "voice-recognition-hotkey": "Teclas de acceso rápido para entrada de voz", "local-voice-recognition": "Transcripción de voz local", "cuda-acceleration": "Aceleración CUDA", - "voice-tip": "Mantén presionada la tecla de acceso rápido para hablar y realizar la transcripción por voz. Al soltar, el contenido transcrito se insertará en el cuadro de texto." + "voice-tip": "Mantén presionada la tecla de acceso rápido para hablar y realizar la transcripción por voz. Al soltar, el contenido transcrito se insertará en el cuadro de texto.", + "dragging": "Arrastrando..." } diff --git a/app/public/locales/fr/translation.json b/app/public/locales/fr/translation.json index 06b82a441..2e243c393 100644 --- a/app/public/locales/fr/translation.json +++ b/app/public/locales/fr/translation.json @@ -637,7 +637,7 @@ "import-from-markdown": "Importer à partir d'un fichier Markdown", "import-from-markdown-tip": "Importer à partir d'un simple fichier .md ou d'une archive .zip contenant des fichiers .md", "not-a-markdown-or-zip-file": "Ce n'est pas un fichier Markdown ou zip. Veuillez sélectionner un fichier .md ou .zip.", - "todo": "Procuration", + "todo": "Tâches", "restore": "Rétablissement", "complete": "terminé", "today": "Aujourd'hui", @@ -736,5 +736,6 @@ "voice-recognition-hotkey": "Raccourci de saisie vocale", "local-voice-recognition": "Transcription vocale locale", "cuda-acceleration": "Accélération CUDA", - "voice-tip": "Maintenez enfoncé le raccourci pour parler et effectuer la transcription vocale. Lorsque vous relâchez, le contenu transcrit sera inséré dans la zone de texte." + "voice-tip": "Maintenez enfoncé le raccourci pour parler et effectuer la transcription vocale. Lorsque vous relâchez, le contenu transcrit sera inséré dans la zone de texte.", + "dragging": "En train de glisser..."
} diff --git a/app/public/locales/ka/translation.json b/app/public/locales/ka/translation.json index e621dbd07..9058f822c 100644 --- a/app/public/locales/ka/translation.json +++ b/app/public/locales/ka/translation.json @@ -695,5 +695,6 @@ "voice-recognition-hotkey": "ხმოვანი შეყვანის სოკო клавиши", "local-voice-recognition": "ადგილობრივი ხმოვან տրանսկրիփցիя", "cuda-acceleration": "CUDA აჩქარება", - "voice-tip": "დააჭირე სწრაფ ღილა­­­­­кис қოшოбаs, խօսեք, տեքсті аудио транскрипцияга айналдыру üçün, босатқанда транскрипцияланған мәтіні тексt кутисине енгизиледи." + "voice-tip": "დააჭირე სწრაფ ღილა­­­­­кис қოшოбаs, խօսեք, տեքсті аудио транскрипцияга айналдыру üçün, босатқанда транскрипцияланған мәтіні тексt кутисине енгизиледи.", + "dragging": "მიმდინარეობს გადაწო..." } diff --git a/app/public/locales/kab/translation.json b/app/public/locales/kab/translation.json index 7717e10d7..ce5aac887 100644 --- a/app/public/locales/kab/translation.json +++ b/app/public/locales/kab/translation.json @@ -767,5 +767,6 @@ "voice-recognition-hotkey": "Tansaḍt n usnulfu n tujjut", "local-voice-recognition": "Tutlayt tamezgant n temda", "cuda-acceleration": "CUDA asersi", - "voice-tip": "Sserḥed tamara n umernu ara ad tettalkem, ad d-yeqqen amagrad nniṣnen, ma yella tebdaḍ ad tt-inserteɣ-d aɣbalu yettwakcem deg udrum n tefyar." + "voice-tip": "Sserḥed tamara n umernu ara ad tettalkem, ad d-yeqqen amagrad nniṣnen, ma yella tebdaḍ ad tt-inserteɣ-d aɣbalu yettwakcem deg udrum n tefyar.", + "dragging": "Dduklen..." } diff --git a/app/public/locales/ko/translation.json b/app/public/locales/ko/translation.json index 8b65ffd18..358e7f14c 100644 --- a/app/public/locales/ko/translation.json +++ b/app/public/locales/ko/translation.json @@ -731,5 +731,6 @@ "voice-recognition-hotkey": "음성 입력 단축키", "local-voice-recognition": "현지 음성 전사", "cuda-acceleration": "CUDA 가속", - "voice-tip": "길게 누르고 단축키를 말하면 음성을 텍스트로 변환하고, 손을 떼면 변환된 내용이 텍스트 상자에 삽입됩니다." + "voice-tip": "길게 누르고 단축키를 말하면 음성을 텍스트로 변환하고, 손을 떼면 변환된 내용이 텍스트 상자에 삽입됩니다.", + "dragging": "드래그 중..." } diff --git a/app/public/locales/nl/translation.json b/app/public/locales/nl/translation.json index 688240039..be43b56af 100644 --- a/app/public/locales/nl/translation.json +++ b/app/public/locales/nl/translation.json @@ -743,5 +743,6 @@ "voice-recognition-hotkey": "Spraakopname sneltoets", "local-voice-recognition": "lokale spraaktranscriptie", "cuda-acceleration": "CUDA-versnelling", - "voice-tip": "Houd de sneltoets ingedrukt om te spreken en voer spraak-naar-tekst uit. Wanneer je loslaat, wordt de getranscribeerde inhoud in het tekstvak ingevoegd." + "voice-tip": "Houd de sneltoets ingedrukt om te spreken en voer spraak-naar-tekst uit. Wanneer je loslaat, wordt de getranscribeerde inhoud in het tekstvak ingevoegd.", + "dragging": "Bezig met slepen..." } diff --git a/app/public/locales/pl/translation.json b/app/public/locales/pl/translation.json index 486d77f31..286c2b42a 100644 --- a/app/public/locales/pl/translation.json +++ b/app/public/locales/pl/translation.json @@ -727,5 +727,6 @@ "voice-recognition-hotkey": "Skrót klawiszowy do wprowadzania głosowego", "local-voice-recognition": "Lokalne przepisywanie głosu", "cuda-acceleration": "CUDA przyspieszenie", - "voice-tip": "Przytrzymaj długo skrót klawiszowy, aby mówić i przeprowadzić transkrypcję głosu. Po zwolnieniu zostanie wstawiona zawartość transkrypcji do pola tekstowego." + "voice-tip": "Przytrzymaj długo skrót klawiszowy, aby mówić i przeprowadzić transkrypcję głosu. 
Po zwolnieniu zostanie wstawiona zawartość transkrypcji do pola tekstowego.", + "dragging": "Trwa przeciąganie..." } diff --git a/app/public/locales/pt/translation.json b/app/public/locales/pt/translation.json index f96c588df..9cc9cbbb6 100644 --- a/app/public/locales/pt/translation.json +++ b/app/public/locales/pt/translation.json @@ -729,5 +729,6 @@ "audio-tips": "Pressione e segure a tecla de atalho para gravar, solte para inserir na caixa de texto.", "voice-recognition-hotkey": "Teclas de atalho para entrada de voz", "local-voice-recognition": "Transcrição de voz local", - "cuda-acceleration": "Aceleração CUDA" + "cuda-acceleration": "Aceleração CUDA", + "dragging": "Arrastando..." } diff --git a/app/public/locales/ru/translation.json b/app/public/locales/ru/translation.json index f1f7c9556..285602e1e 100644 --- a/app/public/locales/ru/translation.json +++ b/app/public/locales/ru/translation.json @@ -729,5 +729,6 @@ "voice-recognition-hotkey": "Горячие клавиши для ввода голосом", "local-voice-recognition": "Местная голосовая транскрипция", "cuda-acceleration": "CUDA ускорение", - "voice-tip": "Долгое нажатие на горячую клавишу для разговора, выполнение голосовой транскрипции, после отпускания содержимое транскрипции будет вставлено в текстовое поле." + "voice-tip": "Долгое нажатие на горячую клавишу для разговора, выполнение голосовой транскрипции, после отпускания содержимое транскрипции будет вставлено в текстовое поле.", + "dragging": "Перетаскивается..." } diff --git a/app/public/locales/tr/translation.json b/app/public/locales/tr/translation.json index 8a5e9408c..df3bdbaa8 100644 --- a/app/public/locales/tr/translation.json +++ b/app/public/locales/tr/translation.json @@ -736,5 +736,6 @@ "voice-recognition-hotkey": "Ses kaydı kısayol tuşu", "local-voice-recognition": "Yerel ses dökümü", "cuda-acceleration": "CUDA hızlandırma", - "voice-tip": "Kısayol tuşuna uzun basarak konuşun, sesli diktat yapın, bıraktığınızda diktat içeriği metin kutusuna eklenecektir." + "voice-tip": "Kısayol tuşuna uzun basarak konuşun, sesli diktat yapın, bıraktığınızda diktat içeriği metin kutusuna eklenecektir.", + "dragging": "Sürükleniyor..." } diff --git a/app/public/locales/zh-TW/translation.json b/app/public/locales/zh-TW/translation.json index 146e59bc5..a4dc456ec 100644 --- a/app/public/locales/zh-TW/translation.json +++ b/app/public/locales/zh-TW/translation.json @@ -736,5 +736,6 @@ "voice-recognition-hotkey": "語音錄入快捷鍵", "local-voice-recognition": "本地語音轉寫", "cuda-acceleration": "CUDA加速", - "voice-tip": "長按快捷鍵說話,進行語音轉寫,鬆開的時候會將轉寫內容插入到文字框中。" + "voice-tip": "長按快捷鍵說話,進行語音轉寫,鬆開的時候會將轉寫內容插入到文字框中。", + "dragging": "正在拖曳..." } diff --git a/app/public/locales/zh/translation.json b/app/public/locales/zh/translation.json index b1f82368e..a8cbbd1fc 100644 --- a/app/public/locales/zh/translation.json +++ b/app/public/locales/zh/translation.json @@ -808,5 +808,6 @@ "audio-tips": "长按快捷键录入,松开插入文本框中", "local-voice-recognition": "本地语音转写", "cuda-acceleration": "CUDA加速", - "voice-tip": "长按快捷键说话,进行语音转写,松开的时候会将转写内容插入到文本框中" + "voice-tip": "长按快捷键说话,进行语音转写,松开的时候会将转写内容插入到文本框中", + "dragging": "正在拖拽..." 
} diff --git a/app/src-tauri/Cargo.toml b/app/src-tauri/Cargo.toml index 1ad111cd0..23a3ee226 100644 --- a/app/src-tauri/Cargo.toml +++ b/app/src-tauri/Cargo.toml @@ -42,12 +42,18 @@ enigo = "0.3" rdev = "0.3" sys-locale = "0.3" +[features] +default = ["whisper-cpu"] +whisper-cuda = ["dep:whisper-rs", "whisper-rs/cuda"] +whisper-cpu = ["dep:whisper-rs"] + [target.'cfg(target_os = "windows")'.dependencies] tokio = { version = "1", features = ["rt", "rt-multi-thread", "sync"] } -whisper-rs = { version = "0.15.1" , features = ["cuda"]} cpal = "0.16.0" crossbeam-channel = "0.5" parking_lot = "0.12" +whisper-rs = { version = "0.15.1", optional = true } + [target.'cfg(target_os = "macos")'.dependencies] -macos-accessibility-client = "0.0.1" \ No newline at end of file +macos-accessibility-client = "0.0.1" diff --git a/app/src-tauri/src/desktop/setup.rs b/app/src-tauri/src/desktop/setup.rs index 4a113390d..1b4409af2 100644 --- a/app/src-tauri/src/desktop/setup.rs +++ b/app/src-tauri/src/desktop/setup.rs @@ -6,7 +6,7 @@ use tauri::{AppHandle, Manager}; use tauri_plugin_global_shortcut::{ShortcutState, ShortcutEvent}; use crate::desktop::{HotkeyConfig, setup_system_tray, toggle_quicknote_window, toggle_quickai_window, toggle_quicktool_window, restore_main_window_state, setup_window_state_monitoring}; -#[cfg(target_os = "windows")] +#[cfg(all(target_os = "windows", any(feature = "whisper-cuda", feature = "whisper-cpu")))] use crate::voice::{load_voice_config, VoiceProcessor, VOICE_STATE}; pub fn setup_app(app: &mut tauri::App) -> Result<(), Box> { @@ -63,50 +63,72 @@ pub fn setup_app(app: &mut tauri::App) -> Result<(), Box> // Initialize voice recognition if enabled (Windows only, non-blocking) #[cfg(target_os = "windows")] { - let voice_config = load_voice_config(&app_handle); - if voice_config.enabled && std::path::Path::new(&voice_config.model_path).exists() { - println!("🎤 Voice recognition enabled, initializing in background..."); + // Check if whisper-rs is available (either CUDA or CPU version) + #[cfg(any(feature = "whisper-cuda", feature = "whisper-cpu"))] + { + let voice_config = load_voice_config(&app_handle); - // Clone voice config for the background thread - let voice_config_clone = voice_config.clone(); + // Print build configuration info + #[cfg(feature = "whisper-cuda")] + println!("🚀 Voice recognition built with CUDA acceleration support"); + #[cfg(all(feature = "whisper-cpu", not(feature = "whisper-cuda")))] + println!("🖥️ Voice recognition built with CPU-only support"); - // Use std::thread::spawn instead of tokio::spawn to avoid runtime issues - std::thread::spawn(move || { - match VoiceProcessor::new(voice_config_clone.clone()) { - Ok(processor) => { - println!("✅ Voice recognition initialized successfully"); + if voice_config.enabled && std::path::Path::new(&voice_config.model_path).exists() { + println!("🎤 Voice recognition enabled, initializing in background..."); - // Update global state - { - let mut state = VOICE_STATE.lock(); - state.processor = Some(std::sync::Arc::new(processor)); - state.is_initialized = true; - *state.config.lock() = voice_config_clone.clone(); - } + // Clone voice config for the background thread + let voice_config_clone = voice_config.clone(); + + // Use std::thread::spawn instead of tokio::spawn to avoid runtime issues + std::thread::spawn(move || { + match VoiceProcessor::new(voice_config_clone.clone()) { + Ok(processor) => { + #[cfg(feature = "whisper-cuda")] + println!("✅ Voice recognition initialized successfully with CUDA support"); + 
#[cfg(all(feature = "whisper-cpu", not(feature = "whisper-cuda")))] + println!("✅ Voice recognition initialized successfully with CPU support"); + + // Update global state + { + let mut state = VOICE_STATE.lock(); + state.processor = Some(std::sync::Arc::new(processor)); + state.is_initialized = true; + *state.config.lock() = voice_config_clone.clone(); + } - // Start the voice recognition service - if let Some(ref processor) = VOICE_STATE.lock().processor { - if let Err(e) = processor.start() { - eprintln!("❌ Failed to start voice recognition: {}", e); - println!("💡 Voice recognition failed to start, but application will continue normally"); - } else { - println!("🚀 Voice recognition service started successfully"); + // Start the voice recognition service + if let Some(ref processor) = VOICE_STATE.lock().processor { + if let Err(e) = processor.start() { + eprintln!("❌ Failed to start voice recognition: {}", e); + println!("💡 Voice recognition failed to start, but application will continue normally"); + } else { + println!("🚀 Voice recognition service started successfully"); + } } } + Err(e) => { + eprintln!("❌ Failed to initialize voice recognition: {}", e); + #[cfg(feature = "whisper-cuda")] + println!("💡 If you see CUDA errors, try the CPU-only version or install CUDA toolkit"); + println!("💡 Please check model path and configuration in voice settings"); + println!("💡 Application will continue to run normally without voice recognition"); + } } - Err(e) => { - eprintln!("❌ Failed to initialize voice recognition: {}", e); - println!("💡 Please check model path and configuration in voice settings"); - println!("💡 Application will continue to run normally without voice recognition"); - } - } - }); - } else if voice_config.enabled && !std::path::Path::new(&voice_config.model_path).exists() { - println!("⚠️ Voice recognition enabled but model file not found: {}", voice_config.model_path); - println!("💡 Please download a model file and update the path in voice settings"); - println!("💡 Application will continue to run normally without voice recognition"); - } else { - println!("🔇 Voice recognition disabled in configuration"); + }); + } else if voice_config.enabled && !std::path::Path::new(&voice_config.model_path).exists() { + println!("⚠️ Voice recognition enabled but model file not found: {}", voice_config.model_path); + println!("💡 Please download a model file and update the path in voice settings"); + println!("💡 Application will continue to run normally without voice recognition"); + } else { + println!("🔇 Voice recognition disabled in configuration"); + } + } + + // If whisper-rs is not available in this build + #[cfg(not(any(feature = "whisper-cuda", feature = "whisper-cpu")))] + { + println!("🔇 Voice recognition not available in this build (no whisper features enabled)"); } } #[cfg(not(target_os = "windows"))] diff --git a/app/src-tauri/src/lib.rs b/app/src-tauri/src/lib.rs index c1e42b414..051cee1d1 100644 --- a/app/src-tauri/src/lib.rs +++ b/app/src-tauri/src/lib.rs @@ -1,10 +1,10 @@ #[cfg(not(any(target_os = "android", target_os = "ios")))] mod desktop; -#[cfg(target_os = "windows")] +#[cfg(all(target_os = "windows", any(feature = "whisper-cuda", feature = "whisper-cpu")))] mod voice; #[cfg(not(any(target_os = "android", target_os = "ios")))] use desktop::*; -#[cfg(target_os = "windows")] +#[cfg(all(target_os = "windows", any(feature = "whisper-cuda", feature = "whisper-cpu")))] use voice::*; use tauri::Manager; @@ -77,19 +77,21 @@ pub fn run() { show_quicktool, set_desktop_theme, 
set_desktop_colors, - // Voice recognition commands (Windows only) - #[cfg(target_os = "windows")] + // Voice recognition commands (Windows only with whisper features) + #[cfg(all(target_os = "windows", any(feature = "whisper-cuda", feature = "whisper-cpu")))] get_voice_config, - #[cfg(target_os = "windows")] + #[cfg(all(target_os = "windows", any(feature = "whisper-cuda", feature = "whisper-cpu")))] save_voice_config_cmd, - #[cfg(target_os = "windows")] + #[cfg(all(target_os = "windows", any(feature = "whisper-cuda", feature = "whisper-cpu")))] initialize_voice_recognition, - #[cfg(target_os = "windows")] + #[cfg(all(target_os = "windows", any(feature = "whisper-cuda", feature = "whisper-cpu")))] start_voice_recognition, - #[cfg(target_os = "windows")] + #[cfg(all(target_os = "windows", any(feature = "whisper-cuda", feature = "whisper-cpu")))] stop_voice_recognition, - #[cfg(target_os = "windows")] - get_voice_status + #[cfg(all(target_os = "windows", any(feature = "whisper-cuda", feature = "whisper-cpu")))] + get_voice_status, + #[cfg(all(target_os = "windows", any(feature = "whisper-cuda", feature = "whisper-cpu")))] + is_cuda_available ]) .setup(|app| { #[cfg(not(any(target_os = "android", target_os = "ios")))] diff --git a/app/src-tauri/src/voice/commands.rs b/app/src-tauri/src/voice/commands.rs index 3f83d6987..93660bc4b 100644 --- a/app/src-tauri/src/voice/commands.rs +++ b/app/src-tauri/src/voice/commands.rs @@ -152,3 +152,14 @@ pub async fn get_voice_status() -> Result { }) } +/// Check if CUDA support is available in this build +#[tauri::command] +pub async fn is_cuda_available() -> Result { + // Return true if built with CUDA feature, false otherwise + #[cfg(feature = "whisper-cuda")] + return Ok(true); + + #[cfg(not(feature = "whisper-cuda"))] + return Ok(false); +} + diff --git a/app/src-tauri/src/voice/transcriber.rs b/app/src-tauri/src/voice/transcriber.rs index 9443db2d5..520e90f5e 100644 --- a/app/src-tauri/src/voice/transcriber.rs +++ b/app/src-tauri/src/voice/transcriber.rs @@ -19,8 +19,13 @@ impl WhisperTranscriber { } /// Transcribe audio data to text - pub fn transcribe(&self, audio_data: &[f32], language: Option<&str>) -> Result> { - if audio_data.len() < 1600 { // At least 0.1 seconds of audio at 16kHz + pub fn transcribe( + &self, + audio_data: &[f32], + language: Option<&str>, + ) -> Result> { + if audio_data.len() < 1600 { + // At least 0.1 seconds of audio at 16kHz return Ok(String::new()); } @@ -66,17 +71,21 @@ fn detect_cuda_support() -> (bool, String) { match std::process::Command::new("nvidia-smi") .arg("--query-gpu=name") .arg("--format=csv,noheader,nounits") - .output() { + .output() + { Ok(output) if output.status.success() => { let gpu_names = String::from_utf8_lossy(&output.stdout); let gpu_list: Vec<&str> = gpu_names.lines().collect(); if !gpu_list.is_empty() { (true, format!("NVIDIA GPU: {}", gpu_list.join(", "))) } else { - (false, "NVIDIA driver installed but no GPU detected".to_string()) + ( + false, + "NVIDIA driver installed but no GPU detected".to_string(), + ) } } - _ => (false, "NVIDIA GPU or driver not detected".to_string()) + _ => (false, "NVIDIA GPU or driver not detected".to_string()), } } #[cfg(not(target_os = "windows"))] @@ -125,7 +134,11 @@ fn detect_gpu_capabilities() -> (bool, String) { let has_gpu = !gpu_info.is_empty(); let info = if has_gpu { - format!("GPU support detected: {} | {}", gpu_info.join(", "), detailed_info.join(" | ")) + format!( + "GPU support detected: {} | {}", + gpu_info.join(", "), + detailed_info.join(" | ") + ) 
} else { "No GPU support detected".to_string() }; @@ -134,7 +147,10 @@ fn detect_gpu_capabilities() -> (bool, String) { } /// Create WhisperContext with automatic GPU/CPU fallback -fn create_whisper_context_with_auto_fallback(model_path: &str, prefer_gpu: bool) -> Result<(WhisperContext, String), Box> { +fn create_whisper_context_with_auto_fallback( + model_path: &str, + prefer_gpu: bool, +) -> Result<(WhisperContext, String), Box> { let (has_gpu, gpu_info) = detect_gpu_capabilities(); println!("🔍 {}", gpu_info); @@ -143,35 +159,29 @@ fn create_whisper_context_with_auto_fallback(model_path: &str, prefer_gpu: bool) println!("🚀 GPU support detected, attempting to enable GPU acceleration..."); // Check which GPU features are compiled in - #[cfg(feature = "cuda")] - { - let mut ctx_params = WhisperContextParameters::default(); - ctx_params.use_gpu(true); + let mut ctx_params = WhisperContextParameters::default(); + ctx_params.use_gpu(true); - match WhisperContext::new_with_params(model_path, ctx_params) { - Ok(ctx) => { - println!("✅ GPU mode enabled successfully (CUDA acceleration)"); - return Ok((ctx, "GPU (CUDA)".to_string())); - } - Err(e) => { - println!("⚠️ GPU mode failed: {}", e); - println!("💡 Possible reasons:"); - println!(" - Incompatible CUDA runtime version"); - println!(" - Insufficient GPU memory"); - println!(" - Model file incompatible with GPU version"); - println!("🔄 Auto-fallback to CPU mode"); - } + match WhisperContext::new_with_params(model_path, ctx_params) { + Ok(ctx) => { + println!("✅ GPU mode enabled successfully (CUDA acceleration)"); + return Ok((ctx, "GPU (CUDA)".to_string())); } - } - #[cfg(not(feature = "cuda"))] - { - if prefer_gpu && has_gpu { - println!("⚡ GPU hardware detected, but CUDA feature not enabled"); - println!("💡 To enable GPU acceleration on Windows:"); - println!(" Add 'cuda' feature to build"); - println!("🔄 Using CPU mode"); + Err(e) => { + println!("⚠️ GPU mode failed: {}", e); + println!("💡 Possible reasons:"); + println!(" - Incompatible CUDA runtime version"); + println!(" - Insufficient GPU memory"); + println!(" - Model file incompatible with GPU version"); + println!("🔄 Auto-fallback to CPU mode"); } } + if prefer_gpu && has_gpu { + println!("⚡ GPU hardware detected, but CUDA feature not enabled"); + println!("💡 To enable GPU acceleration on Windows:"); + println!(" Add 'cuda' feature to build"); + println!("🔄 Using CPU mode"); + } } else if prefer_gpu && !has_gpu { println!("🔧 GPU acceleration requested but no GPU support detected, using CPU mode"); } else { @@ -184,4 +194,4 @@ fn create_whisper_context_with_auto_fallback(model_path: &str, prefer_gpu: bool) let ctx = WhisperContext::new_with_params(model_path, ctx_params)?; println!("✅ CPU mode enabled successfully"); Ok((ctx, "CPU".to_string())) -} \ No newline at end of file +} diff --git a/app/src-tauri/tauri.conf.json b/app/src-tauri/tauri.conf.json index 0ac125b8e..325f1424b 100644 --- a/app/src-tauri/tauri.conf.json +++ b/app/src-tauri/tauri.conf.json @@ -1,7 +1,7 @@ { "$schema": "https://schema.tauri.app/config/2", "productName": "Blinko", - "version": "1.6.3", + "version": "1.6.4", "identifier": "com.blinko.app", "build": { "beforeDevCommand": "bun run dev", diff --git a/app/src-tauri/tauri.cuda.conf.json b/app/src-tauri/tauri.cuda.conf.json new file mode 100644 index 000000000..100793ea0 --- /dev/null +++ b/app/src-tauri/tauri.cuda.conf.json @@ -0,0 +1,112 @@ +{ + "$schema": "https://schema.tauri.app/config/2", + "productName": "Blinko(CUDA)", + "version": "1.6.3", + 
"identifier": "com.blinko.app", + "build": { + "beforeDevCommand": "bun run dev", + "devUrl": "http://localhost:1111", + "beforeBuildCommand": "bun run build:no-pwa", + "frontendDist": "../../dist/public" + }, + "app": { + "withGlobalTauri": true, + "security": { + "csp": null + }, + "windows": [ + { + "title": "Blinko", + "width": 1920, + "height": 1080, + "minWidth": 600, + "minHeight": 300, + "fullscreen": false, + "resizable": true, + "focus": true, + "hiddenTitle": true, + "decorations": true, + "visible": false + }, + { + "label": "quicknote", + "title": "Quick Note", + "width": 600, + "height": 125, + "maxHeight": 600, + "fullscreen": false, + "resizable": false, + "focus": true, + "center": true, + "visible": false, + "alwaysOnTop": true, + "skipTaskbar": true, + "titleBarStyle": "Overlay", + "transparent": true, + "hiddenTitle": true, + "decorations": false, + "shadow": true, + "url": "/quicknote" + }, + { + "label": "quickai", + "title": "Quick AI", + "width": 600, + "height": 125, + "maxHeight": 600, + "fullscreen": false, + "resizable": false, + "focus": true, + "center": true, + "visible": false, + "alwaysOnTop": true, + "skipTaskbar": true, + "titleBarStyle": "Overlay", + "transparent": true, + "hiddenTitle": true, + "decorations": false, + "shadow": true, + "url": "/quickai" + }, + { + "label": "quicktool", + "title": "Quick Tool", + "width": 190, + "height": 34, + "fullscreen": false, + "resizable": false, + "focus": false, + "center": false, + "visible": false, + "alwaysOnTop": true, + "skipTaskbar": true, + "titleBarStyle": "Overlay", + "transparent": true, + "hiddenTitle": true, + "decorations": false, + "shadow": true, + "url": "/quicktool" + } + ] + }, + "bundle": { + "createUpdaterArtifacts": true, + "active": true, + "targets": "all", + "icon": [ + "icons/32x32.png", + "icons/128x128.png", + "icons/128x128@2x.png", + "icons/icon.icns", + "icons/icon.ico" + ] + }, + "plugins": { + "updater": { + "pubkey": "dW50cnVzdGVkIGNvbW1lbnQ6IG1pbmlzaWduIHB1YmxpYyBrZXk6IENBNzZBMzZDRTUxQUM4RjcKUldUM3lCcmxiS04yeXYyOGZ0RVVBbE42WDMxUXFiQTI0R3RqT0ZBbkZEcFFRNlZTWVhwZzlwRmkK", + "endpoints": [ + "https://github.com/blinkospace/blinko/releases/latest/download/latest.json" + ] + } + } +} \ No newline at end of file diff --git a/app/src/components/BlinkoCard/index.tsx b/app/src/components/BlinkoCard/index.tsx index 25d5ba3ac..3b7bc2b63 100644 --- a/app/src/components/BlinkoCard/index.tsx +++ b/app/src/components/BlinkoCard/index.tsx @@ -46,7 +46,7 @@ export const BlinkoCard = observer(({ blinkoItem, account, isShareMode = false, const pluginApi = RootStore.Get(PluginApiStore); const [isExpanded, setIsExpanded] = useState(defaultExpanded); const { pathname } = useLocation(); - + useHistoryBack({ state: isExpanded, onStateChange: () => setIsExpanded(false), @@ -112,9 +112,9 @@ export const BlinkoCard = observer(({ blinkoItem, account, isShareMode = false, shadow='none' className={` flex flex-col p-4 ${glassEffect ? 'bg-transparent' : 'bg-background'} !transition-all group/card - ${isExpanded ? 'h-screen overflow-y-scroll rounded-none' : ''} - ${isPc && !isExpanded && !blinkoItem.isShare && !withoutHoverAnimation ? 'hover:translate-y-1' : ''} - ${blinkoItem.isBlog ? 'cursor-pointer' : ''} + ${isExpanded ? 'h-screen overflow-y-scroll rounded-none' : ''} + ${isPc && !isExpanded && !blinkoItem.isShare && !withoutHoverAnimation ? 'hover:translate-y-1' : ''} + ${blinkoItem.isBlog ? 'cursor-pointer' : ''} ${blinko.curMultiSelectIds?.includes(blinkoItem.id!) ? 
'border-2 border-primary' : ''} ${className} `} diff --git a/app/src/components/BlinkoSettings/AiSetting/AuthenticationConfig.tsx b/app/src/components/BlinkoSettings/AiSetting/AuthenticationConfig.tsx new file mode 100644 index 000000000..1e559df3a --- /dev/null +++ b/app/src/components/BlinkoSettings/AiSetting/AuthenticationConfig.tsx @@ -0,0 +1,343 @@ +import { observer } from 'mobx-react-lite'; +import { Button, Input, Select, SelectItem, Card, CardBody, Chip, Textarea } from '@heroui/react'; +import { Icon } from '@/components/Common/Iconify/icons'; +import { useTranslation } from 'react-i18next'; +import { useState, useEffect } from 'react'; +import { AuthenticationConfig } from '@/store/aiSettingStore'; + +interface AuthenticationConfigProps { + authConfig?: AuthenticationConfig; + onAuthConfigChange: (config: AuthenticationConfig) => void; + provider?: string; +} + +// Default authentication configurations for common providers +const DEFAULT_AUTH_CONFIGS: { [key: string]: AuthenticationConfig } = { + openai: { + type: 'bearer', + headerName: 'Authorization' + }, + anthropic: { + type: 'api-key', + headerName: 'x-api-key' + }, + azure: { + type: 'api-key', + headerName: 'api-key' + }, + google: { + type: 'api-key', + headerName: 'x-goog-api-key' + }, + voyageai: { + type: 'api-key', + headerName: 'Authorization' + }, + zhipuai: { + type: 'bearer', + headerName: 'Authorization' + }, + ollama: { + type: 'none' + }, + custom: { + type: 'bearer', + headerName: 'Authorization' + } +}; + +export default observer(function AuthenticationConfig({ + authConfig, + onAuthConfigChange, + provider = 'custom' +}: AuthenticationConfigProps) { + const { t } = useTranslation(); + const [localConfig, setLocalConfig] = useState(() => { + if (authConfig) { + return authConfig; + } + + // Use default config for the provider type + return DEFAULT_AUTH_CONFIGS[provider] || DEFAULT_AUTH_CONFIGS.custom; + }); + + const [customHeaders, setCustomHeaders] = useState<{ [key: string]: string }>( + authConfig?.customHeaders || {} + ); + const [customHeaderKey, setCustomHeaderKey] = useState(''); + const [customHeaderValue, setCustomHeaderValue] = useState(''); + + useEffect(() => { + if (authConfig) { + setLocalConfig(authConfig); + setCustomHeaders(authConfig.customHeaders || {}); + } else { + const defaultConfig = DEFAULT_AUTH_CONFIGS[provider] || DEFAULT_AUTH_CONFIGS.custom; + setLocalConfig(defaultConfig); + setCustomHeaders(defaultConfig.customHeaders || {}); + } + }, [authConfig, provider]); + + const updateConfig = (updates: Partial) => { + const newConfig = { ...localConfig, ...updates }; + setLocalConfig(newConfig); + onAuthConfigChange(newConfig); + }; + + const addCustomHeader = () => { + if (customHeaderKey.trim() && customHeaderValue.trim()) { + const newHeaders = { ...customHeaders, [customHeaderKey.trim()]: customHeaderValue.trim() }; + setCustomHeaders(newHeaders); + updateConfig({ customHeaders: newHeaders }); + setCustomHeaderKey(''); + setCustomHeaderValue(''); + } + }; + + const removeCustomHeader = (key: string) => { + const newHeaders = { ...customHeaders }; + delete newHeaders[key]; + setCustomHeaders(newHeaders); + updateConfig({ customHeaders: newHeaders }); + }; + + const renderBearerConfig = () => ( +
+ updateConfig({ apiKey: value })} + /> + + updateConfig({ headerName: value })} + description={t('bearer-token-header-description')} + /> +
+ ); + + const renderApiKeyConfig = () => ( +
+ updateConfig({ apiKey: value })} + /> + + updateConfig({ headerName: value })} + description={t('api-key-header-description')} + /> +
+ ); + + const renderCustomConfig = () => ( +
+
+ +
+ {Object.entries(customHeaders).map(([key, value]) => ( +
+ + {key} + + { + const newHeaders = { ...customHeaders, [key]: newValue }; + setCustomHeaders(newHeaders); + updateConfig({ customHeaders: newHeaders }); + }} + className="flex-1" + /> + +
+ ))} + +
+ + + +
+
+

+ {t('custom-headers-description')} +

+
+ +
+ +
+ + + +
+
+
+ ); + + const renderNoneConfig = () => ( +
+ + +
+
+ +
+
+

{t('no-authentication-required')}

+

{t('no-auth-description')}

+
+
+
+
+
+ ); + + return ( +
+
+ + +
+ + {localConfig.type === 'bearer' && renderBearerConfig()} + {localConfig.type === 'api-key' && renderApiKeyConfig()} + {localConfig.type === 'custom' && renderCustomConfig()} + {localConfig.type === 'none' && renderNoneConfig()} +
+ ); +}); \ No newline at end of file diff --git a/app/src/components/BlinkoSettings/AiSetting/ProviderDialogContent.tsx b/app/src/components/BlinkoSettings/AiSetting/ProviderDialogContent.tsx index 107f4848a..288fb7252 100644 --- a/app/src/components/BlinkoSettings/AiSetting/ProviderDialogContent.tsx +++ b/app/src/components/BlinkoSettings/AiSetting/ProviderDialogContent.tsx @@ -6,9 +6,10 @@ import { useState, useEffect } from 'react'; import { RootStore } from '@/store'; import { DialogStore } from '@/store/module/Dialog'; import { ProviderIcon } from '@/components/BlinkoSettings/AiSetting/AIIcon'; -import { AiProvider, AiSettingStore } from '@/store/aiSettingStore'; +import { AiProvider, AiSettingStore, AuthenticationConfig } from '@/store/aiSettingStore'; import { PROVIDER_TEMPLATES } from './constants'; import { Copy } from '@/components/Common/Copy'; +import AuthenticationConfigComponent from './AuthenticationConfig'; interface ProviderDialogContentProps { provider?: AiProvider; @@ -57,15 +58,25 @@ export default observer(function ProviderDialogContent({ provider }: ProviderDia baseURL: '', apiKey: '', sortOrder: 0, - models: [] + models: [], + authConfig: undefined }; }); + const [authConfig, setAuthConfig] = useState(() => { + // Try to get auth config from provider config first, then from authConfig property + const providerAuthConfig = provider?.config?.authConfig || provider?.authConfig; + return providerAuthConfig; + }); + // Initialize editing mode if provider exists useEffect(() => { if (provider) { setCurrentStep(2); setSelectedTemplate(provider.provider); + // Load auth config from provider config or authConfig property + const providerAuthConfig = provider.config?.authConfig || provider.authConfig; + setAuthConfig(providerAuthConfig); } }, [provider]); @@ -96,10 +107,19 @@ export default observer(function ProviderDialogContent({ provider }: ProviderDia const handleSaveProvider = async () => { if (!editingProvider) return; + // Prepare provider data with enhanced authentication config + const providerData = { + ...editingProvider, + config: { + ...editingProvider.config, + authConfig: authConfig // Store auth config in the config JSON field + } + }; + if (editingProvider.id) { - await aiSettingStore.updateProvider.call(editingProvider as any); + await aiSettingStore.updateProvider.call(providerData as any); } else { - await aiSettingStore.createProvider.call(editingProvider as any); + await aiSettingStore.createProvider.call(providerData as any); } RootStore.Get(DialogStore).close(); }; @@ -166,7 +186,7 @@ export default observer(function ProviderDialogContent({ provider }: ProviderDia const template = PROVIDER_TEMPLATES.find(t => t.value === selectedTemplate); return ( -
+

@@ -174,51 +194,83 @@ export default observer(function ProviderDialogContent({ provider }: ProviderDia

- { - setEditingProvider(prev => ({ ...prev, title: value })); - }} - /> - - { - setEditingProvider(prev => ({ ...prev, baseURL: value })); - }} - /> + {/* Basic Configuration */} +
+ { + setEditingProvider(prev => ({ ...prev, title: value })); + }} + /> - { - setEditingProvider(prev => ({ ...prev, apiKey: value })); - }} - endContent={} - /> + { + setEditingProvider(prev => ({ ...prev, baseURL: value })); + }} + /> - {(editingProvider.provider === 'azure' || editingProvider.provider === 'azureopenai') && ( + {/* Legacy API Key field for backward compatibility */} { - setEditingProvider(prev => ({ - ...prev, - config: { - ...prev.config, - apiVersion: value - } - })); + setEditingProvider(prev => ({ ...prev, apiKey: value })); + // Also update auth config if it exists + if (authConfig) { + setAuthConfig({ ...authConfig, apiKey: value }); + } + }} + endContent={} + description={t('legacy-api-key-description')} + /> + + {(editingProvider.provider === 'azure' || editingProvider.provider === 'azureopenai') && ( + { + setEditingProvider(prev => ({ + ...prev, + config: { + ...prev.config, + apiVersion: value + } + })); + }} + /> + )} +
+ + {/* Enhanced Authentication Configuration */} +
+
+ +

{t('enhanced-authentication')}

+ + {t('recommended')} + +
+ + { + setAuthConfig(newAuthConfig); + // Also update legacy apiKey field for backward compatibility + if (newAuthConfig.apiKey && !editingProvider.apiKey) { + setEditingProvider(prev => ({ ...prev, apiKey: newAuthConfig.apiKey })); + } }} + provider={selectedTemplate} /> - )} +
); }; diff --git a/app/src/components/BlinkoSettings/VoiceSetting.tsx b/app/src/components/BlinkoSettings/VoiceSetting.tsx index 7a46c370b..4fa478581 100644 --- a/app/src/components/BlinkoSettings/VoiceSetting.tsx +++ b/app/src/components/BlinkoSettings/VoiceSetting.tsx @@ -7,7 +7,7 @@ import { useTranslation } from 'react-i18next'; import { Item, ItemWithTooltip } from './Item'; import { useEffect, useState } from 'react'; import { invoke } from '@tauri-apps/api/core'; -import { isDesktop, isInTauri } from '@/lib/tauriHelper'; +import { isDesktop, isInTauri, isWindows } from '@/lib/tauriHelper'; import { CollapsibleCard } from '../Common/CollapsibleCard'; import { ToastPlugin } from '@/store/module/Toast/Toast'; import { VoiceRecognitionConfig } from '@/../../shared/lib/types'; @@ -46,6 +46,7 @@ export const VoiceSetting = observer(() => { const [voiceConfig, setVoiceConfig] = useState(null); const [voiceStatus, setVoiceStatus] = useState(null); const [isVoiceInitializing, setIsVoiceInitializing] = useState(false); + const [isCudaAvailable, setIsCudaAvailable] = useState(false); // Check if running on Tauri desktop const isTauriDesktop = isInTauri() && isDesktop(); @@ -61,6 +62,11 @@ export const VoiceSetting = observer(() => { // Load voice status const status = await invoke('get_voice_status'); setVoiceStatus(status); + + // Check CUDA availability + const cudaAvailable = await invoke('is_cuda_available'); + setIsCudaAvailable(cudaAvailable); + console.log('CUDA support available:', cudaAvailable); } catch (error) { console.error('Failed to load voice config:', error); } @@ -263,8 +269,8 @@ export const VoiceSetting = observer(() => { type="col" /> - {/* CUDA acceleration switch (Windows only) */} - {typeof window !== 'undefined' && navigator.platform.indexOf('Win') > -1 && ( + {/* CUDA acceleration switch (Windows only, when CUDA feature is available) */} + {isWindows() && isCudaAvailable && ( diff --git a/app/src/hooks/useDragCard.tsx b/app/src/hooks/useDragCard.tsx new file mode 100644 index 000000000..8f6118975 --- /dev/null +++ b/app/src/hooks/useDragCard.tsx @@ -0,0 +1,177 @@ +import { useState, useRef, useEffect } from 'react'; +import { DragEndEvent, DragStartEvent, MouseSensor, TouchSensor, useSensor, useSensors, closestCenter, useDroppable, useDraggable } from '@dnd-kit/core'; +import { arrayMove } from '@dnd-kit/sortable'; +import { CSS } from '@dnd-kit/utilities'; +import { api } from '@/lib/trpc'; +import { BlinkoCard } from '@/components/BlinkoCard'; +import { useTranslation } from 'react-i18next'; + +interface UseDragCardProps { + notes: any[] | undefined; + onNotesUpdate?: (notes: any[]) => void; + activeId: number | null; + setActiveId: (id: number | null) => void; + insertPosition: number | null; + setInsertPosition: (position: number | null) => void; +} + +export const useDragCard = ({ notes, onNotesUpdate, activeId, setActiveId, insertPosition, setInsertPosition }: UseDragCardProps) => { + const [localNotes, setLocalNotes] = useState([]); + const isDraggingRef = useRef(false); + + // Update local notes when the list changes (but not during drag operations) + useEffect(() => { + if (notes && !isDraggingRef.current) { + // Sort by sortOrder to maintain the correct order from the database + const sortedNotes = [...notes].sort((a, b) => a.sortOrder - b.sortOrder); + setLocalNotes(sortedNotes); + onNotesUpdate?.(sortedNotes); + } + }, [notes]); + + const sensors = useSensors( + useSensor(MouseSensor, { + activationConstraint: { + delay: 250, + tolerance: 5, + }, + }), + 
useSensor(TouchSensor, { + activationConstraint: { + delay: 250, + tolerance: 5, + }, + }) + ); + + const handleDragStart = (event: any) => { + setActiveId(event.active.id as number); + }; + + const handleDragEnd = (event: any) => { + const { active, over } = event; + + if (over) { + const dropTargetId = over.id.toString(); + const dragItemId = active.id; + + // Extract the note ID from the droppable ID + const targetNoteId = parseInt(dropTargetId.replace('drop-', '')); + + if (dragItemId !== targetNoteId) { + const oldIndex = localNotes.findIndex((note) => note.id === dragItemId); + const newIndex = localNotes.findIndex((note) => note.id === targetNoteId); + + if (oldIndex !== -1 && newIndex !== -1) { + const newNotes = [...localNotes]; + const [movedNote] = newNotes.splice(oldIndex, 1); + newNotes.splice(newIndex, 0, movedNote); + + // Update sortOrder + const updatedNotes = newNotes.map((note, index) => ({ + ...note, + sortOrder: index, + })); + + // Call the original hook's update logic + setLocalNotes(updatedNotes); + + // Update server + const updates = updatedNotes.map((note) => ({ + id: note.id, + sortOrder: note.sortOrder, + })); + + api.notes.updateNotesOrder.mutate({ updates }); + } + } + } + + setActiveId(null); + setInsertPosition(null); + }; + + const handleDragOver = (event: any) => { + const { over } = event; + if (over) { + const targetNoteId = parseInt(over.id.toString().replace('drop-', '')); + setInsertPosition(targetNoteId); + } + }; + + return { + localNotes, + sensors, + setLocalNotes, + isDraggingRef, + handleDragStart, + handleDragEnd, + handleDragOver + }; +}; + +interface DraggableBlinkoCardProps { + blinkoItem: any; + showInsertLine?: boolean; + insertPosition?: 'top' | 'bottom'; +} + +export const DraggableBlinkoCard = ({ blinkoItem, showInsertLine, insertPosition }: DraggableBlinkoCardProps) => { + const { setNodeRef: setDroppableRef, isOver } = useDroppable({ + id: `drop-${blinkoItem.id}`, + }); + const { t } = useTranslation() + + const { + attributes, + listeners, + setNodeRef: setDraggableRef, + transform, + isDragging, + } = useDraggable({ + id: blinkoItem.id, + }); + + const dragStyle = { + transform: CSS.Transform.toString(transform), + }; + + return ( +
+ {showInsertLine && insertPosition === 'top' && ( +
+ )} + + {/* Droppable area - always visible, shows placeholder when dragging */} +
+ {isDragging ? ( +
+
+
{t('dragging')}
+
+
+ ) : ( + // Draggable area - long press to drag using dnd-kit's activationConstraint +
+ +
+ )} +
+ + {showInsertLine && insertPosition === 'bottom' && ( +
+ )} +
+ ); +}; \ No newline at end of file diff --git a/app/src/pages/index.tsx b/app/src/pages/index.tsx index 57d57d644..b5eda185a 100644 --- a/app/src/pages/index.tsx +++ b/app/src/pages/index.tsx @@ -10,10 +10,12 @@ import { useMediaQuery } from 'usehooks-ts'; import { BlinkoAddButton } from '@/components/BlinkoAddButton'; import { LoadingAndEmpty } from '@/components/Common/LoadingAndEmpty'; import { useSearchParams } from 'react-router-dom'; -import { useMemo } from 'react'; +import { useMemo, useState } from 'react'; import dayjs from '@/lib/dayjs'; import { NoteType } from '@shared/lib/types'; import { Icon } from '@/components/Common/Iconify/icons'; +import { DndContext, closestCenter, DragOverlay } from '@dnd-kit/core'; +import { useDragCard, DraggableBlinkoCard } from '@/hooks/useDragCard'; interface TodoGroup { displayDate: string; @@ -32,6 +34,8 @@ const Home = observer(() => { const isArchivedView = searchParams.get('path') === 'archived'; const isTrashView = searchParams.get('path') === 'trash'; const isAllView = searchParams.get('path') === 'all'; + const [activeId, setActiveId] = useState(null); + const [insertPosition, setInsertPosition] = useState(null); const currentListState = useMemo(() => { if (isNotesView) { @@ -49,6 +53,15 @@ const Home = observer(() => { } }, [isNotesView, isTodoView, isArchivedView, isTrashView, isAllView, blinko]); + // Use drag card hook only for non-todo views + const { localNotes, sensors, setLocalNotes, handleDragStart, handleDragEnd, handleDragOver } = useDragCard({ + notes: isTodoView ? [] : currentListState.value, + activeId, + setActiveId, + insertPosition, + setInsertPosition + }); + const store = RootStore.Local(() => ({ editorHeight: 30, get showEditor() { @@ -150,20 +163,45 @@ const Home = observer(() => {
) : ( <> - - { - currentListState?.value?.map(i => { - return - }) - } - + + + { + localNotes?.map((i, index) => { + const showInsertLine = insertPosition === i.id && activeId !== i.id; + return ( + + ); + }) + } + + + {activeId ? ( +
+ <BlinkoCard blinkoItem={localNotes.find(n => n.id === activeId)} + />
+ ) : null} +
+
)} diff --git a/app/src/store/aiSettingStore.tsx b/app/src/store/aiSettingStore.tsx index c8acf4d74..532756b5d 100644 --- a/app/src/store/aiSettingStore.tsx +++ b/app/src/store/aiSettingStore.tsx @@ -9,6 +9,21 @@ import { ToastPlugin } from './module/Toast/Toast'; import i18n from '@/lib/i18n'; import { defaultUrlTransform } from 'react-markdown'; +// Enhanced authentication configuration types +export type AuthenticationType = 'bearer' | 'api-key' | 'custom' | 'none'; + +export interface AuthenticationConfig { + type: AuthenticationType; + apiKey?: string; + headerName?: string; + customHeaders?: { [key: string]: string }; + options?: { + includeContentType?: boolean; + userAgent?: string; + queryParams?: { [key: string]: string }; + }; +} + export interface ModelCapabilities { inference: boolean; tools: boolean; @@ -27,7 +42,7 @@ export interface ProviderModel { capabilities: ModelCapabilities; } -export type AiProvider = aiProviders & { models?: AiModel[] }; +export type AiProvider = aiProviders & { models?: AiModel[]; authConfig?: AuthenticationConfig }; export type AiModel = aiModels & { provider?: AiProvider; capabilities: ModelCapabilities }; export class AiSettingStore implements Store { @@ -110,17 +125,73 @@ export class AiSettingStore implements Store { }, }); - // Provider model fetching + // Provider model fetching with enhanced authentication support fetchProviderModels = new PromiseState({ successMsg: i18n.t('model-list-updated'), function: async (provider: AiProvider) => { try { let modelList: any = []; + const authConfig = provider.authConfig; + + // Helper function to build headers from authentication config + const buildHeaders = (): { [key: string]: string } => { + const headers: { [key: string]: string } = {}; + + if (!authConfig || authConfig.type === 'none') { + return headers; + } + + switch (authConfig.type) { + case 'bearer': + if (authConfig.apiKey) { + const headerName = authConfig.headerName || 'Authorization'; + headers[headerName] = `Bearer ${authConfig.apiKey}`; + } + break; + + case 'api-key': + if (authConfig.apiKey && authConfig.headerName) { + headers[authConfig.headerName] = authConfig.apiKey; + } + break; + + case 'custom': + if (authConfig.customHeaders) { + Object.assign(headers, authConfig.customHeaders); + } + break; + } + + // Add content-type if specified + if (authConfig.options?.includeContentType) { + headers['Content-Type'] = 'application/json'; + } + + // Add user agent if specified + if (authConfig.options?.userAgent) { + headers['User-Agent'] = authConfig.options.userAgent; + } + + return headers; + }; + + // Helper function to build URL with query parameters + const buildUrl = (baseUrl: string, path: string): string => { + const url = new URL(path, baseUrl); + + if (authConfig?.options?.queryParams) { + Object.entries(authConfig.options.queryParams).forEach(([key, value]) => { + url.searchParams.append(key, value); + }); + } + + return url.toString(); + }; switch (provider.provider) { case 'ollama': { const endpoint = provider.baseURL || 'http://127.0.0.1:11434'; - const response = await fetch(`${endpoint}/api/tags`); + const response = await fetch(buildUrl(endpoint, '/api/tags')); const data = await response.json(); modelList = data.models.map((model: any) => ({ id: model.name, @@ -132,11 +203,14 @@ export class AiSettingStore implements Store { } case 'openai': { const endpoint = provider.baseURL || 'https://api.openai.com/v1'; - const response = await fetch(`${endpoint}/models`, { - headers: { - 'Authorization': `Bearer 
${provider.apiKey}` - } - }); + const headers = buildHeaders(); + + // If no auth config, fall back to legacy auth + if (!authConfig && provider.apiKey) { + headers['Authorization'] = `Bearer ${provider.apiKey}`; + } + + const response = await fetch(buildUrl(endpoint, '/models'), { headers }); const data = await response.json(); modelList = data.data.map((model: any) => ({ id: model.id, @@ -172,7 +246,17 @@ export class AiSettingStore implements Store { } case 'google': { const endpoint = provider.baseURL || 'https://generativelanguage.googleapis.com/v1beta'; - const response = await fetch(`${endpoint}/models?key=${provider.apiKey}`); + const headers = buildHeaders(); + + // If no auth config, fall back to legacy auth with query param + let url: string; + if (!authConfig && provider.apiKey) { + url = `${endpoint}/models?key=${provider.apiKey}`; + } else { + url = buildUrl(endpoint, '/models'); + } + + const response = await fetch(url, { headers }); const data = await response.json(); modelList = data.models?.map((model: any) => ({ id: model.name.replace('models/', ''), @@ -184,11 +268,14 @@ export class AiSettingStore implements Store { } case 'azure': { const endpoint = provider.baseURL; - const response = await fetch(`${endpoint}/openai/models?api-version=2024-02-01`, { - headers: { - 'api-key': provider.apiKey || '' - } - }); + const headers = buildHeaders(); + + // If no auth config, fall back to legacy auth + if (!authConfig && provider.apiKey) { + headers['api-key'] = provider.apiKey; + } + + const response = await fetch(buildUrl(endpoint, '/openai/models?api-version=2024-02-01'), { headers }); const data = await response.json(); modelList = data.data.map((model: any) => ({ id: model.id, @@ -198,28 +285,52 @@ export class AiSettingStore implements Store { })); break; } + case 'custom': default: { - const endpoint = provider.baseURL; - const response = await fetch(`${endpoint}/models`, { - headers: { - 'Authorization': `Bearer ${provider.apiKey}` + // Enhanced custom provider with flexible authentication + if (provider.baseURL) { + const endpoint = provider.baseURL; + const headers = buildHeaders(); + + // If no auth config, fall back to legacy auth + if (!authConfig && provider.apiKey) { + headers['Authorization'] = `Bearer ${provider.apiKey}`; } - }); - const data = await response.json(); - modelList = data.data.map((model: any) => ({ - id: model.id, - name: model.id, - description: '', - capabilities: this.inferModelCapabilities(model.id) - })); + + try { + const response = await fetch(buildUrl(endpoint, '/models'), { headers }); + const data = await response.json(); + + // Handle different response formats + if (data.data && Array.isArray(data.data)) { + modelList = data.data.map((model: any) => ({ + id: model.id, + name: model.id, + description: model.description || '', + capabilities: this.inferModelCapabilities(model.id) + })); + } else if (Array.isArray(data)) { + modelList = data.map((model: any) => ({ + id: model.id || model.name, + name: model.name || model.id, + description: model.description || '', + capabilities: this.inferModelCapabilities(model.id || model.name) + })); + } + } catch (error) { + console.warn(`Failed to fetch models from ${endpoint}:`, error); + // Continue with empty model list + } + } break; } } - // Save models to provider config + // Save models and auth config to provider config const updatedConfig = { ...provider.config, - models: modelList + models: modelList, + authConfig: authConfig // Save auth config for future use }; await 
this.updateProvider.call({ diff --git a/bun.lock b/bun.lock index 0b398604b..ad01ae9f7 100644 --- a/bun.lock +++ b/bun.lock @@ -4,6 +4,9 @@ "": { "name": "blinko-monorepo", "dependencies": { + "@dnd-kit/core": "^6.3.1", + "@dnd-kit/sortable": "^10.0.0", + "@dnd-kit/utilities": "^3.2.2", "@tauri-apps/plugin-process": "^2.2.1", "@tauri-apps/plugin-updater": "^2.7.1", "dotenv": "^16.5.0", @@ -4164,7 +4167,7 @@ "rc-checkbox": ["rc-checkbox@3.5.0", "", { "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "^2.3.2", "rc-util": "^5.25.2" }, "peerDependencies": { "react": ">=16.9.0", "react-dom": ">=16.9.0" } }, "sha512-aOAQc3E98HteIIsSqm6Xk2FPKIER6+5vyEFMZfo73TqM+VVAIqOkHoPjgKLqSNtVLWScoaM7vY2ZrGEheI79yg=="], - "rc-collapse": ["rc-collapse@3.9.0", "", { "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", "rc-motion": "^2.3.4", "rc-util": "^5.27.0" }, "peerDependencies": { "react": ">=16.9.0", "react-dom": ">=16.9.0" } }, "sha512-swDdz4QZ4dFTo4RAUMLL50qP0EY62N2kvmk2We5xYdRwcRn8WcYtuetCJpwpaCbUfUt5+huLpVxhvmnK+PHrkA=="], + "rc-collapse": ["rc-collapse@4.0.0", "", { "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", "rc-motion": "^2.3.4", "rc-util": "^5.27.0" }, "peerDependencies": { "react": ">=16.9.0", "react-dom": ">=16.9.0" } }, "sha512-SwoOByE39/3oIokDs/BnkqI+ltwirZbP8HZdq1/3SkPSBi7xDdvWHTp7cpNI9ullozkR6mwTWQi6/E/9huQVrA=="], "rc-dialog": ["rc-dialog@9.6.0", "", { "dependencies": { "@babel/runtime": "^7.10.1", "@rc-component/portal": "^1.0.0-8", "classnames": "^2.2.6", "rc-motion": "^2.3.0", "rc-util": "^5.21.0" }, "peerDependencies": { "react": ">=16.9.0", "react-dom": ">=16.9.0" } }, "sha512-ApoVi9Z8PaCQg6FsUzS8yvBEQy0ZL2PkuvAgrmohPkN3okps5WZ5WQWPc1RNuiOKaAYv8B97ACdsFU5LizzCqg=="], @@ -5478,8 +5481,6 @@ "@lobehub/ui/lucide-react": ["lucide-react@0.543.0", "", { "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-fpVfuOQO0V3HBaOA1stIiP/A2fPCXHIleRZL16Mx3HmjTYwNSbimhnFBygs2CAfU1geexMX5ItUcWBGUaqw5CA=="], - "@lobehub/ui/rc-collapse": ["rc-collapse@4.0.0", "", { "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", "rc-motion": "^2.3.4", "rc-util": "^5.27.0" }, "peerDependencies": { "react": ">=16.9.0", "react-dom": ">=16.9.0" } }, "sha512-SwoOByE39/3oIokDs/BnkqI+ltwirZbP8HZdq1/3SkPSBi7xDdvWHTp7cpNI9ullozkR6mwTWQi6/E/9huQVrA=="], - "@lobehub/ui/url-join": ["url-join@5.0.0", "", {}, "sha512-n2huDr9h9yzd6exQVnH/jU5mr+Pfx08LRXXZhkLLetAMESRj+anQsTAh940iMrIetKAmry9coFuZQ2jY8/p3WA=="], "@lobehub/ui/uuid": ["uuid@11.1.0", "", { "bin": { "uuid": "dist/esm/bin/uuid" } }, "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A=="], @@ -5898,6 +5899,8 @@ "antd/@babel/runtime": ["@babel/runtime@7.28.4", "", {}, "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ=="], + "antd/rc-collapse": ["rc-collapse@3.9.0", "", { "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", "rc-motion": "^2.3.4", "rc-util": "^5.27.0" }, "peerDependencies": { "react": ">=16.9.0", "react-dom": ">=16.9.0" } }, "sha512-swDdz4QZ4dFTo4RAUMLL50qP0EY62N2kvmk2We5xYdRwcRn8WcYtuetCJpwpaCbUfUt5+huLpVxhvmnK+PHrkA=="], + "antd/scroll-into-view-if-needed": ["scroll-into-view-if-needed@3.1.0", "", { "dependencies": { "compute-scroll-into-view": "^3.0.2" } }, "sha512-49oNpRjWRvnU8NyGVmUaYG4jtTkNonFZI86MmGRDqBphEK2EXT9gdEUoQPZhuBM8yWHxCWbobltqYO5M4XrUvQ=="], "antd-style/@babel/runtime": ["@babel/runtime@7.28.4", "", {}, 
"sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ=="], @@ -6876,8 +6879,6 @@ "@lobehub/ui/framer-motion/motion-utils": ["motion-utils@12.23.6", "", {}, "sha512-eAWoPgr4eFEOFfg2WjIsMoqJTW6Z8MTUCgn/GZ3VRpClWBdnbjryiA3ZSNLyxCTmCQx4RmYX6jX1iWHbenUPNQ=="], - "@lobehub/ui/rc-collapse/@babel/runtime": ["@babel/runtime@7.28.4", "", {}, "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ=="], - "@mastra/core/pino-pretty/pino-abstract-transport": ["pino-abstract-transport@2.0.0", "", { "dependencies": { "split2": "^4.0.0" } }, "sha512-F63x5tizV6WCh4R6RHyi2Ml+M70DNRXt/+HANowMflpgGFMAym/VKm6G7ZOQRjqN7XbGxK1Lg9t6ZrtzOaivMw=="], "@mastra/core/pino-pretty/sonic-boom": ["sonic-boom@4.2.0", "", { "dependencies": { "atomic-sleep": "^1.0.0" } }, "sha512-INb7TM37/mAcsGmc9hyyI6+QR3rR1zVRu36B0NeGXKnOOLiZOfER5SA+N7X7k3yUYRzLWafduTDvJAfDswwEww=="], diff --git a/docs/ENHANCED_AUTHENTICATION.md b/docs/ENHANCED_AUTHENTICATION.md new file mode 100644 index 000000000..282fbeb73 --- /dev/null +++ b/docs/ENHANCED_AUTHENTICATION.md @@ -0,0 +1,263 @@ +# Enhanced Authentication System for AI Providers + +This document describes the enhanced authentication system implemented in Blinko to support flexible authentication methods for custom AI providers, including embedding models. + +## Overview + +The enhanced authentication system allows users to configure custom AI providers with various authentication methods, addressing the limitations of the previous system that only supported basic API key authentication. + +## Supported Authentication Types + +### 1. Bearer Token Authentication +- **Type**: `bearer` +- **Format**: `Authorization: Bearer ` +- **Use Case**: Standard OAuth2/JWT tokens +- **Example**: OpenAI, Anthropic Claude, Zhipu AI + +### 2. API Key Authentication +- **Type**: `api-key` +- **Format**: Custom header with API key +- **Use Case**: Providers using non-standard header names +- **Examples**: + - `X-API-Key: ` + - `api-key: ` + - `Authorization: ` + +### 3. Custom Headers +- **Type**: `custom` +- **Format**: Full control over HTTP headers +- **Use Case**: Complex authentication requirements +- **Examples**: + - Multiple headers + - Custom authentication schemes + - Special headers for specific providers + +### 4. No Authentication +- **Type**: `none` +- **Use Case**: Local or public APIs +- **Example**: Ollama, local development servers + +## Configuration Examples + +### Zhipu AI (智谱AI) Example +```bash +# Using Bearer Token authentication +curl --request POST \ + --url https://open.bigmodel.cn/api/paas/v4/embeddings \ + --header 'Authorization: Bearer ' \ + --header 'Content-Type: application/json' \ + --data '{ + "model": "embedding-3", + "input": "你好,今天天气怎么样.", + "dimensions": 2 + }' +``` + +**Configuration in Blinko:** +- **Authentication Type**: Bearer Token +- **API Key**: Your Zhipu AI token +- **Header Name**: Authorization (default) +- **Base URL**: `https://open.bigmodel.cn/api/paas/v4` + +### Moonshot AI (月之暗面) Example +```bash +# Using Bearer Token authentication +curl --request POST \ + --url https://api.moonshot.cn/v1/embeddings \ + --header 'Authorization: Bearer ' \ + --header 'Content-Type: application/json' \ + --data '{ + "model": "moonshot-embedding", + "input": "Hello, world!" 
+ }' +``` + +**Configuration in Blinko:** +- **Authentication Type**: Bearer Token +- **API Key**: Your Moonshot AI key +- **Header Name**: Authorization (default) +- **Base URL**: `https://api.moonshot.cn/v1` + +### Custom Provider with Multiple Headers +Some providers might require multiple headers for authentication: + +```bash +curl --request POST \ + --url https://api.example.com/v1/embeddings \ + --header 'X-API-Key: ' \ + --header 'X-Client-ID: ' \ + --header 'Content-Type: application/json' \ + --data '{...}' +``` + +**Configuration in Blinko:** +- **Authentication Type**: Custom Headers +- **Custom Headers**: + - `X-API-Key`: Your API key + - `X-Client-ID`: Your client ID + +## Implementation Details + +### Backend Changes + +1. **Authentication Types** (`/server/aiServer/authTypes.ts`): + - Defines `AuthenticationConfig` interface + - Provides helper functions for building headers + - Includes validation logic + +2. **Enhanced Providers**: + - `EmbeddingProvider`: Supports custom authentication for embedding models + - `LLMProvider`: Supports custom authentication for language models + +3. **API Updates**: + - Test connection functionality uses enhanced authentication + - Provider configuration stores auth config in JSON field + +### Frontend Changes + +1. **Authentication Configuration Component** (`/app/src/components/BlinkoSettings/AiSetting/AuthenticationConfig.tsx`): + - UI for selecting authentication type + - Dynamic form fields based on selected type + - Example configurations for common providers + +2. **Enhanced Provider Dialog** (`/app/src/components/BlinkoSettings/AiSetting/ProviderDialogContent.tsx`): + - Integrated authentication configuration + - Backward compatibility with legacy API key field + +3. **Store Updates** (`/app/src/store/aiSettingStore.tsx`): + - Enhanced `fetchProviderModels` function + - Support for flexible authentication when fetching model lists + +## Usage Instructions + +### Adding a Custom Provider with Enhanced Authentication + +1. **Navigate to AI Settings**: + - Go to Settings → AI Settings + - Click "Add Provider" + +2. **Select Custom Configuration**: + - Choose "Custom Configuration" option + - Enter basic provider information (name, base URL) + +3. **Configure Authentication**: + - Scroll to "Enhanced Authentication" section + - Select appropriate authentication type + - Fill in required fields: + - **Bearer Token**: API key and optional header name + - **API Key**: API key and header name + - **Custom Headers**: Add multiple headers as needed + - **No Auth**: No additional configuration needed + +4. **Test Connection**: + - Add a model with the correct capabilities + - Use "Test Connection" to verify configuration + - Fetch model list if available + +5. **Save Configuration**: + - Click "Create" to save the provider + - The provider will be available for use in AI features + +### Backward Compatibility + +The enhanced authentication system maintains backward compatibility with existing providers: + +- Existing providers continue to work with legacy API key configuration +- The system falls back to legacy authentication when enhanced config is not available +- Migration to enhanced authentication is optional but recommended + +## Technical Architecture + +### Authentication Flow + +1. **Configuration Storage**: + - Authentication config stored in `config.authConfig` field + - JSON format allows flexible schema evolution + +2. 
**Header Generation**: + - `buildAuthHeaders()` function creates headers from config + - Supports all authentication types with proper formatting + +3. **Request Interception**: + - Custom fetch functions inject authentication headers + - Maintains compatibility with AI SDK requirements + +4. **Error Handling**: + - Graceful fallback to legacy authentication + - Clear error messages for misconfiguration + +### Security Considerations + +- API keys and tokens are stored securely in database +- No sensitive information logged in console output +- Custom headers allow for any authentication scheme +- Input validation prevents header injection + +## Troubleshooting + +### Common Issues and Solutions + +1. **Authentication Failed**: + - Verify API key/token is correct + - Check header name matches provider requirements + - Ensure base URL is correct (no trailing slashes) + +2. **Model List Fetch Failed**: + - Check if provider supports `/models` endpoint + - Verify authentication headers are correct + - Try manual test with curl command + +3. **Test Connection Failed**: + - Ensure model capabilities match actual provider capabilities + - Check network connectivity to provider endpoint + - Review provider documentation for correct format + +### Debug Mode + +Enable debug logging to see actual HTTP requests: + +```typescript +// In provider configuration +{ + "auth": { + "type": "custom", + "customHeaders": { + "Authorization": "Bearer ", + "X-Debug": "true" + }, + "options": { + "includeContentType": true + } + } +} +``` + +## Future Enhancements + +Planned improvements to the authentication system: + +1. **OAuth2 Support**: Full OAuth2 flow implementation +2. **Certificate Authentication**: Support for client certificates +3. **Rate Limiting**: Built-in rate limiting for custom providers +4. **Retry Logic**: Automatic retry with exponential backoff +5. **Authentication Testing**: Dedicated authentication testing endpoint + +## Contributing + +When adding support for new providers: + +1. Add default authentication configuration to `DEFAULT_AUTH_CONFIGS` +2. Update provider templates in frontend constants +3. Add example configurations to documentation +4. Test with actual provider endpoints +5. Update translation files for new UI elements + +## Support + +For issues related to the enhanced authentication system: + +1. Check this documentation first +2. Review provider's API documentation +3. Test configuration with curl commands +4. Enable debug logging for detailed error information +5. 
Report issues with provider details and configuration \ No newline at end of file diff --git a/package.json b/package.json index fbf9a0670..4a3c70ac9 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "blinko-monorepo", - "version": "1.6.2", + "version": "1.6.4", "private": true, "packageManager": "bun@1.2.8", "workspaces": [ @@ -40,6 +40,9 @@ "node": ">=20.0.0" }, "dependencies": { + "@dnd-kit/core": "^6.3.1", + "@dnd-kit/sortable": "^10.0.0", + "@dnd-kit/utilities": "^3.2.2", "@tauri-apps/plugin-process": "^2.2.1", "@tauri-apps/plugin-updater": "^2.7.1", "dotenv": "^16.5.0", diff --git a/prisma/migrations/20250930000000_add_sort_order_to_notes/migration.sql b/prisma/migrations/20250930000000_add_sort_order_to_notes/migration.sql new file mode 100644 index 000000000..f1749584c --- /dev/null +++ b/prisma/migrations/20250930000000_add_sort_order_to_notes/migration.sql @@ -0,0 +1,2 @@ +-- AlterTable +ALTER TABLE "notes" ADD COLUMN "sortOrder" INTEGER NOT NULL DEFAULT 0; \ No newline at end of file diff --git a/prisma/schema.prisma b/prisma/schema.prisma index 52bfd41a3..38bac3f85 100644 --- a/prisma/schema.prisma +++ b/prisma/schema.prisma @@ -82,6 +82,7 @@ model notes { shareViewCount Int? @default(0) metadata Json? @db.Json accountId Int? + sortOrder Int @default(0) createdAt DateTime @default(now()) @db.Timestamptz(6) updatedAt DateTime @updatedAt @db.Timestamptz(6) attachments attachments[] diff --git a/server/aiServer/authTypes.ts b/server/aiServer/authTypes.ts new file mode 100644 index 000000000..fb5f9043a --- /dev/null +++ b/server/aiServer/authTypes.ts @@ -0,0 +1,145 @@ +/** + * Authentication configuration types for AI providers + * Supports multiple authentication methods including Bearer tokens, API keys, custom headers, and no authentication + */ + +export type AuthenticationType = 'bearer' | 'api-key' | 'custom' | 'none'; + +export interface AuthenticationConfig { + /** Type of authentication to use */ + type: AuthenticationType; + + /** API key value (used for bearer and api-key types) */ + apiKey?: string; + + /** Custom header name for api-key type (e.g., 'X-API-Key', 'api-key', 'Authorization') */ + headerName?: string; + + /** Custom headers object for custom authentication type */ + customHeaders?: { [key: string]: string }; + + /** Additional configuration options */ + options?: { + /** Whether to include content-type header */ + includeContentType?: boolean; + /** Custom user agent */ + userAgent?: string; + /** Additional query parameters */ + queryParams?: { [key: string]: string }; + }; +} + +export interface EnhancedProviderConfig { + /** Enhanced authentication configuration */ + auth?: AuthenticationConfig; + + /** Timeout configuration in milliseconds */ + timeout?: number; + + /** Retry configuration */ + retry?: { + attempts: number; + delay: number; + }; + + /** Provider-specific configuration */ + providerConfig?: any; +} + +/** + * Default authentication configurations for common providers + */ +export const DEFAULT_AUTH_CONFIGS: { [key: string]: AuthenticationConfig } = { + openai: { + type: 'bearer', + headerName: 'Authorization' + }, + anthropic: { + type: 'api-key', + headerName: 'x-api-key' + }, + azure: { + type: 'api-key', + headerName: 'api-key' + }, + google: { + type: 'api-key', + headerName: 'x-goog-api-key' + }, + voyageai: { + type: 'api-key', + headerName: 'Authorization' + }, + ollama: { + type: 'none' + }, + custom: { + type: 'bearer', + headerName: 'Authorization' + } +}; + +/** + * Validates authentication configuration 
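+ * – bearer and api-key require an apiKey, custom requires at least one header, none always passes. + * @example + * validateAuthConfig({ type: 'bearer', apiKey: 'sk-xxx' }) // true + * validateAuthConfig({ type: 'custom', customHeaders: {} }) // false (no headers) + * validateAuthConfig({ type: 'none' }) // true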
+ */ +export function validateAuthConfig(config: AuthenticationConfig): boolean { + if (!config || !config.type) { + return false; + } + + switch (config.type) { + case 'bearer': + case 'api-key': + return !!config.apiKey; + case 'custom': + return config.customHeaders && Object.keys(config.customHeaders).length > 0; + case 'none': + return true; + default: + return false; + } +} + +/** + * Builds headers object from authentication configuration + */ +export function buildAuthHeaders(config: AuthenticationConfig): { [key: string]: string } { + const headers: { [key: string]: string } = {}; + + if (!config || config.type === 'none') { + return headers; + } + + switch (config.type) { + case 'bearer': + if (config.apiKey) { + const headerName = config.headerName || 'Authorization'; + headers[headerName] = `Bearer ${config.apiKey}`; + } + break; + + case 'api-key': + if (config.apiKey && config.headerName) { + headers[config.headerName] = config.apiKey; + } + break; + + case 'custom': + if (config.customHeaders) { + Object.assign(headers, config.customHeaders); + } + break; + } + + // Add content-type if specified + if (config.options?.includeContentType) { + headers['Content-Type'] = 'application/json'; + } + + // Add user agent if specified + if (config.options?.userAgent) { + headers['User-Agent'] = config.options.userAgent; + } + + return headers; +} \ No newline at end of file diff --git a/server/aiServer/providers/EmbeddingProvider.ts b/server/aiServer/providers/EmbeddingProvider.ts index e4fad5962..29d7f27ec 100644 --- a/server/aiServer/providers/EmbeddingProvider.ts +++ b/server/aiServer/providers/EmbeddingProvider.ts @@ -4,6 +4,7 @@ import { createAzure } from '@ai-sdk/azure'; import { createVoyage } from 'voyage-ai-provider'; import { createOllama } from 'ollama-ai-provider'; import { BaseProvider } from './BaseProvider'; +import { AuthenticationConfig, buildAuthHeaders } from '../authTypes'; interface EmbeddingConfig { provider: string; @@ -11,6 +12,7 @@ interface EmbeddingConfig { baseURL?: any; modelKey: string; apiVersion?: any; + auth?: AuthenticationConfig; } export class EmbeddingProvider extends BaseProvider { @@ -18,11 +20,15 @@ export class EmbeddingProvider extends BaseProvider { async getEmbeddingModel(config: EmbeddingConfig): Promise| null> { await this.initializeFetch(); + // Use enhanced authentication if provided + const apiKey = config.auth?.apiKey || config.apiKey; + const baseURL = config.baseURL; + switch (config.provider.toLowerCase()) { case 'openai': return createOpenAI({ - apiKey: config.apiKey, - baseURL: config.baseURL || undefined, + apiKey: apiKey, + baseURL: baseURL || undefined, fetch: this.proxiedFetch }).textEmbeddingModel(config.modelKey); @@ -30,30 +36,58 @@ export class EmbeddingProvider extends BaseProvider { return null; case 'azure': return createAzure({ - apiKey: config.apiKey, - baseURL: config.baseURL || undefined, + apiKey: apiKey, + baseURL: baseURL || undefined, apiVersion: config.apiVersion || undefined, fetch: this.proxiedFetch }).textEmbeddingModel(config.modelKey); case 'voyageai': return createVoyage({ - apiKey: config.apiKey, + apiKey: apiKey, fetch: this.proxiedFetch }).textEmbeddingModel(config.modelKey); case 'ollama': return createOllama({ - baseURL: config.baseURL?.trim() || undefined, + baseURL: baseURL?.trim() || undefined, fetch: this.proxiedFetch }).textEmbeddingModel(config.modelKey); case 'custom': default: - // Default to OpenAI-compatible API + // Enhanced custom provider with flexible authentication + if (config.auth && 
config.auth.type !== 'none') { + // Create custom fetch with enhanced authentication + const customFetch = async (url: string, options: RequestInit = {}) => { + const authHeaders = buildAuthHeaders(config.auth!); + const headers = { + ...options.headers, + ...authHeaders + }; + + const fetchOptions = { + ...options, + headers + }; + + return this.proxiedFetch ? + this.proxiedFetch(url, fetchOptions) : + fetch(url, fetchOptions); + }; + + // Create OpenAI-compatible provider with custom authentication + return createOpenAI({ + apiKey: apiKey || 'dummy-key', // Required by AI SDK but won't be used in headers + baseURL: baseURL || undefined, + fetch: customFetch + }).textEmbeddingModel(config.modelKey); + } + + // Fallback to OpenAI-compatible API with basic auth return createOpenAI({ - apiKey: config.apiKey, - baseURL: config.baseURL || undefined, + apiKey: apiKey, + baseURL: baseURL || undefined, fetch: this.proxiedFetch }).textEmbeddingModel(config.modelKey); } diff --git a/server/aiServer/providers/LLMProvider.ts b/server/aiServer/providers/LLMProvider.ts index 054fc9f90..326ef14c2 100644 --- a/server/aiServer/providers/LLMProvider.ts +++ b/server/aiServer/providers/LLMProvider.ts @@ -8,6 +8,7 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider'; import { createXai } from '@ai-sdk/xai'; import { createAzure } from '@ai-sdk/azure'; import { BaseProvider } from './BaseProvider'; +import { AuthenticationConfig, buildAuthHeaders } from '../authTypes'; interface LLMConfig { provider: string; @@ -15,72 +16,107 @@ interface LLMConfig { baseURL?: any; modelKey: string; apiVersion?: any; + auth?: AuthenticationConfig; } export class LLMProvider extends BaseProvider { async getLanguageModel(config: LLMConfig): Promise { await this.ensureInitialized(); + + // Use enhanced authentication if provided + const apiKey = config.auth?.apiKey || config.apiKey; + const baseURL = config.baseURL; + switch (config.provider.toLowerCase()) { case 'openai': return createOpenAI({ - apiKey: config.apiKey, - baseURL: config.baseURL || undefined, + apiKey: apiKey, + baseURL: baseURL || undefined, fetch: this.proxiedFetch }).languageModel(config.modelKey); case 'anthropic': return createAnthropic({ - apiKey: config.apiKey, - baseURL: config.baseURL || undefined, + apiKey: apiKey, + baseURL: baseURL || undefined, fetch: this.proxiedFetch }).languageModel(config.modelKey); case 'gemini': case 'google': return createGoogleGenerativeAI({ - apiKey: config.apiKey, + apiKey: apiKey, fetch: this.proxiedFetch }).languageModel(config.modelKey); case 'ollama': return createOllama({ - baseURL: config.baseURL?.trim().replace(/\/api$/, '') + '/api' || undefined, + baseURL: baseURL?.trim().replace(/\/api$/, '') + '/api' || undefined, fetch: this.proxiedFetch }).languageModel(config.modelKey); case 'deepseek': return createDeepSeek({ - apiKey: config.apiKey, + apiKey: apiKey, fetch: this.proxiedFetch }).languageModel(config.modelKey); case 'openrouter': return createOpenRouter({ - apiKey: config.apiKey, + apiKey: apiKey, fetch: this.proxiedFetch }).languageModel(config.modelKey); case 'grok': case 'xai': return createXai({ - apiKey: config.apiKey, + apiKey: apiKey, fetch: this.proxiedFetch }).languageModel(config.modelKey); case 'azureopenai': case 'azure': return createAzure({ - apiKey: config.apiKey, - baseURL: config.baseURL || undefined, + apiKey: apiKey, + baseURL: baseURL || undefined, apiVersion: config.apiVersion || undefined, fetch: this.proxiedFetch }).languageModel(config.modelKey); case 'custom': default: + // 
Enhanced custom provider with flexible authentication + if (config.auth && config.auth.type !== 'none') { + // Create custom fetch with enhanced authentication + const customFetch = async (url: string, options: RequestInit = {}) => { + const authHeaders = buildAuthHeaders(config.auth!); + const headers = { + ...options.headers, + ...authHeaders + }; + + const fetchOptions = { + ...options, + headers + }; + + return this.proxiedFetch ? + this.proxiedFetch(url, fetchOptions) : + fetch(url, fetchOptions); + }; + + // Create OpenAI-compatible provider with custom authentication + return createOpenAI({ + apiKey: apiKey || 'dummy-key', // Required by AI SDK but won't be used in headers + baseURL: baseURL || undefined, + fetch: customFetch + }).languageModel(config.modelKey); + } + + // Fallback to OpenAI-compatible API with basic auth return createOpenAI({ - apiKey: config.apiKey, - baseURL: config.baseURL || undefined, + apiKey: apiKey, + baseURL: baseURL || undefined, fetch: this.proxiedFetch }).languageModel(config.modelKey); } diff --git a/server/routerExpress/file/upload.ts b/server/routerExpress/file/upload.ts index 7f9bd48c3..98ae6b582 100644 --- a/server/routerExpress/file/upload.ts +++ b/server/routerExpress/file/upload.ts @@ -67,6 +67,9 @@ router.options('/', cors({ */ router.post('/', async (req, res) => { try { + req.setTimeout(0); // 0 = no timeout + res.setTimeout(0); // 0 = no timeout + const token = await getTokenFromRequest(req); if (!token) { return res.status(401).json({ error: "Unauthorized" }); @@ -77,7 +80,9 @@ router.post('/', async (req, res) => { return res.status(400).json({ error: "Content type must be multipart/form-data" }); } - const bb = busboy({ headers: req.headers }); + const bb = busboy({ + headers: req.headers + }); let fileInfo: { stream: PassThrough | null, diff --git a/server/routerTrpc/ai.ts b/server/routerTrpc/ai.ts index 5389a0cc1..5fd7be399 100644 --- a/server/routerTrpc/ai.ts +++ b/server/routerTrpc/ai.ts @@ -298,7 +298,8 @@ export const aiRouter = router({ apiKey: provider.apiKey, baseURL: provider.baseURL, modelKey, - apiVersion: (provider.config as any)?.apiVersion + apiVersion: (provider.config as any)?.apiVersion, + auth: (provider.config as any)?.authConfig // Use enhanced auth config if available }); // Test simple generation @@ -323,7 +324,8 @@ export const aiRouter = router({ apiKey: provider.apiKey, baseURL: provider.baseURL, modelKey, - apiVersion: (provider.config as any)?.apiVersion + apiVersion: (provider.config as any)?.apiVersion, + auth: (provider.config as any)?.authConfig // Use enhanced auth config if available }); const { embed } = await import('ai'); diff --git a/server/routerTrpc/note.ts b/server/routerTrpc/note.ts index f1b59ae55..2a8cac5b6 100644 --- a/server/routerTrpc/note.ts +++ b/server/routerTrpc/note.ts @@ -195,7 +195,7 @@ export const noteRouter = router({ const notes = await prisma.notes.findMany({ where, - orderBy: [{ isTop: 'desc' }, timeOrderBy], + orderBy: [{ isTop: 'desc' }, { sortOrder: 'asc' }, timeOrderBy], skip: (page - 1) * size, take: size, include: { @@ -1766,6 +1766,36 @@ export const noteRouter = router({ internalShares: undefined, // Remove this field from the response }))); }), + updateNotesOrder: authProcedure + .meta({ openapi: { method: 'POST', path: '/v1/note/update-order', summary: 'Update notes order', protect: true, tags: ['Note'] } }) + .input( + z.object({ + updates: z.array( + z.object({ + id: z.number(), + sortOrder: z.number(), + }), + ), + }), + ) + .output(z.object({ success: z.boolean() 
})) + .mutation(async function ({ input, ctx }) { + const { updates } = input; + + await Promise.all( + updates.map(({ id, sortOrder }) => + prisma.notes.updateMany({ + where: { + id, + accountId: Number(ctx.id), + }, + data: { sortOrder }, + }), + ), + ); + + return { success: true }; + }), }); let insertNoteReference = async ({ fromNoteId, toNoteId, accountId }) => { diff --git a/shared/lib/prismaZodType.ts b/shared/lib/prismaZodType.ts index 51b543219..01b7b809d 100644 --- a/shared/lib/prismaZodType.ts +++ b/shared/lib/prismaZodType.ts @@ -78,6 +78,7 @@ export const notesSchema = z.object({ shareMaxView: z.number().nullable().optional(), shareViewCount: z.number().nullable().optional(), metadata: z.any(), + sortOrder: z.number().nullable().optional(), accountId: z.union([z.number().int(), z.null()]), createdAt: z.coerce.date(), updatedAt: z.coerce.date(),