From b3a6018bbf792e42f06b15cc83350fd8c556f2e2 Mon Sep 17 00:00:00 2001
From: Judd
Date: Wed, 19 Feb 2025 14:43:42 +0800
Subject: [PATCH 1/2] fix building with MSVC + SDL2

---
 examples/command/command.cpp         | 1 +
 examples/lsp/lsp.cpp                 | 1 +
 examples/server/server.cpp           | 1 +
 examples/stream/stream.cpp           | 5 ++++-
 examples/talk-llama/talk-llama.cpp   | 1 +
 examples/wchess/libwchess/WChess.cpp | 1 +
 6 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/examples/command/command.cpp b/examples/command/command.cpp
index 11ed9ed6bd3..49f4034211e 100644
--- a/examples/command/command.cpp
+++ b/examples/command/command.cpp
@@ -21,6 +21,7 @@
 #include
 #include
 #include
+#include

 // command-line parameters
 struct whisper_params {
diff --git a/examples/lsp/lsp.cpp b/examples/lsp/lsp.cpp
index 803cd6d55da..5d65aced33b 100644
--- a/examples/lsp/lsp.cpp
+++ b/examples/lsp/lsp.cpp
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include

 using json = nlohmann::json;
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index beef57dde0a..21ae2594777 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include

 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
diff --git a/examples/stream/stream.cpp b/examples/stream/stream.cpp
index 190f68a2c3b..5f7387f78fe 100644
--- a/examples/stream/stream.cpp
+++ b/examples/stream/stream.cpp
@@ -12,7 +12,7 @@
 #include
 #include
 #include
-
+#include
 // command-line parameters
 struct whisper_params {
@@ -157,6 +157,7 @@ int main(int argc, char ** argv) {
     cparams.use_gpu    = params.use_gpu;
     cparams.flash_attn = params.flash_attn;

+    fprintf(stderr, "whisper_init_from_file_with_params ...\n");
     struct whisper_context * ctx = whisper_init_from_file_with_params(params.model.c_str(), cparams);

     std::vector<float> pcmf32 (n_samples_30s, 0.0f);
@@ -166,6 +167,8 @@ int main(int argc, char ** argv) {
     std::vector<whisper_token> prompt_tokens;

     // print some info about the processing
+    fprintf(stderr, "whisper_init_from_file_with_params ok\n");
+
     {
         fprintf(stderr, "\n");
         if (!whisper_is_multilingual(ctx)) {
diff --git a/examples/talk-llama/talk-llama.cpp b/examples/talk-llama/talk-llama.cpp
index dcdaec487cb..51658700ce5 100644
--- a/examples/talk-llama/talk-llama.cpp
+++ b/examples/talk-llama/talk-llama.cpp
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include

 static std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
     const llama_model * model = llama_get_model(ctx);
diff --git a/examples/wchess/libwchess/WChess.cpp b/examples/wchess/libwchess/WChess.cpp
index d9f066962ad..5da0345a610 100644
--- a/examples/wchess/libwchess/WChess.cpp
+++ b/examples/wchess/libwchess/WChess.cpp
@@ -3,6 +3,7 @@
 #include "grammar-parser.h"
 #include "common.h"
 #include
+#include

 WChess::WChess(whisper_context * ctx,
         const whisper_full_params & wparams,

From 00ddb10fe256fd23c502ddf00c24a09d202e0562 Mon Sep 17 00:00:00 2001
From: Judd
Date: Wed, 19 Feb 2025 16:40:51 +0800
Subject: [PATCH 2/2] select utf8 codepage on windows

---
 examples/command/command.cpp       | 9 +++++++++
 examples/stream/stream.cpp         | 9 +++++++++
 examples/talk-llama/talk-llama.cpp | 9 +++++++++
 3 files changed, 27 insertions(+)

diff --git a/examples/command/command.cpp b/examples/command/command.cpp
index 49f4034211e..49f8f755bb4 100644
--- a/examples/command/command.cpp
+++ b/examples/command/command.cpp
@@ -23,6 +23,11 @@
 #include
 #include

+#if defined(_WIN32)
+#define NOMINMAX
+#include <windows.h>
+#endif
+
 // command-line parameters
 struct whisper_params {
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
@@ -680,6 +685,10 @@ static int process_general_transcription(struct whisper_context * ctx, audio_asy
 }

 int main(int argc, char ** argv) {
+#if defined(_WIN32)
+    SetConsoleOutputCP(CP_UTF8);
+#endif
+
     whisper_params params;

     if (whisper_params_parse(argc, argv, params) == false) {
diff --git a/examples/stream/stream.cpp b/examples/stream/stream.cpp
index 5f7387f78fe..bd7e05c1d17 100644
--- a/examples/stream/stream.cpp
+++ b/examples/stream/stream.cpp
@@ -14,6 +14,11 @@
 #include
 #include

+#if defined(_WIN32)
+#define NOMINMAX
+#include <windows.h>
+#endif
+
 // command-line parameters
 struct whisper_params {
     int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
@@ -113,6 +118,10 @@ void whisper_print_usage(int /*argc*/, char ** argv, const whisper_params & para
 }

 int main(int argc, char ** argv) {
+#if defined(_WIN32)
+    SetConsoleOutputCP(CP_UTF8);
+#endif
+
     whisper_params params;

     if (whisper_params_parse(argc, argv, params) == false) {
diff --git a/examples/talk-llama/talk-llama.cpp b/examples/talk-llama/talk-llama.cpp
index 51658700ce5..d40667bb332 100644
--- a/examples/talk-llama/talk-llama.cpp
+++ b/examples/talk-llama/talk-llama.cpp
@@ -17,6 +17,11 @@
 #include
 #include

+#if defined(_WIN32)
+#define NOMINMAX
+#include <windows.h>
+#endif
+
 static std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
     const llama_model * model = llama_get_model(ctx);
     const llama_vocab * vocab = llama_model_get_vocab(model);
@@ -273,6 +278,10 @@ The transcript only includes text, it does not include markup like HTML and Mark
 {0}{4})";

 int main(int argc, char ** argv) {
+#if defined(_WIN32)
+    SetConsoleOutputCP(CP_UTF8);
+#endif
+
     whisper_params params;

     if (whisper_params_parse(argc, argv, params) == false) {
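
For reference, the standalone sketch below illustrates the console setup both patches rely on: defining NOMINMAX before including <windows.h> so its min/max macros do not clash with std::min, and calling SetConsoleOutputCP(CP_UTF8) at the top of main so UTF-8 text prints correctly in the Windows console. The sample program and its strings are illustrative only and are not part of whisper.cpp.

// Minimal sketch of the pattern used in the patches above; the program itself
// is illustrative and not taken from whisper.cpp.
#include <algorithm>
#include <cstdio>

#if defined(_WIN32)
#define NOMINMAX      // prevent <windows.h> from defining min/max macros
#include <windows.h>  // SetConsoleOutputCP, CP_UTF8
#endif

int main() {
#if defined(_WIN32)
    // Switch the console output codepage to UTF-8 before printing any text.
    SetConsoleOutputCP(CP_UTF8);
#endif

    // std::min keeps working because NOMINMAX suppressed the macro versions.
    const int n_threads = std::min(4, 8);

    // "caf\xc3\xa9" is the UTF-8 encoding of "café"; it only renders correctly
    // on a Windows console after the codepage switch above.
    printf("n_threads = %d, text = caf\xc3\xa9\n", n_threads);
    return 0;
}

On MSVC, <windows.h> defines min and max as macros by default, which would otherwise break the std::min call in whisper_params; that is why both patches define NOMINMAX immediately before including it.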