Skip to content

Commit 1cb9215

Browse files
committed
removing random prompt generation
1 parent e95e64b commit 1cb9215

File tree

1 file changed

+4
-4
lines changed

1 file changed

+4
-4
lines changed

chat.cpp

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -815,9 +815,9 @@ int main(int argc, char ** argv) {
815815
fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);
816816

817817
std::mt19937 rng(params.seed);
818-
if (params.prompt.empty()) {
819-
params.prompt = gpt_random_prompt(rng);
820-
}
818+
// if (params.prompt.empty()) {
819+
// params.prompt = gpt_random_prompt(rng);
820+
// }
821821

822822
// params.prompt = R"(// this function checks if the number n is prime
823823
//bool is_prime(int n) {)";
@@ -853,7 +853,7 @@ int main(int argc, char ** argv) {
853853
std::vector<float> logits;
854854

855855
// Add a space in front of the first character to match OG llama tokenizer behavior
856-
params.prompt.insert(0, 1, ' ');
856+
// params.prompt.insert(0, 1, ' ');
857857
// tokenize the prompt
858858
std::vector<gpt_vocab::id> embd_inp;// = ::llama_tokenize(vocab, params.prompt, true);
859859

0 commit comments

Comments (0)