Skip to content

Commit

Permalink
Introduce --special flag
Browse files Browse the repository at this point in the history
This flag may be used to print special tokens on the CLI.
  • Loading branch information
jart committed Jun 29, 2024
1 parent 7fd9101 commit 72fb8ca
Show file tree
Hide file tree
Showing 3 changed files with 7 additions and 7 deletions.
8 changes: 4 additions & 4 deletions llama.cpp/common.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -843,8 +843,8 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.interactive = true;
return true;
}
if (arg == "--interactive-specials") {
params.interactive_specials = true;
if (arg == "-sp" || arg == "--special") {
params.special = true;
return true;
}
if (arg == "--embedding") {
Expand Down Expand Up @@ -1363,8 +1363,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" -h, --help show this help message and exit\n");
printf(" --version show version and build info\n");
printf(" -i, --interactive run in interactive mode\n");
printf(" --interactive-specials allow special tokens in user text, in interactive mode\n");
printf(" --interactive-first run in interactive mode and wait for input right away\n");
printf(" -sp, --special special tokens output enabled\n");
printf(" -cnv, --conversation run in conversation mode (does not print special tokens and suffix/prefix)\n");
printf(" -ins, --instruct run in instruction mode (use with Alpaca models)\n");
printf(" -cml, --chatml run in chatml mode (use with ChatML-compatible models)\n");
Expand Down Expand Up @@ -2620,7 +2620,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
dump_string_yaml_multiline(stream, "in_suffix", params.input_prefix.c_str());
fprintf(stream, "instruct: %s # default: false\n", params.instruct ? "true" : "false");
fprintf(stream, "interactive: %s # default: false\n", params.interactive ? "true" : "false");
fprintf(stream, "interactive_specials: %s # default: false\n", params.interactive_specials ? "true" : "false");
fprintf(stream, "specials: %s # default: false\n", params.special ? "true" : "false");
fprintf(stream, "interactive_first: %s # default: false\n", params.interactive_first ? "true" : "false");
fprintf(stream, "keep: %d # default: 0\n", params.n_keep);
fprintf(stream, "logdir: %s # default: unset (no logging)\n", params.logdir.c_str());
Expand Down
2 changes: 1 addition & 1 deletion llama.cpp/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ struct gpt_params {
bool random_prompt = false; // do not randomize prompt if none provided
bool use_color = false; // use color to distinguish generations and inputs
bool interactive = false; // interactive mode
bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
bool special = false; // enable special token output
bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
bool chatml = false; // chatml mode (used for models trained on chatml syntax)
bool prompt_cache_all = false; // save user input and generations to prompt cache
Expand Down
4 changes: 2 additions & 2 deletions llama.cpp/main/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -815,7 +815,7 @@ int main(int argc, char ** argv) {
if (input_echo && display) {
for (auto id : embd) {
// [jart] print special tokens only when the --special flag was passed
const std::string token_str = llama_token_to_piece(ctx, id, false);
const std::string token_str = llama_token_to_piece(ctx, id, params.special);
printf("%s", token_str.c_str());

if (embd.size() > 1) {
Expand Down Expand Up @@ -958,7 +958,7 @@ int main(int argc, char ** argv) {
}

const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
const auto line_inp = ::llama_tokenize(ctx, buffer, false, params.interactive_specials);
const auto line_inp = ::llama_tokenize(ctx, buffer, false, params.conversation);
const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);

LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
Expand Down

0 comments on commit 72fb8ca

Please sign in to comment.