Fix prompt templating for lmstudio (#10)
- fix chat template for lmstudio jinja impl with limited syntax (3f4458055ad498d4faac3585243b8bae0c17fc3d)
- tokenizer_config.json +1 -1
tokenizer_config.json
CHANGED
|
@@ -194,7 +194,7 @@
|
|
| 194 |
"<|_unuse_missing_100275|>"
|
| 195 |
],
|
| 196 |
"bos_token": "<|endoftext|>",
|
| 197 |
-
"chat_template": "{% if messages[0]['role'] == 'system' %}{% set merged_content = messages[0]['content'] + ' ' + messages[1]['content'] %}{% set merged_messages = [{'role': messages[1]['role'], 'content': merged_content}] + messages[2:] %}{% else %}{% set merged_messages = messages %}{% endif %}{% for message in merged_messages %}{{('human' if message['role'] == 'user' else message['role']) + ': ' + (message['content'].split('<reasoning>')
|
| 198 |
"clean_up_tokenization_spaces": true,
|
| 199 |
"eos_token": "<|endoftext|>",
|
| 200 |
"model_max_length": 32768,
|
|
|
|
| 194 |
"<|_unuse_missing_100275|>"
|
| 195 |
],
|
| 196 |
"bos_token": "<|endoftext|>",
|
| 197 |
+
"chat_template": "{% if messages[0]['role'] == 'system' %}{% set merged_content = messages[0]['content'] + ' ' + messages[1]['content'] %}{% set merged_messages = [{'role': messages[1]['role'], 'content': merged_content}] + messages[2:] %}{% else %}{% set merged_messages = messages %}{% endif %}{% for message in merged_messages %}{{('human' if message['role'] == 'user' else message['role']) + ': ' + (message['content'].split('<reasoning>')|first + message['content'].split('</reasoning>')|last if message['role'] == 'assistant' and '</reasoning>' in message['content'] else message['content'])}}{% if (loop.last and add_generation_prompt and merged_messages[-1]['role'] != 'assistant') or not loop.last %}{{ ' <sep> ' }}{% endif %}{% endfor %}{% if add_generation_prompt and merged_messages[-1]['role'] != 'assistant' %}{{ 'assistant:' }}{% endif %}",
|
| 198 |
"clean_up_tokenization_spaces": true,
|
| 199 |
"eos_token": "<|endoftext|>",
|
| 200 |
"model_max_length": 32768,
|