@@ -49,8 +49,9 @@ def _v3_6_role_prefix(from_role, condition, role_start_token="", role_end_token=
                                                           role_end_token="<|end_header_id|>"),
                                       bos="<|begin_of_text|>",  # Llama 3 tokenizer needs manually specified BOS
                                       eot="<|eot_id|>",
-                                      inference_condition="GPT4",
-                                      message_prefix="\n\n")
+                                      inference_condition="GPT4 Correct",
+                                      message_prefix="\n\n"),
+        hf_chat_template="{% set loop_messages = messages %}{% for message in loop_messages %}{% if message['role'] in ['user', 'assistant'] %}{% set content = '<|start_header_id|>GPT4 Correct ' + message['role'].title() + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>GPT4 Correct Assistant<|end_header_id|>\n\n' }}{% endif %}",
     ),

     # OpenChat V3.2
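Standalone sketch (not part of this commit): the new Llama-3-style hf_chat_template can be rendered with plain jinja2 to inspect the prompt it produces. The template string is copied from the hunk above; the sample messages and the use of jinja2 instead of the Hugging Face tokenizer machinery are assumptions for illustration.

# Sketch: render the Llama-3-style template above with plain jinja2.
from jinja2 import Template

LLAMA3_TEMPLATE = (
    "{% set loop_messages = messages %}"
    "{% for message in loop_messages %}"
    "{% if message['role'] in ['user', 'assistant'] %}"
    "{% set content = '<|start_header_id|>GPT4 Correct ' + message['role'].title()"
    " + '<|end_header_id|>\n\n' + message['content'] | trim + '<|eot_id|>' %}"
    "{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}"
    "{{ content }}"
    "{% endif %}"
    "{% endfor %}"
    "{% if add_generation_prompt %}"
    "{{ '<|start_header_id|>GPT4 Correct Assistant<|end_header_id|>\n\n' }}"
    "{% endif %}"
)

messages = [
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi there!"},
    {"role": "user", "content": "How are you?"},
]

prompt = Template(LLAMA3_TEMPLATE).render(
    messages=messages,
    bos_token="<|begin_of_text|>",   # matches the bos= value in the config above
    add_generation_prompt=True,
)
print(prompt)
# Shape: <|begin_of_text|><|start_header_id|>GPT4 Correct User<|end_header_id|>,
# two newlines, the trimmed content, <|eot_id|>, then the next turn, ending with
# the assistant header as the generation prompt.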
@@ -83,7 +84,8 @@ def _v3_6_role_prefix(from_role, condition, role_start_token="", role_end_token=
         conversation_template=partial(ConversationTemplate,
                                       role_prefix=_v3_2_role_prefix,
                                       eot="<|end_of_turn|>",
-                                      inference_condition="GPT4 Correct")
+                                      inference_condition="GPT4 Correct"),
+        hf_chat_template="{{ bos_token }}{% for message in messages %}{{ 'GPT4 Correct ' + message['role'].title() + ': ' + message['content'] + '<|end_of_turn|>'}}{% endfor %}{% if add_generation_prompt %}{{ 'GPT4 Correct Assistant:' }}{% endif %}"
     ),

     "openchat_v3.2_gemma_new": ModelConfig(
@@ -100,7 +102,8 @@ def _v3_6_role_prefix(from_role, condition, role_start_token="", role_end_token=
         conversation_template=partial(ConversationTemplate,
                                       role_prefix=_v3_2_role_prefix,
                                       eot="<end_of_turn>",
-                                      inference_condition="GPT4 Correct")
+                                      inference_condition="GPT4 Correct"),
+        hf_chat_template="{{ bos_token }}{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}"
     ),

     ### Other models
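The Gemma-flavoured template additionally rejects system messages, enforces user/assistant alternation, and renames the assistant role to "model". A standalone sketch rendering it with plain jinja2; raise_exception is not a Jinja built-in (the Hugging Face chat-template environment injects it), so it is emulated here, and the "<bos>" token value is an assumption:

# Sketch: render the Gemma-style template above, emulating HF's raise_exception.
from jinja2 import Environment

def raise_exception(message):
    # HF's chat-template environment provides an equivalent helper.
    raise ValueError(message)

GEMMA_TEMPLATE = (
    "{{ bos_token }}"
    "{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}"
    "{% for message in messages %}"
    "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
    "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}"
    "{% if (message['role'] == 'assistant') %}{% set role = 'model' %}"
    "{% else %}{% set role = message['role'] %}{% endif %}"
    "{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}"
)

env = Environment()
env.globals["raise_exception"] = raise_exception

prompt = env.from_string(GEMMA_TEMPLATE).render(
    messages=[{"role": "user", "content": "Hello"},
              {"role": "assistant", "content": "Hi!"}],
    bos_token="<bos>",                 # assumed Gemma BOS token
    add_generation_prompt=True,
)
print(prompt)
# Prints <bos>, then <start_of_turn>user / <start_of_turn>model turns separated by
# newlines, ending with <start_of_turn>model as the generation prompt.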