Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
e449135
add template for chat only
khai-meetkai Feb 20, 2025
5f454f3
add template for r1
khai-meetkai Feb 23, 2025
12f4e88
add template for r1-qwen-distilled
khai-meetkai Feb 25, 2025
09fcf04
add template for r1_distilled_llama
khai-meetkai Feb 25, 2025
0d42f10
fix template
khai-meetkai Feb 26, 2025
412fa60
Merge branch 'main' into r1_distill_qwen
khai-meetkai Feb 28, 2025
a77da3b
add prompt template for qwen2.5 text only
khai-meetkai Mar 3, 2025
76bf666
remove post_process
khai-meetkai Mar 3, 2025
0de866a
fix r1_distilled_qwen
khai-meetkai Mar 4, 2025
39df7c2
update streaming for qwen template
khai-meetkai Mar 5, 2025
47454ae
remove saving state
khai-meetkai Mar 6, 2025
99fed6d
prefill <think>
khai-meetkai Mar 10, 2025
e5a2c8d
increase default max length
khai-meetkai Mar 11, 2025
3f3e57c
add gemma3 template
khai-meetkai Mar 13, 2025
1dd0ad2
add gemma3 example
khai-meetkai Mar 13, 2025
2f761be
fix template
khai-meetkai Mar 13, 2025
9a2737b
comment gemma3
khai-meetkai Mar 26, 2025
b24c4a4
change version of flash-attn
khai-meetkai Mar 26, 2025
7cb89c9
Merge branch 'main' into r1_distill_qwen
khai-meetkai Apr 2, 2025
8cd9a1b
update requirements for training
khai-meetkai Apr 3, 2025
1efe410
remove flash-attn from pyproject.toml
khai-meetkai Apr 3, 2025
c366fdc
remove prefill <think> in prompt
khai-meetkai Apr 4, 2025
13e9335
remove imports to avoid errors
khai-meetkai Apr 4, 2025
de90233
upgrade vllm version
khai-meetkai Apr 4, 2025
b88d55c
fix prompt template for gemma3
khai-meetkai Apr 6, 2025
9129d9b
set default top_p as 0.95
khai-meetkai Apr 8, 2025
28b2ac9
add cogito
khai-meetkai Apr 13, 2025
bfd97e4
add files for cogito
khai-meetkai Apr 13, 2025
77f26bc
fix format
khai-meetkai Apr 16, 2025
45f0537
remove unused global
khai-meetkai Apr 16, 2025
3067d1c
fix syntax
khai-meetkai Apr 16, 2025
b867cfb
fix: only test prompt templates that we added to the list, instead of…
khai-meetkai Apr 16, 2025
faca47a
change to public repo
khai-meetkai Apr 16, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion functionary/openai_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ class ChatCompletionRequest(BaseModel):
function_call: Optional[Union[str, Function]] = None
tool_choice: Optional[Union[str, Tool]] = None
temperature: Optional[float] = 0.6
top_p: Optional[float] = 1.0
top_p: Optional[float] = 0.95
n: Optional[int] = 1
max_tokens: Optional[int] = 4096
stop: Optional[Union[str, List[str]]] = Field(default_factory=list)
Expand Down
15 changes: 13 additions & 2 deletions functionary/prompt_template/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,15 @@
from functionary.prompt_template.llava_prompt_template import LlavaLlama
from functionary.prompt_template.prompt_template_v1 import PromptTemplateV1
from functionary.prompt_template.prompt_template_v2 import PromptTemplateV2
from functionary.prompt_template.llama31_reasoning_prompt_template import Llama31ReasoningTemplate

from functionary.prompt_template.llama31_reasoning_prompt_template import (
Llama31ReasoningTemplate,
)
from functionary.prompt_template.r1_original_template import R1Template
from functionary.prompt_template.r1_distilled_qwen import R1DistilledQwen
from functionary.prompt_template.r1_distilled_llama import R1DistilledLlama
from functionary.prompt_template.qwen25_text_only_template import Qwen25TextOnlyPromptTemplate
from functionary.prompt_template.gemma3_prompt_template import Gemma3Template
from functionary.prompt_template.cogito_prompt_template import CogitoPromptTemplate

def get_available_prompt_template_versions() -> List[PromptTemplate]:
"""This function will get all the available prompt templates in the module.
Expand All @@ -30,6 +37,10 @@ def get_available_prompt_template_versions() -> List[PromptTemplate]:
# we don't use get_prompt_template or this will return the parent class
all_templates_obj.append(LlavaLlama.get_prompt_template())
all_templates_obj.append(Llama31ReasoningTemplate.get_prompt_template())
all_templates_obj.append(R1DistilledLlama.get_prompt_template())
all_templates_obj.append(R1DistilledQwen.get_prompt_template())
all_templates_obj.append(Gemma3Template.get_prompt_template())
all_templates_obj.append(CogitoPromptTemplate.get_prompt_template())
return all_templates_obj


Expand Down
3 changes: 1 addition & 2 deletions functionary/prompt_template/base_template.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,13 +69,12 @@ def initialize_grammar_sampling_gen_state(self, tool_choice: Optional[Any]) -> D
"""
raise NotImplementedError

@abstractmethod
def get_additional_tokens(self) -> List[str]:
"""return list of added tokens if using this template
Returns:
List[str]: list of tokens, each token is a string
"""
raise NotImplementedError
return []

@abstractmethod
def get_stop_tokens_for_generation(self) -> List[str]:
Expand Down
17 changes: 17 additions & 0 deletions functionary/prompt_template/cogito_prompt_template.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
from functionary.prompt_template.base_template import PromptTemplate
from functionary.prompt_template.qwen25_text_only_template import (
Qwen25TextOnlyPromptTemplate,
)
from typing import Any, Dict, List, Literal, Optional, Tuple, Union
from functionary.prompt_template import prompt_utils
from functionary.openai_types import Function, Tool


class CogitoPromptTemplate(Qwen25TextOnlyPromptTemplate):
    """Prompt template for Cogito models.

    Inherits all message pre/post-processing from Qwen25TextOnlyPromptTemplate;
    only the jinja chat template file differs.
    """

    version = "cogito"

    def get_chat_template_jinja(self) -> str:
        """Load and return the raw jinja chat template text for this version.

        Returns:
            str: contents of the template file.
        """
        # Derive the filename from `version` instead of hardcoding "cogito.txt",
        # matching the pattern used by Gemma3Template so the name and the
        # registered version cannot drift apart.
        path_prefix = "./functionary/prompt_template/jinja_templates/"
        with open(f"{path_prefix}{self.version}.txt", "r") as f:
            return f.read()
38 changes: 38 additions & 0 deletions functionary/prompt_template/gemma3_prompt_template.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
import json
from typing import Any, Dict, List, Literal, Optional, Tuple, Union

from functionary.openai_types import Function, Tool
from functionary.prompt_template import prompt_utils
from functionary.prompt_template.qwen25_text_only_template import (
Qwen25TextOnlyPromptTemplate,
)


class Gemma3Template(Qwen25TextOnlyPromptTemplate):
    """Prompt template for Gemma-3 style chats using <start_of_turn>/<end_of_turn> markers."""

    version = "gemma3"

    def get_additional_tokens(self) -> List[str]:
        """No extra tokens need to be added to the tokenizer for this template."""
        return []

    def get_assistant_prefixes(self) -> List[str]:
        """Prefix marking the start of an assistant ("model") turn."""
        return ["<start_of_turn>model\n"]

    def get_stop_tokens_for_generation(self) -> List[str]:
        """Token at which generation should stop."""
        return ["<end_of_turn>"]

    def pre_process_messages_before_inference(self, messages: List[Dict]) -> List[Dict]:
        """Order the tool results by the order of tool call ids

        Args:
            messages (List[Dict]): List of messages

        Returns:
            List[Dict]: List of messages
        """
        return prompt_utils.reorder_tool_messages_by_tool_call_ids(messages)

    def get_chat_template_jinja(self) -> str:
        """Load and return the raw jinja chat template text for this version."""
        template_dir = "./functionary/prompt_template/jinja_templates/"
        template_file = f"{template_dir}{self.version}.txt"
        with open(template_file, "r") as handle:
            return handle.read()
80 changes: 80 additions & 0 deletions functionary/prompt_template/jinja_templates/cogito.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
{# version=cogito #}{# ChatML-style template for Cogito: always injects "Enable deep thinking subroutine." into the system prompt; tool calls are wrapped in <tool_call> tags #}{%- if tools %}
{{- '<|im_start|>system\n' }}
{%- if messages and messages[0]['role'] == 'system' %}
{{- 'Enable deep thinking subroutine.\n\n' + messages[0]['content'] }}
{%- else %}
{{- 'Enable deep thinking subroutine.' }}
{%- endif %}
{%- set has_reasoning = tools | selectattr("type", "equalto", "reasoning") | list | length > 0 -%}
{%- if has_reasoning -%}
{%- set tools = tools | rejectattr("type", "equalto", "reasoning") | list -%}
{%- endif -%}
{{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query." }}
{%- if has_reasoning %}
{{- " Please generate reasoning before deciding which tools to use." }}
{%- else %}
{{- "" }}
{%- endif %}
{{- "\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}{# no tools: plain system turn, still enabling deep thinking #}
{%- if messages and messages[0]['role'] == 'system' %}
{{- '<|im_start|>system\nEnable deep thinking subroutine.\n\n' + messages[0]['content'] + '<|im_end|>\n' }}
{%- else %}
{{- '<|im_start|>system\nEnable deep thinking subroutine.<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- for message in messages %}
{%- if message.role == "user" %}
{%- if message['content'] -%}
{%- if message['content'] is string -%}
{{ '<|im_start|>user\n' + message['content'] }}
{%- else -%}
{{ '<|im_start|>user\n' }}
{%- for content in message['content'] -%}
{%- if content['type'] == 'text' -%}
{{ content['text'] }}
{%- else -%}
{{ '<|vision_start|><|image_pad|><|vision_end|>' }}
{%- endif -%}
{%- endfor -%}
{%- endif -%}
{{ '<|im_end|>\n' }}
{%- endif -%}
{%- elif (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
{{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}
{{- '<|im_start|>' + message.role }}
{%- if message.content %}
{{- '\n' + message.content }}
{%- endif %}
{%- for tool_call in message.tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '\n<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{{- tool_call.arguments }}
{{- '}\n</tool_call>' }}
{%- endfor %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}{# consecutive tool results are merged into one user turn #}
{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- message.content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %}
64 changes: 64 additions & 0 deletions functionary/prompt_template/jinja_templates/gemma3.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
{# version=gemma3 #}{{- bos_token -}}{%- if tools %}
{{- '<start_of_turn>system\nYou are capable of executing available function(s) if required.' }}
{%- set has_reasoning = tools | selectattr("type", "equalto", "reasoning") | list | length > 0 -%}
{%- if has_reasoning -%}
{%- set tools = tools | rejectattr("type", "equalto", "reasoning") | list -%}
{%- endif -%}
{{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query." }}
{%- if has_reasoning %}
{{- " Please generate reasoning before deciding which tools to use." }}
{%- else %}
{{- "" }}
{%- endif %}
{{- "\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><end_of_turn>\n" }}
{%- endif %}{# FIX: the no-tools branch previously also emitted the first system message here; the message loop below (which has no loop.first guard) rendered it a second time, duplicating the system turn. The loop alone renders it in the same position. #}
{%- for message in messages %}
{%- if (message.role == "user") or (message.role == "system") -%}
{{- '<start_of_turn>' + message.role + '\n' + message.content + '<end_of_turn>' + '\n' }}
{%- elif message.role == "assistant" %}
{{- '<start_of_turn>model' }}
{%- if message.content %}
{{- '\n' + message.content }}
{%- endif %}

{%- if "tool_calls" in message and message["tool_calls"] is not none -%}
{%- set tool_calls = message["tool_calls"] -%}
{%- else -%}
{%- set tool_calls = [] -%}
{%- endif -%}

{%- for tool_call in tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '\n<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{{- tool_call.arguments }}
{{- '}\n</tool_call>' }}
{%- endfor %}
{{- '<end_of_turn>\n' }}
{%- elif message.role == "tool" %}{# consecutive tool results are merged into one tool turn #}
{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
{{- '<start_of_turn>tool' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- message.content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<end_of_turn>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<start_of_turn>model\n' }}
{%- endif %}
80 changes: 80 additions & 0 deletions functionary/prompt_template/jinja_templates/qwen2.5-text-only.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
{# version=qwen2.5-text-only #}{# ChatML template for Qwen2.5 text-only models; tools of type "reasoning" are a sentinel that only toggles an extra instruction and are removed from the advertised tool list #}{%- if tools %}{# tools present: build one system turn containing the (or a default) system prompt plus <tools> signatures #}
{{- '<|im_start|>system\n' }}
{%- if messages and messages[0]['role'] == 'system' %}
{{- messages[0]['content'] }}
{%- else %}
{{- 'You are a helpful assistant.' }}
{%- endif %}{# detect and strip the "reasoning" pseudo-tool before listing tools #}
{%- set has_reasoning = tools | selectattr("type", "equalto", "reasoning") | list | length > 0 -%}
{%- if has_reasoning -%}
{%- set tools = tools | rejectattr("type", "equalto", "reasoning") | list -%}
{%- endif -%}
{{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query." }}
{%- if has_reasoning %}
{{- " Please generate reasoning before deciding which tools to use." }}
{%- else %}
{{- "" }}
{%- endif %}
{{- "\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
{%- for tool in tools %}
{{- "\n" }}
{{- tool | tojson }}
{%- endfor %}
{{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
{%- else %}
{%- if messages and messages[0]['role'] == 'system' %}
{{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
{%- else %}
{{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
{%- endif %}
{%- endif %}{# end system-prompt selection; the loop below skips the first system message (already rendered above) #}
{%- for message in messages %}
{%- if message.role == "user" %}
{%- if message['content'] -%}
{%- if message['content'] is string -%}
{{ '<|im_start|>user\n' + message['content'] }}
{%- else -%}
{{ '<|im_start|>user\n' }}
{%- for content in message['content'] -%}
{%- if content['type'] == 'text' -%}
{{ content['text'] }}
{%- else -%}
{{ '<|vision_start|><|image_pad|><|vision_end|>' }}
{%- endif -%}
{%- endfor -%}
{%- endif -%}
{{ '<|im_end|>\n' }}
{%- endif -%}
{%- elif (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
{{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
{%- elif message.role == "assistant" %}{# assistant turn that carries tool calls #}
{{- '<|im_start|>' + message.role }}
{%- if message.content %}
{{- '\n' + message.content }}
{%- endif %}
{%- for tool_call in message.tool_calls %}
{%- if tool_call.function is defined %}
{%- set tool_call = tool_call.function %}
{%- endif %}
{{- '\n<tool_call>\n{"name": "' }}
{{- tool_call.name }}
{{- '", "arguments": ' }}
{{- tool_call.arguments }}
{{- '}\n</tool_call>' }}
{%- endfor %}
{{- '<|im_end|>\n' }}
{%- elif message.role == "tool" %}{# consecutive tool results are merged into a single user turn #}
{%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
{{- '<|im_start|>user' }}
{%- endif %}
{{- '\n<tool_response>\n' }}
{{- message.content }}
{{- '\n</tool_response>' }}
{%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
{{- '<|im_end|>\n' }}
{%- endif %}
{%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
{{- '<|im_start|>assistant\n' }}
{%- endif %}
Loading
Loading