Bases: ReasoningParser
 Identity reasoning parser.
 This parser does not attempt to parse or strip out reasoning tokens. It treats the entire model output as content and ignores reasoning.
  Source code in vllm/reasoning/identity_reasoning_parser.py
```python
class IdentityReasoningParser(ReasoningParser):
    """
    Identity reasoning parser.

    This parser does not attempt to parse or strip out reasoning tokens.
    It treats the entire model output as content and ignores reasoning.
    """

    def __init__(self, tokenizer: PreTrainedTokenizerBase, *args, **kwargs):
        super().__init__(tokenizer, *args, **kwargs)
        if not self.model_tokenizer:
            raise ValueError(
                "The model tokenizer must be passed to the ReasoningParser "
                "constructor during construction."
            )

    def is_reasoning_end(self, input_ids: list[int]) -> bool:
        # Always return True, since we never treat reasoning specially
        return True

    def extract_content_ids(self, input_ids: list[int]) -> list[int]:
        # Identity: return all tokens as content
        return input_ids

    def extract_reasoning_content_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
    ) -> DeltaMessage | None:
        # Just wrap delta_text as content, ignore reasoning
        if delta_text:
            return DeltaMessage(content=delta_text)
        return None

    def extract_reasoning_content(
        self, model_output: str, request: ChatCompletionRequest
    ) -> tuple[str | None, str | None]:
        # No reasoning separation: return None for reasoning_content,
        # and full model_output as content
        return None, model_output
```
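A minimal usage sketch (not part of the vLLM source): it assumes the module path shown above for the import, that `ChatCompletionRequest` lives in `vllm.entrypoints.openai.protocol`, and uses the `gpt2` tokenizer purely as a placeholder.

```python
# Minimal sketch (assumed import paths; adjust to your vLLM version).
from transformers import AutoTokenizer

from vllm.entrypoints.openai.protocol import ChatCompletionRequest
from vllm.reasoning.identity_reasoning_parser import IdentityReasoningParser

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder tokenizer
parser = IdentityReasoningParser(tokenizer)

request = ChatCompletionRequest(
    model="gpt2",
    messages=[{"role": "user", "content": "What is 2 + 2?"}],
)

# Even text wrapped in reasoning tags is passed through untouched:
# reasoning_content is always None and content is the full output.
reasoning, content = parser.extract_reasoning_content(
    "<think>two plus two</think>4", request
)
assert reasoning is None
assert content == "<think>two plus two</think>4"
```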
     
__init__(tokenizer: PreTrainedTokenizerBase, *args, **kwargs)
  Source code in vllm/reasoning/identity_reasoning_parser.py
```python
def __init__(self, tokenizer: PreTrainedTokenizerBase, *args, **kwargs):
    super().__init__(tokenizer, *args, **kwargs)
    if not self.model_tokenizer:
        raise ValueError(
            "The model tokenizer must be passed to the ReasoningParser "
            "constructor during construction."
        )
```
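The only behavior specific to this constructor is the tokenizer check. A sketch of the failure mode, assuming the base `ReasoningParser.__init__` accepts `None` and simply stores it as `model_tokenizer`:

```python
from vllm.reasoning.identity_reasoning_parser import IdentityReasoningParser

# Sketch: a falsy tokenizer is rejected at construction time.
try:
    IdentityReasoningParser(None)
except ValueError as err:
    print(err)  # "The model tokenizer must be passed to the ReasoningParser ..."
```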
extract_content_ids(input_ids: list[int]) -> list[int]

Source code in vllm/reasoning/identity_reasoning_parser.py
```python
def extract_content_ids(self, input_ids: list[int]) -> list[int]:
    # Identity: return all tokens as content
    return input_ids
```
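Since no token is ever classified as reasoning, this method is a pure pass-through. A quick sketch, continuing with the `parser` and `tokenizer` from the quickstart above:

```python
ids = tokenizer.encode("hello world")
assert parser.extract_content_ids(ids) == ids  # every id is content
```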
extract_reasoning_content(model_output: str, request: ChatCompletionRequest) -> tuple[str | None, str | None]

Source code in vllm/reasoning/identity_reasoning_parser.py
```python
def extract_reasoning_content(
    self, model_output: str, request: ChatCompletionRequest
) -> tuple[str | None, str | None]:
    # No reasoning separation: return None for reasoning_content,
    # and full model_output as content
    return None, model_output
```
extract_reasoning_content_streaming(
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None

Source code in vllm/reasoning/identity_reasoning_parser.py
```python
def extract_reasoning_content_streaming(
    self,
    previous_text: str,
    current_text: str,
    delta_text: str,
    previous_token_ids: Sequence[int],
    current_token_ids: Sequence[int],
    delta_token_ids: Sequence[int],
) -> DeltaMessage | None:
    # Just wrap delta_text as content, ignore reasoning
    if delta_text:
        return DeltaMessage(content=delta_text)
    return None
```
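In streaming mode, every non-empty `delta_text` is forwarded unchanged as a content delta, and empty deltas yield `None`. A sketch, again reusing `parser` from the quickstart; the token-id arguments are ignored by this implementation, so empty lists are passed:

```python
chunks = ["The answer", " is", " 4.", ""]
previous = ""
streamed = []
for delta in chunks:
    message = parser.extract_reasoning_content_streaming(
        previous_text=previous,
        current_text=previous + delta,
        delta_text=delta,
        previous_token_ids=[],
        current_token_ids=[],
        delta_token_ids=[],
    )
    previous += delta
    if message is not None:  # the empty final delta produces None
        streamed.append(message.content)

assert streamed == ["The answer", " is", " 4."]
```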
        
is_reasoning_end(input_ids: list[int]) -> bool

Source code in vllm/reasoning/identity_reasoning_parser.py
```python
def is_reasoning_end(self, input_ids: list[int]) -> bool:
    # Always return True, since we never treat reasoning specially
    return True
```
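Because reasoning is never tracked, the end-of-reasoning check is unconditionally true, so callers never wait on a reasoning boundary (reusing `parser` from the quickstart):

```python
assert parser.is_reasoning_end([]) is True          # no ids required
assert parser.is_reasoning_end([101, 102]) is True  # any ids at all
```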