dataclass  ¶
 Stores cache hit statistics.
Source code in vllm/v1/metrics/stats.py
  
 Metrics for caching with a hit rate of the most recent N requests. Args: max_recent_requests: The number of most recent requests to aggregate over. Defaults to 1000.
Source code in vllm/v1/metrics/stats.py
  
 __init__(max_recent_requests: int = 1000) -> None
Source code in vllm/v1/metrics/stats.py
  
 observe(stats: BaseCacheStats)
Observe the prefix caching for a set of requests.
This function is called with information gathered when new requests are being scheduled and are looking for computed blocks.
When there are more than max_recent_requests requests, the oldest set of requests is removed from the metrics.
Parameters:
| Name | Type | Description | Default | 
|---|---|---|---|
| stats | BaseCacheStats | The prefix cache stats. | required | 
Source code in vllm/v1/metrics/stats.py
  dataclass  ¶
 Stats associated with a finished request.
Source code in vllm/v1/metrics/stats.py
  class-attribute instance-attribute  ¶
 mean_time_per_output_token: float = 0.0
 
 __init__(
    finish_reason: FinishReason,
    e2e_latency: float = 0.0,
    num_prompt_tokens: int = 0,
    num_generation_tokens: int = 0,
    max_tokens_param: int | None = None,
    queued_time: float = 0.0,
    prefill_time: float = 0.0,
    inference_time: float = 0.0,
    decode_time: float = 0.0,
    mean_time_per_output_token: float = 0.0,
) -> None
 
 Stats associated with a single set of EngineCoreOutputs.
Source code in vllm/v1/metrics/stats.py
 (Source lines 216–343 of vllm/v1/metrics/stats.py.)
 
  Source code in vllm/v1/metrics/stats.py
  
    
 update_from_events(
    req_id: str,
    events: list[EngineCoreEvent],
    is_prefilling: bool,
    req_stats: RequestStateStats,
    lora_stats: LoRAStats | None,
)
Source code in vllm/v1/metrics/stats.py
  
 update_from_finished_request(
    finish_reason: FinishReason,
    num_prompt_tokens: int,
    max_tokens_param: int | None,
    req_stats: RequestStateStats,
)
Source code in vllm/v1/metrics/stats.py
  
 update_from_output(
    output: EngineCoreOutput,
    engine_core_timestamp: float,
    is_prefilling: bool,
    prompt_len: int,
    req_stats: RequestStateStats,
    lora_stats: LoRAStats | None,
)
Source code in vllm/v1/metrics/stats.py
  
 Per-LoRA request state stats.
Source code in vllm/v1/metrics/stats.py
  
    
 add_request(req_state: RequestState)
 
 finish_request(req_state: RequestState)
 
 get_stats(req_state: RequestState) -> LoRAStats | None
Source code in vllm/v1/metrics/stats.py
  staticmethod  ¶
    staticmethod  ¶
    
 update_iteration_stats(
    iteration_stats: IterationStats | None,
)
Source code in vllm/v1/metrics/stats.py
  dataclass  ¶
   dataclass  ¶
  Bases: BaseCacheStats
Stores multi-modal cache hit statistics. - reset: Whether reset_mm_cache was invoked. - queries: Refers to the number of multi-modal data items that were queried.
Source code in vllm/v1/metrics/stats.py
   dataclass  ¶
  Bases: BaseCacheStats
Stores prefix cache hit statistics. - reset: Whether reset_prefix_cache was invoked. - queries: Refers to the number of tokens that were queried.
Source code in vllm/v1/metrics/stats.py
  class-attribute instance-attribute  ¶
 preempted_hits: int = 0
The number of cache hits for preempted requests.
 class-attribute instance-attribute  ¶
 preempted_queries: int = 0
The number of cache queries for preempted requests.
 class-attribute instance-attribute  ¶
 preempted_requests: int = 0
The number of previously preempted requests in this update.
 
 __init__(
    reset: bool = False,
    requests: int = 0,
    queries: int = 0,
    hits: int = 0,
    preempted_requests: int = 0,
    preempted_queries: int = 0,
    preempted_hits: int = 0,
) -> None
 
  Aggregate request information into the stats.
Source code in vllm/v1/metrics/stats.py
  dataclass  ¶
 Stats that need to be tracked across delta updates.
Source code in vllm/v1/metrics/stats.py
  dataclass  ¶
 Stats associated with the scheduler.
Source code in vllm/v1/metrics/stats.py
  class-attribute instance-attribute  ¶
 connector_prefix_cache_stats: PrefixCacheStats | None = None
 class-attribute instance-attribute  ¶
   class-attribute instance-attribute  ¶
 prefix_cache_stats: PrefixCacheStats = field(
    default_factory=PrefixCacheStats
)
 class-attribute instance-attribute  ¶
 spec_decoding_stats: SpecDecodingStats | None = None
 
 __init__(
    num_running_reqs: int = 0,
    num_waiting_reqs: int = 0,
    step_counter: int = 0,
    current_wave: int = 0,
    kv_cache_usage: float = 0.0,
    prefix_cache_stats: PrefixCacheStats = PrefixCacheStats(),
    connector_prefix_cache_stats: PrefixCacheStats
    | None = None,
    spec_decoding_stats: SpecDecodingStats | None = None,
    kv_connector_stats: dict[str, Any] | None = None,
    num_corrupted_reqs: int = 0,
) -> None