Commit faeb748

docs(vertex.md): fix doc
1 parent 12cbac7 · commit faeb748

2 files changed: +98 -0 lines changed

docs/my-website/docs/providers/vertex.md

Lines changed: 3 additions & 0 deletions
@@ -1125,6 +1125,9 @@ curl -X POST 'http://0.0.0.0:4000/chat/completions' \
 
 ```
 
+</TabItem>
+</Tabs>
+
 #### Calling provider api directly
 
 [**Go straight to provider**](../pass_through/vertex_ai.md#context-caching)
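
The hunk above closes the `<Tabs>`/`<TabItem>` block that wraps the proxy example ending in `curl -X POST 'http://0.0.0.0:4000/chat/completions'`. For readers following along, a minimal Python sketch of that same proxy request is shown below; the model alias and API key are placeholders for illustration, not values taken from this commit.

```python
import requests

# Hypothetical call against a locally running LiteLLM proxy, mirroring the
# curl command referenced in the hunk header above.
resp = requests.post(
    "http://0.0.0.0:4000/chat/completions",
    headers={"Authorization": "Bearer sk-1234"},  # placeholder proxy key
    json={
        "model": "gemini-1.5-pro",  # placeholder model alias configured on the proxy
        "messages": [{"role": "user", "content": "Hello from the LiteLLM proxy"}],
    },
    timeout=30,
)
print(resp.json())
```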

litellm/model_prices_and_context_window_backup.json

Lines changed: 95 additions & 0 deletions
@@ -12932,6 +12932,39 @@
         "supports_tool_choice": true,
         "supports_vision": true
     },
+    "gpt-5-pro": {
+        "input_cost_per_token": 1.5e-05,
+        "input_cost_per_token_batches": 7.5e-06,
+        "litellm_provider": "openai",
+        "max_input_tokens": 400000,
+        "max_output_tokens": 272000,
+        "max_tokens": 272000,
+        "mode": "responses",
+        "output_cost_per_token": 1.2e-04,
+        "output_cost_per_token_batches": 6e-05,
+        "supported_endpoints": [
+            "/v1/batch",
+            "/v1/responses"
+        ],
+        "supported_modalities": [
+            "text",
+            "image"
+        ],
+        "supported_output_modalities": [
+            "text"
+        ],
+        "supports_function_calling": true,
+        "supports_native_streaming": false,
+        "supports_parallel_function_calling": true,
+        "supports_pdf_input": true,
+        "supports_prompt_caching": true,
+        "supports_reasoning": true,
+        "supports_response_schema": true,
+        "supports_system_messages": true,
+        "supports_tool_choice": true,
+        "supports_vision": true,
+        "supports_web_search": true
+    },
     "gpt-5-codex": {
         "cache_read_input_token_cost": 1.25e-07,
         "input_cost_per_token": 1.25e-06,
@@ -13249,6 +13282,20 @@
             "/v1/images/generations"
         ]
     },
+    "gpt-image-1-mini": {
+        "cache_read_input_image_token_cost": 2.5e-07,
+        "cache_read_input_token_cost": 2e-07,
+        "input_cost_per_image_token": 2.5e-06,
+        "input_cost_per_token": 2e-06,
+        "litellm_provider": "openai",
+        "mode": "chat",
+        "output_cost_per_image_token": 8e-06,
+        "supported_endpoints": [
+            "/v1/images/generations",
+            "/v1/images/edits"
+        ],
+        "supports_vision": true
+    },
     "gpt-realtime": {
         "cache_creation_input_audio_token_cost": 4e-07,
         "cache_read_input_token_cost": 4e-07,
@@ -14683,6 +14730,54 @@
             "/v1/images/generations"
         ]
     },
+    "low/1024-x-1024/gpt-image-1-mini": {
+        "input_cost_per_image": 0.005,
+        "litellm_provider": "openai",
+        "mode": "image_generation",
+        "supported_endpoints": [
+            "/v1/images/generations"
+        ]
+    },
+    "low/1024-x-1536/gpt-image-1-mini": {
+        "input_cost_per_image": 0.006,
+        "litellm_provider": "openai",
+        "mode": "image_generation",
+        "supported_endpoints": [
+            "/v1/images/generations"
+        ]
+    },
+    "low/1536-x-1024/gpt-image-1-mini": {
+        "input_cost_per_image": 0.006,
+        "litellm_provider": "openai",
+        "mode": "image_generation",
+        "supported_endpoints": [
+            "/v1/images/generations"
+        ]
+    },
+    "medium/1024-x-1024/gpt-image-1-mini": {
+        "input_cost_per_image": 0.011,
+        "litellm_provider": "openai",
+        "mode": "image_generation",
+        "supported_endpoints": [
+            "/v1/images/generations"
+        ]
+    },
+    "medium/1024-x-1536/gpt-image-1-mini": {
+        "input_cost_per_image": 0.015,
+        "litellm_provider": "openai",
+        "mode": "image_generation",
+        "supported_endpoints": [
+            "/v1/images/generations"
+        ]
+    },
+    "medium/1536-x-1024/gpt-image-1-mini": {
+        "input_cost_per_image": 0.015,
+        "litellm_provider": "openai",
+        "mode": "image_generation",
+        "supported_endpoints": [
+            "/v1/images/generations"
+        ]
+    },
     "medlm-large": {
         "input_cost_per_character": 5e-06,
         "litellm_provider": "vertex_ai-language-models",
