diff --git a/develop-docs/sdk/telemetry/traces/modules/llm-monitoring.mdx b/develop-docs/sdk/telemetry/traces/modules/llm-monitoring.mdx
index ab9a434ea8cca..a2a9242273c0a 100644
--- a/develop-docs/sdk/telemetry/traces/modules/llm-monitoring.mdx
+++ b/develop-docs/sdk/telemetry/traces/modules/llm-monitoring.mdx
@@ -44,7 +44,7 @@ sentry.init(...)
openai = OpenAI()
-@ai_track(description="My AI pipeline")
+@ai_track("My AI pipeline")
def invoke_pipeline():
result = openai.chat.completions.create(
model="some-model", messages=[{"role": "system", "content": "hello"}]
diff --git a/docs/platforms/python/tracing/instrumentation/custom-instrumentation/index.mdx b/docs/platforms/python/tracing/instrumentation/custom-instrumentation/index.mdx
index c0eebc1db2af5..8858d234c52fa 100644
--- a/docs/platforms/python/tracing/instrumentation/custom-instrumentation/index.mdx
+++ b/docs/platforms/python/tracing/instrumentation/custom-instrumentation/index.mdx
@@ -52,10 +52,16 @@ def eat_slice(slice):
def eat_pizza(pizza):
with sentry_sdk.start_transaction(op="task", name="Eat Pizza"):
while pizza.slices > 0:
- with sentry_sdk.start_span(description="Eat Slice"):
+ with sentry_sdk.start_span(name="Eat Slice"):
eat_slice(pizza.slices.pop())
```
+
+
+The parameter `name` in `start_span()` used to be called `description`. `description` was deprecated in version 2.15.0 and will be removed in version 3.0.0; from 2.15.0 onward, use `name` instead.
+
+
+
### Using a Decorator
```python
@@ -88,10 +94,15 @@ def eat_slice(slice):
def eat_pizza(pizza):
with sentry_sdk.start_transaction(op="task", name="Eat Pizza"):
while pizza.slices > 0:
- span = sentry_sdk.start_span(description="Eat Slice")
+ span = sentry_sdk.start_span(name="Eat Slice")
eat_slice(pizza.slices.pop())
span.finish()
```
+
+
+The parameter `name` in `start_span()` used to be called `description`. `description` was deprecated in version 2.15.0 and will be removed in version 3.0.0; from 2.15.0 onward, use `name` instead.
+
+
When you create your span manually, make sure to call `span.finish()` after the block of code you want to wrap in a span to finish the span. If you do not finish the span it will not be sent to Sentry.
@@ -108,11 +119,18 @@ def chew():
...
def eat_slice(slice):
- with sentry_sdk.start_span(description="Eat Slice"):
- with sentry_sdk.start_span(description="Chew"):
+ with sentry_sdk.start_span(name="Eat Slice"):
+ with sentry_sdk.start_span(name="Chew"):
chew()
```
+
+
+The parameter `name` in `start_span()` used to be called `description`. `description` was deprecated in version 2.15.0 and will be removed in version 3.0.0; from 2.15.0 onward, use `name` instead.
+
+
+
+
### Using a Decorator
```python
@@ -136,15 +154,21 @@ def chew():
...
def eat_slice(slice):
- parent_span = sentry_sdk.start_span(description="Eat Slice")
+ parent_span = sentry_sdk.start_span(name="Eat Slice")
- child_span = parent_span.start_child(description="Chew")
+ child_span = parent_span.start_child(name="Chew")
chew()
child_span.finish()
parent_span.finish()
```
+
+
+The parameter `name` in `start_span()` and `start_child()` used to be called `description`. `description` was deprecated in version 2.15.0 and will be removed in version 3.0.0; from 2.15.0 onward, use `name` instead.
+
+
+
The parameters of `start_span()` and `start_child()` are the same. See the [API reference](https://getsentry.github.io/sentry-python/api.html#sentry_sdk.api.start_span) for more details.
When you create your span manually, make sure to call `span.finish()` after the block of code you want to wrap in a span to finish the span. If you do not finish the span it will not be sent to Sentry.
diff --git a/docs/platforms/python/tracing/instrumentation/custom-instrumentation/queues-module.mdx b/docs/platforms/python/tracing/instrumentation/custom-instrumentation/queues-module.mdx
index 6bffff837fde5..4eead99a718a5 100644
--- a/docs/platforms/python/tracing/instrumentation/custom-instrumentation/queues-module.mdx
+++ b/docs/platforms/python/tracing/instrumentation/custom-instrumentation/queues-module.mdx
@@ -48,7 +48,7 @@ with sentry_sdk.start_transaction(
# Create the span
with sentry_sdk.start_span(
op="queue.publish",
- description="queue_producer",
+ name="queue_producer",
) as span:
# Set span data
span.set_data("messaging.message.id", message_id)
@@ -118,7 +118,7 @@ with sentry_sdk.start_transaction(transaction):
# Create the span
with sentry_sdk.start_span(
op="queue.process",
- description="queue_consumer",
+ name="queue_consumer",
) as span:
# Set span data
span.set_data("messaging.message.id", message["message_id"])
diff --git a/docs/platforms/python/tracing/instrumentation/custom-instrumentation/requests-module.mdx b/docs/platforms/python/tracing/instrumentation/custom-instrumentation/requests-module.mdx
index 8a89fe96f0010..40a112652f046 100644
--- a/docs/platforms/python/tracing/instrumentation/custom-instrumentation/requests-module.mdx
+++ b/docs/platforms/python/tracing/instrumentation/custom-instrumentation/requests-module.mdx
@@ -22,7 +22,7 @@ import requests
def make_request(method, url):
span = sentry_sdk.start_span(
op="http.client",
- description="%s %s" % (method, url),
+ name="%s %s" % (method, url),
)
span.set_data("http.request.method", method)
diff --git a/docs/product/insights/llm-monitoring/getting-started/index.mdx b/docs/product/insights/llm-monitoring/getting-started/index.mdx
index 52e406bdb35dc..78979941355e8 100644
--- a/docs/product/insights/llm-monitoring/getting-started/index.mdx
+++ b/docs/product/insights/llm-monitoring/getting-started/index.mdx
@@ -50,25 +50,25 @@ from sentry_sdk.ai.monitoring import ai_track, record_token_usage
import sentry_sdk
import requests
-@ai_track(description="AI tool")
+@ai_track("AI tool")
def some_workload_function(**kwargs):
"""
This function is an example of calling arbitrary code with @ai_track so that it shows up in the Sentry trace
"""
time.sleep(5)
-@ai_track(description="LLM")
+@ai_track("LLM")
def some_llm_call():
"""
This function is an example of calling an LLM provider that isn't officially supported by Sentry.
"""
- with sentry_sdk.start_span(op="ai.chat_completions.create.examplecom", description="Example.com LLM") as span:
+ with sentry_sdk.start_span(op="ai.chat_completions.create.examplecom", name="Example.com LLM") as span:
result = requests.get('https://example.com/api/llm-chat?question=say+hello').json()
# this annotates the tokens used by the LLM so that they show up in the graphs in the dashboard
record_token_usage(span, total_tokens=result["usage"]["total_tokens"])
return result["text"]
-@ai_track(description="My AI pipeline")
+@ai_track("My AI pipeline")
def some_pipeline():
"""
The topmost level function with @ai_track gets the operation "ai.pipeline", which makes it show up
diff --git a/platform-includes/performance/add-spans-example/python.mdx b/platform-includes/performance/add-spans-example/python.mdx
index 9f0e3084780fe..b879ec1bdeaf3 100644
--- a/platform-includes/performance/add-spans-example/python.mdx
+++ b/platform-includes/performance/add-spans-example/python.mdx
@@ -9,7 +9,7 @@ import sentry_sdk
def process_item(item):
# omitted code...
- with sentry_sdk.start_span(op="http", description="GET /") as span:
+ with sentry_sdk.start_span(op="http", name="GET /") as span:
response = my_custom_http_library.request("GET", "/")
span.set_tag("http.status_code", response.status_code)
span.set_data("http.foobarsessionid", get_foobar_sessionid())