Skip to content

Commit 4a42158

Browse files
authored
feat(anthropic): add effort support (#34116)
1 parent 7ba3e80 commit 4a42158

File tree

7 files changed

+218
-18
lines changed

7 files changed

+218
-18
lines changed

libs/langchain/tests/unit_tests/chat_models/test_base.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -256,6 +256,7 @@ def test_configurable_with_default() -> None:
256256
"max_tokens": 64000,
257257
"temperature": None,
258258
"thinking": None,
259+
"effort": None,
259260
"top_k": None,
260261
"top_p": None,
261262
"default_request_timeout": None,

libs/langchain_v1/tests/unit_tests/chat_models/test_chat_models.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -251,6 +251,7 @@ def test_configurable_with_default() -> None:
251251
"bound": {
252252
"name": None,
253253
"disable_streaming": False,
254+
"effort": None,
254255
"model": "claude-sonnet-4-5-20250929",
255256
"mcp_servers": None,
256257
"max_tokens": 64000,

libs/partners/anthropic/langchain_anthropic/chat_models.py

Lines changed: 80 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -75,8 +75,18 @@
7575

7676

7777
def _get_default_model_profile(model_name: str) -> ModelProfile:
78-
default = _MODEL_PROFILES.get(model_name) or {}
79-
return default.copy()
78+
"""Get the default profile for a model.
79+
80+
Args:
81+
model_name: The model identifier.
82+
83+
Returns:
84+
The model profile dictionary, or an empty dict if not found.
85+
"""
86+
default = _MODEL_PROFILES.get(model_name)
87+
if default:
88+
return default.copy()
89+
return {}
8090

8191

8292
_MODEL_DEFAULT_MAX_OUTPUT_TOKENS: Final[dict[str, int]] = {
@@ -1056,6 +1066,29 @@ def get_weather(location: str) -> str:
10561066
Refer to the [Claude docs](https://platform.claude.com/docs/en/build-with-claude/extended-thinking#differences-in-thinking-across-model-versions)
10571067
for more info.
10581068
1069+
???+ example "Effort"
1070+
1071+
Certain Claude models support an [effort](https://platform.claude.com/docs/en/build-with-claude/effort)
1072+
feature, which controls how many tokens Claude uses when responding.
1073+
1074+
!!! example
1075+
1076+
```python hl_lines="6"
1077+
from langchain_anthropic import ChatAnthropic
1078+
1079+
model = ChatAnthropic(
1080+
model="claude-opus-4-5-20251101",
1081+
max_tokens=4096,
1082+
effort="medium", # Options: "high", "medium", "low"
1083+
)
1084+
1085+
response = model.invoke("Analyze the trade-offs between microservices and monolithic architectures")
1086+
print(response.content)
1087+
```
1088+
1089+
See the [Claude docs](https://platform.claude.com/docs/en/build-with-claude/effort)
1090+
for more detail on when to use different effort levels.
1091+
10591092
???+ example "Prompt caching"
10601093
10611094
Prompt caching reduces processing time and costs for repetitive tasks or prompts
@@ -1638,26 +1671,40 @@ class Joke(BaseModel):
16381671
e.g., `#!python {"type": "enabled", "budget_tokens": 10_000}`
16391672
"""
16401673

1674+
effort: Literal["high", "medium", "low"] | None = None
1675+
"""Control how many tokens Claude uses when responding.
1676+
1677+
This parameter will be merged into the `output_config` parameter when making
1678+
API calls.
1679+
1680+
Example: `effort="medium"`
1681+
1682+
!!! note
1683+
1684+
Setting `effort` to `'high'` produces exactly the same behavior as omitting the
1685+
parameter altogether.
1686+
1687+
!!! note "Model Support"
1688+
1689+
This feature is currently only supported by Claude Opus 4.5.
1690+
1691+
!!! note "Automatic beta header"
1692+
1693+
The required `effort-2025-11-24` beta header is
1694+
automatically appended to the request when using `effort`, so you
1695+
don't need to manually specify it in the `betas` parameter.
1696+
"""
1697+
16411698
mcp_servers: list[dict[str, Any]] | None = None
16421699
"""List of MCP servers to use for the request.
16431700
16441701
Example: `#!python mcp_servers=[{"type": "url", "url": "https://mcp.example.com/mcp",
16451702
"name": "example-mcp"}]`
1646-
1647-
!!! note
1648-
1649-
This feature requires the beta header `'mcp-client-2025-11-20'` to be set in
1650-
[`betas`][langchain_anthropic.chat_models.ChatAnthropic.betas].
16511703
"""
16521704

16531705
context_management: dict[str, Any] | None = None
16541706
"""Configuration for
16551707
[context management](https://platform.claude.com/docs/en/build-with-claude/context-editing).
1656-
1657-
!!! note
1658-
1659-
This feature requires the beta header `'context-management-2025-06-27'` to be
1660-
set in [`betas`][langchain_anthropic.chat_models.ChatAnthropic.betas].
16611708
"""
16621709

16631710
@property
@@ -1868,6 +1915,27 @@ def _get_request_payload(
18681915
if self.thinking is not None:
18691916
payload["thinking"] = self.thinking
18701917

1918+
# Handle output_config and effort parameter
1919+
# Priority: self.effort > payload output_config
1920+
output_config = payload.get("output_config", {})
1921+
output_config = output_config.copy() if isinstance(output_config, dict) else {}
1922+
1923+
if self.effort:
1924+
output_config["effort"] = self.effort
1925+
1926+
if output_config:
1927+
payload["output_config"] = output_config
1928+
1929+
# Auto-append required beta for effort
1930+
if "effort" in output_config:
1931+
required_beta = "effort-2025-11-24"
1932+
if payload["betas"]:
1933+
# Merge with existing betas
1934+
if required_beta not in payload["betas"]:
1935+
payload["betas"] = [*payload["betas"], required_beta]
1936+
else:
1937+
payload["betas"] = [required_beta]
1938+
18711939
if "response_format" in payload:
18721940
# response_format present when using agents.create_agent's ProviderStrategy
18731941
# ---

libs/partners/anthropic/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ authors = []
1212
version = "1.2.0"
1313
requires-python = ">=3.10.0,<4.0.0"
1414
dependencies = [
15-
"anthropic>=0.73.0,<1.0.0",
15+
"anthropic>=0.75.0,<1.0.0",
1616
"langchain-core>=1.1.0,<2.0.0",
1717
"pydantic>=2.7.4,<3.0.0",
1818
]

libs/partners/anthropic/tests/integration_tests/test_chat_models.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1152,6 +1152,30 @@ def test_structured_output_thinking_force_tool_use() -> None:
11521152
llm.invoke("Generate a username for Sally with green hair")
11531153

11541154

1155+
def test_effort_parameter() -> None:
1156+
"""Test that effort parameter can be passed without errors.
1157+
1158+
Currently only Claude Opus 4.5 supports this parameter.
1159+
"""
1160+
llm = ChatAnthropic(
1161+
model="claude-opus-4-5-20251101",
1162+
effort="medium",
1163+
max_tokens=100,
1164+
)
1165+
1166+
result = llm.invoke("Say hello in one sentence")
1167+
1168+
# Verify we got a response
1169+
assert isinstance(result.content, str)
1170+
assert len(result.content) > 0
1171+
1172+
# Verify response metadata is present
1173+
assert "model_name" in result.response_metadata
1174+
assert result.usage_metadata is not None
1175+
assert result.usage_metadata["input_tokens"] > 0
1176+
assert result.usage_metadata["output_tokens"] > 0
1177+
1178+
11551179
def test_image_tool_calling() -> None:
11561180
"""Test tool calling with image inputs."""
11571181

libs/partners/anthropic/tests/unit_tests/test_chat_models.py

Lines changed: 107 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
from langchain_core.tools import BaseTool
1717
from langchain_core.tracers.base import BaseTracer
1818
from langchain_core.tracers.schemas import Run
19-
from pydantic import BaseModel, Field, SecretStr
19+
from pydantic import BaseModel, Field, SecretStr, ValidationError
2020
from pytest import CaptureFixture, MonkeyPatch
2121

2222
from langchain_anthropic import ChatAnthropic
@@ -1906,3 +1906,109 @@ async def test_model_profile_not_blocking() -> None:
19061906
with blockbuster_ctx():
19071907
model = ChatAnthropic(model="claude-sonnet-4-5")
19081908
_ = model.profile
1909+
1910+
1911+
def test_effort_parameter_validation() -> None:
1912+
"""Test that effort parameter is validated correctly.
1913+
1914+
The effort parameter is currently in beta and only supported by Claude Opus 4.5.
1915+
"""
1916+
# Valid effort values should work
1917+
model = ChatAnthropic(model="claude-opus-4-5-20251101", effort="high")
1918+
assert model.effort == "high"
1919+
1920+
model = ChatAnthropic(model="claude-opus-4-5-20251101", effort="medium")
1921+
assert model.effort == "medium"
1922+
1923+
model = ChatAnthropic(model="claude-opus-4-5-20251101", effort="low")
1924+
assert model.effort == "low"
1925+
1926+
# Invalid effort values should raise ValidationError
1927+
with pytest.raises(ValidationError, match="Input should be"):
1928+
ChatAnthropic(model="claude-opus-4-5-20251101", effort="invalid") # type: ignore[arg-type]
1929+
1930+
1931+
def test_effort_populates_betas() -> None:
1932+
"""Test that effort parameter auto-populates required betas."""
1933+
model = ChatAnthropic(model="claude-opus-4-5-20251101", effort="medium")
1934+
assert model.effort == "medium"
1935+
1936+
# Test that effort works with dated API ID
1937+
payload = model._get_request_payload("Test query")
1938+
assert payload["output_config"]["effort"] == "medium"
1939+
assert "effort-2025-11-24" in payload["betas"]
1940+
1941+
1942+
def test_effort_in_output_config() -> None:
1943+
"""Test that effort can be specified in `output_config`."""
1944+
# Test valid effort in output_config
1945+
model = ChatAnthropic(
1946+
model="claude-opus-4-5-20251101",
1947+
output_config={"effort": "low"},
1948+
)
1949+
assert model.model_kwargs["output_config"] == {"effort": "low"}
1950+
1951+
1952+
def test_effort_priority() -> None:
1953+
"""Test that top-level effort takes precedence over `output_config`."""
1954+
model = ChatAnthropic(
1955+
model="claude-opus-4-5-20251101",
1956+
effort="high",
1957+
output_config={"effort": "low"},
1958+
)
1959+
1960+
# Top-level effort should take precedence in the payload
1961+
payload = model._get_request_payload("Test query")
1962+
assert payload["output_config"]["effort"] == "high"
1963+
1964+
1965+
def test_effort_beta_header_auto_append() -> None:
1966+
"""Test that effort beta header is automatically appended."""
1967+
# Test with top-level effort parameter
1968+
model = ChatAnthropic(model="claude-opus-4-5-20251101", effort="medium")
1969+
payload = model._get_request_payload("Test query")
1970+
assert "effort-2025-11-24" in payload["betas"]
1971+
1972+
# Test with output_config
1973+
model = ChatAnthropic(
1974+
model="claude-opus-4-5-20251101",
1975+
output_config={"effort": "low"},
1976+
)
1977+
payload = model._get_request_payload("Test query")
1978+
assert "effort-2025-11-24" in payload["betas"]
1979+
1980+
# Test that beta is not duplicated if already present
1981+
model = ChatAnthropic(
1982+
model="claude-opus-4-5-20251101",
1983+
effort="high",
1984+
betas=["effort-2025-11-24"],
1985+
)
1986+
payload = model._get_request_payload("Test query")
1987+
assert payload["betas"].count("effort-2025-11-24") == 1
1988+
1989+
# Test combining effort with other betas
1990+
model = ChatAnthropic(
1991+
model="claude-opus-4-5-20251101",
1992+
effort="medium",
1993+
betas=["context-1m-2025-08-07"],
1994+
)
1995+
payload = model._get_request_payload("Test query")
1996+
assert set(payload["betas"]) == {
1997+
"context-1m-2025-08-07",
1998+
"effort-2025-11-24",
1999+
}
2000+
2001+
2002+
def test_output_config_without_effort() -> None:
2003+
"""Test that output_config can be used without effort."""
2004+
# output_config might have other fields in the future
2005+
model = ChatAnthropic(
2006+
model=MODEL_NAME,
2007+
output_config={"some_future_param": "value"},
2008+
)
2009+
payload = model._get_request_payload("Test query")
2010+
assert payload["output_config"] == {"some_future_param": "value"}
2011+
# No effort beta should be added
2012+
assert payload.get("betas") is None or "effort-2025-11-24" not in payload.get(
2013+
"betas", []
2014+
)

libs/partners/anthropic/uv.lock

Lines changed: 4 additions & 4 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments
 (0)