@@ -191,6 +191,7 @@ def prepare_request(
     ) -> tuple[type[BaseModel] | None, dict[str, Any]]:
         """Prepare request kwargs for TOOLS mode.
 
+<<<<<<< HEAD
         Supports:
         - Regular single tool use (single model)
         - Parallel tool calling (Iterable[Union[Model1, Model2, ...]])
@@ -200,16 +201,21 @@ def prepare_request(
         If thinking is enabled, automatically adjusts tool_choice to "auto"
         (required by API constraint).
 
+=======
+>>>>>>> 13857221 (feat(v2/anthropic): implement provider with mode registry integration)
         Args:
             response_model: Pydantic model to extract (or None)
             kwargs: Original request kwargs
 
         Returns:
             Tuple of (response_model, modified_kwargs)
         """
+<<<<<<< HEAD
         from collections.abc import Iterable
         from typing import get_origin
 
+=======
+>>>>>>> 13857221 (feat(v2/anthropic): implement provider with mode registry integration)
         new_kwargs = kwargs.copy()
 
         # Extract and combine system messages BEFORE serializing message content
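The comment about extracting system messages refers to Anthropic's requirement that the system prompt be passed as a top-level `system` parameter rather than as a `{"role": "system"}` chat message. A minimal standalone sketch of that step; the `extract_system_messages` helper and the message dicts are illustrative, not the handler's actual code:

from typing import Any


def extract_system_messages(
    messages: list[dict[str, Any]],
) -> tuple[str, list[dict[str, Any]]]:
    # Anthropic takes the system prompt as a top-level `system` kwarg,
    # so system entries are combined here and removed from the message list.
    system_parts = [m["content"] for m in messages if m["role"] == "system"]
    chat = [m for m in messages if m["role"] != "system"]
    return "\n\n".join(system_parts), chat


system, chat = extract_system_messages(
    [
        {"role": "system", "content": "You are a data extractor."},
        {"role": "user", "content": "Extract the user from: John is 30."},
    ]
)
# system == "You are a data extractor."; chat keeps only the user message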
@@ -235,6 +241,7 @@ def prepare_request(
             # Just return with processed messages and extracted system
             return None, new_kwargs
 
+<<<<<<< HEAD
         # Detect if this is a parallel tools request (Iterable[Union[...]])
         is_parallel = False
         if get_origin(response_model) is Iterable:
@@ -283,6 +290,15 @@ def prepare_request(
283290 "type" : "tool" ,
284291 "name" : response_model .__name__ ,
285292 }
293+ == == == =
294+ # Generate tool schema
295+ tool_descriptions = generate_anthropic_schema (response_model )
296+ new_kwargs ["tools" ] = [tool_descriptions ]
297+ new_kwargs ["tool_choice" ] = {
298+ "type" : "tool" ,
299+ "name" : response_model .__name__ ,
300+ }
301+ > >> >> >> 13857221 (feat (v2 / anthropic ): implement provider with mode registry integration )
286302
287303 return response_model , new_kwargs
288304
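As a rough illustration of the branching described in the docstring above, the sketch below detects a parallel request via `get_origin`/`get_args` on an `Iterable[Union[...]]` annotation and shows the shape of the `tools`/`tool_choice` kwargs that result. The model classes, `build_tool`, and `prepare_tools` are made up for the example; `build_tool` stands in for `generate_anthropic_schema`, and the exact tool_choice values the real handler picks for parallel and thinking requests may differ:

from collections.abc import Iterable
from typing import Union, get_args, get_origin

from pydantic import BaseModel


class Weather(BaseModel):
    location: str


class News(BaseModel):
    topic: str


def build_tool(model: type[BaseModel]) -> dict:
    # Illustrative stand-in for generate_anthropic_schema()
    return {
        "name": model.__name__,
        "description": model.__doc__ or "",
        "input_schema": model.model_json_schema(),
    }


def prepare_tools(response_model, kwargs: dict) -> dict:
    new_kwargs = kwargs.copy()
    if get_origin(response_model) is Iterable:
        # Parallel tools: Iterable[Union[A, B]] -> (A, B), one tool per member,
        # and the model is left free to choose which tools to call
        members = get_args(get_args(response_model)[0])
        new_kwargs["tools"] = [build_tool(m) for m in members]
        new_kwargs["tool_choice"] = {"type": "auto"}
    else:
        # Single tool: force the model to call exactly this tool
        new_kwargs["tools"] = [build_tool(response_model)]
        new_kwargs["tool_choice"] = {"type": "tool", "name": response_model.__name__}
    if kwargs.get("thinking"):
        # Extended thinking only allows tool_choice "auto" (per the docstring above)
        new_kwargs["tool_choice"] = {"type": "auto"}
    return new_kwargs


prepare_tools(Iterable[Union[Weather, News]], {})  # parallel: two tools, auto choice
prepare_tools(Weather, {})                         # single: one tool, forced choice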
@@ -358,6 +374,7 @@ def parse_response(
         response_model: type[BaseModel],
         validation_context: dict[str, Any] | None = None,
         strict: bool | None = None,
+<<<<<<< HEAD
     ) -> BaseModel | Any:
         """Parse TOOLS mode response.
 
@@ -366,26 +383,39 @@ def parse_response(
         - Parallel tool use (returns generator of model instances)
         - Extended thinking responses (filters out thinking blocks)
 
+=======
+    ) -> BaseModel:
+        """Parse TOOLS mode response.
+
+>>>>>>> 13857221 (feat(v2/anthropic): implement provider with mode registry integration)
         Args:
             response: Anthropic API response
             response_model: Pydantic model to validate against
             validation_context: Optional context for validation
             strict: Optional strict validation mode
 
         Returns:
+<<<<<<< HEAD
             Validated Pydantic model instance or generator of instances for parallel
+=======
+            Validated Pydantic model instance
+>>>>>>> 13857221 (feat(v2/anthropic): implement provider with mode registry integration)
 
         Raises:
             IncompleteOutputException: If response hit max_tokens
             ValidationError: If response doesn't match model
         """
         from anthropic.types import Message
+<<<<<<< HEAD
         from collections.abc import Iterable
         from typing import get_origin
+=======
+>>>>>>> 13857221 (feat(v2/anthropic): implement provider with mode registry integration)
 
         if isinstance(response, Message) and response.stop_reason == "max_tokens":
             raise IncompleteOutputException(last_completion=response)
 
+<<<<<<< HEAD
         # Check if this is a parallel response (Iterable[Union[...]])
         is_parallel = get_origin(response_model) is Iterable
 
@@ -463,6 +493,27 @@ def prepare_request(
         Mode.warn_anthropic_reasoning_tools_deprecation()
         # Delegate to parent handler
         return super().prepare_request(response_model, kwargs)
+=======
+        # Extract tool calls
+        tool_calls = [
+            json.dumps(c.input) for c in response.content if c.type == "tool_use"
+        ]
+
+        # Validate exactly one tool call
+        tool_calls_validator = TypeAdapter(
+            Annotated[list[Any], Field(min_length=1, max_length=1)]
+        )
+        tool_call = tool_calls_validator.validate_python(tool_calls)[0]
+
+        parsed = response_model.model_validate_json(
+            tool_call, context=validation_context, strict=strict
+        )
+
+        # Attach raw response for access via create_with_completion
+        parsed._raw_response = response  # type: ignore
+
+        return parsed
+>>>>>>> 13857221 (feat(v2/anthropic): implement provider with mode registry integration)
 
 
 @register_mode_handler(Provider.ANTHROPIC, Mode.JSON)
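The extraction-and-validation path on the incoming side of the conflict above (the lines between ======= and >>>>>>>) can be exercised against plain stand-in objects. In the sketch below, `Block` and `FakeMessage` are test doubles rather than Anthropic SDK types, and `parse_single_tool` is a simplified illustration; it mirrors the `TypeAdapter(Annotated[list[Any], Field(min_length=1, max_length=1)])` pattern used to enforce exactly one tool call:

import json
from dataclasses import dataclass
from typing import Annotated, Any

from pydantic import BaseModel, Field, TypeAdapter


class User(BaseModel):
    name: str
    age: int


@dataclass
class Block:  # stand-in for an Anthropic content block
    type: str
    input: dict | None = None
    text: str | None = None


@dataclass
class FakeMessage:  # stand-in for anthropic.types.Message
    content: list[Block]
    stop_reason: str = "end_turn"


def parse_single_tool(response: FakeMessage, response_model: type[BaseModel]) -> BaseModel:
    # Keep only tool_use blocks; thinking/text blocks are ignored
    tool_calls = [json.dumps(b.input) for b in response.content if b.type == "tool_use"]

    # Enforce exactly one tool call, as in the handler above
    validator = TypeAdapter(Annotated[list[Any], Field(min_length=1, max_length=1)])
    (tool_call,) = validator.validate_python(tool_calls)

    return response_model.model_validate_json(tool_call)


msg = FakeMessage(
    content=[
        Block(type="thinking", text="reasoning..."),
        Block(type="tool_use", input={"name": "John", "age": 30}),
    ]
)
print(parse_single_tool(msg, User))  # name='John' age=30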