Commit cc25ae3

chore: speedup initial import
1 parent 9223e75 commit cc25ae3

File tree

1 file changed: +179 -45 lines changed

src/llama_api_client/_client.py

Lines changed: 179 additions & 45 deletions
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 import os
-from typing import Any, Mapping
+from typing import TYPE_CHECKING, Any, Mapping
 from typing_extensions import Self, override
 
 import httpx
@@ -20,16 +20,22 @@
     not_given,
 )
 from ._utils import is_given, get_async_library
+from ._compat import cached_property
 from ._version import __version__
-from .resources import models, uploads, moderations
 from ._streaming import Stream as Stream, AsyncStream as AsyncStream
 from ._exceptions import APIStatusError, LlamaAPIClientError
 from ._base_client import (
     DEFAULT_MAX_RETRIES,
     SyncAPIClient,
     AsyncAPIClient,
 )
-from .resources.chat import chat
+
+if TYPE_CHECKING:
+    from .resources import chat, models, uploads, moderations
+    from .resources.models import ModelsResource, AsyncModelsResource
+    from .resources.uploads import UploadsResource, AsyncUploadsResource
+    from .resources.chat.chat import ChatResource, AsyncChatResource
+    from .resources.moderations import ModerationsResource, AsyncModerationsResource
 
 __all__ = [
     "Timeout",
@@ -44,13 +50,6 @@
 
 
 class LlamaAPIClient(SyncAPIClient):
-    chat: chat.ChatResource
-    models: models.ModelsResource
-    uploads: uploads.UploadsResource
-    moderations: moderations.ModerationsResource
-    with_raw_response: LlamaAPIClientWithRawResponse
-    with_streaming_response: LlamaAPIClientWithStreamedResponse
-
     # client options
     api_key: str
 
@@ -105,12 +104,37 @@ def __init__(
             _strict_response_validation=_strict_response_validation,
         )
 
-        self.chat = chat.ChatResource(self)
-        self.models = models.ModelsResource(self)
-        self.uploads = uploads.UploadsResource(self)
-        self.moderations = moderations.ModerationsResource(self)
-        self.with_raw_response = LlamaAPIClientWithRawResponse(self)
-        self.with_streaming_response = LlamaAPIClientWithStreamedResponse(self)
+    @cached_property
+    def chat(self) -> ChatResource:
+        from .resources.chat import ChatResource
+
+        return ChatResource(self)
+
+    @cached_property
+    def models(self) -> ModelsResource:
+        from .resources.models import ModelsResource
+
+        return ModelsResource(self)
+
+    @cached_property
+    def uploads(self) -> UploadsResource:
+        from .resources.uploads import UploadsResource
+
+        return UploadsResource(self)
+
+    @cached_property
+    def moderations(self) -> ModerationsResource:
+        from .resources.moderations import ModerationsResource
+
+        return ModerationsResource(self)
+
+    @cached_property
+    def with_raw_response(self) -> LlamaAPIClientWithRawResponse:
+        return LlamaAPIClientWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> LlamaAPIClientWithStreamedResponse:
+        return LlamaAPIClientWithStreamedResponse(self)
 
     @property
     @override
@@ -218,13 +242,6 @@ def _make_status_error(
 
 
 class AsyncLlamaAPIClient(AsyncAPIClient):
-    chat: chat.AsyncChatResource
-    models: models.AsyncModelsResource
-    uploads: uploads.AsyncUploadsResource
-    moderations: moderations.AsyncModerationsResource
-    with_raw_response: AsyncLlamaAPIClientWithRawResponse
-    with_streaming_response: AsyncLlamaAPIClientWithStreamedResponse
-
     # client options
     api_key: str
 
@@ -279,12 +296,37 @@ def __init__(
             _strict_response_validation=_strict_response_validation,
         )
 
-        self.chat = chat.AsyncChatResource(self)
-        self.models = models.AsyncModelsResource(self)
-        self.uploads = uploads.AsyncUploadsResource(self)
-        self.moderations = moderations.AsyncModerationsResource(self)
-        self.with_raw_response = AsyncLlamaAPIClientWithRawResponse(self)
-        self.with_streaming_response = AsyncLlamaAPIClientWithStreamedResponse(self)
+    @cached_property
+    def chat(self) -> AsyncChatResource:
+        from .resources.chat import AsyncChatResource
+
+        return AsyncChatResource(self)
+
+    @cached_property
+    def models(self) -> AsyncModelsResource:
+        from .resources.models import AsyncModelsResource
+
+        return AsyncModelsResource(self)
+
+    @cached_property
+    def uploads(self) -> AsyncUploadsResource:
+        from .resources.uploads import AsyncUploadsResource
+
+        return AsyncUploadsResource(self)
+
+    @cached_property
+    def moderations(self) -> AsyncModerationsResource:
+        from .resources.moderations import AsyncModerationsResource
+
+        return AsyncModerationsResource(self)
+
+    @cached_property
+    def with_raw_response(self) -> AsyncLlamaAPIClientWithRawResponse:
+        return AsyncLlamaAPIClientWithRawResponse(self)
+
+    @cached_property
+    def with_streaming_response(self) -> AsyncLlamaAPIClientWithStreamedResponse:
+        return AsyncLlamaAPIClientWithStreamedResponse(self)
 
     @property
     @override
@@ -392,35 +434,127 @@ def _make_status_error(
 
 
 class LlamaAPIClientWithRawResponse:
+    _client: LlamaAPIClient
+
     def __init__(self, client: LlamaAPIClient) -> None:
-        self.chat = chat.ChatResourceWithRawResponse(client.chat)
-        self.models = models.ModelsResourceWithRawResponse(client.models)
-        self.uploads = uploads.UploadsResourceWithRawResponse(client.uploads)
-        self.moderations = moderations.ModerationsResourceWithRawResponse(client.moderations)
+        self._client = client
+
+    @cached_property
+    def chat(self) -> chat.ChatResourceWithRawResponse:
+        from .resources.chat import ChatResourceWithRawResponse
+
+        return ChatResourceWithRawResponse(self._client.chat)
+
+    @cached_property
+    def models(self) -> models.ModelsResourceWithRawResponse:
+        from .resources.models import ModelsResourceWithRawResponse
+
+        return ModelsResourceWithRawResponse(self._client.models)
+
+    @cached_property
+    def uploads(self) -> uploads.UploadsResourceWithRawResponse:
+        from .resources.uploads import UploadsResourceWithRawResponse
+
+        return UploadsResourceWithRawResponse(self._client.uploads)
+
+    @cached_property
+    def moderations(self) -> moderations.ModerationsResourceWithRawResponse:
+        from .resources.moderations import ModerationsResourceWithRawResponse
+
+        return ModerationsResourceWithRawResponse(self._client.moderations)
 
 
 class AsyncLlamaAPIClientWithRawResponse:
+    _client: AsyncLlamaAPIClient
+
     def __init__(self, client: AsyncLlamaAPIClient) -> None:
-        self.chat = chat.AsyncChatResourceWithRawResponse(client.chat)
-        self.models = models.AsyncModelsResourceWithRawResponse(client.models)
-        self.uploads = uploads.AsyncUploadsResourceWithRawResponse(client.uploads)
-        self.moderations = moderations.AsyncModerationsResourceWithRawResponse(client.moderations)
+        self._client = client
+
+    @cached_property
+    def chat(self) -> chat.AsyncChatResourceWithRawResponse:
+        from .resources.chat import AsyncChatResourceWithRawResponse
+
+        return AsyncChatResourceWithRawResponse(self._client.chat)
+
+    @cached_property
+    def models(self) -> models.AsyncModelsResourceWithRawResponse:
+        from .resources.models import AsyncModelsResourceWithRawResponse
+
+        return AsyncModelsResourceWithRawResponse(self._client.models)
+
+    @cached_property
+    def uploads(self) -> uploads.AsyncUploadsResourceWithRawResponse:
+        from .resources.uploads import AsyncUploadsResourceWithRawResponse
+
+        return AsyncUploadsResourceWithRawResponse(self._client.uploads)
+
+    @cached_property
+    def moderations(self) -> moderations.AsyncModerationsResourceWithRawResponse:
+        from .resources.moderations import AsyncModerationsResourceWithRawResponse
+
+        return AsyncModerationsResourceWithRawResponse(self._client.moderations)
 
 
 class LlamaAPIClientWithStreamedResponse:
+    _client: LlamaAPIClient
+
     def __init__(self, client: LlamaAPIClient) -> None:
-        self.chat = chat.ChatResourceWithStreamingResponse(client.chat)
-        self.models = models.ModelsResourceWithStreamingResponse(client.models)
-        self.uploads = uploads.UploadsResourceWithStreamingResponse(client.uploads)
-        self.moderations = moderations.ModerationsResourceWithStreamingResponse(client.moderations)
+        self._client = client
+
+    @cached_property
+    def chat(self) -> chat.ChatResourceWithStreamingResponse:
+        from .resources.chat import ChatResourceWithStreamingResponse
+
+        return ChatResourceWithStreamingResponse(self._client.chat)
+
+    @cached_property
+    def models(self) -> models.ModelsResourceWithStreamingResponse:
+        from .resources.models import ModelsResourceWithStreamingResponse
+
+        return ModelsResourceWithStreamingResponse(self._client.models)
+
+    @cached_property
+    def uploads(self) -> uploads.UploadsResourceWithStreamingResponse:
+        from .resources.uploads import UploadsResourceWithStreamingResponse
+
+        return UploadsResourceWithStreamingResponse(self._client.uploads)
+
+    @cached_property
+    def moderations(self) -> moderations.ModerationsResourceWithStreamingResponse:
+        from .resources.moderations import ModerationsResourceWithStreamingResponse
+
+        return ModerationsResourceWithStreamingResponse(self._client.moderations)
 
 
 class AsyncLlamaAPIClientWithStreamedResponse:
+    _client: AsyncLlamaAPIClient
+
     def __init__(self, client: AsyncLlamaAPIClient) -> None:
-        self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat)
-        self.models = models.AsyncModelsResourceWithStreamingResponse(client.models)
-        self.uploads = uploads.AsyncUploadsResourceWithStreamingResponse(client.uploads)
-        self.moderations = moderations.AsyncModerationsResourceWithStreamingResponse(client.moderations)
+        self._client = client
+
+    @cached_property
+    def chat(self) -> chat.AsyncChatResourceWithStreamingResponse:
+        from .resources.chat import AsyncChatResourceWithStreamingResponse
+
+        return AsyncChatResourceWithStreamingResponse(self._client.chat)
+
+    @cached_property
+    def models(self) -> models.AsyncModelsResourceWithStreamingResponse:
+        from .resources.models import AsyncModelsResourceWithStreamingResponse
+
+        return AsyncModelsResourceWithStreamingResponse(self._client.models)
+
+    @cached_property
+    def uploads(self) -> uploads.AsyncUploadsResourceWithStreamingResponse:
+        from .resources.uploads import AsyncUploadsResourceWithStreamingResponse
+
+        return AsyncUploadsResourceWithStreamingResponse(self._client.uploads)
+
+    @cached_property
+    def moderations(self) -> moderations.AsyncModerationsResourceWithStreamingResponse:
+        from .resources.moderations import AsyncModerationsResourceWithStreamingResponse
+
+        return AsyncModerationsResourceWithStreamingResponse(self._client.moderations)
 
 
 Client = LlamaAPIClient
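
For readers skimming the diff: the commit replaces eager resource construction in __init__ with cached_property accessors that import their resource module only on first access, so `import llama_api_client` no longer pulls in every resource module up front; the TYPE_CHECKING block keeps the annotations resolvable for type checkers at zero runtime cost. Below is a minimal, self-contained sketch of the same pattern, using functools.cached_property in place of the SDK's vendored `._compat.cached_property`; the Client/pi names and the use of the stdlib decimal module as a stand-in "heavy" import are illustrative, not part of the SDK.

    from __future__ import annotations

    import sys
    from functools import cached_property
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Type-only import: visible to type checkers, skipped at runtime.
        from decimal import Decimal


    class Client:
        @cached_property
        def pi(self) -> Decimal:
            # Deferred import: the module is loaded on first attribute
            # access, and the constructed value is cached on the instance,
            # so later accesses pay neither cost again.
            from decimal import Decimal

            return Decimal("3.141592653589793")


    client = Client()
    assert "decimal" not in sys.modules  # nothing imported yet
    first = client.pi                    # triggers import + construction
    second = client.pi                   # served from the instance cache
    assert "decimal" in sys.modules and first is second

One way to check the effect of a change like this is to run `python -X importtime -c "import llama_api_client"` before and after; it prints a per-module breakdown of import time.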
