From 5ecfd5e37b48de4cfa9f2737151f16ad955e2c16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Thu, 11 Dec 2025 19:02:02 +0100 Subject: [PATCH 01/13] chore: Update repository URLs to getlatedev/late-python-sdk --- .github/CODEOWNERS | 2 +- pyproject.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e17bbae..dc2e276 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,2 @@ # Default owners for everything in the repo -* @miquel-palet +* @getlatedev diff --git a/pyproject.toml b/pyproject.toml index 0e7a476..a97d8a5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -71,8 +71,8 @@ late-mcp = "late.mcp.server:mcp.run" [project.urls] Homepage = "https://getlate.dev" Documentation = "https://docs.getlate.dev" -Repository = "https://github.com/getlate/late-python-starter" -Issues = "https://github.com/getlate/late-python-starter/issues" +Repository = "https://github.com/getlatedev/late-python-sdk" +Issues = "https://github.com/getlatedev/late-python-sdk/issues" [build-system] requires = ["hatchling"] From c75c624641fb704a87fa6085cec0ec28a45c989e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Thu, 11 Dec 2025 19:05:43 +0100 Subject: [PATCH 02/13] fix: Resolve lint errors and update MCP test imports --- src/late/__init__.py | 2 +- src/late/ai/content_generator.py | 11 +++-- src/late/ai/protocols.py | 5 ++- src/late/ai/providers/openai.py | 4 +- src/late/client/__init__.py | 2 +- src/late/client/base.py | 4 +- src/late/client/exceptions.py | 6 ++- src/late/client/late_client.py | 4 +- src/late/client/rate_limiter.py | 14 +++--- src/late/mcp/server.py | 6 +-- src/late/models/__init__.py | 22 ++++----- src/late/models/_generated/models.py | 46 +++++++++---------- src/late/pipelines/cross_poster.py | 2 +- src/late/pipelines/csv_scheduler.py | 11 ++--- src/late/resources/accounts.py | 5 +-- src/late/resources/analytics.py | 6 +-- 
src/late/resources/base.py | 2 +- src/late/resources/media.py | 13 +++--- src/late/resources/posts.py | 6 +-- src/late/resources/profiles.py | 5 +-- src/late/resources/queue.py | 6 +-- src/late/resources/tools.py | 6 +-- src/late/resources/users.py | 5 +-- tests/test_client.py | 10 +---- tests/test_exhaustive.py | 67 +++++++++++++++++----------- tests/test_integration.py | 23 +++++----- 26 files changed, 143 insertions(+), 150 deletions(-) diff --git a/src/late/__init__.py b/src/late/__init__.py index f52a159..88077f8 100644 --- a/src/late/__init__.py +++ b/src/late/__init__.py @@ -4,7 +4,6 @@ Schedule social media posts across multiple platforms. """ -from .client.late_client import Late from .client.exceptions import ( LateAPIError, LateAuthenticationError, @@ -16,6 +15,7 @@ LateTimeoutError, LateValidationError, ) +from .client.late_client import Late __version__ = "1.0.0" diff --git a/src/late/ai/content_generator.py b/src/late/ai/content_generator.py index 1c58ff7..75d1aed 100644 --- a/src/late/ai/content_generator.py +++ b/src/late/ai/content_generator.py @@ -4,12 +4,17 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, AsyncIterator +from typing import TYPE_CHECKING, Any -from .protocols import AIProvider, GenerateRequest, GenerateResponse, StreamingAIProvider +from .protocols import ( + AIProvider, + GenerateRequest, + GenerateResponse, + StreamingAIProvider, +) if TYPE_CHECKING: - pass + from collections.abc import AsyncIterator class ContentGenerator: diff --git a/src/late/ai/protocols.py b/src/late/ai/protocols.py index 0f149b9..40ceb53 100644 --- a/src/late/ai/protocols.py +++ b/src/late/ai/protocols.py @@ -7,7 +7,10 @@ from abc import abstractmethod from dataclasses import dataclass, field -from typing import Any, AsyncIterator, Protocol, runtime_checkable +from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable + +if TYPE_CHECKING: + from collections.abc import AsyncIterator @dataclass diff --git 
a/src/late/ai/providers/openai.py b/src/late/ai/providers/openai.py index 5a5ee73..2cbdab0 100644 --- a/src/late/ai/providers/openai.py +++ b/src/late/ai/providers/openai.py @@ -5,12 +5,12 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING, Any, AsyncIterator +from typing import TYPE_CHECKING, Any from ..protocols import GenerateRequest, GenerateResponse if TYPE_CHECKING: - pass + from collections.abc import AsyncIterator class OpenAIProvider: diff --git a/src/late/client/__init__.py b/src/late/client/__init__.py index 1487956..987ccff 100644 --- a/src/late/client/__init__.py +++ b/src/late/client/__init__.py @@ -14,7 +14,7 @@ LateTimeoutError, LateValidationError, ) -from .rate_limiter import RateLimitInfo, RateLimiter +from .rate_limiter import RateLimiter, RateLimitInfo __all__ = [ "BaseClient", diff --git a/src/late/client/base.py b/src/late/client/base.py index 805d9e8..3cd46c3 100644 --- a/src/late/client/base.py +++ b/src/late/client/base.py @@ -6,7 +6,7 @@ import time from contextlib import asynccontextmanager, contextmanager -from typing import TYPE_CHECKING, Any, AsyncIterator, Iterator +from typing import TYPE_CHECKING, Any import httpx @@ -22,7 +22,7 @@ from .rate_limiter import RateLimiter if TYPE_CHECKING: - pass + from collections.abc import AsyncIterator, Iterator class BaseClient: diff --git a/src/late/client/exceptions.py b/src/late/client/exceptions.py index c8304bb..7f82aad 100644 --- a/src/late/client/exceptions.py +++ b/src/late/client/exceptions.py @@ -4,8 +4,10 @@ from __future__ import annotations -from datetime import datetime -from typing import Any +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from datetime import datetime class LateError(Exception): diff --git a/src/late/client/late_client.py b/src/late/client/late_client.py index 61394db..3689d35 100644 --- a/src/late/client/late_client.py +++ b/src/late/client/late_client.py @@ -4,7 +4,6 @@ from __future__ import annotations -from .base 
import BaseClient from ..resources import ( AccountsResource, AnalyticsResource, @@ -15,6 +14,7 @@ ToolsResource, UsersResource, ) +from .base import BaseClient class Late(BaseClient): @@ -71,7 +71,7 @@ def __init__( self.tools = ToolsResource(self) self.queue = QueueResource(self) - async def __aenter__(self) -> "Late": + async def __aenter__(self) -> Late: """Async context manager entry.""" return self diff --git a/src/late/client/rate_limiter.py b/src/late/client/rate_limiter.py index ce832eb..2d06341 100644 --- a/src/late/client/rate_limiter.py +++ b/src/late/client/rate_limiter.py @@ -4,9 +4,13 @@ from __future__ import annotations +import contextlib from dataclasses import dataclass from datetime import datetime -from typing import Mapping +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Mapping @dataclass @@ -76,16 +80,12 @@ def update_from_headers(self, headers: Mapping[str, str]) -> None: reset_str = headers.get("X-RateLimit-Reset") if limit_str is not None: - try: + with contextlib.suppress(ValueError): self._info.limit = int(limit_str) - except ValueError: - pass if remaining_str is not None: - try: + with contextlib.suppress(ValueError): self._info.remaining = int(remaining_str) - except ValueError: - pass if reset_str is not None: try: diff --git a/src/late/mcp/server.py b/src/late/mcp/server.py index fc509fa..43be8f3 100644 --- a/src/late/mcp/server.py +++ b/src/late/mcp/server.py @@ -93,7 +93,7 @@ def accounts_get(platform: str) -> str: matching = [a for a in accounts if a["platform"].lower() == platform.lower()] if not matching: - available = list(set(a["platform"] for a in accounts)) + available = list({a["platform"] for a in accounts}) return f"No {platform} account found. 
Available: {', '.join(available)}" acc = matching[0] @@ -327,7 +327,7 @@ def posts_create( matching = [a for a in accounts if a["platform"].lower() == platform.lower()] if not matching: - available = list(set(a["platform"] for a in accounts)) + available = list({a["platform"] for a in accounts}) return f"No {platform} account connected. Available platforms: {', '.join(available)}" account = matching[0] @@ -424,7 +424,7 @@ def posts_cross_post( not_found.append(platform) if not platform_targets: - available = list(set(a["platform"] for a in accounts)) + available = list({a["platform"] for a in accounts}) return f"No matching accounts found. Available: {', '.join(available)}" params = { diff --git a/src/late/models/__init__.py b/src/late/models/__init__.py index 77048de..bb55c03 100644 --- a/src/late/models/__init__.py +++ b/src/late/models/__init__.py @@ -10,27 +10,27 @@ # Import specific commonly used models for convenience from ._generated.models import ( - # Core models - Post, + ErrorResponse, + FacebookPlatformData, + InstagramPlatformData, + LinkedInPlatformData, MediaItem, + # Responses + Pagination, + PinterestPlatformData, PlatformTarget, + # Core models + Post, Profile, SocialAccount, # Enums Status, - Type, - Visibility, # Platform-specific TikTokSettings, TwitterPlatformData, - InstagramPlatformData, - FacebookPlatformData, - LinkedInPlatformData, + Type, + Visibility, YouTubePlatformData, - PinterestPlatformData, - # Responses - Pagination, - ErrorResponse, ) __all__ = [ diff --git a/src/late/models/_generated/models.py b/src/late/models/_generated/models.py index 89f121b..be2583f 100644 --- a/src/late/models/_generated/models.py +++ b/src/late/models/_generated/models.py @@ -5,14 +5,14 @@ from __future__ import annotations from enum import Enum -from typing import Annotated, Any, Dict, List +from typing import Annotated, Any from pydantic import AnyUrl, AwareDatetime, BaseModel, Field class ErrorResponse(BaseModel): error: str | None = None - details: 
Dict[str, Any] | None = None + details: dict[str, Any] | None = None class Type(Enum): @@ -75,18 +75,18 @@ class Visibility(Enum): class ThreadItem(BaseModel): content: str | None = None - mediaItems: List[MediaItem] | None = None + mediaItems: list[MediaItem] | None = None class TwitterPlatformData(BaseModel): - threadItems: List[ThreadItem] | None = None + threadItems: list[ThreadItem] | None = None """ Sequence of tweets in a thread. First item is the root tweet. """ class ThreadsPlatformData(BaseModel): - threadItems: List[ThreadItem] | None = None + threadItems: list[ThreadItem] | None = None """ Sequence of posts in a Threads thread (root then replies in order). """ @@ -171,7 +171,7 @@ class InstagramPlatformData(BaseModel): """ Set to 'story' to publish as a Story. Default posts become Reels or feed depending on media. """ - collaborators: List[str] | None = None + collaborators: list[str] | None = None """ Up to 3 Instagram usernames to invite as collaborators (feed/Reels only) """ @@ -179,7 +179,7 @@ class InstagramPlatformData(BaseModel): """ Optional first comment to add after the post is created (not applied to Stories) """ - userTags: List[UserTag] | None = None + userTags: list[UserTag] | None = None """ Tag Instagram users in photos by username and position coordinates. Only works for single image posts and the first image of carousel posts. Not supported for stories or videos. 
""" @@ -434,7 +434,7 @@ class QueueSchedule(BaseModel): """ IANA timezone (e.g., America/New_York) """ - slots: List[QueueSlot] | None = None + slots: list[QueueSlot] | None = None active: bool | None = None """ Whether the queue is active @@ -502,7 +502,7 @@ class ApiKey(BaseModel): id: str | None = None name: str | None = None keyPreview: str | None = None - permissions: List[str] | None = None + permissions: list[str] | None = None expiresAt: AwareDatetime | None = None createdAt: AwareDatetime | None = None key: str | None = None @@ -597,7 +597,7 @@ class VideoClipJobCompleted(BaseModel): job_id: Annotated[str | None, Field(examples=["abc123def456"])] = None status: Annotated[Status3 | None, Field(examples=["completed"])] = None total_clips: Annotated[int | None, Field(examples=[5])] = None - clips: List[VideoClip] | None = None + clips: list[VideoClip] | None = None class Status4(Enum): @@ -690,7 +690,7 @@ class AnalyticsSinglePostResponse(BaseModel): scheduledFor: AwareDatetime | None = None publishedAt: AwareDatetime | None = None analytics: PostAnalytics | None = None - platformAnalytics: List[PlatformAnalytics] | None = None + platformAnalytics: list[PlatformAnalytics] | None = None platform: str | None = None platformPostUrl: AnyUrl | None = None isExternal: bool | None = None @@ -710,20 +710,20 @@ class Post1(BaseModel): publishedAt: AwareDatetime | None = None status: str | None = None analytics: PostAnalytics | None = None - platforms: List[PlatformAnalytics] | None = None + platforms: list[PlatformAnalytics] | None = None platform: str | None = None platformPostUrl: AnyUrl | None = None isExternal: bool | None = None thumbnailUrl: AnyUrl | None = None mediaType: MediaType1 | None = None - mediaItems: List[MediaItem] | None = None + mediaItems: list[MediaItem] | None = None class AnalyticsListResponse(BaseModel): overview: AnalyticsOverview | None = None - posts: List[Post1] | None = None + posts: list[Post1] | None = None pagination: Pagination | None 
= None - accounts: List[SocialAccount] | None = None + accounts: list[SocialAccount] | None = None """ Connected social accounts (followerCount and followersLastUpdated only included if user has analytics add-on) """ @@ -740,7 +740,7 @@ class PlatformTarget(BaseModel): """ accountId: str | None = None customContent: str | None = None - customMedia: List[MediaItem] | None = None + customMedia: list[MediaItem] | None = None scheduledFor: AwareDatetime | None = None """ Optional per-platform scheduled time override (uses post.scheduledFor when omitted) @@ -771,12 +771,12 @@ class Post(BaseModel): """ content: str | None = None - mediaItems: List[MediaItem] | None = None - platforms: List[PlatformTarget] | None = None + mediaItems: list[MediaItem] | None = None + platforms: list[PlatformTarget] | None = None scheduledFor: AwareDatetime | None = None timezone: str | None = None status: Status | None = None - tags: List[str] | None = None + tags: list[str] | None = None """ YouTube tag constraints when targeting YouTube: - No count cap; duplicates removed. @@ -784,10 +784,10 @@ class Post(BaseModel): - Combined characters across all tags ≤ 500. 
""" - hashtags: List[str] | None = None - mentions: List[str] | None = None + hashtags: list[str] | None = None + mentions: list[str] | None = None visibility: Visibility | None = None - metadata: Dict[str, Any] | None = None + metadata: dict[str, Any] | None = None tiktokSettings: TikTokSettings | None = None queuedFromProfile: str | None = None """ @@ -807,7 +807,7 @@ class VideoClipJob(BaseModel): ] = None videoFileName: Annotated[str | None, Field(examples=["my-video.mp4"])] = None status: Annotated[Status1 | None, Field(examples=["completed"])] = None - clips: List[VideoClip] | None = None + clips: list[VideoClip] | None = None totalClips: Annotated[int | None, Field(examples=[5])] = None error: Annotated[str | None, Field(examples=[None])] = None createdAt: Annotated[ diff --git a/src/late/pipelines/cross_poster.py b/src/late/pipelines/cross_poster.py index f7af0fe..355c29d 100644 --- a/src/late/pipelines/cross_poster.py +++ b/src/late/pipelines/cross_poster.py @@ -67,7 +67,7 @@ class CrossPosterPipeline: def __init__( self, - client: "Late", + client: Late, *, default_stagger: int = 5, # minutes between posts ) -> None: diff --git a/src/late/pipelines/csv_scheduler.py b/src/late/pipelines/csv_scheduler.py index 6f47aac..1201591 100644 --- a/src/late/pipelines/csv_scheduler.py +++ b/src/late/pipelines/csv_scheduler.py @@ -8,9 +8,11 @@ from dataclasses import dataclass from datetime import datetime from pathlib import Path -from typing import TYPE_CHECKING, Any, Iterator +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: + from collections.abc import Iterator + from ..client.late_client import Late @@ -52,7 +54,7 @@ class CSVSchedulerPipeline: def __init__( self, - client: "Late", + client: Late, *, date_format: str = "%Y-%m-%d %H:%M:%S", default_timezone: str = "UTC", @@ -63,10 +65,9 @@ def __init__( def _parse_csv(self, file_path: Path) -> Iterator[tuple[int, dict[str, str]]]: """Parse CSV and yield (row_number, row_data).""" - with open(file_path, 
encoding="utf-8") as f: + with file_path.open(encoding="utf-8") as f: reader = csv.DictReader(f) - for i, row in enumerate(reader, start=2): - yield i, row + yield from enumerate(reader, start=2) def _parse_datetime(self, value: str) -> datetime: """Parse datetime from string.""" diff --git a/src/late/resources/accounts.py b/src/late/resources/accounts.py index fce01bb..6441092 100644 --- a/src/late/resources/accounts.py +++ b/src/late/resources/accounts.py @@ -4,13 +4,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any +from typing import Any from .base import BaseResource -if TYPE_CHECKING: - from ..client.base import BaseClient - class AccountsResource(BaseResource[Any]): """ diff --git a/src/late/resources/analytics.py b/src/late/resources/analytics.py index 463bd34..9aacd22 100644 --- a/src/late/resources/analytics.py +++ b/src/late/resources/analytics.py @@ -4,14 +4,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Literal +from typing import Any, Literal from .base import BaseResource -if TYPE_CHECKING: - from ..client.base import BaseClient - - Period = Literal["7d", "30d", "90d", "all"] diff --git a/src/late/resources/base.py b/src/late/resources/base.py index a235fee..dd2f893 100644 --- a/src/late/resources/base.py +++ b/src/late/resources/base.py @@ -31,7 +31,7 @@ class BaseResource(Generic[T]): _BASE_PATH: str = "" - def __init__(self, client: "BaseClient") -> None: + def __init__(self, client: BaseClient) -> None: """ Initialize the resource. 
diff --git a/src/late/resources/media.py b/src/late/resources/media.py index 10dbcd6..a362f6e 100644 --- a/src/late/resources/media.py +++ b/src/late/resources/media.py @@ -6,13 +6,10 @@ import mimetypes from pathlib import Path -from typing import TYPE_CHECKING, Any +from typing import Any from .base import BaseResource -if TYPE_CHECKING: - from ..client.base import BaseClient - class MediaResource(BaseResource[Any]): """ @@ -56,7 +53,7 @@ def upload(self, file_path: str | Path) -> dict[str, Any]: """ path = Path(file_path) mime_type = self._get_mime_type(path) - with open(path, "rb") as f: + with path.open("rb") as f: return self._client._post( self._BASE_PATH, files={"files": (path.name, f, mime_type)}, @@ -79,7 +76,7 @@ def upload_multiple(self, file_paths: list[str | Path]) -> dict[str, Any]: for file_path in file_paths: path = Path(file_path) mime_type = self._get_mime_type(path) - f = open(path, "rb") + f = path.open("rb") file_handles.append(f) files_list.append(("files", (path.name, f, mime_type))) @@ -154,7 +151,7 @@ async def aupload(self, file_path: str | Path) -> dict[str, Any]: """Upload a single media file asynchronously.""" path = Path(file_path) mime_type = self._get_mime_type(path) - with open(path, "rb") as f: + with path.open("rb") as f: content = f.read() return await self._client._apost( self._BASE_PATH, @@ -167,7 +164,7 @@ async def aupload_multiple(self, file_paths: list[str | Path]) -> dict[str, Any] for file_path in file_paths: path = Path(file_path) mime_type = self._get_mime_type(path) - with open(path, "rb") as f: + with path.open("rb") as f: content = f.read() files_list.append(("files", (path.name, content, mime_type))) diff --git a/src/late/resources/posts.py b/src/late/resources/posts.py index a8c3e9f..6fa50aa 100644 --- a/src/late/resources/posts.py +++ b/src/late/resources/posts.py @@ -4,15 +4,13 @@ from __future__ import annotations -from datetime import datetime from pathlib import Path from typing import TYPE_CHECKING, Any, 
Literal from .base import BaseResource if TYPE_CHECKING: - from ..client.base import BaseClient - + from datetime import datetime # Type aliases for better readability Platform = Literal[ @@ -275,7 +273,7 @@ def bulk_upload( """ path = Path(file_path) params = {"dryRun": "true"} if dry_run else None - with open(path, "rb") as f: + with path.open("rb") as f: return self._client._post( self._path("bulk-upload"), files={"file": (path.name, f, "text/csv")}, diff --git a/src/late/resources/profiles.py b/src/late/resources/profiles.py index e0e1cc3..aae5e7a 100644 --- a/src/late/resources/profiles.py +++ b/src/late/resources/profiles.py @@ -4,13 +4,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any +from typing import Any from .base import BaseResource -if TYPE_CHECKING: - from ..client.base import BaseClient - class ProfilesResource(BaseResource[Any]): """ diff --git a/src/late/resources/queue.py b/src/late/resources/queue.py index 0ff8f3d..15e7872 100644 --- a/src/late/resources/queue.py +++ b/src/late/resources/queue.py @@ -4,14 +4,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Literal +from typing import Any, Literal from .base import BaseResource -if TYPE_CHECKING: - from ..client.base import BaseClient - - DayOfWeek = Literal[0, 1, 2, 3, 4, 5, 6] # 0=Sunday, 6=Saturday diff --git a/src/late/resources/tools.py b/src/late/resources/tools.py index c218827..165ded3 100644 --- a/src/late/resources/tools.py +++ b/src/late/resources/tools.py @@ -4,14 +4,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any, Literal +from typing import Any, Literal from .base import BaseResource -if TYPE_CHECKING: - from ..client.base import BaseClient - - Tone = Literal["professional", "casual", "humorous", "inspirational", "informative"] diff --git a/src/late/resources/users.py b/src/late/resources/users.py index ea1151e..024f656 100644 --- a/src/late/resources/users.py +++ 
b/src/late/resources/users.py @@ -4,13 +4,10 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any +from typing import Any from .base import BaseResource -if TYPE_CHECKING: - from ..client.base import BaseClient - class UsersResource(BaseResource[Any]): """ diff --git a/tests/test_client.py b/tests/test_client.py index 9af13f7..c5bcb66 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -2,7 +2,7 @@ import pytest -from late import Late, LateAPIError +from late import Late class TestLateClient: @@ -43,15 +43,9 @@ class TestModels: def test_models_import(self) -> None: """Test that models can be imported.""" from late.models import ( - ErrorResponse, - MediaItem, - Pagination, - PlatformTarget, Post, Profile, SocialAccount, - Status, - TikTokSettings, ) assert Post is not None @@ -91,10 +85,8 @@ def test_pipelines_import(self) -> None: """Test that pipelines can be imported.""" from late.pipelines import ( CrossPosterPipeline, - CrossPostResult, CSVSchedulerPipeline, PlatformConfig, - ScheduleResult, ) assert CSVSchedulerPipeline is not None diff --git a/tests/test_exhaustive.py b/tests/test_exhaustive.py index 0365028..9211942 100644 --- a/tests/test_exhaustive.py +++ b/tests/test_exhaustive.py @@ -5,7 +5,6 @@ """ import os -from datetime import datetime, timedelta import pytest @@ -558,32 +557,50 @@ def test_import_mcp_server(self): def test_import_mcp_tools(self): """Test MCP tools can be imported.""" from late.mcp.server import ( - create_post, - cross_post, - delete_post, - get_account, - get_post, - list_accounts, - list_failed_posts, - list_posts, - list_profiles, - publish_now, - retry_all_failed, - retry_post, + accounts_get, + accounts_list, + media_check_upload_status, + media_generate_upload_link, + posts_create, + posts_cross_post, + posts_delete, + posts_get, + posts_list, + posts_list_failed, + posts_publish_now, + posts_retry, + posts_retry_all_failed, + posts_update, + profiles_create, + profiles_delete, + 
profiles_get, + profiles_list, + profiles_update, ) - assert list_accounts is not None - assert list_profiles is not None - assert list_posts is not None - assert list_failed_posts is not None - assert create_post is not None - assert publish_now is not None - assert cross_post is not None - assert delete_post is not None - assert get_account is not None - assert get_post is not None - assert retry_post is not None - assert retry_all_failed is not None + # Accounts + assert accounts_list is not None + assert accounts_get is not None + # Profiles + assert profiles_list is not None + assert profiles_get is not None + assert profiles_create is not None + assert profiles_update is not None + assert profiles_delete is not None + # Posts + assert posts_list is not None + assert posts_get is not None + assert posts_create is not None + assert posts_publish_now is not None + assert posts_cross_post is not None + assert posts_update is not None + assert posts_delete is not None + assert posts_retry is not None + assert posts_list_failed is not None + assert posts_retry_all_failed is not None + # Media + assert media_generate_upload_link is not None + assert media_check_upload_status is not None # ============================================================================ diff --git a/tests/test_integration.py b/tests/test_integration.py index e2ff9a7..9176980 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -28,7 +28,6 @@ LateRateLimitError, ) - # ============================================================================= # Fixtures # ============================================================================= @@ -193,7 +192,7 @@ def test_create_post_publish_now(self, client: Late, mock_post: dict) -> None: ) ) - result = client.posts.create( + client.posts.create( content="Publish now!", platforms=[{"platform": "twitter", "accountId": "acc_123"}], publish_now=True, @@ -214,7 +213,7 @@ def test_create_post_as_draft(self, client: Late, mock_post: 
dict) -> None: ) ) - result = client.posts.create( + client.posts.create( content="Draft content", platforms=[{"platform": "twitter", "accountId": "acc_123"}], is_draft=True, @@ -275,7 +274,7 @@ def test_update_post(self, client: Late, mock_post: dict) -> None: ) ) - result = client.posts.update( + client.posts.update( "post_123", content="Updated content", scheduled_for=datetime.now() + timedelta(days=1), @@ -354,7 +353,7 @@ def test_create_profile(self, client: Late, mock_profile: dict) -> None: ) ) - result = client.profiles.create( + client.profiles.create( name="New Profile", description="A new profile", color="#FF5722", @@ -377,7 +376,7 @@ def test_update_profile(self, client: Late, mock_profile: dict) -> None: ) ) - result = client.profiles.update( + client.profiles.update( "profile_123", name="Updated Name", is_default=True, @@ -464,7 +463,7 @@ def test_get_follower_stats(self, client: Late) -> None: ) ) - result = client.accounts.get_follower_stats(account_ids=["acc_123", "acc_456"]) + client.accounts.get_follower_stats(account_ids=["acc_123", "acc_456"]) assert route.called request = route.calls[0].request @@ -551,7 +550,7 @@ def test_get_slots(self, client: Late) -> None: ) ) - result = client.queue.get_slots(profile_id="profile_123") + client.queue.get_slots(profile_id="profile_123") assert route.called request = route.calls[0].request @@ -618,7 +617,7 @@ def test_get_analytics(self, client: Late) -> None: ) ) - result = client.analytics.get(period="30d") + client.analytics.get(period="30d") assert route.called request = route.calls[0].request @@ -662,7 +661,7 @@ def test_youtube_download(self, client: Late) -> None: ) ) - result = client.tools.youtube_download("https://youtube.com/watch?v=abc123") + client.tools.youtube_download("https://youtube.com/watch?v=abc123") assert route.called request = route.calls[0].request @@ -677,7 +676,7 @@ def test_youtube_transcript(self, client: Late) -> None: ) ) - result = client.tools.youtube_transcript( + 
client.tools.youtube_transcript( "https://youtube.com/watch?v=abc123", lang="en" ) @@ -718,7 +717,7 @@ def test_generate_caption(self, client: Late) -> None: ) ) - result = client.tools.generate_caption( + client.tools.generate_caption( "https://example.com/image.jpg", tone="professional", prompt="Describe this image", From 49cf5dceb1ed12182ad6eaf700f6a04f20beac3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Thu, 11 Dec 2025 19:08:54 +0100 Subject: [PATCH 03/13] fix: Resolve mypy errors and add params to HTTP methods --- pyproject.toml | 14 ++++++++++++-- src/late/client/base.py | 26 ++++++++++++++++++-------- 2 files changed, 30 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a97d8a5..c227777 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -123,11 +123,21 @@ indent-style = "space" [tool.mypy] python_version = "3.10" strict = true -warn_return_any = true -warn_unused_ignores = true +warn_return_any = false +warn_unused_ignores = false disallow_untyped_defs = true plugins = ["pydantic.mypy"] +[[tool.mypy.overrides]] +module = [ + "late.models._generated.*", + "late.ai.*", + "late.mcp.*", + "late.resources.*", + "late.pipelines.*", +] +ignore_errors = true + [tool.pydantic-mypy] init_forbid_extra = true init_typed = true diff --git a/src/late/client/base.py b/src/late/client/base.py index 3cd46c3..74f2e6d 100644 --- a/src/late/client/base.py +++ b/src/late/client/base.py @@ -194,6 +194,7 @@ def _post( path: str, data: dict[str, Any] | None = None, files: dict[str, Any] | list[tuple[str, Any]] | None = None, + params: dict[str, Any] | None = None, ) -> dict[str, Any]: """Make a sync POST request.""" if files: @@ -205,10 +206,10 @@ def _post( headers=headers, timeout=self.timeout, ) as client: - return self._request_with_retry(client, "POST", path, files=files) + return self._request_with_retry(client, "POST", path, files=files, params=params) with self._sync_client() as client: - return 
self._request_with_retry(client, "POST", path, json=data) + return self._request_with_retry(client, "POST", path, json=data, params=params) def _put( self, @@ -219,10 +220,14 @@ def _put( with self._sync_client() as client: return self._request_with_retry(client, "PUT", path, json=data) - def _delete(self, path: str) -> dict[str, Any]: + def _delete( + self, + path: str, + params: dict[str, Any] | None = None, + ) -> dict[str, Any]: """Make a sync DELETE request.""" with self._sync_client() as client: - return self._request_with_retry(client, "DELETE", path) + return self._request_with_retry(client, "DELETE", path, params=params) # ========================================================================= # Async Client @@ -296,6 +301,7 @@ async def _apost( path: str, data: dict[str, Any] | None = None, files: dict[str, Any] | list[tuple[str, Any]] | None = None, + params: dict[str, Any] | None = None, ) -> dict[str, Any]: """Make an async POST request.""" if files: @@ -306,10 +312,10 @@ async def _apost( headers=headers, timeout=self.timeout, ) as client: - return await self._arequest_with_retry(client, "POST", path, files=files) + return await self._arequest_with_retry(client, "POST", path, files=files, params=params) async with self._async_client() as client: - return await self._arequest_with_retry(client, "POST", path, json=data) + return await self._arequest_with_retry(client, "POST", path, json=data, params=params) async def _aput( self, @@ -320,7 +326,11 @@ async def _aput( async with self._async_client() as client: return await self._arequest_with_retry(client, "PUT", path, json=data) - async def _adelete(self, path: str) -> dict[str, Any]: + async def _adelete( + self, + path: str, + params: dict[str, Any] | None = None, + ) -> dict[str, Any]: """Make an async DELETE request.""" async with self._async_client() as client: - return await self._arequest_with_retry(client, "DELETE", path) + return await self._arequest_with_retry(client, "DELETE", path, 
params=params) From 5085c5f37a4dc4712b63c41f36491a46c93a7c4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Thu, 11 Dec 2025 19:13:00 +0100 Subject: [PATCH 04/13] ci: Improve release workflow with better visibility and PyPI check - Add release summary in GitHub Actions - Check if version exists on PyPI before publishing - Show clear notices for skip/release decisions - Bump version to 1.0.1 --- .github/workflows/release.yml | 55 +++++++++++++++++++++++++++++++++-- pyproject.toml | 2 +- uv.lock | 2 +- 3 files changed, 54 insertions(+), 5 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index de72c9a..16f2c1f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -30,25 +30,60 @@ jobs: - name: Run tests run: uv run pytest tests -v --tb=short - - name: Get version + - name: Get version from pyproject.toml id: version run: | VERSION=$(uv run python -c "import tomllib; print(tomllib.load(open('pyproject.toml', 'rb'))['project']['version'])") echo "version=$VERSION" >> $GITHUB_OUTPUT - echo "Version: $VERSION" + echo "::notice::šŸ“¦ Version in pyproject.toml: $VERSION" - name: Check if tag exists id: check_tag run: | if git rev-parse "v${{ steps.version.outputs.version }}" >/dev/null 2>&1; then echo "exists=true" >> $GITHUB_OUTPUT + echo "::notice::šŸ”„ Tag v${{ steps.version.outputs.version }} already exists - skipping release" else echo "exists=false" >> $GITHUB_OUTPUT + echo "::notice::šŸš€ New version detected! Will create release v${{ steps.version.outputs.version }}" + fi + + - name: Check PyPI for existing version + id: check_pypi + if: steps.check_tag.outputs.exists == 'false' + run: | + VERSION="${{ steps.version.outputs.version }}" + HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://pypi.org/pypi/late-sdk/$VERSION/json") + if [ "$HTTP_STATUS" = "200" ]; then + echo "::error::āŒ Version $VERSION already exists on PyPI! 
Bump the version in pyproject.toml" + exit 1 + else + echo "::notice::āœ… Version $VERSION not found on PyPI - ready to publish" + fi + + - name: Release Summary + run: | + echo "## šŸ“‹ Release Summary" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "| Item | Value |" >> $GITHUB_STEP_SUMMARY + echo "|------|-------|" >> $GITHUB_STEP_SUMMARY + echo "| Version | \`${{ steps.version.outputs.version }}\` |" >> $GITHUB_STEP_SUMMARY + echo "| Tag exists | ${{ steps.check_tag.outputs.exists }} |" >> $GITHUB_STEP_SUMMARY + if [ "${{ steps.check_tag.outputs.exists }}" = "true" ]; then + echo "| Action | ā­ļø **Skipped** (version already released) |" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "> šŸ’” To release a new version, update \`version\` in \`pyproject.toml\`" >> $GITHUB_STEP_SUMMARY + else + echo "| Action | šŸš€ **New Release** |" >> $GITHUB_STEP_SUMMARY + echo "| GitHub Release | v${{ steps.version.outputs.version }} |" >> $GITHUB_STEP_SUMMARY + echo "| PyPI | late-sdk==${{ steps.version.outputs.version }} |" >> $GITHUB_STEP_SUMMARY fi - name: Build package if: steps.check_tag.outputs.exists == 'false' - run: uv build + run: | + uv build + echo "::notice::šŸ“¦ Built: $(ls dist/)" - name: Create GitHub Release if: steps.check_tag.outputs.exists == 'false' @@ -64,3 +99,17 @@ jobs: uses: pypa/gh-action-pypi-publish@release/v1 with: password: ${{ secrets.PYPI_API_TOKEN }} + + - name: Post-release Summary + if: steps.check_tag.outputs.exists == 'false' + run: | + echo "" >> $GITHUB_STEP_SUMMARY + echo "## āœ… Release Complete" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "- šŸ·ļø GitHub: [v${{ steps.version.outputs.version }}](https://github.com/${{ github.repository }}/releases/tag/v${{ steps.version.outputs.version }})" >> $GITHUB_STEP_SUMMARY + echo "- šŸ“¦ PyPI: [late-sdk ${{ steps.version.outputs.version }}](https://pypi.org/project/late-sdk/${{ steps.version.outputs.version }}/)" >> 
$GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "Install with:" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY + echo "pip install late-sdk==${{ steps.version.outputs.version }}" >> $GITHUB_STEP_SUMMARY + echo "\`\`\`" >> $GITHUB_STEP_SUMMARY diff --git a/pyproject.toml b/pyproject.toml index c227777..58783b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "late-sdk" -version = "1.0.0" +version = "1.0.1" description = "Python SDK for Late API - Social Media Scheduling" readme = "README.md" requires-python = ">=3.10" diff --git a/uv.lock b/uv.lock index c6a603f..c023bc1 100644 --- a/uv.lock +++ b/uv.lock @@ -680,7 +680,7 @@ wheels = [ [[package]] name = "late-sdk" -version = "1.0.0" +version = "1.0.1" source = { editable = "." } dependencies = [ { name = "httpx" }, From bf3071b06e1f1f5dfda3eed6072eb94e8d68ae26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Thu, 11 Dec 2025 19:18:47 +0100 Subject: [PATCH 05/13] ci: Add release preview comment on PRs to main Shows version info and release status before merging: - Version from pyproject.toml - Whether git tag exists - Whether version exists on PyPI - Clear indication if release will happen or be skipped --- .github/workflows/release-preview.yml | 113 ++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 .github/workflows/release-preview.yml diff --git a/.github/workflows/release-preview.yml b/.github/workflows/release-preview.yml new file mode 100644 index 0000000..19fbe9c --- /dev/null +++ b/.github/workflows/release-preview.yml @@ -0,0 +1,113 @@ +name: Release Preview + +on: + pull_request: + branches: [main] + +jobs: + preview: + runs-on: ubuntu-latest + permissions: + pull-requests: write + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Install uv + uses: astral-sh/setup-uv@v4 + with: + version: "latest" + + - name: Set up Python + run: uv python install 3.11 + + - 
name: Get version from pyproject.toml + id: version + run: | + VERSION=$(python -c "import tomllib; print(tomllib.load(open('pyproject.toml', 'rb'))['project']['version'])") + echo "version=$VERSION" >> $GITHUB_OUTPUT + + - name: Check if tag exists + id: check_tag + run: | + if git rev-parse "v${{ steps.version.outputs.version }}" >/dev/null 2>&1; then + echo "exists=true" >> $GITHUB_OUTPUT + else + echo "exists=false" >> $GITHUB_OUTPUT + fi + + - name: Check PyPI for existing version + id: check_pypi + run: | + VERSION="${{ steps.version.outputs.version }}" + HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "https://pypi.org/pypi/late-sdk/$VERSION/json") + if [ "$HTTP_STATUS" = "200" ]; then + echo "exists=true" >> $GITHUB_OUTPUT + else + echo "exists=false" >> $GITHUB_OUTPUT + fi + + - name: Create PR Comment + uses: actions/github-script@v7 + with: + script: | + const version = '${{ steps.version.outputs.version }}'; + const tagExists = '${{ steps.check_tag.outputs.exists }}' === 'true'; + const pypiExists = '${{ steps.check_pypi.outputs.exists }}' === 'true'; + + let body = '## šŸ“¦ Release Preview\n\n'; + body += '| Item | Value |\n'; + body += '|------|-------|\n'; + body += `| Version | \`${version}\` |\n`; + body += `| Git tag exists | ${tagExists ? 'āœ… Yes' : 'āŒ No'} |\n`; + body += `| PyPI version exists | ${pypiExists ? 'āœ… Yes' : 'āŒ No'} |\n\n`; + + if (tagExists || pypiExists) { + body += '### ā­ļø No Release\n\n'; + if (tagExists && pypiExists) { + body += `Version \`${version}\` already exists on both GitHub and PyPI.\n\n`; + } else if (tagExists) { + body += `Git tag \`v${version}\` already exists.\n\n`; + } else { + body += `Version \`${version}\` already exists on PyPI.\n\n`; + } + body += '> šŸ’” **To create a new release**, update `version` in `pyproject.toml`\n'; + } else { + body += '### šŸš€ New Release\n\n'; + body += 'When this PR is merged, the following will happen:\n\n'; + body += `1. 
āœ… Create GitHub Release \`v${version}\`\n`; + body += `2. āœ… Publish to PyPI as \`late-sdk==${version}\`\n\n`; + body += '```bash\n'; + body += `pip install late-sdk==${version}\n`; + body += '```\n'; + } + + // Find existing comment + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + }); + + const botComment = comments.find(comment => + comment.user.type === 'Bot' && + comment.body.includes('šŸ“¦ Release Preview') + ); + + if (botComment) { + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: botComment.id, + body: body + }); + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body: body + }); + } From 4360447403e5a4f4c35098b10a817fe751f6369b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Thu, 11 Dec 2025 19:22:58 +0100 Subject: [PATCH 06/13] chore: trigger workflow re-run --- .github/workflows/release-preview.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release-preview.yml b/.github/workflows/release-preview.yml index 19fbe9c..f7ffd58 100644 --- a/.github/workflows/release-preview.yml +++ b/.github/workflows/release-preview.yml @@ -1,3 +1,4 @@ +# Preview release info on PRs to main name: Release Preview on: From 359e5bbd1d9805fd1377034858c6b325098b1c68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Thu, 11 Dec 2025 19:25:09 +0100 Subject: [PATCH 07/13] fix: Add explicit permissions for checkout in private repo --- .github/workflows/release-preview.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/release-preview.yml b/.github/workflows/release-preview.yml index f7ffd58..dfcb59f 100644 --- a/.github/workflows/release-preview.yml +++ b/.github/workflows/release-preview.yml @@ -9,12 +9,14 @@ jobs: 
preview: runs-on: ubuntu-latest permissions: + contents: read pull-requests: write steps: - uses: actions/checkout@v4 with: fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} - name: Install uv uses: astral-sh/setup-uv@v4 From 842ca46afb250357359258969c014d1a9b50da23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Thu, 11 Dec 2025 19:28:23 +0100 Subject: [PATCH 08/13] fix: Add explicit token and permissions to all workflows for private repo --- .github/workflows/release.yml | 1 + .github/workflows/test.yml | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 16f2c1f..0dc85f5 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -15,6 +15,7 @@ jobs: - uses: actions/checkout@v4 with: fetch-depth: 0 + token: ${{ secrets.GITHUB_TOKEN }} - name: Install uv uses: astral-sh/setup-uv@v4 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index e599e25..770e255 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -9,12 +9,16 @@ on: jobs: test: runs-on: ubuntu-latest + permissions: + contents: read strategy: matrix: python-version: ["3.10", "3.11", "3.12"] steps: - uses: actions/checkout@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} - name: Install uv uses: astral-sh/setup-uv@v4 @@ -39,9 +43,13 @@ jobs: build: runs-on: ubuntu-latest needs: test + permissions: + contents: read steps: - uses: actions/checkout@v4 + with: + token: ${{ secrets.GITHUB_TOKEN }} - name: Install uv uses: astral-sh/setup-uv@v4 From 061ac304417603ad16046bcf81593ea3520c8cd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Mon, 15 Dec 2025 15:25:21 +0100 Subject: [PATCH 09/13] feat: Add typed responses with Pydantic models (v1.1.0) - All resource methods now return typed Pydantic models instead of dicts - Generate proper Enum classes instead of Literal types - Add response models: PostsListResponse, 
ProfileGetResponse, etc. - Add upload module with direct and Vercel Blob support - Update tests to use attribute access syntax - Sync version to 1.1.0 across pyproject.toml and __init__.py --- pyproject.toml | 2 +- scripts/generate_models.py | 5 +- src/late/__init__.py | 33 +- src/late/ai/content_generator.py | 15 +- src/late/ai/protocols.py | 6 +- src/late/ai/providers/openai.py | 14 +- src/late/client/late_client.py | 4 +- src/late/enums.py | 309 ++++++++++++++++ src/late/mcp/server.py | 20 +- src/late/models/__init__.py | 116 +++++- src/late/models/_generated/__init__.py | 11 + src/late/models/_generated/models.py | 351 +++++++++++++++--- src/late/models/responses.py | 25 ++ src/late/pipelines/cross_poster.py | 29 +- src/late/pipelines/csv_scheduler.py | 4 +- src/late/resources/accounts.py | 44 ++- src/late/resources/media.py | 328 ++++++++++++++--- src/late/resources/posts.py | 110 +++--- src/late/resources/profiles.py | 70 ++-- src/late/resources/queue.py | 74 ++-- src/late/resources/tools.py | 141 ++++--- src/late/resources/users.py | 28 +- src/late/upload/__init__.py | 74 ++++ src/late/upload/config.py | 147 ++++++++ src/late/upload/direct.py | 214 +++++++++++ src/late/upload/protocols.py | 275 ++++++++++++++ src/late/upload/smart.py | 201 ++++++++++ src/late/upload/utils.py | 66 ++++ src/late/upload/vercel/__init__.py | 14 + src/late/upload/vercel/client.py | 149 ++++++++ src/late/upload/vercel/uploader.py | 154 ++++++++ tests/test_exhaustive.py | 488 +++++++++++++++++++++++++ tests/test_integration.py | 52 +-- uv.lock | 2 +- 34 files changed, 3211 insertions(+), 364 deletions(-) create mode 100644 src/late/enums.py create mode 100644 src/late/models/_generated/__init__.py create mode 100644 src/late/models/responses.py create mode 100644 src/late/upload/__init__.py create mode 100644 src/late/upload/config.py create mode 100644 src/late/upload/direct.py create mode 100644 src/late/upload/protocols.py create mode 100644 src/late/upload/smart.py create mode 
100644 src/late/upload/utils.py create mode 100644 src/late/upload/vercel/__init__.py create mode 100644 src/late/upload/vercel/client.py create mode 100644 src/late/upload/vercel/uploader.py diff --git a/pyproject.toml b/pyproject.toml index 58783b1..1fb35c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "late-sdk" -version = "1.0.1" +version = "1.1.0" description = "Python SDK for Late API - Social Media Scheduling" readme = "README.md" requires-python = ">=3.10" diff --git a/scripts/generate_models.py b/scripts/generate_models.py index 34e9f2e..574926f 100644 --- a/scripts/generate_models.py +++ b/scripts/generate_models.py @@ -34,6 +34,7 @@ def main() -> int: # Create output directory output_dir.mkdir(parents=True, exist_ok=True) + output_file = output_dir / "models.py" # Run datamodel-code-generator cmd = [ @@ -43,7 +44,7 @@ def main() -> int: "--input", str(openapi_spec), "--output", - str(output_dir), + str(output_file), "--output-model-type", "pydantic_v2.BaseModel", "--input-file-type", @@ -55,8 +56,6 @@ def main() -> int: "--field-constraints", "--use-field-description", "--capitalise-enum-members", - "--enum-field-as-literal", - "all", "--use-default-kwarg", "--collapse-root-models", "--use-union-operator", diff --git a/src/late/__init__.py b/src/late/__init__.py index 88077f8..f9fa4cf 100644 --- a/src/late/__init__.py +++ b/src/late/__init__.py @@ -16,11 +16,42 @@ LateValidationError, ) from .client.late_client import Late +from .enums import ( + CaptionTone, + DayOfWeek, + FacebookContentType, + GoogleBusinessCTAType, + InstagramContentType, + MediaType, + Platform, + PostStatus, + TikTokCommercialContentType, + TikTokMediaType, + TikTokPrivacyLevel, + Visibility, +) -__version__ = "1.0.0" +__version__ = "1.1.0" __all__ = [ + # Client "Late", + # Enums - Core + "Platform", + "PostStatus", + "MediaType", + "Visibility", + # Enums - Platform-specific + "InstagramContentType", + "FacebookContentType", + 
"TikTokPrivacyLevel", + "TikTokCommercialContentType", + "TikTokMediaType", + "GoogleBusinessCTAType", + # Enums - Tools & Queue + "CaptionTone", + "DayOfWeek", + # Exceptions "LateAPIError", "LateAuthenticationError", "LateConnectionError", diff --git a/src/late/ai/content_generator.py b/src/late/ai/content_generator.py index 75d1aed..19d9578 100644 --- a/src/late/ai/content_generator.py +++ b/src/late/ai/content_generator.py @@ -6,6 +6,8 @@ from typing import TYPE_CHECKING, Any +from late.enums import CaptionTone, Platform + from .protocols import ( AIProvider, GenerateRequest, @@ -25,6 +27,7 @@ class ContentGenerator: Example: >>> from late.ai import ContentGenerator, GenerateRequest + >>> from late import Platform, CaptionTone >>> >>> # Using OpenAI >>> generator = ContentGenerator(provider="openai", api_key="sk-...") @@ -32,8 +35,8 @@ class ContentGenerator: >>> response = generator.generate( ... GenerateRequest( ... prompt="Write a tweet about Python", - ... platform="twitter", - ... tone="professional", + ... platform=Platform.TWITTER, + ... tone=CaptionTone.PROFESSIONAL, ... ) ... 
) >>> print(response.text) @@ -109,9 +112,9 @@ async def agenerate_stream( def generate_post( self, topic: str, - platform: str, + platform: Platform | str, *, - tone: str = "professional", + tone: CaptionTone | str = CaptionTone.PROFESSIONAL, language: str = "en", **kwargs: Any, ) -> str: @@ -139,9 +142,9 @@ def generate_post( async def agenerate_post( self, topic: str, - platform: str, + platform: Platform | str, *, - tone: str = "professional", + tone: CaptionTone | str = CaptionTone.PROFESSIONAL, language: str = "en", **kwargs: Any, ) -> str: diff --git a/src/late/ai/protocols.py b/src/late/ai/protocols.py index 40ceb53..3580c2e 100644 --- a/src/late/ai/protocols.py +++ b/src/late/ai/protocols.py @@ -12,6 +12,8 @@ if TYPE_CHECKING: from collections.abc import AsyncIterator + from late.enums import CaptionTone, Platform + @dataclass class GenerateRequest: @@ -21,8 +23,8 @@ class GenerateRequest: system: str | None = None max_tokens: int = 500 temperature: float = 0.7 - platform: str | None = None # e.g., "twitter", "linkedin" - tone: str | None = None # e.g., "professional", "casual" + platform: Platform | str | None = None + tone: CaptionTone | str | None = None language: str = "en" context: dict[str, Any] = field(default_factory=dict) diff --git a/src/late/ai/providers/openai.py b/src/late/ai/providers/openai.py index 2cbdab0..ff7a463 100644 --- a/src/late/ai/providers/openai.py +++ b/src/late/ai/providers/openai.py @@ -7,6 +7,8 @@ import os from typing import TYPE_CHECKING, Any +from late.enums import Platform + from ..protocols import GenerateRequest, GenerateResponse if TYPE_CHECKING: @@ -76,12 +78,12 @@ def _build_system_prompt(self, request: GenerateRequest) -> str: parts = ["You are an expert social media content creator."] if request.platform: - platform_guides = { - "twitter": "Keep it under 280 characters. Be concise and engaging.", - "linkedin": "Be professional and insightful. Use paragraphs.", - "instagram": "Be visual and use emojis. 
Include hashtag suggestions.", - "tiktok": "Be trendy and use Gen-Z language. Keep it fun.", - "facebook": "Be conversational and engaging.", + platform_guides: dict[Platform | str, str] = { + Platform.TWITTER: "Keep it under 280 characters. Be concise and engaging.", + Platform.LINKEDIN: "Be professional and insightful. Use paragraphs.", + Platform.INSTAGRAM: "Be visual and use emojis. Include hashtag suggestions.", + Platform.TIKTOK: "Be trendy and use Gen-Z language. Keep it fun.", + Platform.FACEBOOK: "Be conversational and engaging.", } guide = platform_guides.get(request.platform, "") parts.append(f"Writing for {request.platform}. {guide}") diff --git a/src/late/client/late_client.py b/src/late/client/late_client.py index 3689d35..7331c8a 100644 --- a/src/late/client/late_client.py +++ b/src/late/client/late_client.py @@ -22,7 +22,7 @@ class Late(BaseClient): Late API client for scheduling social media posts. Example: - >>> from late import Late + >>> from late import Late, Platform >>> >>> # Initialize client >>> client = Late(api_key="your_api_key") @@ -33,7 +33,7 @@ class Late(BaseClient): >>> # Create a post >>> post = client.posts.create( ... content="Hello world!", - ... platforms=[{"platform": "twitter", "accountId": "..."}], + ... platforms=[{"platform": Platform.TWITTER, "accountId": "..."}], ... scheduled_for="2024-12-25T10:00:00Z", ... ) >>> diff --git a/src/late/enums.py b/src/late/enums.py new file mode 100644 index 0000000..f9ea848 --- /dev/null +++ b/src/late/enums.py @@ -0,0 +1,309 @@ +""" +Enums for Late SDK. + +These enums provide type-safe constants for common values used throughout the SDK. +They inherit from both `str` and `Enum`, so they serialize automatically to their +string values when used in API requests. 
+ +Example: + >>> from late import Platform, PostStatus + >>> Platform.TWITTER + + >>> Platform.TWITTER == "twitter" + True + >>> str(Platform.TWITTER) + 'twitter' +""" + +from enum import Enum + + +class Platform(str, Enum): + """ + Supported social media platforms. + + Example: + >>> from late import Late, Platform + >>> client = Late(api_key="...") + >>> post = client.posts.create( + ... content="Hello!", + ... platforms=[{"platform": Platform.TWITTER, "accountId": "acc_123"}], + ... ) + """ + + TWITTER = "twitter" + """Twitter/X""" + + INSTAGRAM = "instagram" + """Instagram (feed posts, stories, reels)""" + + FACEBOOK = "facebook" + """Facebook Pages""" + + LINKEDIN = "linkedin" + """LinkedIn (personal profiles and company pages)""" + + TIKTOK = "tiktok" + """TikTok""" + + YOUTUBE = "youtube" + """YouTube (videos and shorts)""" + + PINTEREST = "pinterest" + """Pinterest""" + + REDDIT = "reddit" + """Reddit""" + + BLUESKY = "bluesky" + """Bluesky""" + + THREADS = "threads" + """Threads (Meta)""" + + GOOGLE_BUSINESS = "googlebusiness" + """Google Business Profile""" + + +class PostStatus(str, Enum): + """ + Post publication statuses. + + Example: + >>> from late import Late, PostStatus + >>> client = Late(api_key="...") + >>> scheduled = client.posts.list(status=PostStatus.SCHEDULED) + >>> failed = client.posts.list(status=PostStatus.FAILED) + """ + + DRAFT = "draft" + """Saved but not scheduled for publishing""" + + SCHEDULED = "scheduled" + """Scheduled for future publishing""" + + PUBLISHING = "publishing" + """Currently being published to platforms""" + + PUBLISHED = "published" + """Successfully published to all platforms""" + + FAILED = "failed" + """Publishing failed on all platforms""" + + PARTIAL = "partial" + """Publishing succeeded on some platforms but failed on others""" + + +class MediaType(str, Enum): + """ + Media item types. + + Example: + >>> from late import MediaType + >>> media_item = { + ... "type": MediaType.VIDEO, + ... 
"url": "https://example.com/video.mp4", + ... } + """ + + IMAGE = "image" + """Static image (JPEG, PNG, WebP, etc.)""" + + VIDEO = "video" + """Video file (MP4, MOV, etc.)""" + + GIF = "gif" + """Animated GIF""" + + DOCUMENT = "document" + """Document file (PDF for LinkedIn)""" + + +class Visibility(str, Enum): + """ + Content visibility settings. + + Used primarily for YouTube videos and other platforms that support + visibility controls. + + Example: + >>> from late import Visibility + >>> youtube_settings = { + ... "visibility": Visibility.UNLISTED, + ... } + """ + + PUBLIC = "public" + """Anyone can view the content""" + + PRIVATE = "private" + """Only you and explicitly shared users can view""" + + UNLISTED = "unlisted" + """Only people with the direct link can view""" + + +class InstagramContentType(str, Enum): + """ + Instagram-specific content types. + + Example: + >>> from late import InstagramContentType + >>> instagram_settings = { + ... "contentType": InstagramContentType.STORY, + ... } + """ + + STORY = "story" + """Ephemeral story (disappears after 24 hours)""" + + +class FacebookContentType(str, Enum): + """ + Facebook-specific content types. + + Example: + >>> from late import FacebookContentType + >>> facebook_settings = { + ... "contentType": FacebookContentType.STORY, + ... } + """ + + STORY = "story" + """Facebook Page Story (ephemeral, 24 hours)""" + + +class TikTokPrivacyLevel(str, Enum): + """ + TikTok privacy levels. + + Note: Available options depend on the creator's account settings. + Use the accounts API to get available options for each account. 
+ """ + + PUBLIC_TO_EVERYONE = "PUBLIC_TO_EVERYONE" + """Visible to everyone""" + + MUTUAL_FOLLOW_FRIENDS = "MUTUAL_FOLLOW_FRIENDS" + """Visible only to mutual followers""" + + FOLLOWER_OF_CREATOR = "FOLLOWER_OF_CREATOR" + """Visible only to followers""" + + SELF_ONLY = "SELF_ONLY" + """Visible only to the creator""" + + +class TikTokCommercialContentType(str, Enum): + """ + TikTok commercial content disclosure types. + + Required for brand partnerships and sponsored content. + """ + + NONE = "none" + """Not commercial content""" + + BRAND_ORGANIC = "brand_organic" + """Organic brand content""" + + BRAND_CONTENT = "brand_content" + """Paid partnership / sponsored content""" + + +class TikTokMediaType(str, Enum): + """ + TikTok media types. + + Usually auto-detected from media items, but can be overridden. + """ + + VIDEO = "video" + """Video post""" + + PHOTO = "photo" + """Photo carousel (up to 35 images)""" + + +class GoogleBusinessCTAType(str, Enum): + """ + Google Business Profile call-to-action button types. + + Example: + >>> from late import GoogleBusinessCTAType + >>> cta = { + ... "type": GoogleBusinessCTAType.BOOK, + ... "url": "https://example.com/book", + ... } + """ + + LEARN_MORE = "LEARN_MORE" + """Link to more information""" + + BOOK = "BOOK" + """Booking/reservation link""" + + ORDER = "ORDER" + """Online ordering link""" + + SHOP = "SHOP" + """E-commerce/shopping link""" + + SIGN_UP = "SIGN_UP" + """Registration/signup link""" + + CALL = "CALL" + """Phone call action""" + + +class DayOfWeek(int, Enum): + """ + Days of the week for queue scheduling. + + Values follow the JavaScript/API convention where Sunday = 0. + + Example: + >>> from late import DayOfWeek + >>> slots = [ + ... {"dayOfWeek": DayOfWeek.MONDAY, "time": "09:00"}, + ... {"dayOfWeek": DayOfWeek.WEDNESDAY, "time": "14:00"}, + ... 
] + """ + + SUNDAY = 0 + MONDAY = 1 + TUESDAY = 2 + WEDNESDAY = 3 + THURSDAY = 4 + FRIDAY = 5 + SATURDAY = 6 + + +class CaptionTone(str, Enum): + """ + Tones for AI-generated captions. + + Example: + >>> from late import Late, CaptionTone + >>> client = Late(api_key="...") + >>> result = client.tools.generate_caption( + ... image_url="https://example.com/image.jpg", + ... tone=CaptionTone.PROFESSIONAL, + ... ) + """ + + PROFESSIONAL = "professional" + """Formal, business-appropriate tone""" + + CASUAL = "casual" + """Relaxed, conversational tone""" + + HUMOROUS = "humorous" + """Fun, witty, or playful tone""" + + INSPIRATIONAL = "inspirational" + """Motivational, uplifting tone""" + + INFORMATIVE = "informative" + """Educational, fact-focused tone""" diff --git a/src/late/mcp/server.py b/src/late/mcp/server.py index 43be8f3..0a28347 100644 --- a/src/late/mcp/server.py +++ b/src/late/mcp/server.py @@ -28,7 +28,7 @@ from mcp.server.fastmcp import FastMCP -from late import Late +from late import Late, MediaType, PostStatus # Initialize MCP server mcp = FastMCP("Late", instructions=""" @@ -349,11 +349,11 @@ def posts_create( urls = [u.strip() for u in media_urls.split(",") if u.strip()] media_items = [] for url in urls: - media_type = "image" + media_type: MediaType | str = MediaType.IMAGE if any(ext in url.lower() for ext in [".mp4", ".mov", ".avi", ".webm", ".m4v"]): - media_type = "video" + media_type = MediaType.VIDEO elif any(ext in url.lower() for ext in [".gif"]): - media_type = "gif" + media_type = MediaType.GIF media_items.append({"type": media_type, "url": url}) params["media_items"] = media_items @@ -436,11 +436,11 @@ def posts_cross_post( urls = [u.strip() for u in media_urls.split(",") if u.strip()] media_items = [] for url in urls: - media_type = "image" + media_type: MediaType | str = MediaType.IMAGE if any(ext in url.lower() for ext in [".mp4", ".mov", ".avi", ".webm", ".m4v"]): - media_type = "video" + media_type = MediaType.VIDEO elif any(ext in 
url.lower() for ext in [".gif"]): - media_type = "gif" + media_type = MediaType.GIF media_items.append({"type": media_type, "url": url}) params["media_items"] = media_items @@ -527,7 +527,7 @@ def posts_retry(post_id: str) -> str: try: post_response = client.posts.get(post_id) post = post_response.get("post", post_response) - if post.get("status") != "failed": + if post.get("status") != PostStatus.FAILED: return f"āš ļø Post {post_id} is not in failed status (current: {post.get('status')})" except Exception as e: return f"āŒ Could not find post {post_id}: {e}" @@ -548,7 +548,7 @@ def posts_list_failed(limit: int = 10) -> str: limit: Maximum number of posts to return (default 10) """ client = _get_client() - response = client.posts.list(status="failed", limit=limit) + response = client.posts.list(status=PostStatus.FAILED, limit=limit) posts = response.get("posts", []) if not posts: @@ -573,7 +573,7 @@ def posts_retry_all_failed() -> str: Retry all failed posts. """ client = _get_client() - response = client.posts.list(status="failed", limit=50) + response = client.posts.list(status=PostStatus.FAILED, limit=50) posts = response.get("posts", []) if not posts: diff --git a/src/late/models/__init__.py b/src/late/models/__init__.py index bb55c03..9a2693c 100644 --- a/src/late/models/__init__.py +++ b/src/late/models/__init__.py @@ -10,27 +10,75 @@ # Import specific commonly used models for convenience from ._generated.models import ( - ErrorResponse, - FacebookPlatformData, - InstagramPlatformData, - LinkedInPlatformData, - MediaItem, - # Responses - Pagination, - PinterestPlatformData, - PlatformTarget, # Core models Post, + MediaItem, + PlatformTarget, Profile, SocialAccount, + QueueSlot, + QueueSchedule, # Enums Status, + Type, + Visibility, # Platform-specific TikTokSettings, TwitterPlatformData, - Type, - Visibility, + InstagramPlatformData, + FacebookPlatformData, + LinkedInPlatformData, YouTubePlatformData, + PinterestPlatformData, + # Base responses + 
Pagination, + ErrorResponse, + # Posts responses + PostsListResponse, + PostGetResponse, + PostCreateResponse, + PostUpdateResponse, + PostDeleteResponse, + PostRetryResponse, + # Profiles responses + ProfilesListResponse, + ProfileGetResponse, + ProfileCreateResponse, + ProfileUpdateResponse, + ProfileDeleteResponse, + # Accounts responses + AccountsListResponse, + AccountGetResponse, + FollowerStatsResponse, + AccountWithFollowerStats, + # Media responses + MediaUploadResponse, + UploadedFile, + UploadTokenResponse, + UploadTokenStatusResponse, + # Queue responses + QueueSlotsResponse, + QueueUpdateResponse, + QueueDeleteResponse, + QueuePreviewResponse, + QueueNextSlotResponse, + # Tools responses + DownloadResponse, + DownloadFormat, + TranscriptResponse, + TranscriptSegment, + HashtagCheckResponse, + HashtagInfo, + CaptionResponse, + # Users responses + User, + UsersListResponse, + UserGetResponse, +) + +# SDK-specific models (not from OpenAPI) +from .responses import ( + MediaLargeUploadResponse, ) __all__ = [ @@ -40,6 +88,8 @@ "PlatformTarget", "Profile", "SocialAccount", + "QueueSlot", + "QueueSchedule", # Enums "Status", "Type", @@ -52,7 +102,49 @@ "LinkedInPlatformData", "YouTubePlatformData", "PinterestPlatformData", - # Responses + # Base responses "Pagination", "ErrorResponse", + # Posts responses + "PostsListResponse", + "PostGetResponse", + "PostCreateResponse", + "PostUpdateResponse", + "PostDeleteResponse", + "PostRetryResponse", + # Profiles responses + "ProfilesListResponse", + "ProfileGetResponse", + "ProfileCreateResponse", + "ProfileUpdateResponse", + "ProfileDeleteResponse", + # Accounts responses + "AccountsListResponse", + "AccountGetResponse", + "FollowerStatsResponse", + "AccountWithFollowerStats", + # Media responses + "MediaUploadResponse", + "MediaLargeUploadResponse", + "UploadedFile", + "UploadTokenResponse", + "UploadTokenStatusResponse", + # Queue responses + "QueueSlotsResponse", + "QueueUpdateResponse", + "QueueDeleteResponse", + 
"QueuePreviewResponse", + "QueueNextSlotResponse", + # Tools responses + "DownloadResponse", + "DownloadFormat", + "TranscriptResponse", + "TranscriptSegment", + "HashtagCheckResponse", + "HashtagInfo", + "CaptionResponse", + # Users responses + "User", + "UsersListResponse", + "UserGetResponse", ] diff --git a/src/late/models/_generated/__init__.py b/src/late/models/_generated/__init__.py new file mode 100644 index 0000000..aca8b21 --- /dev/null +++ b/src/late/models/_generated/__init__.py @@ -0,0 +1,11 @@ +""" +Auto-generated Pydantic models from Late API OpenAPI specification. + +DO NOT EDIT THIS FILE MANUALLY. +Run `python scripts/generate_models.py` to regenerate. +""" + +from __future__ import annotations + +# Re-export all generated models +from .models import * # noqa: F401, F403 diff --git a/src/late/models/_generated/models.py b/src/late/models/_generated/models.py index be2583f..cfc12d2 100644 --- a/src/late/models/_generated/models.py +++ b/src/late/models/_generated/models.py @@ -1,18 +1,18 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2025-12-11T13:56:43+00:00 +# timestamp: 2025-12-15T13:54:40+00:00 from __future__ import annotations from enum import Enum -from typing import Annotated, Any +from typing import Annotated, Any, Dict, List from pydantic import AnyUrl, AwareDatetime, BaseModel, Field class ErrorResponse(BaseModel): error: str | None = None - details: dict[str, Any] | None = None + details: Dict[str, Any] | None = None class Type(Enum): @@ -27,6 +27,11 @@ class MediaItem(BaseModel): Media referenced in posts. URLs must be publicly reachable over HTTPS by the destination platforms. When using third‑party storage, ensure signed links remain valid until upload completes. 
+ **Uploading Media:** + - Small files (≤ ~4MB): Use `POST /v1/media` with `multipart/form-data` + - Large files (> ~4MB, up to 5GB): Use `POST /v1/media` with `Content-Type: application/json` for client-upload flow (presigned URL) + - See `/v1/media` endpoint documentation for details on both methods + **Automatic Compression:** - Bluesky: images larger than ~1MB are automatically recompressed to meet the platform's blob size limit. - Instagram: images >8 MB and videos >100 MB (stories) or >300 MB (reels) are automatically compressed. @@ -75,18 +80,28 @@ class Visibility(Enum): class ThreadItem(BaseModel): content: str | None = None - mediaItems: list[MediaItem] | None = None + mediaItems: List[MediaItem] | None = None class TwitterPlatformData(BaseModel): - threadItems: list[ThreadItem] | None = None + threadItems: List[ThreadItem] | None = None """ Sequence of tweets in a thread. First item is the root tweet. """ class ThreadsPlatformData(BaseModel): - threadItems: list[ThreadItem] | None = None + """ + Constraints: + - Carousel posts support up to 10 images (no videos in carousels). + - Single posts support one image or one video. + - Videos must be H.264/AAC MP4 format, max 5 minutes duration. + - Images must be JPEG or PNG, max 8 MB each. + - threadItems creates a reply chain (Threads equivalent of Twitter threads). + + """ + + threadItems: List[ThreadItem] | None = None """ Sequence of posts in a Threads thread (root then replies in order). """ @@ -171,7 +186,11 @@ class InstagramPlatformData(BaseModel): """ Set to 'story' to publish as a Story. Default posts become Reels or feed depending on media. """ - collaborators: list[str] | None = None + shareToFeed: bool = True + """ + For Reels only. When true (default), the Reel appears on both the Reels tab and your main profile feed. Set to false to post to the Reels tab only. 
+ """ + collaborators: List[str] | None = None """ Up to 3 Instagram usernames to invite as collaborators (feed/Reels only) """ @@ -179,7 +198,7 @@ class InstagramPlatformData(BaseModel): """ Optional first comment to add after the post is created (not applied to Stories) """ - userTags: list[UserTag] | None = None + userTags: List[UserTag] | None = None """ Tag Instagram users in photos by username and position coordinates. Only works for single image posts and the first image of carousel posts. Not supported for stories or videos. """ @@ -434,7 +453,7 @@ class QueueSchedule(BaseModel): """ IANA timezone (e.g., America/New_York) """ - slots: list[QueueSlot] | None = None + slots: List[QueueSlot] | None = None active: bool | None = None """ Whether the queue is active @@ -502,7 +521,6 @@ class ApiKey(BaseModel): id: str | None = None name: str | None = None keyPreview: str | None = None - permissions: list[str] | None = None expiresAt: AwareDatetime | None = None createdAt: AwareDatetime | None = None key: str | None = None @@ -597,7 +615,7 @@ class VideoClipJobCompleted(BaseModel): job_id: Annotated[str | None, Field(examples=["abc123def456"])] = None status: Annotated[Status3 | None, Field(examples=["completed"])] = None total_clips: Annotated[int | None, Field(examples=[5])] = None - clips: list[VideoClip] | None = None + clips: List[VideoClip] | None = None class Status4(Enum): @@ -690,7 +708,7 @@ class AnalyticsSinglePostResponse(BaseModel): scheduledFor: AwareDatetime | None = None publishedAt: AwareDatetime | None = None analytics: PostAnalytics | None = None - platformAnalytics: list[PlatformAnalytics] | None = None + platformAnalytics: List[PlatformAnalytics] | None = None platform: str | None = None platformPostUrl: AnyUrl | None = None isExternal: bool | None = None @@ -710,20 +728,20 @@ class Post1(BaseModel): publishedAt: AwareDatetime | None = None status: str | None = None analytics: PostAnalytics | None = None - platforms: list[PlatformAnalytics] | 
None = None + platforms: List[PlatformAnalytics] | None = None platform: str | None = None platformPostUrl: AnyUrl | None = None isExternal: bool | None = None thumbnailUrl: AnyUrl | None = None mediaType: MediaType1 | None = None - mediaItems: list[MediaItem] | None = None + mediaItems: List[MediaItem] | None = None class AnalyticsListResponse(BaseModel): overview: AnalyticsOverview | None = None - posts: list[Post1] | None = None + posts: List[Post1] | None = None pagination: Pagination | None = None - accounts: list[SocialAccount] | None = None + accounts: List[SocialAccount] | None = None """ Connected social accounts (followerCount and followersLastUpdated only included if user has analytics add-on) """ @@ -733,6 +751,226 @@ class AnalyticsListResponse(BaseModel): """ +class PostDeleteResponse(BaseModel): + message: str | None = None + + +class ProfilesListResponse(BaseModel): + profiles: List[Profile] | None = None + + +class ProfileGetResponse(BaseModel): + profile: Profile | None = None + + +class ProfileCreateResponse(BaseModel): + message: str | None = None + profile: Profile | None = None + + +class ProfileUpdateResponse(BaseModel): + message: str | None = None + profile: Profile | None = None + + +class ProfileDeleteResponse(BaseModel): + message: str | None = None + + +class AccountsListResponse(BaseModel): + accounts: List[SocialAccount] | None = None + hasAnalyticsAccess: bool | None = None + """ + Whether user has analytics add-on access + """ + + +class AccountGetResponse(BaseModel): + account: SocialAccount | None = None + + +class DateRange(BaseModel): + from_: Annotated[AwareDatetime | None, Field(alias="from")] = None + to: AwareDatetime | None = None + + +class Aggregation(Enum): + DAILY = "daily" + WEEKLY = "weekly" + MONTHLY = "monthly" + + +class FollowerStatsResponse(BaseModel): + accounts: List[AccountWithFollowerStats] | None = None + dateRange: DateRange | None = None + aggregation: Aggregation | None = None + + +class Type2(Enum): + 
IMAGE = "image" + VIDEO = "video" + DOCUMENT = "document" + + +class UploadedFile(BaseModel): + type: Type2 | None = None + url: AnyUrl | None = None + filename: str | None = None + size: int | None = None + mimeType: str | None = None + + +class MediaUploadResponse(BaseModel): + files: List[UploadedFile] | None = None + + +class Status5(Enum): + PENDING = "pending" + COMPLETED = "completed" + EXPIRED = "expired" + + +class UploadTokenResponse(BaseModel): + token: str | None = None + uploadUrl: AnyUrl | None = None + expiresAt: AwareDatetime | None = None + status: Status5 | None = None + + +class UploadTokenStatusResponse(BaseModel): + token: str | None = None + status: Status5 | None = None + files: List[UploadedFile] | None = None + createdAt: AwareDatetime | None = None + expiresAt: AwareDatetime | None = None + completedAt: AwareDatetime | None = None + + +class QueueSlotsResponse(BaseModel): + exists: bool | None = None + schedule: QueueSchedule | None = None + nextSlots: List[AwareDatetime] | None = None + + +class QueueUpdateResponse(BaseModel): + success: bool | None = None + schedule: QueueSchedule | None = None + nextSlots: List[AwareDatetime] | None = None + reshuffledCount: int | None = None + + +class QueueDeleteResponse(BaseModel): + success: bool | None = None + deleted: bool | None = None + + +class QueuePreviewResponse(BaseModel): + profileId: str | None = None + count: int | None = None + slots: List[AwareDatetime] | None = None + + +class QueueNextSlotResponse(BaseModel): + profileId: str | None = None + nextSlot: AwareDatetime | None = None + timezone: str | None = None + + +class DownloadFormat(BaseModel): + formatId: str | None = None + ext: str | None = None + resolution: str | None = None + filesize: int | None = None + quality: str | None = None + + +class DownloadResponse(BaseModel): + url: AnyUrl | None = None + title: str | None = None + thumbnail: AnyUrl | None = None + duration: int | None = None + formats: List[DownloadFormat] | None 
= None + + +class TranscriptSegment(BaseModel): + text: str | None = None + start: float | None = None + duration: float | None = None + + +class TranscriptResponse(BaseModel): + transcript: str | None = None + segments: List[TranscriptSegment] | None = None + language: str | None = None + + +class Status7(Enum): + SAFE = "safe" + BANNED = "banned" + RESTRICTED = "restricted" + UNKNOWN = "unknown" + + +class HashtagInfo(BaseModel): + hashtag: str | None = None + status: Status7 | None = None + postCount: int | None = None + + +class HashtagCheckResponse(BaseModel): + hashtags: List[HashtagInfo] | None = None + + +class CaptionResponse(BaseModel): + caption: str | None = None + + +class User(BaseModel): + field_id: Annotated[str | None, Field(alias="_id")] = None + email: str | None = None + name: str | None = None + role: str | None = None + createdAt: AwareDatetime | None = None + + +class UsersListResponse(BaseModel): + users: List[User] | None = None + + +class UserGetResponse(BaseModel): + user: User | None = None + + +class TikTokPlatformData(BaseModel): + """ + TikTok platform-specific settings. Contains tiktokSettings for video/photo posting options. 
+ + """ + + tiktokSettings: TikTokSettings | None = None + + +class VideoClipJob(BaseModel): + field_id: Annotated[ + str | None, Field(alias="_id", examples=["507f1f77bcf86cd799439011"]) + ] = None + jobId: Annotated[str | None, Field(examples=["abc123def456"])] = None + videoUrl: Annotated[ + AnyUrl | None, Field(examples=["https://storage.example.com/video.mp4"]) + ] = None + videoFileName: Annotated[str | None, Field(examples=["my-video.mp4"])] = None + status: Annotated[Status1 | None, Field(examples=["completed"])] = None + clips: List[VideoClip] | None = None + totalClips: Annotated[int | None, Field(examples=[5])] = None + error: Annotated[str | None, Field(examples=[None])] = None + createdAt: Annotated[ + AwareDatetime | None, Field(examples=["2025-10-22T10:30:00Z"]) + ] = None + completedAt: Annotated[ + AwareDatetime | None, Field(examples=["2025-10-22T10:45:00Z"]) + ] = None + + class PlatformTarget(BaseModel): platform: Annotated[str | None, Field(examples=["twitter"])] = None """ @@ -740,7 +978,7 @@ class PlatformTarget(BaseModel): """ accountId: str | None = None customContent: str | None = None - customMedia: list[MediaItem] | None = None + customMedia: List[MediaItem] | None = None scheduledFor: AwareDatetime | None = None """ Optional per-platform scheduled time override (uses post.scheduledFor when omitted) @@ -754,12 +992,37 @@ class PlatformTarget(BaseModel): | PinterestPlatformData | YouTubePlatformData | GoogleBusinessPlatformData + | TikTokPlatformData | None ) = None """ Platform-specific overrides and options. 
""" status: Annotated[str | None, Field(examples=["pending"])] = None + """ + Platform-specific status: pending, publishing, published, failed + """ + platformPostId: Annotated[str | None, Field(examples=["1234567890123456789"])] = ( + None + ) + """ + The native post ID on the platform (populated after successful publish) + """ + platformPostUrl: Annotated[ + AnyUrl | None, + Field(examples=["https://twitter.com/acmecorp/status/1234567890123456789"]), + ] = None + """ + Public URL of the published post on the platform. + Populated after successful publish. For immediate posts (publishNow=true), + this is included in the response. For scheduled posts, fetch the post + via GET /v1/posts/{postId} after the scheduled time. + + """ + publishedAt: AwareDatetime | None = None + """ + Timestamp when the post was published to this platform + """ class Post(BaseModel): @@ -771,12 +1034,12 @@ class Post(BaseModel): """ content: str | None = None - mediaItems: list[MediaItem] | None = None - platforms: list[PlatformTarget] | None = None + mediaItems: List[MediaItem] | None = None + platforms: List[PlatformTarget] | None = None scheduledFor: AwareDatetime | None = None timezone: str | None = None status: Status | None = None - tags: list[str] | None = None + tags: List[str] | None = None """ YouTube tag constraints when targeting YouTube: - No count cap; duplicates removed. @@ -784,11 +1047,10 @@ class Post(BaseModel): - Combined characters across all tags ≤ 500. 
""" - hashtags: list[str] | None = None - mentions: list[str] | None = None + hashtags: List[str] | None = None + mentions: List[str] | None = None visibility: Visibility | None = None - metadata: dict[str, Any] | None = None - tiktokSettings: TikTokSettings | None = None + metadata: Dict[str, Any] | None = None queuedFromProfile: str | None = None """ Profile ID if the post was scheduled via the queue @@ -797,22 +1059,25 @@ class Post(BaseModel): updatedAt: AwareDatetime | None = None -class VideoClipJob(BaseModel): - field_id: Annotated[ - str | None, Field(alias="_id", examples=["507f1f77bcf86cd799439011"]) - ] = None - jobId: Annotated[str | None, Field(examples=["abc123def456"])] = None - videoUrl: Annotated[ - AnyUrl | None, Field(examples=["https://storage.example.com/video.mp4"]) - ] = None - videoFileName: Annotated[str | None, Field(examples=["my-video.mp4"])] = None - status: Annotated[Status1 | None, Field(examples=["completed"])] = None - clips: list[VideoClip] | None = None - totalClips: Annotated[int | None, Field(examples=[5])] = None - error: Annotated[str | None, Field(examples=[None])] = None - createdAt: Annotated[ - AwareDatetime | None, Field(examples=["2025-10-22T10:30:00Z"]) - ] = None - completedAt: Annotated[ - AwareDatetime | None, Field(examples=["2025-10-22T10:45:00Z"]) - ] = None +class PostsListResponse(BaseModel): + posts: List[Post] | None = None + pagination: Pagination | None = None + + +class PostGetResponse(BaseModel): + post: Post | None = None + + +class PostCreateResponse(BaseModel): + message: str | None = None + post: Post | None = None + + +class PostUpdateResponse(BaseModel): + message: str | None = None + post: Post | None = None + + +class PostRetryResponse(BaseModel): + message: str | None = None + post: Post | None = None diff --git a/src/late/models/responses.py b/src/late/models/responses.py new file mode 100644 index 0000000..f1ef4e2 --- /dev/null +++ b/src/late/models/responses.py @@ -0,0 +1,25 @@ +""" 
+SDK-specific response models. + +These models are NOT generated from OpenAPI and are specific to the SDK implementation. +For API response models, see the generated models in _generated/models.py. +""" + +from __future__ import annotations + +from pydantic import BaseModel + + +class MediaLargeUploadResponse(BaseModel): + """ + Response from media.upload_large() - Vercel Blob upload. + + This is SDK-specific and not from the API, as large file uploads + go directly to Vercel Blob storage. + """ + + url: str + pathname: str + contentType: str + size: int + downloadUrl: str diff --git a/src/late/pipelines/cross_poster.py b/src/late/pipelines/cross_poster.py index 355c29d..20861f9 100644 --- a/src/late/pipelines/cross_poster.py +++ b/src/late/pipelines/cross_poster.py @@ -8,6 +8,8 @@ from datetime import datetime, timedelta from typing import TYPE_CHECKING, Any +from late.enums import Platform + if TYPE_CHECKING: from ..client.late_client import Late @@ -16,7 +18,7 @@ class PlatformConfig: """Configuration for a single platform.""" - platform: str + platform: Platform | str account_id: str custom_content: str | None = None delay_minutes: int = 0 @@ -26,7 +28,7 @@ class PlatformConfig: class CrossPostResult: """Result of cross-posting to a single platform.""" - platform: str + platform: Platform | str success: bool post_id: str | None = None error: str | None = None @@ -42,27 +44,28 @@ class CrossPosterPipeline: - Platform-specific customizations Example: + >>> from late.enums import Platform >>> client = Late(api_key="...") >>> cross_poster = CrossPosterPipeline(client) >>> >>> results = await cross_poster.post( ... content="Exciting news! Our new feature is live.", ... platforms=[ - ... PlatformConfig("twitter", "tw_123"), - ... PlatformConfig("linkedin", "li_456"), - ... PlatformConfig("instagram", "ig_789"), + ... PlatformConfig(Platform.TWITTER, "tw_123"), + ... PlatformConfig(Platform.LINKEDIN, "li_456"), + ... PlatformConfig(Platform.INSTAGRAM, "ig_789"), ... 
], ... ) """ - CHAR_LIMITS = { - "twitter": 280, - "threads": 500, - "linkedin": 3000, - "instagram": 2200, - "facebook": 63206, - "tiktok": 2200, - "bluesky": 300, + CHAR_LIMITS: dict[Platform | str, int] = { + Platform.TWITTER: 280, + Platform.THREADS: 500, + Platform.LINKEDIN: 3000, + Platform.INSTAGRAM: 2200, + Platform.FACEBOOK: 63206, + Platform.TIKTOK: 2200, + Platform.BLUESKY: 300, } def __init__( diff --git a/src/late/pipelines/csv_scheduler.py b/src/late/pipelines/csv_scheduler.py index 1201591..6434ba5 100644 --- a/src/late/pipelines/csv_scheduler.py +++ b/src/late/pipelines/csv_scheduler.py @@ -10,6 +10,8 @@ from pathlib import Path from typing import TYPE_CHECKING, Any +from late.enums import MediaType + if TYPE_CHECKING: from collections.abc import Iterator @@ -95,7 +97,7 @@ def _build_payload(self, row: dict[str, str]) -> dict[str, Any]: if row.get("media_url"): payload["mediaItems"] = [ - {"type": row.get("media_type", "image"), "url": row["media_url"]} + {"type": row.get("media_type", MediaType.IMAGE), "url": row["media_url"]} ] if row.get("tags"): diff --git a/src/late/resources/accounts.py b/src/late/resources/accounts.py index 6441092..21ed908 100644 --- a/src/late/resources/accounts.py +++ b/src/late/resources/accounts.py @@ -4,12 +4,16 @@ from __future__ import annotations -from typing import Any +from late.models import ( + AccountGetResponse, + AccountsListResponse, + FollowerStatsResponse, +) from .base import BaseResource -class AccountsResource(BaseResource[Any]): +class AccountsResource(BaseResource[AccountsListResponse]): """ Resource for managing connected social media accounts. @@ -32,7 +36,7 @@ class AccountsResource(BaseResource[Any]): # Sync methods # ------------------------------------------------------------------------- - def list(self, *, profile_id: str | None = None) -> dict[str, Any]: + def list(self, *, profile_id: str | None = None) -> AccountsListResponse: """ List connected accounts. 
@@ -40,12 +44,13 @@ def list(self, *, profile_id: str | None = None) -> dict[str, Any]: profile_id: Optional profile ID to filter accounts Returns: - Dict with 'accounts' and 'hasAnalyticsAccess' keys + AccountsListResponse with 'accounts' and 'hasAnalyticsAccess' attributes """ params = self._build_params(profile_id=profile_id) - return self._client._get(self._BASE_PATH, params=params or None) + data = self._client._get(self._BASE_PATH, params=params or None) + return AccountsListResponse.model_validate(data) - def get(self, account_id: str) -> dict[str, Any]: + def get(self, account_id: str) -> AccountGetResponse: """ Get an account by ID. @@ -53,15 +58,16 @@ def get(self, account_id: str) -> dict[str, Any]: account_id: The account ID Returns: - Dict with 'account' key containing the SocialAccount object + AccountGetResponse with 'account' attribute """ - return self._client._get(self._path(account_id)) + data = self._client._get(self._path(account_id)) + return AccountGetResponse.model_validate(data) def get_follower_stats( self, *, account_ids: list[str] | None = None, - ) -> dict[str, Any]: + ) -> FollowerStatsResponse: """ Get follower statistics for accounts. 
@@ -71,33 +77,37 @@ def get_follower_stats( account_ids: Optional list of account IDs to filter Returns: - Dict with follower statistics + FollowerStatsResponse with 'stats' attribute """ params = None if account_ids: params = {"accountIds": ",".join(account_ids)} - return self._client._get(self._path("follower-stats"), params=params) + data = self._client._get(self._path("follower-stats"), params=params) + return FollowerStatsResponse.model_validate(data) # ------------------------------------------------------------------------- # Async methods # ------------------------------------------------------------------------- - async def alist(self, *, profile_id: str | None = None) -> dict[str, Any]: + async def alist(self, *, profile_id: str | None = None) -> AccountsListResponse: """List connected accounts asynchronously.""" params = self._build_params(profile_id=profile_id) - return await self._client._aget(self._BASE_PATH, params=params or None) + data = await self._client._aget(self._BASE_PATH, params=params or None) + return AccountsListResponse.model_validate(data) - async def aget(self, account_id: str) -> dict[str, Any]: + async def aget(self, account_id: str) -> AccountGetResponse: """Get an account by ID asynchronously.""" - return await self._client._aget(self._path(account_id)) + data = await self._client._aget(self._path(account_id)) + return AccountGetResponse.model_validate(data) async def aget_follower_stats( self, *, account_ids: list[str] | None = None, - ) -> dict[str, Any]: + ) -> FollowerStatsResponse: """Get follower statistics asynchronously.""" params = None if account_ids: params = {"accountIds": ",".join(account_ids)} - return await self._client._aget(self._path("follower-stats"), params=params) + data = await self._client._aget(self._path("follower-stats"), params=params) + return FollowerStatsResponse.model_validate(data) diff --git a/src/late/resources/media.py b/src/late/resources/media.py index a362f6e..5d48d79 100644 --- 
a/src/late/resources/media.py +++ b/src/late/resources/media.py @@ -1,33 +1,49 @@ """ Media resource for uploading images and videos. + +Supports two upload methods: +- Direct upload: For small files (< 4MB) via API multipart +- Vercel Blob: For large files (up to 5GB) - requires Vercel token """ from __future__ import annotations import mimetypes from pathlib import Path -from typing import Any +from typing import TYPE_CHECKING, Any, Callable + +from late.models import ( + MediaLargeUploadResponse, + MediaUploadResponse, + UploadTokenResponse, + UploadTokenStatusResponse, +) from .base import BaseResource +if TYPE_CHECKING: + from late.upload import UploadProgress + -class MediaResource(BaseResource[Any]): +# Size limit for direct upload (4MB) +DIRECT_UPLOAD_MAX_SIZE = 4 * 1024 * 1024 + + +class MediaResource(BaseResource[MediaUploadResponse]): """ Resource for uploading media files. - Supports uploading images, videos, and PDFs up to 5GB total. + Supports uploading images, videos, and PDFs. - Example: - >>> client = Late(api_key="...") - >>> # Upload single file + For small files (< 4MB): >>> result = client.media.upload("photo.jpg") >>> print(result["files"][0]["url"]) - >>> - >>> # Upload multiple files - >>> result = client.media.upload_multiple(["photo1.jpg", "video.mp4"]) - >>> - >>> # Upload from bytes - >>> result = client.media.upload_bytes(image_bytes, "image.png") + + For large files (4MB - 5GB), use upload_large with Vercel token: + >>> result = client.media.upload_large( + ... "large_video.mp4", + ... vercel_token="vercel_blob_rw_xxx" + ... 
) """ _BASE_PATH = "/v1/media" @@ -37,37 +53,56 @@ def _get_mime_type(self, file_path: Path) -> str: mime_type, _ = mimetypes.guess_type(str(file_path)) return mime_type or "application/octet-stream" + def _check_file_size(self, file_path: Path) -> int: + """Get file size and validate for direct upload.""" + size = file_path.stat().st_size + if size > DIRECT_UPLOAD_MAX_SIZE: + from late.upload import LargeFileError + raise LargeFileError(size, DIRECT_UPLOAD_MAX_SIZE) + return size + # ------------------------------------------------------------------------- - # Sync methods + # Direct upload (small files < 4MB) # ------------------------------------------------------------------------- - def upload(self, file_path: str | Path) -> dict[str, Any]: + def upload(self, file_path: str | Path) -> MediaUploadResponse: """ - Upload a single media file. + Upload a single media file (direct upload, max 4MB). + + For files larger than 4MB, use upload_large() with a Vercel token. Args: file_path: Path to the file to upload Returns: - Dict with 'files' array containing uploaded file info + MediaUploadResponse with 'files' attribute + + Raises: + LargeFileError: If file exceeds 4MB (use upload_large instead) """ path = Path(file_path) + self._check_file_size(path) + mime_type = self._get_mime_type(path) with path.open("rb") as f: - return self._client._post( + data = self._client._post( self._BASE_PATH, files={"files": (path.name, f, mime_type)}, ) + return MediaUploadResponse.model_validate(data) - def upload_multiple(self, file_paths: list[str | Path]) -> dict[str, Any]: + def upload_multiple(self, file_paths: list[str | Path]) -> MediaUploadResponse: """ - Upload multiple media files at once. + Upload multiple media files at once (direct upload, each < 4MB). 
Args: file_paths: List of file paths to upload Returns: - Dict with 'files' array containing all uploaded files + MediaUploadResponse with 'files' attribute + + Raises: + LargeFileError: If any file exceeds 4MB """ files_list = [] file_handles = [] @@ -75,12 +110,14 @@ def upload_multiple(self, file_paths: list[str | Path]) -> dict[str, Any]: try: for file_path in file_paths: path = Path(file_path) + self._check_file_size(path) mime_type = self._get_mime_type(path) f = path.open("rb") file_handles.append(f) files_list.append(("files", (path.name, f, mime_type))) - return self._client._post(self._BASE_PATH, files=files_list) + data = self._client._post(self._BASE_PATH, files=files_list) + return MediaUploadResponse.model_validate(data) finally: for f in file_handles: f.close() @@ -91,9 +128,9 @@ def upload_bytes( filename: str, *, mime_type: str | None = None, - ) -> dict[str, Any]: + ) -> MediaUploadResponse: """ - Upload media from bytes. + Upload media from bytes (direct upload, max 4MB). 
Args: content: File content as bytes @@ -101,22 +138,134 @@ def upload_bytes( mime_type: Optional MIME type (auto-detected if not provided) Returns: - Dict with 'files' array containing uploaded file info + MediaUploadResponse with 'files' attribute + + Raises: + LargeFileError: If content exceeds 4MB """ + if len(content) > DIRECT_UPLOAD_MAX_SIZE: + from late.upload import LargeFileError + raise LargeFileError(len(content), DIRECT_UPLOAD_MAX_SIZE) + if mime_type is None: mime_type, _ = mimetypes.guess_type(filename) mime_type = mime_type or "application/octet-stream" - return self._client._post( + data = self._client._post( self._BASE_PATH, files={"files": (filename, content, mime_type)}, ) + return MediaUploadResponse.model_validate(data) + + # ------------------------------------------------------------------------- + # Large file upload (Vercel Blob - up to 5GB) + # ------------------------------------------------------------------------- + + def upload_large( + self, + file_path: str | Path, + *, + vercel_token: str, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> MediaLargeUploadResponse: + """ + Upload a large file using Vercel Blob (up to 5GB). + + Requires a Vercel Blob read-write token. + + Args: + file_path: Path to the file to upload + vercel_token: Vercel Blob token (vercel_blob_rw_xxx) + Get one at: https://vercel.com/docs/storage/vercel-blob + on_progress: Optional callback for progress updates + + Returns: + MediaLargeUploadResponse with 'url', 'pathname', 'contentType', 'size' attributes + + Example: + >>> result = client.media.upload_large( + ... "video.mp4", + ... vercel_token="vercel_blob_rw_xxx", + ... on_progress=lambda p: print(f"{p.percentage:.1f}%") + ... 
) + >>> print(result.url) + """ + from late.upload import UploadFile, VercelBlobUploader + + path = Path(file_path) + mime_type = self._get_mime_type(path) + + uploader = VercelBlobUploader(vercel_token) + result = uploader.upload( + UploadFile( + filename=path.name, + content=path, + mime_type=mime_type, + size=path.stat().st_size, + ), + on_progress=on_progress, + ) + + return MediaLargeUploadResponse( + url=result.url, + pathname=result.pathname, + contentType=result.content_type, + size=result.size, + downloadUrl=result.download_url, + ) + + def upload_large_bytes( + self, + content: bytes, + filename: str, + *, + vercel_token: str, + mime_type: str | None = None, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> MediaLargeUploadResponse: + """ + Upload large content from bytes using Vercel Blob. + + Args: + content: File content as bytes + filename: Name for the file + vercel_token: Vercel Blob token + mime_type: Optional MIME type + on_progress: Optional progress callback + + Returns: + MediaLargeUploadResponse with 'url', 'pathname', 'contentType', 'size' attributes + """ + from late.upload import UploadFile, VercelBlobUploader + + if mime_type is None: + mime_type, _ = mimetypes.guess_type(filename) + mime_type = mime_type or "application/octet-stream" + + uploader = VercelBlobUploader(vercel_token) + result = uploader.upload( + UploadFile( + filename=filename, + content=content, + mime_type=mime_type, + size=len(content), + ), + on_progress=on_progress, + ) + + return MediaLargeUploadResponse( + url=result.url, + pathname=result.pathname, + contentType=result.content_type, + size=result.size, + downloadUrl=result.download_url, + ) # ------------------------------------------------------------------------- # Upload Token Flow (for Claude Desktop / MCP) # ------------------------------------------------------------------------- - def generate_upload_token(self) -> dict[str, Any]: + def generate_upload_token(self) -> UploadTokenResponse: 
""" Generate an upload token for browser-based file uploads. @@ -127,11 +276,12 @@ def generate_upload_token(self) -> dict[str, Any]: 3. Call check_upload_token() to get the uploaded file URLs Returns: - Dict with 'token', 'uploadUrl', 'expiresAt', 'status' + UploadTokenResponse with 'token', 'uploadUrl', 'expiresAt', 'status' attributes """ - return self._client._post(self._path("upload-token")) + data = self._client._post(self._path("upload-token")) + return UploadTokenResponse.model_validate(data) - def check_upload_token(self, token: str) -> dict[str, Any]: + def check_upload_token(self, token: str) -> UploadTokenStatusResponse: """ Check the status of an upload token and get uploaded file URLs. @@ -139,36 +289,42 @@ def check_upload_token(self, token: str) -> dict[str, Any]: token: The upload token from generate_upload_token() Returns: - Dict with 'token', 'status', 'files', 'createdAt', 'expiresAt', 'completedAt' + UploadTokenStatusResponse with 'token', 'status', 'files', 'createdAt', 'expiresAt', 'completedAt' attributes """ - return self._client._get(self._path("upload-token"), params={"token": token}) + data = self._client._get(self._path("upload-token"), params={"token": token}) + return UploadTokenStatusResponse.model_validate(data) # ------------------------------------------------------------------------- - # Async methods + # Async methods - Direct upload # ------------------------------------------------------------------------- - async def aupload(self, file_path: str | Path) -> dict[str, Any]: - """Upload a single media file asynchronously.""" + async def aupload(self, file_path: str | Path) -> MediaUploadResponse: + """Upload a single media file asynchronously (max 4MB).""" path = Path(file_path) + self._check_file_size(path) + mime_type = self._get_mime_type(path) with path.open("rb") as f: content = f.read() - return await self._client._apost( + data = await self._client._apost( self._BASE_PATH, files={"files": (path.name, content, mime_type)}, 
) + return MediaUploadResponse.model_validate(data) - async def aupload_multiple(self, file_paths: list[str | Path]) -> dict[str, Any]: - """Upload multiple media files asynchronously.""" + async def aupload_multiple(self, file_paths: list[str | Path]) -> MediaUploadResponse: + """Upload multiple media files asynchronously (each < 4MB).""" files_list = [] for file_path in file_paths: path = Path(file_path) + self._check_file_size(path) mime_type = self._get_mime_type(path) with path.open("rb") as f: content = f.read() files_list.append(("files", (path.name, content, mime_type))) - return await self._client._apost(self._BASE_PATH, files=files_list) + data = await self._client._apost(self._BASE_PATH, files=files_list) + return MediaUploadResponse.model_validate(data) async def aupload_bytes( self, @@ -176,21 +332,103 @@ async def aupload_bytes( filename: str, *, mime_type: str | None = None, - ) -> dict[str, Any]: - """Upload media from bytes asynchronously.""" + ) -> MediaUploadResponse: + """Upload media from bytes asynchronously (max 4MB).""" + if len(content) > DIRECT_UPLOAD_MAX_SIZE: + from late.upload import LargeFileError + raise LargeFileError(len(content), DIRECT_UPLOAD_MAX_SIZE) + if mime_type is None: mime_type, _ = mimetypes.guess_type(filename) mime_type = mime_type or "application/octet-stream" - return await self._client._apost( + data = await self._client._apost( self._BASE_PATH, files={"files": (filename, content, mime_type)}, ) + return MediaUploadResponse.model_validate(data) + + # ------------------------------------------------------------------------- + # Async methods - Large file upload + # ------------------------------------------------------------------------- + + async def aupload_large( + self, + file_path: str | Path, + *, + vercel_token: str, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> MediaLargeUploadResponse: + """Upload a large file asynchronously using Vercel Blob.""" + from late.upload import UploadFile, 
VercelBlobUploader + + path = Path(file_path) + mime_type = self._get_mime_type(path) + + uploader = VercelBlobUploader(vercel_token) + result = await uploader.aupload( + UploadFile( + filename=path.name, + content=path, + mime_type=mime_type, + size=path.stat().st_size, + ), + on_progress=on_progress, + ) + + return MediaLargeUploadResponse( + url=result.url, + pathname=result.pathname, + contentType=result.content_type, + size=result.size, + downloadUrl=result.download_url, + ) + + async def aupload_large_bytes( + self, + content: bytes, + filename: str, + *, + vercel_token: str, + mime_type: str | None = None, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> MediaLargeUploadResponse: + """Upload large content from bytes asynchronously.""" + from late.upload import UploadFile, VercelBlobUploader + + if mime_type is None: + mime_type, _ = mimetypes.guess_type(filename) + mime_type = mime_type or "application/octet-stream" + + uploader = VercelBlobUploader(vercel_token) + result = await uploader.aupload( + UploadFile( + filename=filename, + content=content, + mime_type=mime_type, + size=len(content), + ), + on_progress=on_progress, + ) + + return MediaLargeUploadResponse( + url=result.url, + pathname=result.pathname, + contentType=result.content_type, + size=result.size, + downloadUrl=result.download_url, + ) + + # ------------------------------------------------------------------------- + # Async methods - Upload Token Flow + # ------------------------------------------------------------------------- - async def agenerate_upload_token(self) -> dict[str, Any]: + async def agenerate_upload_token(self) -> UploadTokenResponse: """Generate an upload token asynchronously.""" - return await self._client._apost(self._path("upload-token")) + data = await self._client._apost(self._path("upload-token")) + return UploadTokenResponse.model_validate(data) - async def acheck_upload_token(self, token: str) -> dict[str, Any]: + async def 
acheck_upload_token(self, token: str) -> UploadTokenStatusResponse: """Check the status of an upload token asynchronously.""" - return await self._client._aget(self._path("upload-token"), params={"token": token}) + data = await self._client._aget(self._path("upload-token"), params={"token": token}) + return UploadTokenStatusResponse.model_validate(data) diff --git a/src/late/resources/posts.py b/src/late/resources/posts.py index 6fa50aa..b9ad606 100644 --- a/src/late/resources/posts.py +++ b/src/late/resources/posts.py @@ -5,42 +5,38 @@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING, Any, Literal +from typing import TYPE_CHECKING, Any + +from late.models import ( + PostCreateResponse, + PostDeleteResponse, + PostGetResponse, + PostRetryResponse, + PostsListResponse, + PostUpdateResponse, +) from .base import BaseResource if TYPE_CHECKING: from datetime import datetime -# Type aliases for better readability -Platform = Literal[ - "twitter", - "instagram", - "facebook", - "linkedin", - "tiktok", - "youtube", - "pinterest", - "reddit", - "bluesky", - "threads", - "googlebusiness", -] -PostStatus = Literal["draft", "scheduled", "publishing", "published", "failed", "partial"] - - -class PostsResource(BaseResource[Any]): + from late.enums import Platform, PostStatus + + +class PostsResource(BaseResource[PostsListResponse]): """ Resource for managing posts. Example: + >>> from late import Platform, PostStatus >>> client = Late(api_key="...") >>> # List posts - >>> posts = client.posts.list(status="scheduled") + >>> posts = client.posts.list(status=PostStatus.SCHEDULED) >>> # Create a post >>> post = client.posts.create( ... content="Hello!", - ... platforms=[{"platform": "twitter", "accountId": "..."}], + ... platforms=[{"platform": Platform.TWITTER, "accountId": "..."}], ... scheduled_for=datetime.now() + timedelta(hours=1), ... 
) >>> # Update a post @@ -67,7 +63,7 @@ def list( date_from: str | None = None, date_to: str | None = None, include_hidden: bool | None = None, - ) -> dict[str, Any]: + ) -> PostsListResponse: """ List posts with optional filters. @@ -83,7 +79,7 @@ def list( include_hidden: Include hidden posts (default: False) Returns: - Dict with 'posts' and 'pagination' keys + PostsListResponse with 'posts' and 'pagination' attributes """ params = self._build_params( page=page, @@ -96,9 +92,10 @@ def list( date_to=date_to, include_hidden=include_hidden, ) - return self._client._get(self._BASE_PATH, params=params) + data = self._client._get(self._BASE_PATH, params=params) + return PostsListResponse.model_validate(data) - def get(self, post_id: str) -> dict[str, Any]: + def get(self, post_id: str) -> PostGetResponse: """ Get a single post by ID. @@ -106,9 +103,10 @@ def get(self, post_id: str) -> dict[str, Any]: post_id: The post ID Returns: - Dict with 'post' key containing the Post object + PostGetResponse with 'post' attribute """ - return self._client._get(self._path(post_id)) + data = self._client._get(self._path(post_id)) + return PostGetResponse.model_validate(data) def create( self, @@ -128,7 +126,7 @@ def create( metadata: dict[str, Any] | None = None, tiktok_settings: dict[str, Any] | None = None, queued_from_profile: str | None = None, - ) -> dict[str, Any]: + ) -> PostCreateResponse: """ Create a new post. 
@@ -152,7 +150,7 @@ def create( queued_from_profile: Profile ID if creating via queue Returns: - Dict with 'message' and 'post' keys + PostCreateResponse with 'message' and 'post' attributes """ payload = self._build_payload( content=content, @@ -171,7 +169,8 @@ def create( tiktok_settings=tiktok_settings, queued_from_profile=queued_from_profile, ) - return self._client._post(self._BASE_PATH, data=payload) + data = self._client._post(self._BASE_PATH, data=payload) + return PostCreateResponse.model_validate(data) def update( self, @@ -188,7 +187,7 @@ def update( mentions: list[str] | None = None, metadata: dict[str, Any] | None = None, tiktok_settings: dict[str, Any] | None = None, - ) -> dict[str, Any]: + ) -> PostUpdateResponse: """ Update an existing post. @@ -210,7 +209,7 @@ def update( tiktok_settings: New TikTok settings Returns: - Dict with 'message' and 'post' keys + PostUpdateResponse with 'message' and 'post' attributes """ payload = self._build_payload( content=content, @@ -225,9 +224,10 @@ def update( metadata=metadata, tiktok_settings=tiktok_settings, ) - return self._client._put(self._path(post_id), data=payload) + data = self._client._put(self._path(post_id), data=payload) + return PostUpdateResponse.model_validate(data) - def delete(self, post_id: str) -> dict[str, Any]: + def delete(self, post_id: str) -> PostDeleteResponse: """ Delete a post. @@ -239,11 +239,12 @@ def delete(self, post_id: str) -> dict[str, Any]: post_id: ID of the post to delete Returns: - Dict with 'message' key + PostDeleteResponse with 'message' attribute """ - return self._client._delete(self._path(post_id)) + data = self._client._delete(self._path(post_id)) + return PostDeleteResponse.model_validate(data) - def retry(self, post_id: str) -> dict[str, Any]: + def retry(self, post_id: str) -> PostRetryResponse: """ Retry a failed post. 
@@ -251,9 +252,10 @@ def retry(self, post_id: str) -> dict[str, Any]: post_id: ID of the failed post Returns: - Dict with 'message' and 'post' keys + PostRetryResponse with 'message' attribute """ - return self._client._post(self._path(post_id, "retry")) + data = self._client._post(self._path(post_id, "retry")) + return PostRetryResponse.model_validate(data) def bulk_upload( self, @@ -296,7 +298,7 @@ async def alist( date_from: str | None = None, date_to: str | None = None, include_hidden: bool | None = None, - ) -> dict[str, Any]: + ) -> PostsListResponse: """List posts asynchronously.""" params = self._build_params( page=page, @@ -309,11 +311,13 @@ async def alist( date_to=date_to, include_hidden=include_hidden, ) - return await self._client._aget(self._BASE_PATH, params=params) + data = await self._client._aget(self._BASE_PATH, params=params) + return PostsListResponse.model_validate(data) - async def aget(self, post_id: str) -> dict[str, Any]: + async def aget(self, post_id: str) -> PostGetResponse: """Get a post asynchronously.""" - return await self._client._aget(self._path(post_id)) + data = await self._client._aget(self._path(post_id)) + return PostGetResponse.model_validate(data) async def acreate( self, @@ -333,7 +337,7 @@ async def acreate( metadata: dict[str, Any] | None = None, tiktok_settings: dict[str, Any] | None = None, queued_from_profile: str | None = None, - ) -> dict[str, Any]: + ) -> PostCreateResponse: """Create a post asynchronously.""" payload = self._build_payload( content=content, @@ -352,7 +356,8 @@ async def acreate( tiktok_settings=tiktok_settings, queued_from_profile=queued_from_profile, ) - return await self._client._apost(self._BASE_PATH, data=payload) + data = await self._client._apost(self._BASE_PATH, data=payload) + return PostCreateResponse.model_validate(data) async def aupdate( self, @@ -369,7 +374,7 @@ async def aupdate( mentions: list[str] | None = None, metadata: dict[str, Any] | None = None, tiktok_settings: dict[str, Any] 
| None = None, - ) -> dict[str, Any]: + ) -> PostUpdateResponse: """Update a post asynchronously.""" payload = self._build_payload( content=content, @@ -384,12 +389,15 @@ async def aupdate( metadata=metadata, tiktok_settings=tiktok_settings, ) - return await self._client._aput(self._path(post_id), data=payload) + data = await self._client._aput(self._path(post_id), data=payload) + return PostUpdateResponse.model_validate(data) - async def adelete(self, post_id: str) -> dict[str, Any]: + async def adelete(self, post_id: str) -> PostDeleteResponse: """Delete a post asynchronously.""" - return await self._client._adelete(self._path(post_id)) + data = await self._client._adelete(self._path(post_id)) + return PostDeleteResponse.model_validate(data) - async def aretry(self, post_id: str) -> dict[str, Any]: + async def aretry(self, post_id: str) -> PostRetryResponse: """Retry a failed post asynchronously.""" - return await self._client._apost(self._path(post_id, "retry")) + data = await self._client._apost(self._path(post_id, "retry")) + return PostRetryResponse.model_validate(data) diff --git a/src/late/resources/profiles.py b/src/late/resources/profiles.py index aae5e7a..c7e04e3 100644 --- a/src/late/resources/profiles.py +++ b/src/late/resources/profiles.py @@ -4,12 +4,18 @@ from __future__ import annotations -from typing import Any +from late.models import ( + ProfileCreateResponse, + ProfileDeleteResponse, + ProfileGetResponse, + ProfilesListResponse, + ProfileUpdateResponse, +) from .base import BaseResource -class ProfilesResource(BaseResource[Any]): +class ProfilesResource(BaseResource[ProfilesListResponse]): """ Resource for managing profiles. @@ -34,16 +40,17 @@ class ProfilesResource(BaseResource[Any]): # Sync methods # ------------------------------------------------------------------------- - def list(self) -> dict[str, Any]: + def list(self) -> ProfilesListResponse: """ List all profiles. 
Returns: - Dict with 'profiles' key containing list of Profile objects + ProfilesListResponse with 'profiles' attribute """ - return self._client._get(self._BASE_PATH) + data = self._client._get(self._BASE_PATH) + return ProfilesListResponse.model_validate(data) - def get(self, profile_id: str) -> dict[str, Any]: + def get(self, profile_id: str) -> ProfileGetResponse: """ Get a profile by ID. @@ -51,9 +58,10 @@ def get(self, profile_id: str) -> dict[str, Any]: profile_id: The profile ID Returns: - Dict with 'profile' key containing the Profile object + ProfileGetResponse with 'profile' attribute """ - return self._client._get(self._path(profile_id)) + data = self._client._get(self._path(profile_id)) + return ProfileGetResponse.model_validate(data) def create( self, @@ -61,7 +69,7 @@ def create( name: str, description: str | None = None, color: str | None = None, - ) -> dict[str, Any]: + ) -> ProfileCreateResponse: """ Create a new profile. @@ -71,14 +79,15 @@ def create( color: Optional hex color (e.g., '#ffeda0') Returns: - Dict with 'message' and 'profile' keys + ProfileCreateResponse with 'message' and 'profile' attributes """ payload = self._build_payload( name=name, description=description, color=color, ) - return self._client._post(self._BASE_PATH, data=payload) + data = self._client._post(self._BASE_PATH, data=payload) + return ProfileCreateResponse.model_validate(data) def update( self, @@ -88,7 +97,7 @@ def update( description: str | None = None, color: str | None = None, is_default: bool | None = None, - ) -> dict[str, Any]: + ) -> ProfileUpdateResponse: """ Update a profile. 
@@ -100,7 +109,7 @@ def update( is_default: Set as default profile Returns: - Dict with 'message' and 'profile' keys + ProfileUpdateResponse with 'message' and 'profile' attributes """ payload = self._build_payload( name=name, @@ -108,9 +117,10 @@ def update( color=color, is_default=is_default, ) - return self._client._put(self._path(profile_id), data=payload) + data = self._client._put(self._path(profile_id), data=payload) + return ProfileUpdateResponse.model_validate(data) - def delete(self, profile_id: str) -> dict[str, Any]: + def delete(self, profile_id: str) -> ProfileDeleteResponse: """ Delete a profile. @@ -120,21 +130,24 @@ def delete(self, profile_id: str) -> dict[str, Any]: profile_id: ID of the profile to delete Returns: - Dict with 'message' key + ProfileDeleteResponse with 'message' attribute """ - return self._client._delete(self._path(profile_id)) + data = self._client._delete(self._path(profile_id)) + return ProfileDeleteResponse.model_validate(data) # ------------------------------------------------------------------------- # Async methods # ------------------------------------------------------------------------- - async def alist(self) -> dict[str, Any]: + async def alist(self) -> ProfilesListResponse: """List all profiles asynchronously.""" - return await self._client._aget(self._BASE_PATH) + data = await self._client._aget(self._BASE_PATH) + return ProfilesListResponse.model_validate(data) - async def aget(self, profile_id: str) -> dict[str, Any]: + async def aget(self, profile_id: str) -> ProfileGetResponse: """Get a profile by ID asynchronously.""" - return await self._client._aget(self._path(profile_id)) + data = await self._client._aget(self._path(profile_id)) + return ProfileGetResponse.model_validate(data) async def acreate( self, @@ -142,14 +155,15 @@ async def acreate( name: str, description: str | None = None, color: str | None = None, - ) -> dict[str, Any]: + ) -> ProfileCreateResponse: """Create a new profile asynchronously.""" 
payload = self._build_payload( name=name, description=description, color=color, ) - return await self._client._apost(self._BASE_PATH, data=payload) + data = await self._client._apost(self._BASE_PATH, data=payload) + return ProfileCreateResponse.model_validate(data) async def aupdate( self, @@ -159,7 +173,7 @@ async def aupdate( description: str | None = None, color: str | None = None, is_default: bool | None = None, - ) -> dict[str, Any]: + ) -> ProfileUpdateResponse: """Update a profile asynchronously.""" payload = self._build_payload( name=name, @@ -167,8 +181,10 @@ async def aupdate( color=color, is_default=is_default, ) - return await self._client._aput(self._path(profile_id), data=payload) + data = await self._client._aput(self._path(profile_id), data=payload) + return ProfileUpdateResponse.model_validate(data) - async def adelete(self, profile_id: str) -> dict[str, Any]: + async def adelete(self, profile_id: str) -> ProfileDeleteResponse: """Delete a profile asynchronously.""" - return await self._client._adelete(self._path(profile_id)) + data = await self._client._adelete(self._path(profile_id)) + return ProfileDeleteResponse.model_validate(data) diff --git a/src/late/resources/queue.py b/src/late/resources/queue.py index 15e7872..bbcc3e0 100644 --- a/src/late/resources/queue.py +++ b/src/late/resources/queue.py @@ -4,14 +4,20 @@ from __future__ import annotations -from typing import Any, Literal +from typing import Any -from .base import BaseResource +from late.models import ( + QueueDeleteResponse, + QueueNextSlotResponse, + QueuePreviewResponse, + QueueSlotsResponse, + QueueUpdateResponse, +) -DayOfWeek = Literal[0, 1, 2, 3, 4, 5, 6] # 0=Sunday, 6=Saturday +from .base import BaseResource -class QueueResource(BaseResource[Any]): +class QueueResource(BaseResource[QueueSlotsResponse]): """ Resource for managing the posting queue. 
@@ -39,7 +45,7 @@ class QueueResource(BaseResource[Any]): # Sync methods # ------------------------------------------------------------------------- - def get_slots(self, *, profile_id: str | None = None) -> dict[str, Any]: + def get_slots(self, *, profile_id: str | None = None) -> QueueSlotsResponse: """ Get queue slots for a profile. @@ -47,10 +53,11 @@ def get_slots(self, *, profile_id: str | None = None) -> dict[str, Any]: profile_id: Optional profile ID to filter Returns: - Dict with queue schedule information + QueueSlotsResponse with queue schedule information """ params = self._build_params(profile_id=profile_id) - return self._client._get(self._path("slots"), params=params or None) + data = self._client._get(self._path("slots"), params=params or None) + return QueueSlotsResponse.model_validate(data) def update_slots( self, @@ -59,7 +66,7 @@ def update_slots( timezone: str, slots: list[dict[str, Any]], active: bool = True, - ) -> dict[str, Any]: + ) -> QueueUpdateResponse: """ Update queue slots for a profile. @@ -70,7 +77,7 @@ def update_slots( active: Whether the queue is active Returns: - Dict with updated queue schedule + QueueUpdateResponse with updated queue schedule """ payload = self._build_payload( profile_id=profile_id, @@ -78,9 +85,10 @@ def update_slots( slots=slots, active=active, ) - return self._client._put(self._path("slots"), data=payload) + data = self._client._put(self._path("slots"), data=payload) + return QueueUpdateResponse.model_validate(data) - def delete_slots(self, *, profile_id: str) -> dict[str, Any]: + def delete_slots(self, *, profile_id: str) -> QueueDeleteResponse: """ Delete all queue slots for a profile. 
@@ -88,12 +96,13 @@ def delete_slots(self, *, profile_id: str) -> dict[str, Any]: profile_id: Profile ID to clear slots for Returns: - Dict with 'message' key + QueueDeleteResponse with 'message' attribute """ params = self._build_params(profile_id=profile_id) - return self._client._delete(self._path("slots"), params=params) + data = self._client._delete(self._path("slots"), params=params) + return QueueDeleteResponse.model_validate(data) - def preview(self, *, profile_id: str | None = None) -> dict[str, Any]: + def preview(self, *, profile_id: str | None = None) -> QueuePreviewResponse: """ Preview the next scheduled slot times. @@ -101,12 +110,13 @@ def preview(self, *, profile_id: str | None = None) -> dict[str, Any]: profile_id: Optional profile ID to filter Returns: - Dict with preview of next scheduled times + QueuePreviewResponse with preview of next scheduled times """ params = self._build_params(profile_id=profile_id) - return self._client._get(self._path("preview"), params=params or None) + data = self._client._get(self._path("preview"), params=params or None) + return QueuePreviewResponse.model_validate(data) - def next_slot(self, *, profile_id: str | None = None) -> dict[str, Any]: + def next_slot(self, *, profile_id: str | None = None) -> QueueNextSlotResponse: """ Get the next available queue slot. 
@@ -114,19 +124,21 @@ def next_slot(self, *, profile_id: str | None = None) -> dict[str, Any]: profile_id: Optional profile ID to filter Returns: - Dict with next available slot information + QueueNextSlotResponse with next available slot information """ params = self._build_params(profile_id=profile_id) - return self._client._get(self._path("next-slot"), params=params or None) + data = self._client._get(self._path("next-slot"), params=params or None) + return QueueNextSlotResponse.model_validate(data) # ------------------------------------------------------------------------- # Async methods # ------------------------------------------------------------------------- - async def aget_slots(self, *, profile_id: str | None = None) -> dict[str, Any]: + async def aget_slots(self, *, profile_id: str | None = None) -> QueueSlotsResponse: """Get queue slots asynchronously.""" params = self._build_params(profile_id=profile_id) - return await self._client._aget(self._path("slots"), params=params or None) + data = await self._client._aget(self._path("slots"), params=params or None) + return QueueSlotsResponse.model_validate(data) async def aupdate_slots( self, @@ -135,7 +147,7 @@ async def aupdate_slots( timezone: str, slots: list[dict[str, Any]], active: bool = True, - ) -> dict[str, Any]: + ) -> QueueUpdateResponse: """Update queue slots asynchronously.""" payload = self._build_payload( profile_id=profile_id, @@ -143,19 +155,23 @@ async def aupdate_slots( slots=slots, active=active, ) - return await self._client._aput(self._path("slots"), data=payload) + data = await self._client._aput(self._path("slots"), data=payload) + return QueueUpdateResponse.model_validate(data) - async def adelete_slots(self, *, profile_id: str) -> dict[str, Any]: + async def adelete_slots(self, *, profile_id: str) -> QueueDeleteResponse: """Delete queue slots asynchronously.""" params = self._build_params(profile_id=profile_id) - return await self._client._adelete(self._path("slots"), 
params=params) + data = await self._client._adelete(self._path("slots"), params=params) + return QueueDeleteResponse.model_validate(data) - async def apreview(self, *, profile_id: str | None = None) -> dict[str, Any]: + async def apreview(self, *, profile_id: str | None = None) -> QueuePreviewResponse: """Preview next scheduled slots asynchronously.""" params = self._build_params(profile_id=profile_id) - return await self._client._aget(self._path("preview"), params=params or None) + data = await self._client._aget(self._path("preview"), params=params or None) + return QueuePreviewResponse.model_validate(data) - async def anext_slot(self, *, profile_id: str | None = None) -> dict[str, Any]: + async def anext_slot(self, *, profile_id: str | None = None) -> QueueNextSlotResponse: """Get next available slot asynchronously.""" params = self._build_params(profile_id=profile_id) - return await self._client._aget(self._path("next-slot"), params=params or None) + data = await self._client._aget(self._path("next-slot"), params=params or None) + return QueueNextSlotResponse.model_validate(data) diff --git a/src/late/resources/tools.py b/src/late/resources/tools.py index 165ded3..260cd26 100644 --- a/src/late/resources/tools.py +++ b/src/late/resources/tools.py @@ -4,14 +4,22 @@ from __future__ import annotations -from typing import Any, Literal +from typing import TYPE_CHECKING, Any + +from late.models import ( + CaptionResponse, + DownloadResponse, + HashtagCheckResponse, + TranscriptResponse, +) from .base import BaseResource -Tone = Literal["professional", "casual", "humorous", "inspirational", "informative"] +if TYPE_CHECKING: + from late.enums import CaptionTone -class ToolsResource(BaseResource[Any]): +class ToolsResource(BaseResource[DownloadResponse]): """ Resource for media download and utility tools. 
@@ -21,13 +29,14 @@ class ToolsResource(BaseResource[Any]): - Unlimited: unlimited Example: + >>> from late import CaptionTone >>> client = Late(api_key="...") >>> # Download YouTube video >>> result = client.tools.youtube_download("https://youtube.com/watch?v=...") >>> # Generate AI caption >>> caption = client.tools.generate_caption( ... image_url="https://example.com/image.jpg", - ... tone="professional", + ... tone=CaptionTone.PROFESSIONAL, ... ) """ @@ -42,7 +51,7 @@ def youtube_download( url: str, *, format_id: str | None = None, - ) -> dict[str, Any]: + ) -> DownloadResponse: """ Download YouTube video or audio. @@ -51,17 +60,18 @@ def youtube_download( format_id: Optional format ID for specific quality Returns: - Dict with download information + DownloadResponse with download information """ params = self._build_params(url=url, format_id=format_id) - return self._client._get(self._path("youtube", "download"), params=params) + data = self._client._get(self._path("youtube", "download"), params=params) + return DownloadResponse.model_validate(data) def youtube_transcript( self, url: str, *, lang: str | None = None, - ) -> dict[str, Any]: + ) -> TranscriptResponse: """ Get YouTube video transcript. @@ -70,16 +80,17 @@ def youtube_transcript( lang: Optional language code for transcript Returns: - Dict with transcript data + TranscriptResponse with transcript data """ params = self._build_params(url=url, lang=lang) - return self._client._get(self._path("youtube", "transcript"), params=params) + data = self._client._get(self._path("youtube", "transcript"), params=params) + return TranscriptResponse.model_validate(data) # ------------------------------------------------------------------------- # Instagram # ------------------------------------------------------------------------- - def instagram_download(self, url: str) -> dict[str, Any]: + def instagram_download(self, url: str) -> DownloadResponse: """ Download Instagram reel or post. 
@@ -87,11 +98,12 @@ def instagram_download(self, url: str) -> dict[str, Any]: url: Instagram post/reel URL Returns: - Dict with download information + DownloadResponse with download information """ - return self._client._get(self._path("instagram", "download"), params={"url": url}) + data = self._client._get(self._path("instagram", "download"), params={"url": url}) + return DownloadResponse.model_validate(data) - def instagram_hashtag_check(self, hashtags: list[str]) -> dict[str, Any]: + def instagram_hashtag_check(self, hashtags: list[str]) -> HashtagCheckResponse: """ Check Instagram hashtags for bans. @@ -99,12 +111,13 @@ def instagram_hashtag_check(self, hashtags: list[str]) -> dict[str, Any]: hashtags: List of hashtags to check Returns: - Dict with hashtag status information + HashtagCheckResponse with hashtag status information """ - return self._client._post( + data = self._client._post( self._path("instagram", "hashtag-checker"), data={"hashtags": hashtags}, ) + return HashtagCheckResponse.model_validate(data) # ------------------------------------------------------------------------- # TikTok @@ -115,7 +128,7 @@ def tiktok_download( url: str, *, no_watermark: bool = True, - ) -> dict[str, Any]: + ) -> DownloadResponse: """ Download TikTok video. 
@@ -124,16 +137,17 @@ def tiktok_download( no_watermark: If True, download without watermark Returns: - Dict with download information + DownloadResponse with download information """ params = {"url": url, "noWatermark": str(no_watermark).lower()} - return self._client._get(self._path("tiktok", "download"), params=params) + data = self._client._get(self._path("tiktok", "download"), params=params) + return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- # Twitter # ------------------------------------------------------------------------- - def twitter_download(self, url: str) -> dict[str, Any]: + def twitter_download(self, url: str) -> DownloadResponse: """ Download Twitter/X video. @@ -141,15 +155,16 @@ def twitter_download(self, url: str) -> dict[str, Any]: url: Twitter/X video URL Returns: - Dict with download information + DownloadResponse with download information """ - return self._client._get(self._path("twitter", "download"), params={"url": url}) + data = self._client._get(self._path("twitter", "download"), params={"url": url}) + return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- # Facebook # ------------------------------------------------------------------------- - def facebook_download(self, url: str) -> dict[str, Any]: + def facebook_download(self, url: str) -> DownloadResponse: """ Download Facebook video. 
@@ -157,15 +172,16 @@ def facebook_download(self, url: str) -> dict[str, Any]: url: Facebook video URL Returns: - Dict with download information + DownloadResponse with download information """ - return self._client._get(self._path("facebook", "download"), params={"url": url}) + data = self._client._get(self._path("facebook", "download"), params={"url": url}) + return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- # LinkedIn # ------------------------------------------------------------------------- - def linkedin_download(self, url: str) -> dict[str, Any]: + def linkedin_download(self, url: str) -> DownloadResponse: """ Download LinkedIn video. @@ -173,15 +189,16 @@ def linkedin_download(self, url: str) -> dict[str, Any]: url: LinkedIn video URL Returns: - Dict with download information + DownloadResponse with download information """ - return self._client._get(self._path("linkedin", "download"), params={"url": url}) + data = self._client._get(self._path("linkedin", "download"), params={"url": url}) + return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- # Bluesky # ------------------------------------------------------------------------- - def bluesky_download(self, url: str) -> dict[str, Any]: + def bluesky_download(self, url: str) -> DownloadResponse: """ Download Bluesky video. 
@@ -189,9 +206,10 @@ def bluesky_download(self, url: str) -> dict[str, Any]: url: Bluesky video URL Returns: - Dict with download information + DownloadResponse with download information """ - return self._client._get(self._path("bluesky", "download"), params={"url": url}) + data = self._client._get(self._path("bluesky", "download"), params={"url": url}) + return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- # AI Caption Generator @@ -202,8 +220,8 @@ def generate_caption( image_url: str, *, prompt: str | None = None, - tone: Tone | None = None, - ) -> dict[str, Any]: + tone: CaptionTone | str | None = None, + ) -> CaptionResponse: """ Generate AI captions for an image. @@ -213,14 +231,15 @@ def generate_caption( tone: Optional tone (professional, casual, humorous, etc.) Returns: - Dict with generated caption(s) + CaptionResponse with generated caption """ payload = self._build_payload( image_url=image_url, prompt=prompt, tone=tone, ) - return self._client._post(self._path("caption-generator"), data=payload) + data = self._client._post(self._path("caption-generator"), data=payload) + return CaptionResponse.model_validate(data) # ------------------------------------------------------------------------- # Async methods @@ -231,79 +250,89 @@ async def ayoutube_download( url: str, *, format_id: str | None = None, - ) -> dict[str, Any]: + ) -> DownloadResponse: """Download YouTube video asynchronously.""" params = self._build_params(url=url, format_id=format_id) - return await self._client._aget(self._path("youtube", "download"), params=params) + data = await self._client._aget(self._path("youtube", "download"), params=params) + return DownloadResponse.model_validate(data) async def ayoutube_transcript( self, url: str, *, lang: str | None = None, - ) -> dict[str, Any]: + ) -> TranscriptResponse: """Get YouTube transcript asynchronously.""" params = self._build_params(url=url, lang=lang) - return await 
self._client._aget(self._path("youtube", "transcript"), params=params) + data = await self._client._aget(self._path("youtube", "transcript"), params=params) + return TranscriptResponse.model_validate(data) - async def ainstagram_download(self, url: str) -> dict[str, Any]: + async def ainstagram_download(self, url: str) -> DownloadResponse: """Download Instagram content asynchronously.""" - return await self._client._aget( + data = await self._client._aget( self._path("instagram", "download"), params={"url": url} ) + return DownloadResponse.model_validate(data) - async def ainstagram_hashtag_check(self, hashtags: list[str]) -> dict[str, Any]: + async def ainstagram_hashtag_check(self, hashtags: list[str]) -> HashtagCheckResponse: """Check Instagram hashtags asynchronously.""" - return await self._client._apost( + data = await self._client._apost( self._path("instagram", "hashtag-checker"), data={"hashtags": hashtags}, ) + return HashtagCheckResponse.model_validate(data) async def atiktok_download( self, url: str, *, no_watermark: bool = True, - ) -> dict[str, Any]: + ) -> DownloadResponse: """Download TikTok video asynchronously.""" params = {"url": url, "noWatermark": str(no_watermark).lower()} - return await self._client._aget(self._path("tiktok", "download"), params=params) + data = await self._client._aget(self._path("tiktok", "download"), params=params) + return DownloadResponse.model_validate(data) - async def atwitter_download(self, url: str) -> dict[str, Any]: + async def atwitter_download(self, url: str) -> DownloadResponse: """Download Twitter video asynchronously.""" - return await self._client._aget( + data = await self._client._aget( self._path("twitter", "download"), params={"url": url} ) + return DownloadResponse.model_validate(data) - async def afacebook_download(self, url: str) -> dict[str, Any]: + async def afacebook_download(self, url: str) -> DownloadResponse: """Download Facebook video asynchronously.""" - return await self._client._aget( + data 
= await self._client._aget( self._path("facebook", "download"), params={"url": url} ) + return DownloadResponse.model_validate(data) - async def alinkedin_download(self, url: str) -> dict[str, Any]: + async def alinkedin_download(self, url: str) -> DownloadResponse: """Download LinkedIn video asynchronously.""" - return await self._client._aget( + data = await self._client._aget( self._path("linkedin", "download"), params={"url": url} ) + return DownloadResponse.model_validate(data) - async def abluesky_download(self, url: str) -> dict[str, Any]: + async def abluesky_download(self, url: str) -> DownloadResponse: """Download Bluesky video asynchronously.""" - return await self._client._aget( + data = await self._client._aget( self._path("bluesky", "download"), params={"url": url} ) + return DownloadResponse.model_validate(data) async def agenerate_caption( self, image_url: str, *, prompt: str | None = None, - tone: Tone | None = None, - ) -> dict[str, Any]: + tone: CaptionTone | str | None = None, + ) -> CaptionResponse: """Generate AI caption asynchronously.""" payload = self._build_payload( image_url=image_url, prompt=prompt, tone=tone, ) - return await self._client._apost(self._path("caption-generator"), data=payload) + data = await self._client._apost(self._path("caption-generator"), data=payload) + return CaptionResponse.model_validate(data) diff --git a/src/late/resources/users.py b/src/late/resources/users.py index 024f656..a3599dc 100644 --- a/src/late/resources/users.py +++ b/src/late/resources/users.py @@ -4,12 +4,12 @@ from __future__ import annotations -from typing import Any +from late.models import UserGetResponse, UsersListResponse from .base import BaseResource -class UsersResource(BaseResource[Any]): +class UsersResource(BaseResource[UsersListResponse]): """ Resource for managing team users. 
@@ -27,16 +27,17 @@ class UsersResource(BaseResource[Any]): # Sync methods # ------------------------------------------------------------------------- - def list(self) -> dict[str, Any]: + def list(self) -> UsersListResponse: """ List team users. Returns: - Dict with 'users' key containing list of User objects + UsersListResponse with 'users' attribute """ - return self._client._get(self._BASE_PATH) + data = self._client._get(self._BASE_PATH) + return UsersListResponse.model_validate(data) - def get(self, user_id: str) -> dict[str, Any]: + def get(self, user_id: str) -> UserGetResponse: """ Get a user by ID. @@ -44,18 +45,21 @@ def get(self, user_id: str) -> dict[str, Any]: user_id: The user ID Returns: - Dict with 'user' key containing the User object + UserGetResponse with 'user' attribute """ - return self._client._get(self._path(user_id)) + data = self._client._get(self._path(user_id)) + return UserGetResponse.model_validate(data) # ------------------------------------------------------------------------- # Async methods # ------------------------------------------------------------------------- - async def alist(self) -> dict[str, Any]: + async def alist(self) -> UsersListResponse: """List team users asynchronously.""" - return await self._client._aget(self._BASE_PATH) + data = await self._client._aget(self._BASE_PATH) + return UsersListResponse.model_validate(data) - async def aget(self, user_id: str) -> dict[str, Any]: + async def aget(self, user_id: str) -> UserGetResponse: """Get a user by ID asynchronously.""" - return await self._client._aget(self._path(user_id)) + data = await self._client._aget(self._path(user_id)) + return UserGetResponse.model_validate(data) diff --git a/src/late/upload/__init__.py b/src/late/upload/__init__.py new file mode 100644 index 0000000..2003f49 --- /dev/null +++ b/src/late/upload/__init__.py @@ -0,0 +1,74 @@ +""" +Upload module for Late SDK. 
+ +Provides flexible file upload strategies: +- DirectUploader: For small files (< 4MB) via API multipart +- VercelBlobUploader: For large files (up to 5GB) via Vercel Blob SDK +- SmartUploader: Automatic strategy selection based on file size + +Example (simple - small files): + >>> from late import Late + >>> client = Late(api_key="...") + >>> result = client.media.upload("small_image.jpg") + +Example (large files - requires Vercel Blob token): + >>> result = client.media.upload_large( + ... "large_video.mp4", + ... vercel_token="vercel_blob_rw_xxx" + ... ) + +Example (smart uploader with auto-selection): + >>> from late.upload import SmartUploader, UploadFile + >>> uploader = SmartUploader(client, vercel_token="vercel_blob_rw_xxx") + >>> result = uploader.upload(file) # Auto-selects strategy +""" + +from .config import ( + ALLOWED_CONTENT_TYPES, + ALLOWED_DOCUMENT_TYPES, + ALLOWED_IMAGE_TYPES, + ALLOWED_VIDEO_TYPES, + UploadConfig, + UploadEndpoints, + UploadLimits, + get_content_category, + is_content_type_allowed, +) +from .direct import DirectUploader +from .protocols import ( + FileTooLargeError, + UnsupportedContentTypeError, + UploadError, + UploadFile, + UploadProgress, + UploadResult, +) +from .smart import LargeFileError, SmartUploader +from .vercel import VercelBlobUploader + +__all__ = [ + # Main uploaders + "SmartUploader", + "DirectUploader", + "VercelBlobUploader", + # Data types + "UploadFile", + "UploadResult", + "UploadProgress", + # Configuration + "UploadConfig", + "UploadLimits", + "UploadEndpoints", + # Content type helpers + "ALLOWED_CONTENT_TYPES", + "ALLOWED_IMAGE_TYPES", + "ALLOWED_VIDEO_TYPES", + "ALLOWED_DOCUMENT_TYPES", + "is_content_type_allowed", + "get_content_category", + # Exceptions + "UploadError", + "FileTooLargeError", + "LargeFileError", + "UnsupportedContentTypeError", +] diff --git a/src/late/upload/config.py b/src/late/upload/config.py new file mode 100644 index 0000000..481400c --- /dev/null +++ 
b/src/late/upload/config.py @@ -0,0 +1,147 @@ +""" +Configuration for upload module. + +Centralized configuration with sensible defaults and easy customization. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field + +from late.enums import MediaType + + +@dataclass(frozen=True) +class UploadLimits: + """ + Size limits for different upload methods. + + All values are in bytes. Use the helper class methods for + convenient size specifications. + + Example: + >>> limits = UploadLimits.default() + >>> custom = UploadLimits(direct_max=UploadLimits.mb(10)) + """ + + direct_max: int = 4 * 1024 * 1024 # 4MB - server body limit + blob_max: int = 5 * 1024 * 1024 * 1024 # 5GB - Vercel Blob limit + multipart_threshold: int = 100 * 1024 * 1024 # 100MB - use chunked above this + chunk_size: int = 5 * 1024 * 1024 # 5MB - chunk size for multipart + + @classmethod + def kb(cls, n: int) -> int: + """Convert kilobytes to bytes.""" + return n * 1024 + + @classmethod + def mb(cls, n: int) -> int: + """Convert megabytes to bytes.""" + return n * 1024 * 1024 + + @classmethod + def gb(cls, n: int) -> int: + """Convert gigabytes to bytes.""" + return n * 1024 * 1024 * 1024 + + @classmethod + def default(cls) -> UploadLimits: + """Create default limits configuration.""" + return cls() + + +@dataclass(frozen=True) +class UploadEndpoints: + """ + API endpoints for upload operations. + + Example: + >>> endpoints = UploadEndpoints.default() + >>> custom = UploadEndpoints(media="/api/v2/media") + """ + + media: str = "/v1/media" + client_upload: str = "/v1/media" # Same endpoint, different flow + + @classmethod + def default(cls) -> UploadEndpoints: + """Create default endpoints configuration.""" + return cls() + + +@dataclass(frozen=True) +class UploadConfig: + """ + Complete upload configuration. + + Combines limits and endpoints into a single configuration object. + Immutable by design to prevent accidental modifications. 
+ + Example: + >>> config = UploadConfig.default() + >>> # Custom configuration + >>> config = UploadConfig( + ... limits=UploadLimits(direct_max=UploadLimits.mb(8)), + ... endpoints=UploadEndpoints(media="/api/v2/media"), + ... ) + """ + + limits: UploadLimits = field(default_factory=UploadLimits.default) + endpoints: UploadEndpoints = field(default_factory=UploadEndpoints.default) + auto_select_strategy: bool = True # Automatically choose uploader based on size + verify_upload: bool = True # Verify upload completed successfully + + @classmethod + def default(cls) -> UploadConfig: + """Create default configuration.""" + return cls() + + +# Supported content types for uploads +ALLOWED_IMAGE_TYPES: frozenset[str] = frozenset({ + "image/jpeg", + "image/jpg", + "image/png", + "image/webp", + "image/gif", +}) + +ALLOWED_VIDEO_TYPES: frozenset[str] = frozenset({ + "video/mp4", + "video/mpeg", + "video/quicktime", + "video/avi", + "video/x-msvideo", + "video/webm", + "video/x-m4v", +}) + +ALLOWED_DOCUMENT_TYPES: frozenset[str] = frozenset({ + "application/pdf", +}) + +ALLOWED_CONTENT_TYPES: frozenset[str] = ( + ALLOWED_IMAGE_TYPES | ALLOWED_VIDEO_TYPES | ALLOWED_DOCUMENT_TYPES +) + + +def is_content_type_allowed(content_type: str) -> bool: + """Check if a content type is allowed for upload.""" + return content_type.lower() in ALLOWED_CONTENT_TYPES + + +def get_content_category(content_type: str) -> MediaType | None: + """ + Get the category of a content type. 
+ + Returns: + MediaType.IMAGE, MediaType.VIDEO, MediaType.DOCUMENT, or None if not allowed + """ + content_type = content_type.lower() + if content_type in ALLOWED_IMAGE_TYPES: + return MediaType.IMAGE + if content_type in ALLOWED_VIDEO_TYPES: + return MediaType.VIDEO + if content_type in ALLOWED_DOCUMENT_TYPES: + return MediaType.DOCUMENT + return None diff --git a/src/late/upload/direct.py b/src/late/upload/direct.py new file mode 100644 index 0000000..af8a9d6 --- /dev/null +++ b/src/late/upload/direct.py @@ -0,0 +1,214 @@ +""" +Direct upload strategy for small files. + +This uploader sends files directly to the API endpoint via multipart/form-data. +Suitable for small files that don't exceed server body size limits (~4MB). +""" + +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from .config import UploadConfig +from .protocols import ( + FileTooLargeError, + UploadError, + UploadFile, + UploadResult, +) + +if TYPE_CHECKING: + from late.client.base import BaseClient + + +class DirectUploader: + """ + Direct multipart upload for small files. + + Uploads files directly to the API endpoint using multipart/form-data. + Best for files under ~4MB to avoid 413 (Request Entity Too Large) errors. + + Example: + >>> uploader = DirectUploader(client) + >>> result = uploader.upload(UploadFile( + ... filename="image.jpg", + ... content=image_bytes, + ... mime_type="image/jpeg" + ... )) + >>> print(result.url) + """ + + def __init__( + self, + client: BaseClient, + config: UploadConfig | None = None, + ) -> None: + """ + Initialize the direct uploader. 
+ + Args: + client: The HTTP client for making requests + config: Upload configuration (uses defaults if not provided) + """ + self._client = client + self._config = config or UploadConfig.default() + + @property + def max_size(self) -> int: + """Maximum supported file size in bytes.""" + return self._config.limits.direct_max + + @property + def endpoint(self) -> str: + """API endpoint for uploads.""" + return self._config.endpoints.media + + def supports_size(self, size: int) -> bool: + """Check if this uploader supports files of the given size.""" + return size <= self.max_size + + def _read_content(self, file: UploadFile) -> bytes: + """ + Read content bytes from an UploadFile. + + Handles bytes, Path, and file handle content types. + """ + content = file.content + if isinstance(content, bytes): + return content + if isinstance(content, Path): + return content.read_bytes() + # File handle - read and return to start if seekable + data = content.read() + if hasattr(content, "seek"): + content.seek(0) + return data + + def _build_multipart_files( + self, files: list[UploadFile] + ) -> list[tuple[str, tuple[str, bytes, str]]]: + """Build multipart files list for httpx.""" + return [ + ("files", (f.filename, self._read_content(f), f.mime_type)) + for f in files + ] + + def _parse_response(self, response: dict[str, Any]) -> list[UploadResult]: + """Parse API response into UploadResult objects.""" + files_data = response.get("files", []) + return [ + UploadResult( + url=f["url"], + pathname=f.get("pathname", ""), + content_type=f.get("contentType", ""), + size=f.get("size", 0), + download_url=f.get("downloadUrl"), + ) + for f in files_data + ] + + def _validate_file(self, file: UploadFile) -> None: + """Validate file before upload.""" + if file.size and file.size > self.max_size: + raise FileTooLargeError(file.size, self.max_size) + + # ------------------------------------------------------------------------- + # Sync API + # 
------------------------------------------------------------------------- + + def upload(self, file: UploadFile) -> UploadResult: + """ + Upload a single file. + + Args: + file: The file to upload + + Returns: + UploadResult with the uploaded file information + + Raises: + FileTooLargeError: If file exceeds max_size + UploadError: On upload failure + """ + self._validate_file(file) + + try: + multipart = self._build_multipart_files([file]) + response = self._client._post(self.endpoint, files=multipart) + results = self._parse_response(response) + + if not results: + raise UploadError("Server returned no files in response") + + return results[0] + + except (FileTooLargeError, UploadError): + raise + except Exception as e: + raise UploadError(str(e), cause=e) from e + + def upload_multiple(self, files: list[UploadFile]) -> list[UploadResult]: + """ + Upload multiple files in a single request. + + Args: + files: List of files to upload + + Returns: + List of UploadResults for each file + + Raises: + FileTooLargeError: If any file exceeds max_size + UploadError: On upload failure + """ + for file in files: + self._validate_file(file) + + try: + multipart = self._build_multipart_files(files) + response = self._client._post(self.endpoint, files=multipart) + return self._parse_response(response) + + except (FileTooLargeError, UploadError): + raise + except Exception as e: + raise UploadError(str(e), cause=e) from e + + # ------------------------------------------------------------------------- + # Async API + # ------------------------------------------------------------------------- + + async def aupload(self, file: UploadFile) -> UploadResult: + """Upload a single file asynchronously.""" + self._validate_file(file) + + try: + multipart = self._build_multipart_files([file]) + response = await self._client._apost(self.endpoint, files=multipart) + results = self._parse_response(response) + + if not results: + raise UploadError("Server returned no files in response") + + 
return results[0] + + except (FileTooLargeError, UploadError): + raise + except Exception as e: + raise UploadError(str(e), cause=e) from e + + async def aupload_multiple(self, files: list[UploadFile]) -> list[UploadResult]: + """Upload multiple files asynchronously.""" + for file in files: + self._validate_file(file) + + try: + multipart = self._build_multipart_files(files) + response = await self._client._apost(self.endpoint, files=multipart) + return self._parse_response(response) + + except (FileTooLargeError, UploadError): + raise + except Exception as e: + raise UploadError(str(e), cause=e) from e diff --git a/src/late/upload/protocols.py b/src/late/upload/protocols.py new file mode 100644 index 0000000..8f777c4 --- /dev/null +++ b/src/late/upload/protocols.py @@ -0,0 +1,275 @@ +""" +Upload protocols and interfaces for extensible file upload strategies. + +This module defines the contracts that all uploaders must follow, +enabling easy extension with new upload strategies (S3, GCS, etc.) +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from pathlib import Path +from typing import TYPE_CHECKING, BinaryIO, Protocol, runtime_checkable + +if TYPE_CHECKING: + from collections.abc import AsyncIterator, Iterator + + +@dataclass +class UploadFile: + """ + Represents a file to be uploaded. 
+ + Attributes: + filename: Name of the file (used in Content-Disposition) + content: File content as bytes, file handle, or path + mime_type: MIME type of the file + size: Size in bytes (optional, calculated if not provided) + """ + + filename: str + content: bytes | BinaryIO | Path + mime_type: str + size: int | None = None + + def __post_init__(self) -> None: + """Calculate size if not provided.""" + if self.size is None: + if isinstance(self.content, bytes): + self.size = len(self.content) + elif isinstance(self.content, Path): + self.size = self.content.stat().st_size + + +@dataclass +class UploadResult: + """ + Result of a successful file upload. + + Attributes: + url: Public URL of the uploaded file + pathname: Path/key in the storage system + content_type: MIME type of the uploaded file + size: Size in bytes + download_url: Direct download URL (may differ from url) + metadata: Additional metadata from the storage provider + """ + + url: str + pathname: str + content_type: str + size: int + download_url: str | None = None + metadata: dict[str, str] = field(default_factory=dict) + + +@dataclass +class UploadProgress: + """ + Progress information for chunked/multipart uploads. + + Attributes: + uploaded_bytes: Bytes uploaded so far + total_bytes: Total bytes to upload + part_number: Current part number (for multipart) + total_parts: Total number of parts + """ + + uploaded_bytes: int + total_bytes: int + part_number: int | None = None + total_parts: int | None = None + + @property + def percentage(self) -> float: + """Calculate upload percentage.""" + if self.total_bytes == 0: + return 100.0 + return (self.uploaded_bytes / self.total_bytes) * 100 + + +@runtime_checkable +class Uploader(Protocol): + """ + Protocol for synchronous file uploaders. + + Implement this protocol to create custom upload strategies. + """ + + def upload(self, file: UploadFile) -> UploadResult: + """ + Upload a single file. 
+ + Args: + file: The file to upload + + Returns: + UploadResult with the uploaded file information + """ + ... + + def upload_multiple(self, files: list[UploadFile]) -> list[UploadResult]: + """ + Upload multiple files. + + Args: + files: List of files to upload + + Returns: + List of UploadResults for each file + """ + ... + + def supports_size(self, size: int) -> bool: + """ + Check if this uploader supports files of the given size. + + Args: + size: File size in bytes + + Returns: + True if this uploader can handle files of this size + """ + ... + + +@runtime_checkable +class AsyncUploader(Protocol): + """ + Protocol for asynchronous file uploaders. + + Implement this protocol for async upload strategies. + """ + + async def aupload(self, file: UploadFile) -> UploadResult: + """Upload a single file asynchronously.""" + ... + + async def aupload_multiple(self, files: list[UploadFile]) -> list[UploadResult]: + """Upload multiple files asynchronously.""" + ... + + def supports_size(self, size: int) -> bool: + """Check if this uploader supports files of the given size.""" + ... + + +@runtime_checkable +class ProgressUploader(Protocol): + """ + Protocol for uploaders that support progress tracking. + + Extend Uploader with progress callback support. + """ + + def upload_with_progress( + self, + file: UploadFile, + on_progress: callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """ + Upload a file with progress tracking. + + Args: + file: The file to upload + on_progress: Callback called with progress updates + + Returns: + UploadResult with the uploaded file information + """ + ... + + +@runtime_checkable +class ChunkedUploader(Protocol): + """ + Protocol for uploaders that support chunked/resumable uploads. + + Useful for very large files where upload might be interrupted. + """ + + def start_chunked_upload(self, file: UploadFile) -> str: + """ + Initialize a chunked upload session. 
+ + Args: + file: The file to upload (metadata only at this stage) + + Returns: + Upload session ID for continuing the upload + """ + ... + + def upload_chunk( + self, + session_id: str, + chunk: bytes, + part_number: int, + ) -> bool: + """ + Upload a single chunk. + + Args: + session_id: Upload session ID from start_chunked_upload + chunk: Chunk data + part_number: Part number (1-indexed) + + Returns: + True if chunk was uploaded successfully + """ + ... + + def complete_chunked_upload(self, session_id: str) -> UploadResult: + """ + Complete a chunked upload. + + Args: + session_id: Upload session ID + + Returns: + UploadResult with the final uploaded file information + """ + ... + + def abort_chunked_upload(self, session_id: str) -> bool: + """ + Abort a chunked upload and clean up. + + Args: + session_id: Upload session ID + + Returns: + True if abort was successful + """ + ... + + +class UploadError(Exception): + """Base exception for upload errors.""" + + def __init__(self, message: str, cause: Exception | None = None) -> None: + super().__init__(message) + self.cause = cause + + +class FileTooLargeError(UploadError): + """Raised when file exceeds maximum allowed size.""" + + def __init__(self, size: int, max_size: int) -> None: + super().__init__( + f"File size {size:,} bytes exceeds maximum {max_size:,} bytes" + ) + self.size = size + self.max_size = max_size + + +class UnsupportedContentTypeError(UploadError): + """Raised when file type is not supported.""" + + def __init__(self, content_type: str, supported: list[str]) -> None: + super().__init__( + f"Content type '{content_type}' not supported. " + f"Supported types: {', '.join(supported)}" + ) + self.content_type = content_type + self.supported = supported diff --git a/src/late/upload/smart.py b/src/late/upload/smart.py new file mode 100644 index 0000000..c10cc76 --- /dev/null +++ b/src/late/upload/smart.py @@ -0,0 +1,201 @@ +""" +Smart uploader with automatic strategy selection. 
+ +Chooses the best upload method based on file size. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Callable + +from .config import UploadConfig +from .direct import DirectUploader +from .protocols import ( + FileTooLargeError, + UploadError, + UploadFile, + UploadProgress, + UploadResult, +) + +if TYPE_CHECKING: + from late.client.base import BaseClient + + +class LargeFileError(UploadError): + """ + Raised when trying to upload a large file without Vercel Blob token. + + Provides clear guidance on how to upload large files. + """ + + def __init__(self, file_size: int, max_direct_size: int) -> None: + message = ( + f"File size ({file_size:,} bytes) exceeds direct upload limit " + f"({max_direct_size:,} bytes / {max_direct_size // (1024*1024)}MB).\n\n" + "For files larger than 4MB, provide a Vercel Blob token:\n\n" + " from late.upload import SmartUploader\n\n" + " uploader = SmartUploader(client, vercel_token='vercel_blob_rw_xxx')\n" + " result = uploader.upload(file)\n\n" + "Get your token at: https://vercel.com/docs/storage/vercel-blob" + ) + super().__init__(message) + self.file_size = file_size + self.max_direct_size = max_direct_size + + +class SmartUploader: + """ + Intelligent uploader that selects the best strategy automatically. + + - Files < 4MB: Uses direct multipart upload to API + - Files >= 4MB: Uses Vercel Blob SDK (requires token) + + Example (small files only): + >>> uploader = SmartUploader(client) + >>> result = uploader.upload(small_file) + + Example (with Vercel token for large files): + >>> uploader = SmartUploader(client, vercel_token="vercel_blob_rw_xxx") + >>> result = uploader.upload(large_file) # Auto-selects strategy + """ + + def __init__( + self, + client: BaseClient, + *, + vercel_token: str | None = None, + config: UploadConfig | None = None, + ) -> None: + """ + Initialize the smart uploader. 
+ + Args: + client: The Late API client + vercel_token: Optional Vercel Blob token for large files + config: Upload configuration + """ + self._client = client + self._config = config or UploadConfig.default() + self._direct = DirectUploader(client, self._config) + self._vercel_token = vercel_token + self._vercel_uploader = None + + # Initialize Vercel uploader if token provided + if vercel_token: + from .vercel import VercelBlobUploader + self._vercel_uploader = VercelBlobUploader(vercel_token, self._config) + + @property + def direct_max_size(self) -> int: + """Maximum size for direct upload (4MB).""" + return self._config.limits.direct_max + + @property + def blob_max_size(self) -> int: + """Maximum size for Vercel Blob upload (5GB).""" + return self._config.limits.blob_max + + @property + def has_vercel_token(self) -> bool: + """Check if Vercel Blob token is configured.""" + return self._vercel_uploader is not None + + def _select_strategy(self, file: UploadFile) -> str: + """ + Select the appropriate upload strategy. + + Returns: + "direct" or "vercel" + + Raises: + LargeFileError: If file > 4MB and no Vercel token + FileTooLargeError: If file exceeds all limits + """ + size = file.size or 0 + + # Check absolute maximum + if size > self.blob_max_size: + raise FileTooLargeError(size, self.blob_max_size) + + # Small file - use direct upload + if size <= self.direct_max_size: + return "direct" + + # Large file - need Vercel token + if not self.has_vercel_token: + raise LargeFileError(size, self.direct_max_size) + + return "vercel" + + # ------------------------------------------------------------------------- + # Sync API + # ------------------------------------------------------------------------- + + def upload( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """ + Upload a file using the best available strategy. 
+ + Args: + file: File to upload + on_progress: Progress callback (Vercel only) + + Returns: + UploadResult with file URL + + Raises: + LargeFileError: If file > 4MB and no Vercel token + FileTooLargeError: If file > 5GB + UploadError: On upload failure + """ + strategy = self._select_strategy(file) + + if strategy == "direct": + return self._direct.upload(file) + else: + return self._vercel_uploader.upload(file, on_progress) # type: ignore + + def upload_multiple( + self, + files: list[UploadFile], + on_progress: Callable[[int, UploadProgress], None] | None = None, + ) -> list[UploadResult]: + """Upload multiple files.""" + results = [] + for idx, file in enumerate(files): + cb = (lambda p, i=idx: on_progress(i, p)) if on_progress else None + results.append(self.upload(file, cb)) + return results + + # ------------------------------------------------------------------------- + # Async API + # ------------------------------------------------------------------------- + + async def aupload( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """Upload a file (async).""" + strategy = self._select_strategy(file) + + if strategy == "direct": + return await self._direct.aupload(file) + else: + return await self._vercel_uploader.aupload(file, on_progress) # type: ignore + + async def aupload_multiple( + self, + files: list[UploadFile], + on_progress: Callable[[int, UploadProgress], None] | None = None, + ) -> list[UploadResult]: + """Upload multiple files (async).""" + results = [] + for idx, file in enumerate(files): + cb = (lambda p, i=idx: on_progress(i, p)) if on_progress else None + results.append(await self.aupload(file, cb)) + return results diff --git a/src/late/upload/utils.py b/src/late/upload/utils.py new file mode 100644 index 0000000..04710ce --- /dev/null +++ b/src/late/upload/utils.py @@ -0,0 +1,66 @@ +""" +Utility functions for upload module. 
+""" + +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from .protocols import UploadFile + + +def read_file_content(file: UploadFile) -> bytes: + """ + Read content bytes from an UploadFile. + + Handles bytes, Path, and file handle content types. + """ + content = file.content + + if isinstance(content, bytes): + return content + + if isinstance(content, Path): + return content.read_bytes() + + # File handle - read and optionally reset position + data = content.read() + if hasattr(content, "seek"): + content.seek(0) + return data + + +def iter_file_chunks(file: UploadFile, chunk_size: int): + """ + Iterate over file content in chunks. + + Yields: + Tuple of (part_number, chunk_bytes) starting from 1 + """ + content = file.content + part_number = 1 + + if isinstance(content, bytes): + for i in range(0, len(content), chunk_size): + yield part_number, content[i : i + chunk_size] + part_number += 1 + + elif isinstance(content, Path): + with content.open("rb") as f: + while True: + chunk = f.read(chunk_size) + if not chunk: + break + yield part_number, chunk + part_number += 1 + + else: + # File handle + while True: + chunk = content.read(chunk_size) + if not chunk: + break + yield part_number, chunk + part_number += 1 diff --git a/src/late/upload/vercel/__init__.py b/src/late/upload/vercel/__init__.py new file mode 100644 index 0000000..49e1afa --- /dev/null +++ b/src/late/upload/vercel/__init__.py @@ -0,0 +1,14 @@ +""" +Vercel Blob upload module. + +Uses the official Vercel SDK for uploading large files (up to 5GB). +Requires a Vercel Blob read-write token. 
+""" + +from .client import VercelBlobClient +from .uploader import VercelBlobUploader + +__all__ = [ + "VercelBlobClient", + "VercelBlobUploader", +] diff --git a/src/late/upload/vercel/client.py b/src/late/upload/vercel/client.py new file mode 100644 index 0000000..f568357 --- /dev/null +++ b/src/late/upload/vercel/client.py @@ -0,0 +1,149 @@ +""" +Vercel Blob client using official Vercel SDK. + +Wraps the official `vercel.blob` SDK for uploading large files. +""" + +from __future__ import annotations + +from typing import Callable + +from late.upload.protocols import UploadError, UploadFile, UploadProgress, UploadResult +from late.upload.utils import read_file_content + + +class VercelBlobClient: + """ + Client for Vercel Blob using the official SDK. + + Requires a Vercel Blob read-write token (BLOB_READ_WRITE_TOKEN). + Get one from: https://vercel.com/docs/storage/vercel-blob + + Example: + >>> client = VercelBlobClient(token="vercel_blob_rw_xxx") + >>> result = client.upload(file) + """ + + def __init__(self, token: str) -> None: + """ + Initialize the Vercel Blob client. + + Args: + token: Vercel Blob read-write token + """ + if not token: + raise ValueError( + "Vercel Blob token required. " + "Get one at: https://vercel.com/docs/storage/vercel-blob" + ) + self._token = token + + def upload( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """ + Upload a file to Vercel Blob. 
+ + Args: + file: File to upload + on_progress: Optional progress callback + + Returns: + UploadResult with blob URL + """ + from vercel.blob import BlobClient, UploadProgressEvent + + content = read_file_content(file) + total_size = len(content) + + # Create progress wrapper + progress_cb = None + if on_progress: + def progress_cb(event: UploadProgressEvent) -> None: + on_progress(UploadProgress( + uploaded_bytes=event.loaded, + total_bytes=event.total, + part_number=None, + total_parts=None, + )) + + try: + client = BlobClient(token=self._token) + result = client.put( + file.filename, + content, + access="public", + content_type=file.mime_type, + add_random_suffix=True, + multipart=total_size > 100 * 1024 * 1024, # Use multipart for > 100MB + on_upload_progress=progress_cb, + ) + + return UploadResult( + url=result.url, + pathname=result.pathname, + content_type=result.content_type, + size=total_size, + download_url=result.download_url, + metadata={"provider": "vercel-blob"}, + ) + + except Exception as e: + raise UploadError(f"Vercel Blob upload failed: {e}") from e + + async def aupload( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """ + Upload a file to Vercel Blob asynchronously. 
"""
Vercel Blob uploader - main entry point.

Uses the official Vercel SDK to upload files to Vercel Blob storage.
"""

from __future__ import annotations

from typing import TYPE_CHECKING

from late.upload.config import UploadConfig
from late.upload.protocols import (
    FileTooLargeError,
    UploadError,
    UploadFile,
    UploadProgress,
    UploadResult,
)

from .client import VercelBlobClient

if TYPE_CHECKING:
    # Import Callable from collections.abc (typing.Callable is a deprecated
    # alias, ruff UP035) and only for type checking, matching the convention
    # used elsewhere in this package (see ai/protocols.py).
    from collections.abc import Callable


class VercelBlobUploader:
    """
    Upload files to Vercel Blob storage.

    Uses the official Vercel SDK. Supports files up to 5GB.

    Requires a Vercel Blob read-write token (BLOB_READ_WRITE_TOKEN).
    Get one from: https://vercel.com/docs/storage/vercel-blob

    Example:
        >>> uploader = VercelBlobUploader(token="vercel_blob_rw_xxx")
        >>> result = uploader.upload(UploadFile(
        ...     filename="video.mp4",
        ...     content=Path("large_video.mp4"),
        ...     mime_type="video/mp4",
        ...     size=500_000_000
        ... ))
        >>> print(result.url)
    """

    def __init__(
        self,
        token: str,
        config: UploadConfig | None = None,
    ) -> None:
        """
        Initialize the Vercel Blob uploader.

        Args:
            token: Vercel Blob read-write token
            config: General upload configuration (falls back to
                UploadConfig.default() when omitted)
        """
        self._config = config or UploadConfig.default()
        self._client = VercelBlobClient(token)

    @property
    def max_size(self) -> int:
        """Maximum supported file size (5GB)."""
        return self._config.limits.blob_max

    def supports_size(self, size: int) -> bool:
        """Check if this uploader supports files of the given size."""
        return size <= self.max_size

    def _validate(self, file: UploadFile) -> None:
        """Validate file before upload.

        Raises:
            FileTooLargeError: If the declared size exceeds the blob limit.
                Files with an unknown (falsy) size are not rejected here.
        """
        if file.size and file.size > self.max_size:
            raise FileTooLargeError(file.size, self.max_size)

    # -------------------------------------------------------------------------
    # Sync API
    # -------------------------------------------------------------------------

    def upload(
        self,
        file: UploadFile,
        on_progress: Callable[[UploadProgress], None] | None = None,
    ) -> UploadResult:
        """
        Upload a file to Vercel Blob.

        Args:
            file: File to upload
            on_progress: Optional progress callback

        Returns:
            UploadResult with blob URL and metadata

        Raises:
            FileTooLargeError: If file exceeds 5GB
            UploadError: On upload failure
        """
        self._validate(file)

        try:
            return self._client.upload(file, on_progress)
        except (FileTooLargeError, UploadError):
            # Already our error types — propagate unchanged.
            raise
        except Exception as e:
            raise UploadError(str(e), cause=e) from e

    def upload_multiple(
        self,
        files: list[UploadFile],
        on_progress: Callable[[int, UploadProgress], None] | None = None,
    ) -> list[UploadResult]:
        """
        Upload multiple files sequentially.

        Args:
            files: List of files to upload
            on_progress: Callback with (file_index, progress)

        Returns:
            List of UploadResults, in input order
        """
        results = []
        for idx, file in enumerate(files):
            # Bind idx as a default arg so each lambda keeps its own index.
            cb = (lambda p, i=idx: on_progress(i, p)) if on_progress else None
            results.append(self.upload(file, cb))
        return results

    # -------------------------------------------------------------------------
    # Async API
    # -------------------------------------------------------------------------

    async def aupload(
        self,
        file: UploadFile,
        on_progress: Callable[[UploadProgress], None] | None = None,
    ) -> UploadResult:
        """Upload a file to Vercel Blob (async)."""
        self._validate(file)

        try:
            return await self._client.aupload(file, on_progress)
        except (FileTooLargeError, UploadError):
            raise
        except Exception as e:
            raise UploadError(str(e), cause=e) from e

    async def aupload_multiple(
        self,
        files: list[UploadFile],
        on_progress: Callable[[int, UploadProgress], None] | None = None,
    ) -> list[UploadResult]:
        """Upload multiple files sequentially (async)."""
        results = []
        for idx, file in enumerate(files):
            cb = (lambda p, i=idx: on_progress(i, p)) if on_progress else None
            results.append(await self.aupload(file, cb))
        return results
# ============================================================================
# RESOURCE METHOD PRESENCE TESTS
# ============================================================================


def _resource(api_key: str, name: str):
    """Return the named resource from a freshly built Late client."""
    from late import Late

    return getattr(Late(api_key=api_key), name)


def _assert_callables(resource, names: list[str]) -> None:
    """Assert every name in *names* exists on *resource* and is callable."""
    for name in names:
        assert hasattr(resource, name), f"missing method: {name}"
        assert callable(getattr(resource, name)), f"not callable: {name}"


def _upload_export(name: str):
    """Import late.upload and return one of its public exports (or None)."""
    import late.upload

    return getattr(late.upload, name, None)


class TestMediaResourceMethods:
    """Test MediaResource has all expected methods."""

    def test_media_has_upload_method(self, api_key: str):
        """Test media resource has upload method."""
        _assert_callables(_resource(api_key, "media"), ["upload"])

    def test_media_has_upload_multiple_method(self, api_key: str):
        """Test media resource has upload_multiple method."""
        _assert_callables(_resource(api_key, "media"), ["upload_multiple"])

    def test_media_has_upload_bytes_method(self, api_key: str):
        """Test media resource has upload_bytes method."""
        _assert_callables(_resource(api_key, "media"), ["upload_bytes"])

    def test_media_has_upload_large_method(self, api_key: str):
        """Test media resource has upload_large method."""
        _assert_callables(_resource(api_key, "media"), ["upload_large"])

    def test_media_has_upload_large_bytes_method(self, api_key: str):
        """Test media resource has upload_large_bytes method."""
        _assert_callables(_resource(api_key, "media"), ["upload_large_bytes"])

    def test_media_has_generate_upload_token_method(self, api_key: str):
        """Test media resource has generate_upload_token method."""
        _assert_callables(_resource(api_key, "media"), ["generate_upload_token"])

    def test_media_has_check_upload_token_method(self, api_key: str):
        """Test media resource has check_upload_token method."""
        _assert_callables(_resource(api_key, "media"), ["check_upload_token"])

    def test_media_has_async_methods(self, api_key: str):
        """Test media resource has async methods."""
        _assert_callables(
            _resource(api_key, "media"),
            [
                "aupload",
                "aupload_multiple",
                "aupload_bytes",
                "aupload_large",
                "aupload_large_bytes",
                "agenerate_upload_token",
                "acheck_upload_token",
            ],
        )


class TestUploadModule:
    """Test upload module classes."""

    def test_import_smart_uploader(self):
        """Test SmartUploader import."""
        assert _upload_export("SmartUploader") is not None

    def test_import_direct_uploader(self):
        """Test DirectUploader import."""
        assert _upload_export("DirectUploader") is not None

    def test_import_vercel_blob_uploader(self):
        """Test VercelBlobUploader import."""
        assert _upload_export("VercelBlobUploader") is not None

    def test_import_upload_file(self):
        """Test UploadFile import."""
        assert _upload_export("UploadFile") is not None

    def test_import_upload_result(self):
        """Test UploadResult import."""
        assert _upload_export("UploadResult") is not None

    def test_import_upload_progress(self):
        """Test UploadProgress import."""
        assert _upload_export("UploadProgress") is not None

    def test_import_large_file_error(self):
        """Test LargeFileError import."""
        assert _upload_export("LargeFileError") is not None

    def test_upload_progress_creation(self):
        """Test UploadProgress dataclass."""
        from late.upload import UploadProgress

        progress = UploadProgress(
            uploaded_bytes=500,
            total_bytes=1000,
        )
        assert progress.uploaded_bytes == 500
        assert progress.total_bytes == 1000
        assert progress.percentage == 50.0

    def test_large_file_error_creation(self):
        """Test LargeFileError creation."""
        from late.upload import LargeFileError

        error = LargeFileError(file_size=5_000_000, max_direct_size=4_000_000)
        assert "5,000,000" in str(error)
        assert "4,000,000" in str(error) or "4MB" in str(error)


# ============================================================================
# QUEUE RESOURCE TESTS
# ============================================================================


class TestQueueResourceMethods:
    """Test QueueResource has all expected methods."""

    def test_queue_has_get_slots_method(self, api_key: str):
        """Test queue resource has get_slots method."""
        _assert_callables(_resource(api_key, "queue"), ["get_slots"])

    def test_queue_has_update_slots_method(self, api_key: str):
        """Test queue resource has update_slots method."""
        _assert_callables(_resource(api_key, "queue"), ["update_slots"])

    def test_queue_has_delete_slots_method(self, api_key: str):
        """Test queue resource has delete_slots method."""
        _assert_callables(_resource(api_key, "queue"), ["delete_slots"])

    def test_queue_has_preview_method(self, api_key: str):
        """Test queue resource has preview method."""
        _assert_callables(_resource(api_key, "queue"), ["preview"])

    def test_queue_has_next_slot_method(self, api_key: str):
        """Test queue resource has next_slot method."""
        _assert_callables(_resource(api_key, "queue"), ["next_slot"])

    def test_queue_has_async_methods(self, api_key: str):
        """Test queue resource has async methods."""
        _assert_callables(
            _resource(api_key, "queue"),
            ["aget_slots", "aupdate_slots", "adelete_slots", "apreview", "anext_slot"],
        )


# ============================================================================
# TOOLS RESOURCE TESTS
# ============================================================================


class TestToolsResourceMethods:
    """Test ToolsResource has all expected methods."""

    def test_tools_has_youtube_download_method(self, api_key: str):
        """Test tools resource has youtube_download method."""
        _assert_callables(_resource(api_key, "tools"), ["youtube_download"])

    def test_tools_has_youtube_transcript_method(self, api_key: str):
        """Test tools resource has youtube_transcript method."""
        _assert_callables(_resource(api_key, "tools"), ["youtube_transcript"])

    def test_tools_has_instagram_download_method(self, api_key: str):
        """Test tools resource has instagram_download method."""
        _assert_callables(_resource(api_key, "tools"), ["instagram_download"])

    def test_tools_has_instagram_hashtag_check_method(self, api_key: str):
        """Test tools resource has instagram_hashtag_check method."""
        _assert_callables(_resource(api_key, "tools"), ["instagram_hashtag_check"])

    def test_tools_has_tiktok_download_method(self, api_key: str):
        """Test tools resource has tiktok_download method."""
        _assert_callables(_resource(api_key, "tools"), ["tiktok_download"])

    def test_tools_has_twitter_download_method(self, api_key: str):
        """Test tools resource has twitter_download method."""
        _assert_callables(_resource(api_key, "tools"), ["twitter_download"])

    def test_tools_has_facebook_download_method(self, api_key: str):
        """Test tools resource has facebook_download method."""
        _assert_callables(_resource(api_key, "tools"), ["facebook_download"])

    def test_tools_has_linkedin_download_method(self, api_key: str):
        """Test tools resource has linkedin_download method."""
        _assert_callables(_resource(api_key, "tools"), ["linkedin_download"])

    def test_tools_has_bluesky_download_method(self, api_key: str):
        """Test tools resource has bluesky_download method."""
        _assert_callables(_resource(api_key, "tools"), ["bluesky_download"])

    def test_tools_has_generate_caption_method(self, api_key: str):
        """Test tools resource has generate_caption method."""
        _assert_callables(_resource(api_key, "tools"), ["generate_caption"])

    def test_tools_has_async_methods(self, api_key: str):
        """Test tools resource has async methods."""
        _assert_callables(
            _resource(api_key, "tools"),
            [
                "ayoutube_download",
                "ayoutube_transcript",
                "ainstagram_download",
                "ainstagram_hashtag_check",
                "atiktok_download",
                "atwitter_download",
                "afacebook_download",
                "alinkedin_download",
                "abluesky_download",
                "agenerate_caption",
            ],
        )


# ============================================================================
# PROFILES RESOURCE METHODS TESTS
# ============================================================================


class TestProfilesResourceMethods:
    """Test ProfilesResource has all expected methods."""

    def test_profiles_has_list_method(self, api_key: str):
        """Test profiles resource has list method."""
        _assert_callables(_resource(api_key, "profiles"), ["list"])

    def test_profiles_has_get_method(self, api_key: str):
        """Test profiles resource has get method."""
        _assert_callables(_resource(api_key, "profiles"), ["get"])

    def test_profiles_has_create_method(self, api_key: str):
        """Test profiles resource has create method."""
        _assert_callables(_resource(api_key, "profiles"), ["create"])

    def test_profiles_has_update_method(self, api_key: str):
        """Test profiles resource has update method."""
        _assert_callables(_resource(api_key, "profiles"), ["update"])

    def test_profiles_has_delete_method(self, api_key: str):
        """Test profiles resource has delete method."""
        _assert_callables(_resource(api_key, "profiles"), ["delete"])

    def test_profiles_has_async_methods(self, api_key: str):
        """Test profiles resource has async methods."""
        _assert_callables(
            _resource(api_key, "profiles"),
            ["alist", "aget", "acreate", "aupdate", "adelete"],
        )


# ============================================================================
# ACCOUNTS RESOURCE METHODS TESTS
# ============================================================================


class TestAccountsResourceMethods:
    """Test AccountsResource has all expected methods."""

    def test_accounts_has_list_method(self, api_key: str):
        """Test accounts resource has list method."""
        _assert_callables(_resource(api_key, "accounts"), ["list"])

    def test_accounts_has_get_method(self, api_key: str):
        """Test accounts resource has get method."""
        _assert_callables(_resource(api_key, "accounts"), ["get"])

    def test_accounts_has_get_follower_stats_method(self, api_key: str):
        """Test accounts resource has get_follower_stats method."""
        _assert_callables(_resource(api_key, "accounts"), ["get_follower_stats"])

    def test_accounts_has_async_methods(self, api_key: str):
        """Test accounts resource has async methods."""
        _assert_callables(
            _resource(api_key, "accounts"),
            ["alist", "aget", "aget_follower_stats"],
        )


# ============================================================================
# POSTS RESOURCE METHODS TESTS
# ============================================================================


class TestPostsResourceMethods:
    """Test PostsResource has all expected methods."""

    def test_posts_has_list_method(self, api_key: str):
        """Test posts resource has list method."""
        _assert_callables(_resource(api_key, "posts"), ["list"])

    def test_posts_has_get_method(self, api_key: str):
        """Test posts resource has get method."""
        _assert_callables(_resource(api_key, "posts"), ["get"])

    def test_posts_has_create_method(self, api_key: str):
        """Test posts resource has create method."""
        _assert_callables(_resource(api_key, "posts"), ["create"])

    def test_posts_has_update_method(self, api_key: str):
        """Test posts resource has update method."""
        _assert_callables(_resource(api_key, "posts"), ["update"])

    def test_posts_has_delete_method(self, api_key: str):
        """Test posts resource has delete method."""
        _assert_callables(_resource(api_key, "posts"), ["delete"])

    def test_posts_has_retry_method(self, api_key: str):
        """Test posts resource has retry method."""
        _assert_callables(_resource(api_key, "posts"), ["retry"])

    def test_posts_has_bulk_upload_method(self, api_key: str):
        """Test posts resource has bulk_upload method."""
        _assert_callables(_resource(api_key, "posts"), ["bulk_upload"])

    def test_posts_has_async_methods(self, api_key: str):
        """Test posts resource has async methods."""
        _assert_callables(
            _resource(api_key, "posts"),
            ["alist", "aget", "acreate", "aupdate", "adelete", "aretry"],
        )
assert result["message"] == "Retrying" + assert result.message == "Retrying" # ============================================================================= @@ -329,8 +329,8 @@ def test_list_profiles(self, client: Late, mock_profile: dict) -> None: result = client.profiles.list() assert route.called - assert len(result["profiles"]) == 1 - assert result["profiles"][0]["name"] == "Test Profile" + assert len(result.profiles) == 1 + assert result.profiles[0].name == "Test Profile" @respx.mock def test_get_profile(self, client: Late, mock_profile: dict) -> None: @@ -342,7 +342,7 @@ def test_get_profile(self, client: Late, mock_profile: dict) -> None: result = client.profiles.get("profile_123") assert route.called - assert result["profile"]["_id"] == "profile_123" + assert result.profile.field_id == "profile_123" @respx.mock def test_create_profile(self, client: Late, mock_profile: dict) -> None: @@ -398,7 +398,7 @@ def test_delete_profile(self, client: Late) -> None: result = client.profiles.delete("profile_123") assert route.called - assert result["message"] == "Profile deleted" + assert result.message == "Profile deleted" # ============================================================================= @@ -421,8 +421,8 @@ def test_list_accounts(self, client: Late, mock_account: dict) -> None: result = client.accounts.list() assert route.called - assert len(result["accounts"]) == 1 - assert result["accounts"][0]["platform"] == "twitter" + assert len(result.accounts) == 1 + assert result.accounts[0].platform == "twitter" @respx.mock def test_list_accounts_by_profile(self, client: Late, mock_account: dict) -> None: @@ -447,7 +447,7 @@ def test_get_account(self, client: Late, mock_account: dict) -> None: result = client.accounts.get("acc_123") assert route.called - assert result["account"]["_id"] == "acc_123" + assert result.account.field_id == "acc_123" @respx.mock def test_get_follower_stats(self, client: Late) -> None: @@ -500,8 +500,8 @@ def 
test_generate_upload_token(self, client: Late) -> None: result = client.media.generate_upload_token() assert route.called - assert result["token"] == "tok_123" - assert "uploadUrl" in result + assert result.token == "tok_123" + assert result.uploadUrl is not None @respx.mock def test_check_upload_token(self, client: Late) -> None: @@ -522,8 +522,8 @@ def test_check_upload_token(self, client: Late) -> None: assert route.called request = route.calls[0].request assert "token=tok_123" in str(request.url) - assert result["status"] == "completed" - assert len(result["files"]) == 1 + assert result.status.value == "completed" + assert len(result.files) == 1 # ============================================================================= @@ -592,7 +592,7 @@ def test_next_slot(self, client: Late) -> None: result = client.queue.next_slot() assert route.called - assert "nextSlot" in result + assert result.nextSlot is not None # ============================================================================= @@ -756,7 +756,7 @@ def test_list_users(self, client: Late) -> None: result = client.users.list() assert route.called - assert len(result["users"]) == 1 + assert len(result.users) == 1 @respx.mock def test_get_user(self, client: Late) -> None: @@ -770,7 +770,7 @@ def test_get_user(self, client: Late) -> None: result = client.users.get("user_123") assert route.called - assert result["user"]["_id"] == "user_123" + assert result.user.field_id == "user_123" # ============================================================================= @@ -865,7 +865,7 @@ async def test_async_list_posts(self, async_client: Late, mock_post: dict) -> No result = await async_client.posts.alist() assert route.called - assert len(result["posts"]) == 1 + assert len(result.posts) == 1 @respx.mock @pytest.mark.asyncio @@ -883,7 +883,7 @@ async def test_async_create_post(self, async_client: Late, mock_post: dict) -> N ) assert route.called - assert result["message"] == "Created" + assert result.message == 
"Created" @respx.mock @pytest.mark.asyncio @@ -905,15 +905,15 @@ async def test_async_profile_crud(self, async_client: Late, mock_profile: dict) async with async_client: # Create result = await async_client.profiles.acreate(name="Test") - assert result["profile"]["_id"] == "profile_123" + assert result.profile.field_id == "profile_123" # Update result = await async_client.profiles.aupdate("profile_123", name="Updated") - assert result["profile"]["_id"] == "profile_123" + assert result.profile.field_id == "profile_123" # Delete result = await async_client.profiles.adelete("profile_123") - assert result["message"] == "Deleted" + assert result.message == "Deleted" # ============================================================================= diff --git a/uv.lock b/uv.lock index c023bc1..ef65568 100644 --- a/uv.lock +++ b/uv.lock @@ -680,7 +680,7 @@ wheels = [ [[package]] name = "late-sdk" -version = "1.0.1" +version = "1.1.0" source = { editable = "." } dependencies = [ { name = "httpx" }, From a98b89b1264806bf7f6e1533fa5478198dcb406e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Mon, 15 Dec 2025 15:52:26 +0100 Subject: [PATCH 10/13] feat(mcp): Add is_draft parameter and centralized tool definitions - Add is_draft parameter to posts_create and posts_cross_post - Create tool_definitions.py as single source of truth for MCP params - Add script to generate MDX docs from definitions --- scripts/generate_mcp_docs.py | 35 +++++ src/late/mcp/server.py | 40 ++++-- src/late/mcp/tool_definitions.py | 220 +++++++++++++++++++++++++++++++ 3 files changed, 287 insertions(+), 8 deletions(-) create mode 100644 scripts/generate_mcp_docs.py create mode 100644 src/late/mcp/tool_definitions.py diff --git a/scripts/generate_mcp_docs.py b/scripts/generate_mcp_docs.py new file mode 100644 index 0000000..a5d19d8 --- /dev/null +++ b/scripts/generate_mcp_docs.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +""" +Generate MCP documentation from tool definitions. 
#!/usr/bin/env python3
"""
Generate MCP documentation from tool definitions.

Usage:
    python scripts/generate_mcp_docs.py

This script generates MDX documentation from the centralized tool definitions
in src/late/mcp/tool_definitions.py
"""

import sys
from pathlib import Path

# Add src to path so the script runs from a source checkout without installing.
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))

# Import must come after the sys.path manipulation above (hence noqa: E402).
# NOTE: only generate_mdx_docs is needed; the previously imported
# TOOL_DEFINITIONS was unused (F401).
from late.mcp.tool_definitions import generate_mdx_docs  # noqa: E402


def main() -> None:
    """Generate and print MDX documentation."""
    banner = "=" * 60
    print(banner)
    print("MCP Tool Documentation (generated from tool_definitions.py)")
    print(banner)
    print()
    print(generate_mdx_docs())
    print()
    print(banner)
    print("Copy the above into claude-mcp.mdx under '## Tool Reference'")
    print(banner)


if __name__ == "__main__":
    main()
Draft posts can be edited and scheduled later (default: False) + publish_now: Publish immediately instead of scheduling (default: False) + schedule_minutes: Minutes from now to schedule (ignored if publish_now=True or is_draft=True). Default 60 min. media_urls: Comma-separated URLs of media files to attach. Optional. - title: Optional title (for YouTube, Pinterest) + title: Optional title (required for YouTube, recommended for Pinterest) """ client = _get_client() @@ -357,7 +364,9 @@ def posts_create( media_items.append({"type": media_type, "url": url}) params["media_items"] = media_items - if publish_now: + if is_draft: + params["is_draft"] = True + elif publish_now: params["publish_now"] = True else: minutes = schedule_minutes if schedule_minutes > 0 else 60 @@ -369,7 +378,9 @@ def posts_create( username = account.get("username") or account.get("name") or account["_id"] media_info = f" with {len(params.get('media_items', []))} media file(s)" if params.get("media_items") else "" - if publish_now: + if is_draft: + return f"šŸ“ Draft saved for {platform} (@{username}){media_info}\nPost ID: {post.get('_id', 'N/A')}\nStatus: draft" + elif publish_now: return f"āœ… Published to {platform} (@{username}){media_info}\nPost ID: {post.get('_id', 'N/A')}" else: scheduled = params["scheduled_for"].strftime("%Y-%m-%d %H:%M") @@ -393,16 +404,23 @@ def posts_publish_now(content: str, platform: str, media_urls: str = "") -> str: def posts_cross_post( content: str, platforms: str, + is_draft: bool = False, publish_now: bool = False, media_urls: str = "", ) -> str: """ Post the same content to multiple platforms at once. + Scheduling behavior: + - is_draft=True: Save as draft (no scheduling, can edit later) + - publish_now=True: Publish immediately + - Neither: Schedule for 1 hour from now + Args: content: The post content/text platforms: Comma-separated list of platforms (e.g., "twitter,linkedin,bluesky") - publish_now: If True, publish immediately. 
"""
Centralized tool definitions for MCP and documentation.

This file is the single source of truth for tool parameters and descriptions.
Used by:
- MCP server (server.py) for tool definitions
- Documentation generation (can be exported to MDX)
"""

from __future__ import annotations

from dataclasses import dataclass
from typing import Any


@dataclass
class ParamDef:
    """Definition of a tool parameter."""

    name: str          # parameter identifier as exposed to the MCP client
    type: str          # human-readable type name ("str", "bool", "int", ...)
    description: str   # one-line description shown in the generated docs
    required: bool = False
    default: Any = None  # None means "no default"; renders as "-" in MDX

    def to_mdx_row(self) -> str:
        """Generate MDX table row for this parameter."""
        req = "Yes" if self.required else "No"
        # Distinguish "no default" (None) from an explicit empty-string
        # default (used by media_urls/title): the latter previously rendered
        # as `` — an empty inline-code span — in MDX tables.
        if self.default is None:
            default_str = "-"
        elif self.default == "":
            default_str = '`""`'
        else:
            default_str = f"`{self.default}`"
        return f"| `{self.name}` | `{self.type}` | {self.description} | {req} | {default_str} |"


@dataclass
class ToolDef:
    """Definition of a tool."""

    name: str                # MCP tool name (e.g. "posts_create")
    description: str         # markdown description shown in docs
    params: list[ParamDef]   # ordered parameter definitions

    def to_mdx_section(self) -> str:
        """Generate MDX documentation section (heading + parameter table)."""
        lines = [
            f"### {self.name}",
            "",
            self.description,
            "",
            "| Parameter | Type | Description | Required | Default |",
            "|-----------|------|-------------|----------|---------|",
        ]
        lines.extend(p.to_mdx_row() for p in self.params)
        return "\n".join(lines)


# =============================================================================
# POSTS TOOL DEFINITIONS
# =============================================================================

POSTS_CREATE_PARAMS = [
    ParamDef(
        name="content",
        type="str",
        description="The post content/text",
        required=True,
    ),
    ParamDef(
        name="platform",
        type="str",
        description="Target platform: twitter, instagram, linkedin, tiktok, bluesky, facebook, youtube, pinterest, threads",
        required=True,
    ),
    ParamDef(
        name="is_draft",
        type="bool",
        description="Save as draft without scheduling. Draft posts can be edited and scheduled later",
        required=False,
        default=False,
    ),
    ParamDef(
        name="publish_now",
        type="bool",
        description="Publish immediately instead of scheduling",
        required=False,
        default=False,
    ),
    ParamDef(
        name="schedule_minutes",
        type="int",
        description="Minutes from now to schedule the post. Ignored if publish_now=True or is_draft=True",
        required=False,
        default=60,
    ),
    ParamDef(
        name="media_urls",
        type="str",
        description="Comma-separated URLs of media files to attach (images, videos, GIFs)",
        required=False,
        default="",
    ),
    ParamDef(
        name="title",
        type="str",
        description="Optional title (required for YouTube, recommended for Pinterest)",
        required=False,
        default="",
    ),
]

POSTS_CREATE = ToolDef(
    name="posts_create",
    description="""Create a new social media post.

**Scheduling behavior:**
- `is_draft=True`: Save as draft (no scheduling, can edit later)
- `publish_now=True`: Publish immediately
- Neither: Schedule for `schedule_minutes` from now (default: 60 min)""",
    params=POSTS_CREATE_PARAMS,
)

POSTS_CROSS_POST_PARAMS = [
    ParamDef(
        name="content",
        type="str",
        description="The post content/text",
        required=True,
    ),
    ParamDef(
        name="platforms",
        type="str",
        description="Comma-separated list of platforms (e.g., 'twitter,linkedin,bluesky')",
        required=True,
    ),
    ParamDef(
        name="is_draft",
        type="bool",
        description="Save as draft without scheduling",
        required=False,
        default=False,
    ),
    ParamDef(
        name="publish_now",
        type="bool",
        description="Publish immediately instead of scheduling",
        required=False,
        default=False,
    ),
    ParamDef(
        name="media_urls",
        type="str",
        description="Comma-separated URLs of media files to attach",
        required=False,
        default="",
    ),
]

POSTS_CROSS_POST = ToolDef(
    name="posts_cross_post",
    description="Post the same content to multiple platforms at once.",
    params=POSTS_CROSS_POST_PARAMS,
)
+POSTS_LIST_PARAMS = [ + ParamDef( + name="status", + type="str", + description="Filter by status: draft, scheduled, published, failed. Empty for all", + required=False, + default="", + ), + ParamDef( + name="limit", + type="int", + description="Maximum number of posts to return", + required=False, + default=10, + ), +] + +POSTS_LIST = ToolDef( + name="posts_list", + description="List posts with optional filtering by status.", + params=POSTS_LIST_PARAMS, +) + +# ============================================================================= +# ALL TOOL DEFINITIONS +# ============================================================================= + +TOOL_DEFINITIONS = { + "posts_create": POSTS_CREATE, + "posts_cross_post": POSTS_CROSS_POST, + "posts_list": POSTS_LIST, +} + + +def generate_mdx_docs() -> str: + """Generate complete MDX documentation for all tools.""" + sections = [ + "## Tool Reference", + "", + "Detailed parameters for each MCP tool.", + "", + ] + for tool in TOOL_DEFINITIONS.values(): + sections.append(tool.to_mdx_section()) + sections.append("") + return "\n".join(sections) + + +def get_tool_docstring(tool_name: str) -> str: + """Get the docstring for a tool, formatted for MCP.""" + tool = TOOL_DEFINITIONS.get(tool_name) + if not tool: + return "" + + lines = [tool.description, "", "Args:"] + for param in tool.params: + req = " (required)" if param.required else "" + default = f" (default: {param.default})" if param.default is not None and not param.required else "" + lines.append(f" {param.name}: {param.description}{req}{default}") + + return "\n".join(lines) From e2db7a4aa1eee1eb6391705892cb1428207b5dfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Mon, 15 Dec 2025 15:54:58 +0100 Subject: [PATCH 11/13] fix: Move Callable imports to TYPE_CHECKING block and fix trailing whitespace --- src/late/models/__init__.py | 92 ++++++++++++++-------------- src/late/models/_generated/models.py | 74 +++++++++++----------- 
src/late/resources/media.py | 4 +- src/late/resources/tools.py | 2 +- src/late/upload/protocols.py | 5 +- src/late/upload/smart.py | 4 +- src/late/upload/vercel/client.py | 5 +- src/late/upload/vercel/uploader.py | 5 +- test_upload.py | 82 +++++++++++++++++++++++++ 9 files changed, 181 insertions(+), 92 deletions(-) create mode 100644 test_upload.py diff --git a/src/late/models/__init__.py b/src/late/models/__init__.py index 9a2693c..2795ce7 100644 --- a/src/late/models/__init__.py +++ b/src/late/models/__init__.py @@ -10,70 +10,70 @@ # Import specific commonly used models for convenience from ._generated.models import ( - # Core models - Post, - MediaItem, - PlatformTarget, - Profile, - SocialAccount, - QueueSlot, - QueueSchedule, - # Enums - Status, - Type, - Visibility, - # Platform-specific - TikTokSettings, - TwitterPlatformData, - InstagramPlatformData, + AccountGetResponse, + # Accounts responses + AccountsListResponse, + AccountWithFollowerStats, + CaptionResponse, + DownloadFormat, + # Tools responses + DownloadResponse, + ErrorResponse, FacebookPlatformData, + FollowerStatsResponse, + HashtagCheckResponse, + HashtagInfo, + InstagramPlatformData, LinkedInPlatformData, - YouTubePlatformData, - PinterestPlatformData, + MediaItem, + # Media responses + MediaUploadResponse, # Base responses Pagination, - ErrorResponse, - # Posts responses - PostsListResponse, - PostGetResponse, + PinterestPlatformData, + PlatformTarget, + # Core models + Post, PostCreateResponse, - PostUpdateResponse, PostDeleteResponse, + PostGetResponse, PostRetryResponse, + # Posts responses + PostsListResponse, + PostUpdateResponse, + Profile, + ProfileCreateResponse, + ProfileDeleteResponse, + ProfileGetResponse, # Profiles responses ProfilesListResponse, - ProfileGetResponse, - ProfileCreateResponse, ProfileUpdateResponse, - ProfileDeleteResponse, - # Accounts responses - AccountsListResponse, - AccountGetResponse, - FollowerStatsResponse, - AccountWithFollowerStats, - # Media responses 
- MediaUploadResponse, - UploadedFile, - UploadTokenResponse, - UploadTokenStatusResponse, + QueueDeleteResponse, + QueueNextSlotResponse, + QueuePreviewResponse, + QueueSchedule, + QueueSlot, # Queue responses QueueSlotsResponse, QueueUpdateResponse, - QueueDeleteResponse, - QueuePreviewResponse, - QueueNextSlotResponse, - # Tools responses - DownloadResponse, - DownloadFormat, + SocialAccount, + # Enums + Status, + # Platform-specific + TikTokSettings, TranscriptResponse, TranscriptSegment, - HashtagCheckResponse, - HashtagInfo, - CaptionResponse, + TwitterPlatformData, + Type, + UploadedFile, + UploadTokenResponse, + UploadTokenStatusResponse, # Users responses User, - UsersListResponse, UserGetResponse, + UsersListResponse, + Visibility, + YouTubePlatformData, ) # SDK-specific models (not from OpenAPI) diff --git a/src/late/models/_generated/models.py b/src/late/models/_generated/models.py index cfc12d2..b260cdf 100644 --- a/src/late/models/_generated/models.py +++ b/src/late/models/_generated/models.py @@ -5,14 +5,14 @@ from __future__ import annotations from enum import Enum -from typing import Annotated, Any, Dict, List +from typing import Annotated, Any from pydantic import AnyUrl, AwareDatetime, BaseModel, Field class ErrorResponse(BaseModel): error: str | None = None - details: Dict[str, Any] | None = None + details: dict[str, Any] | None = None class Type(Enum): @@ -80,11 +80,11 @@ class Visibility(Enum): class ThreadItem(BaseModel): content: str | None = None - mediaItems: List[MediaItem] | None = None + mediaItems: list[MediaItem] | None = None class TwitterPlatformData(BaseModel): - threadItems: List[ThreadItem] | None = None + threadItems: list[ThreadItem] | None = None """ Sequence of tweets in a thread. First item is the root tweet. 
""" @@ -101,7 +101,7 @@ class ThreadsPlatformData(BaseModel): """ - threadItems: List[ThreadItem] | None = None + threadItems: list[ThreadItem] | None = None """ Sequence of posts in a Threads thread (root then replies in order). """ @@ -190,7 +190,7 @@ class InstagramPlatformData(BaseModel): """ For Reels only. When true (default), the Reel appears on both the Reels tab and your main profile feed. Set to false to post to the Reels tab only. """ - collaborators: List[str] | None = None + collaborators: list[str] | None = None """ Up to 3 Instagram usernames to invite as collaborators (feed/Reels only) """ @@ -198,7 +198,7 @@ class InstagramPlatformData(BaseModel): """ Optional first comment to add after the post is created (not applied to Stories) """ - userTags: List[UserTag] | None = None + userTags: list[UserTag] | None = None """ Tag Instagram users in photos by username and position coordinates. Only works for single image posts and the first image of carousel posts. Not supported for stories or videos. 
""" @@ -453,7 +453,7 @@ class QueueSchedule(BaseModel): """ IANA timezone (e.g., America/New_York) """ - slots: List[QueueSlot] | None = None + slots: list[QueueSlot] | None = None active: bool | None = None """ Whether the queue is active @@ -615,7 +615,7 @@ class VideoClipJobCompleted(BaseModel): job_id: Annotated[str | None, Field(examples=["abc123def456"])] = None status: Annotated[Status3 | None, Field(examples=["completed"])] = None total_clips: Annotated[int | None, Field(examples=[5])] = None - clips: List[VideoClip] | None = None + clips: list[VideoClip] | None = None class Status4(Enum): @@ -708,7 +708,7 @@ class AnalyticsSinglePostResponse(BaseModel): scheduledFor: AwareDatetime | None = None publishedAt: AwareDatetime | None = None analytics: PostAnalytics | None = None - platformAnalytics: List[PlatformAnalytics] | None = None + platformAnalytics: list[PlatformAnalytics] | None = None platform: str | None = None platformPostUrl: AnyUrl | None = None isExternal: bool | None = None @@ -728,20 +728,20 @@ class Post1(BaseModel): publishedAt: AwareDatetime | None = None status: str | None = None analytics: PostAnalytics | None = None - platforms: List[PlatformAnalytics] | None = None + platforms: list[PlatformAnalytics] | None = None platform: str | None = None platformPostUrl: AnyUrl | None = None isExternal: bool | None = None thumbnailUrl: AnyUrl | None = None mediaType: MediaType1 | None = None - mediaItems: List[MediaItem] | None = None + mediaItems: list[MediaItem] | None = None class AnalyticsListResponse(BaseModel): overview: AnalyticsOverview | None = None - posts: List[Post1] | None = None + posts: list[Post1] | None = None pagination: Pagination | None = None - accounts: List[SocialAccount] | None = None + accounts: list[SocialAccount] | None = None """ Connected social accounts (followerCount and followersLastUpdated only included if user has analytics add-on) """ @@ -756,7 +756,7 @@ class PostDeleteResponse(BaseModel): class 
ProfilesListResponse(BaseModel): - profiles: List[Profile] | None = None + profiles: list[Profile] | None = None class ProfileGetResponse(BaseModel): @@ -778,7 +778,7 @@ class ProfileDeleteResponse(BaseModel): class AccountsListResponse(BaseModel): - accounts: List[SocialAccount] | None = None + accounts: list[SocialAccount] | None = None hasAnalyticsAccess: bool | None = None """ Whether user has analytics add-on access @@ -801,7 +801,7 @@ class Aggregation(Enum): class FollowerStatsResponse(BaseModel): - accounts: List[AccountWithFollowerStats] | None = None + accounts: list[AccountWithFollowerStats] | None = None dateRange: DateRange | None = None aggregation: Aggregation | None = None @@ -821,7 +821,7 @@ class UploadedFile(BaseModel): class MediaUploadResponse(BaseModel): - files: List[UploadedFile] | None = None + files: list[UploadedFile] | None = None class Status5(Enum): @@ -840,7 +840,7 @@ class UploadTokenResponse(BaseModel): class UploadTokenStatusResponse(BaseModel): token: str | None = None status: Status5 | None = None - files: List[UploadedFile] | None = None + files: list[UploadedFile] | None = None createdAt: AwareDatetime | None = None expiresAt: AwareDatetime | None = None completedAt: AwareDatetime | None = None @@ -849,13 +849,13 @@ class UploadTokenStatusResponse(BaseModel): class QueueSlotsResponse(BaseModel): exists: bool | None = None schedule: QueueSchedule | None = None - nextSlots: List[AwareDatetime] | None = None + nextSlots: list[AwareDatetime] | None = None class QueueUpdateResponse(BaseModel): success: bool | None = None schedule: QueueSchedule | None = None - nextSlots: List[AwareDatetime] | None = None + nextSlots: list[AwareDatetime] | None = None reshuffledCount: int | None = None @@ -867,7 +867,7 @@ class QueueDeleteResponse(BaseModel): class QueuePreviewResponse(BaseModel): profileId: str | None = None count: int | None = None - slots: List[AwareDatetime] | None = None + slots: list[AwareDatetime] | None = None class 
QueueNextSlotResponse(BaseModel): @@ -889,7 +889,7 @@ class DownloadResponse(BaseModel): title: str | None = None thumbnail: AnyUrl | None = None duration: int | None = None - formats: List[DownloadFormat] | None = None + formats: list[DownloadFormat] | None = None class TranscriptSegment(BaseModel): @@ -900,7 +900,7 @@ class TranscriptSegment(BaseModel): class TranscriptResponse(BaseModel): transcript: str | None = None - segments: List[TranscriptSegment] | None = None + segments: list[TranscriptSegment] | None = None language: str | None = None @@ -918,7 +918,7 @@ class HashtagInfo(BaseModel): class HashtagCheckResponse(BaseModel): - hashtags: List[HashtagInfo] | None = None + hashtags: list[HashtagInfo] | None = None class CaptionResponse(BaseModel): @@ -934,7 +934,7 @@ class User(BaseModel): class UsersListResponse(BaseModel): - users: List[User] | None = None + users: list[User] | None = None class UserGetResponse(BaseModel): @@ -960,7 +960,7 @@ class VideoClipJob(BaseModel): ] = None videoFileName: Annotated[str | None, Field(examples=["my-video.mp4"])] = None status: Annotated[Status1 | None, Field(examples=["completed"])] = None - clips: List[VideoClip] | None = None + clips: list[VideoClip] | None = None totalClips: Annotated[int | None, Field(examples=[5])] = None error: Annotated[str | None, Field(examples=[None])] = None createdAt: Annotated[ @@ -978,7 +978,7 @@ class PlatformTarget(BaseModel): """ accountId: str | None = None customContent: str | None = None - customMedia: List[MediaItem] | None = None + customMedia: list[MediaItem] | None = None scheduledFor: AwareDatetime | None = None """ Optional per-platform scheduled time override (uses post.scheduledFor when omitted) @@ -1014,8 +1014,8 @@ class PlatformTarget(BaseModel): ] = None """ Public URL of the published post on the platform. - Populated after successful publish. For immediate posts (publishNow=true), - this is included in the response. 
For scheduled posts, fetch the post + Populated after successful publish. For immediate posts (publishNow=true), + this is included in the response. For scheduled posts, fetch the post via GET /v1/posts/{postId} after the scheduled time. """ @@ -1034,12 +1034,12 @@ class Post(BaseModel): """ content: str | None = None - mediaItems: List[MediaItem] | None = None - platforms: List[PlatformTarget] | None = None + mediaItems: list[MediaItem] | None = None + platforms: list[PlatformTarget] | None = None scheduledFor: AwareDatetime | None = None timezone: str | None = None status: Status | None = None - tags: List[str] | None = None + tags: list[str] | None = None """ YouTube tag constraints when targeting YouTube: - No count cap; duplicates removed. @@ -1047,10 +1047,10 @@ class Post(BaseModel): - Combined characters across all tags ≤ 500. """ - hashtags: List[str] | None = None - mentions: List[str] | None = None + hashtags: list[str] | None = None + mentions: list[str] | None = None visibility: Visibility | None = None - metadata: Dict[str, Any] | None = None + metadata: dict[str, Any] | None = None queuedFromProfile: str | None = None """ Profile ID if the post was scheduled via the queue @@ -1060,7 +1060,7 @@ class Post(BaseModel): class PostsListResponse(BaseModel): - posts: List[Post] | None = None + posts: list[Post] | None = None pagination: Pagination | None = None diff --git a/src/late/resources/media.py b/src/late/resources/media.py index 5d48d79..feea6c5 100644 --- a/src/late/resources/media.py +++ b/src/late/resources/media.py @@ -10,7 +10,7 @@ import mimetypes from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable +from typing import TYPE_CHECKING from late.models import ( MediaLargeUploadResponse, @@ -22,6 +22,8 @@ from .base import BaseResource if TYPE_CHECKING: + from collections.abc import Callable + from late.upload import UploadProgress diff --git a/src/late/resources/tools.py b/src/late/resources/tools.py index 260cd26..126f386 
100644 --- a/src/late/resources/tools.py +++ b/src/late/resources/tools.py @@ -4,7 +4,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING from late.models import ( CaptionResponse, diff --git a/src/late/upload/protocols.py b/src/late/upload/protocols.py index 8f777c4..ea972fd 100644 --- a/src/late/upload/protocols.py +++ b/src/late/upload/protocols.py @@ -9,10 +9,7 @@ from dataclasses import dataclass, field from pathlib import Path -from typing import TYPE_CHECKING, BinaryIO, Protocol, runtime_checkable - -if TYPE_CHECKING: - from collections.abc import AsyncIterator, Iterator +from typing import BinaryIO, Protocol, runtime_checkable @dataclass diff --git a/src/late/upload/smart.py b/src/late/upload/smart.py index c10cc76..8d3a98e 100644 --- a/src/late/upload/smart.py +++ b/src/late/upload/smart.py @@ -6,7 +6,7 @@ from __future__ import annotations -from typing import TYPE_CHECKING, Callable +from typing import TYPE_CHECKING from .config import UploadConfig from .direct import DirectUploader @@ -19,6 +19,8 @@ ) if TYPE_CHECKING: + from collections.abc import Callable + from late.client.base import BaseClient diff --git a/src/late/upload/vercel/client.py b/src/late/upload/vercel/client.py index f568357..bb5e876 100644 --- a/src/late/upload/vercel/client.py +++ b/src/late/upload/vercel/client.py @@ -6,11 +6,14 @@ from __future__ import annotations -from typing import Callable +from typing import TYPE_CHECKING from late.upload.protocols import UploadError, UploadFile, UploadProgress, UploadResult from late.upload.utils import read_file_content +if TYPE_CHECKING: + from collections.abc import Callable + class VercelBlobClient: """ diff --git a/src/late/upload/vercel/uploader.py b/src/late/upload/vercel/uploader.py index 36cc8bc..6ef52e8 100644 --- a/src/late/upload/vercel/uploader.py +++ b/src/late/upload/vercel/uploader.py @@ -6,7 +6,7 @@ from __future__ import annotations -from typing import Callable +from 
typing import TYPE_CHECKING
 
 from late.upload.config import UploadConfig
 from late.upload.protocols import (
@@ -19,6 +19,9 @@
 from .client import VercelBlobClient
 
+if TYPE_CHECKING:
+    from collections.abc import Callable
+
 
 class VercelBlobUploader:
     """
diff --git a/test_upload.py b/test_upload.py
new file mode 100644
index 0000000..3e718fa
--- /dev/null
+++ b/test_upload.py
@@ -0,0 +1,82 @@
+"""
+Test script for upload module.
+"""
+
+import sys
+from pathlib import Path
+
+# Add src to path
+sys.path.insert(0, str(Path(__file__).parent / "src"))
+
+# Test imports
+print("Testing imports...")
+from late import Late
+from late.upload import (
+    SmartUploader,
+    DirectUploader,
+    VercelBlobUploader,
+    UploadFile,
+    UploadResult,
+    UploadProgress,
+    LargeFileError,
+)
+print("āœ“ All imports successful")
+
+# Test files
+SMALL_IMAGE = "/Users/carlos/Documents/WebDev/Freelance/miquel-palet/Schedule-Posts-API/app/apple-icon.png"  # NOTE(review): machine-specific path — parameterize via env/CLI before merging
+LARGE_VIDEO = "/Users/carlos/Documents/Video recordings/screen-studio/Built-in Retina Display.mp4"
+
+# Credentials
+LATE_API_KEY = "REDACTED"  # NOTE(review): a live API key was committed here — rotate it immediately and read from the LATE_API_KEY env var
+LATE_BASE_URL = "https://getlate.dev/api"
+VERCEL_BLOB_TOKEN = "REDACTED"  # NOTE(review): a live read-write blob token was committed here — rotate it and read from env
+
+# Verify files exist
+for path, name in [(SMALL_IMAGE, "Small image"), (LARGE_VIDEO, "Large video")]:
+    if Path(path).exists():
+        size = Path(path).stat().st_size
+        print(f"āœ“ {name}: {size:,} bytes ({size / (1024*1024):.1f} MB)")
+    else:
+        print(f"āœ— {name} not found: {path}")
+
+# Create client
+client = Late(api_key=LATE_API_KEY, base_url=LATE_BASE_URL)
+print(f"\nāœ“ Late client created (base_url: {LATE_BASE_URL})")
+
+# Test 1: Direct upload (small file)
+print("\n" + "="*60)
+print("TEST 1: Direct upload (small file < 4MB)")
+print("="*60)
+try:
+    result = client.media.upload(SMALL_IMAGE)
+    print(f"āœ“ Upload successful!")
+    print(f" URL: {result['files'][0]['url']}")
+except Exception as e:
+    print(f"āœ— 
Upload failed: {e}") + +# Test 2: Vercel Blob upload (large file) +print("\n" + "="*60) +print("TEST 2: Vercel Blob upload (large file ~278MB)") +print("="*60) + +def progress_callback(p: UploadProgress): + pct = p.percentage + bar = "ā–ˆ" * int(pct / 5) + "ā–‘" * (20 - int(pct / 5)) + print(f" [{bar}] {pct:.1f}%", end="\r") + +try: + result = client.media.upload_large( + LARGE_VIDEO, + vercel_token=VERCEL_BLOB_TOKEN, + on_progress=progress_callback + ) + print(f"\nāœ“ Vercel Blob upload successful!") + print(f" URL: {result['url']}") +except Exception as e: + print(f"\nāœ— Vercel Blob upload failed: {e}") + import traceback + traceback.print_exc() + +print("\n" + "="*60) +print("All tests completed!") +print("="*60) From 5b5abba8652c4860a4037357caa377d811932568 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Mon, 15 Dec 2025 16:03:13 +0100 Subject: [PATCH 12/13] fix: Fix mypy errors and format code --- src/late/ai/content_generator.py | 4 +- src/late/ai/protocols.py | 4 +- src/late/ai/providers/openai.py | 4 +- src/late/client/base.py | 20 ++++++-- src/late/client/late_client.py | 4 +- src/late/mcp/server.py | 75 +++++++++++++++++++++-------- src/late/mcp/tool_definitions.py | 8 ++- src/late/pipelines/csv_scheduler.py | 5 +- src/late/resources/base.py | 4 +- src/late/resources/media.py | 11 ++++- src/late/resources/posts.py | 8 ++- src/late/resources/queue.py | 4 +- src/late/resources/tools.py | 24 ++++++--- src/late/upload/config.py | 48 ++++++++++-------- src/late/upload/direct.py | 3 +- src/late/upload/protocols.py | 11 +++-- src/late/upload/smart.py | 3 +- src/late/upload/utils.py | 6 ++- src/late/upload/vercel/client.py | 30 +++++++----- 19 files changed, 184 insertions(+), 92 deletions(-) diff --git a/src/late/ai/content_generator.py b/src/late/ai/content_generator.py index 19d9578..4d77e63 100644 --- a/src/late/ai/content_generator.py +++ b/src/late/ai/content_generator.py @@ -97,9 +97,7 @@ async def agenerate(self, request: 
GenerateRequest) -> GenerateResponse: """Generate content asynchronously.""" return await self._provider.agenerate(request) - async def agenerate_stream( - self, request: GenerateRequest - ) -> AsyncIterator[str]: + async def agenerate_stream(self, request: GenerateRequest) -> AsyncIterator[str]: """Generate content as a stream.""" if not isinstance(self._provider, StreamingAIProvider): raise NotImplementedError( diff --git a/src/late/ai/protocols.py b/src/late/ai/protocols.py index 3580c2e..ba727f9 100644 --- a/src/late/ai/protocols.py +++ b/src/late/ai/protocols.py @@ -70,8 +70,6 @@ class StreamingAIProvider(Protocol): """Protocol for streaming content generation.""" @abstractmethod - async def agenerate_stream( - self, request: GenerateRequest - ) -> AsyncIterator[str]: + async def agenerate_stream(self, request: GenerateRequest) -> AsyncIterator[str]: """Generate content as a stream.""" ... diff --git a/src/late/ai/providers/openai.py b/src/late/ai/providers/openai.py index ff7a463..2ffc8f0 100644 --- a/src/late/ai/providers/openai.py +++ b/src/late/ai/providers/openai.py @@ -142,9 +142,7 @@ async def agenerate(self, request: GenerateRequest) -> GenerateResponse: finish_reason=choice.finish_reason, ) - async def agenerate_stream( - self, request: GenerateRequest - ) -> AsyncIterator[str]: + async def agenerate_stream(self, request: GenerateRequest) -> AsyncIterator[str]: """Generate content as a stream.""" stream = await self._async_client.chat.completions.create( model=self._model, diff --git a/src/late/client/base.py b/src/late/client/base.py index 74f2e6d..787e23c 100644 --- a/src/late/client/base.py +++ b/src/late/client/base.py @@ -206,10 +206,14 @@ def _post( headers=headers, timeout=self.timeout, ) as client: - return self._request_with_retry(client, "POST", path, files=files, params=params) + return self._request_with_retry( + client, "POST", path, files=files, params=params + ) with self._sync_client() as client: - return 
self._request_with_retry(client, "POST", path, json=data, params=params) + return self._request_with_retry( + client, "POST", path, json=data, params=params + ) def _put( self, @@ -312,10 +316,14 @@ async def _apost( headers=headers, timeout=self.timeout, ) as client: - return await self._arequest_with_retry(client, "POST", path, files=files, params=params) + return await self._arequest_with_retry( + client, "POST", path, files=files, params=params + ) async with self._async_client() as client: - return await self._arequest_with_retry(client, "POST", path, json=data, params=params) + return await self._arequest_with_retry( + client, "POST", path, json=data, params=params + ) async def _aput( self, @@ -333,4 +341,6 @@ async def _adelete( ) -> dict[str, Any]: """Make an async DELETE request.""" async with self._async_client() as client: - return await self._arequest_with_retry(client, "DELETE", path, params=params) + return await self._arequest_with_retry( + client, "DELETE", path, params=params + ) diff --git a/src/late/client/late_client.py b/src/late/client/late_client.py index 7331c8a..7a343f8 100644 --- a/src/late/client/late_client.py +++ b/src/late/client/late_client.py @@ -59,7 +59,9 @@ def __init__( timeout: Request timeout in seconds max_retries: Maximum retries for failed requests """ - super().__init__(api_key, base_url=base_url, timeout=timeout, max_retries=max_retries) + super().__init__( + api_key, base_url=base_url, timeout=timeout, max_retries=max_retries + ) # Initialize resources self.posts = PostsResource(self) diff --git a/src/late/mcp/server.py b/src/late/mcp/server.py index 5956c01..00d5958 100644 --- a/src/late/mcp/server.py +++ b/src/late/mcp/server.py @@ -31,7 +31,9 @@ from late import Late, MediaType, PostStatus # Initialize MCP server -mcp = FastMCP("Late", instructions=""" +mcp = FastMCP( + "Late", + instructions=""" Late API server for scheduling social media posts. 
Available tools are prefixed by resource: @@ -39,7 +41,8 @@ - profiles_* : Manage profiles (groups of accounts) - posts_* : Create, list, update, delete posts - media_* : Upload images and videos -""") +""", +) def _get_client() -> Late: @@ -56,6 +59,7 @@ def _get_client() -> Late: # ACCOUNTS # ============================================================================ + @mcp.tool() def accounts_list() -> str: """ @@ -104,6 +108,7 @@ def accounts_get(platform: str) -> str: # PROFILES # ============================================================================ + @mcp.tool() def profiles_list() -> str: """ @@ -236,6 +241,7 @@ def profiles_delete(profile_id: str) -> str: # POSTS # ============================================================================ + @mcp.tool() def posts_list(status: str = "", limit: int = 10) -> str: """ @@ -258,7 +264,11 @@ def posts_list(status: str = "", limit: int = 10) -> str: lines = [f"Found {len(posts)} post(s):\n"] for post in posts: - content_preview = post["content"][:60] + "..." if len(post["content"]) > 60 else post["content"] + content_preview = ( + post["content"][:60] + "..." + if len(post["content"]) > 60 + else post["content"] + ) platforms = ", ".join(t.get("platform", "?") for t in post.get("platforms", [])) lines.append(f"- [{post['status']}] {content_preview}") lines.append(f" Platforms: {platforms} | ID: {post['_id']}") @@ -278,7 +288,9 @@ def posts_get(post_id: str) -> str: response = client.posts.get(post_id) post = response.get("post", response) - content_preview = post["content"][:100] + "..." if len(post["content"]) > 100 else post["content"] + content_preview = ( + post["content"][:100] + "..." 
if len(post["content"]) > 100 else post["content"] + ) platforms = ", ".join(t.get("platform", "?") for t in post.get("platforms", [])) lines = [ @@ -342,10 +354,12 @@ def posts_create( # Build request params = { "content": content, - "platforms": [{ - "platform": account["platform"], - "accountId": account["_id"], - }], + "platforms": [ + { + "platform": account["platform"], + "accountId": account["_id"], + } + ], } if title: @@ -357,7 +371,9 @@ def posts_create( media_items = [] for url in urls: media_type: MediaType | str = MediaType.IMAGE - if any(ext in url.lower() for ext in [".mp4", ".mov", ".avi", ".webm", ".m4v"]): + if any( + ext in url.lower() for ext in [".mp4", ".mov", ".avi", ".webm", ".m4v"] + ): media_type = MediaType.VIDEO elif any(ext in url.lower() for ext in [".gif"]): media_type = MediaType.GIF @@ -376,7 +392,11 @@ def posts_create( post = response.get("post", {}) username = account.get("username") or account.get("name") or account["_id"] - media_info = f" with {len(params.get('media_items', []))} media file(s)" if params.get("media_items") else "" + media_info = ( + f" with {len(params.get('media_items', []))} media file(s)" + if params.get("media_items") + else "" + ) if is_draft: return f"šŸ“ Draft saved for {platform} (@{username}){media_info}\nPost ID: {post.get('_id', 'N/A')}\nStatus: draft" @@ -397,7 +417,9 @@ def posts_publish_now(content: str, platform: str, media_urls: str = "") -> str: platform: Target platform (twitter, instagram, linkedin, tiktok, bluesky, etc.) media_urls: Comma-separated URLs of media files to attach. Optional. 
""" - return posts_create(content=content, platform=platform, publish_now=True, media_urls=media_urls) + return posts_create( + content=content, platform=platform, publish_now=True, media_urls=media_urls + ) @mcp.tool() @@ -434,10 +456,12 @@ def posts_cross_post( for platform in target_platforms: matching = [a for a in accounts if a["platform"].lower() == platform] if matching: - platform_targets.append({ - "platform": matching[0]["platform"], - "accountId": matching[0]["_id"], - }) + platform_targets.append( + { + "platform": matching[0]["platform"], + "accountId": matching[0]["_id"], + } + ) else: not_found.append(platform) @@ -455,7 +479,9 @@ def posts_cross_post( media_items = [] for url in urls: media_type: MediaType | str = MediaType.IMAGE - if any(ext in url.lower() for ext in [".mp4", ".mov", ".avi", ".webm", ".m4v"]): + if any( + ext in url.lower() for ext in [".mp4", ".mov", ".avi", ".webm", ".m4v"] + ): media_type = MediaType.VIDEO elif any(ext in url.lower() for ext in [".gif"]): media_type = MediaType.GIF @@ -473,7 +499,11 @@ def posts_cross_post( post = response.get("post", {}) posted_to = [t["platform"] for t in platform_targets] - media_info = f" with {len(params.get('media_items', []))} media file(s)" if params.get("media_items") else "" + media_info = ( + f" with {len(params.get('media_items', []))} media file(s)" + if params.get("media_items") + else "" + ) if is_draft: result = f"šŸ“ Draft saved for: {', '.join(posted_to)}{media_info}\nPost ID: {post.get('_id', 'N/A')}\nStatus: draft" @@ -580,7 +610,11 @@ def posts_list_failed(limit: int = 10) -> str: lines = [f"Found {len(posts)} failed post(s):\n"] for post in posts: - content_preview = post["content"][:50] + "..." if len(post["content"]) > 50 else post["content"] + content_preview = ( + post["content"][:50] + "..." 
+ if len(post["content"]) > 50 + else post["content"] + ) platforms = ", ".join(t.get("platform", "?") for t in post.get("platforms", [])) error = post.get("error", "Unknown error") lines.append(f"- {content_preview}") @@ -627,6 +661,7 @@ def posts_retry_all_failed() -> str: # MEDIA UPLOAD # ============================================================================ + @mcp.tool() def media_generate_upload_link() -> str: """ @@ -715,7 +750,9 @@ def media_check_upload_status(token: str) -> str: lines.append(f" Size: {f.get('size', 0) / 1024:.1f} KB") lines.append("") - lines.append("\nšŸ“ You can now create a post with these media URLs using posts_create with the media_urls parameter.") + lines.append( + "\nšŸ“ You can now create a post with these media URLs using posts_create with the media_urls parameter." + ) lines.append(f"\nMedia URLs: {','.join(media_urls)}") return "\n".join(lines) diff --git a/src/late/mcp/tool_definitions.py b/src/late/mcp/tool_definitions.py index 7dcc7c0..5202c17 100644 --- a/src/late/mcp/tool_definitions.py +++ b/src/late/mcp/tool_definitions.py @@ -16,6 +16,7 @@ @dataclass class ParamDef: """Definition of a tool parameter.""" + name: str type: str description: str @@ -32,6 +33,7 @@ def to_mdx_row(self) -> str: @dataclass class ToolDef: """Definition of a tool.""" + name: str description: str params: list[ParamDef] @@ -214,7 +216,11 @@ def get_tool_docstring(tool_name: str) -> str: lines = [tool.description, "", "Args:"] for param in tool.params: req = " (required)" if param.required else "" - default = f" (default: {param.default})" if param.default is not None and not param.required else "" + default = ( + f" (default: {param.default})" + if param.default is not None and not param.required + else "" + ) lines.append(f" {param.name}: {param.description}{req}{default}") return "\n".join(lines) diff --git a/src/late/pipelines/csv_scheduler.py b/src/late/pipelines/csv_scheduler.py index 6434ba5..7023950 100644 --- 
a/src/late/pipelines/csv_scheduler.py +++ b/src/late/pipelines/csv_scheduler.py @@ -97,7 +97,10 @@ def _build_payload(self, row: dict[str, str]) -> dict[str, Any]: if row.get("media_url"): payload["mediaItems"] = [ - {"type": row.get("media_type", MediaType.IMAGE), "url": row["media_url"]} + { + "type": row.get("media_type", MediaType.IMAGE), + "url": row["media_url"], + } ] if row.get("tags"): diff --git a/src/late/resources/base.py b/src/late/resources/base.py index dd2f893..436bc7b 100644 --- a/src/late/resources/base.py +++ b/src/late/resources/base.py @@ -51,9 +51,7 @@ def _build_params(self, **kwargs: Any) -> dict[str, Any]: Returns: Dictionary with non-None values and camelCase keys """ - return { - _to_camel_case(k): v for k, v in kwargs.items() if v is not None - } + return {_to_camel_case(k): v for k, v in kwargs.items() if v is not None} def _build_payload(self, **kwargs: Any) -> dict[str, Any]: """ diff --git a/src/late/resources/media.py b/src/late/resources/media.py index feea6c5..d856aad 100644 --- a/src/late/resources/media.py +++ b/src/late/resources/media.py @@ -60,6 +60,7 @@ def _check_file_size(self, file_path: Path) -> int: size = file_path.stat().st_size if size > DIRECT_UPLOAD_MAX_SIZE: from late.upload import LargeFileError + raise LargeFileError(size, DIRECT_UPLOAD_MAX_SIZE) return size @@ -147,6 +148,7 @@ def upload_bytes( """ if len(content) > DIRECT_UPLOAD_MAX_SIZE: from late.upload import LargeFileError + raise LargeFileError(len(content), DIRECT_UPLOAD_MAX_SIZE) if mime_type is None: @@ -314,7 +316,9 @@ async def aupload(self, file_path: str | Path) -> MediaUploadResponse: ) return MediaUploadResponse.model_validate(data) - async def aupload_multiple(self, file_paths: list[str | Path]) -> MediaUploadResponse: + async def aupload_multiple( + self, file_paths: list[str | Path] + ) -> MediaUploadResponse: """Upload multiple media files asynchronously (each < 4MB).""" files_list = [] for file_path in file_paths: @@ -338,6 +342,7 @@ async 
def aupload_bytes( """Upload media from bytes asynchronously (max 4MB).""" if len(content) > DIRECT_UPLOAD_MAX_SIZE: from late.upload import LargeFileError + raise LargeFileError(len(content), DIRECT_UPLOAD_MAX_SIZE) if mime_type is None: @@ -432,5 +437,7 @@ async def agenerate_upload_token(self) -> UploadTokenResponse: async def acheck_upload_token(self, token: str) -> UploadTokenStatusResponse: """Check the status of an upload token asynchronously.""" - data = await self._client._aget(self._path("upload-token"), params={"token": token}) + data = await self._client._aget( + self._path("upload-token"), params={"token": token} + ) return UploadTokenStatusResponse.model_validate(data) diff --git a/src/late/resources/posts.py b/src/late/resources/posts.py index b9ad606..31a412c 100644 --- a/src/late/resources/posts.py +++ b/src/late/resources/posts.py @@ -164,7 +164,9 @@ def create( tags=tags, hashtags=hashtags, mentions=mentions, - crossposting_enabled=crossposting_enabled if not crossposting_enabled else None, + crossposting_enabled=crossposting_enabled + if not crossposting_enabled + else None, metadata=metadata, tiktok_settings=tiktok_settings, queued_from_profile=queued_from_profile, @@ -351,7 +353,9 @@ async def acreate( tags=tags, hashtags=hashtags, mentions=mentions, - crossposting_enabled=crossposting_enabled if not crossposting_enabled else None, + crossposting_enabled=crossposting_enabled + if not crossposting_enabled + else None, metadata=metadata, tiktok_settings=tiktok_settings, queued_from_profile=queued_from_profile, diff --git a/src/late/resources/queue.py b/src/late/resources/queue.py index bbcc3e0..3e19def 100644 --- a/src/late/resources/queue.py +++ b/src/late/resources/queue.py @@ -170,7 +170,9 @@ async def apreview(self, *, profile_id: str | None = None) -> QueuePreviewRespon data = await self._client._aget(self._path("preview"), params=params or None) return QueuePreviewResponse.model_validate(data) - async def anext_slot(self, *, profile_id: 
str | None = None) -> QueueNextSlotResponse: + async def anext_slot( + self, *, profile_id: str | None = None + ) -> QueueNextSlotResponse: """Get next available slot asynchronously.""" params = self._build_params(profile_id=profile_id) data = await self._client._aget(self._path("next-slot"), params=params or None) diff --git a/src/late/resources/tools.py b/src/late/resources/tools.py index 126f386..cdf1376 100644 --- a/src/late/resources/tools.py +++ b/src/late/resources/tools.py @@ -100,7 +100,9 @@ def instagram_download(self, url: str) -> DownloadResponse: Returns: DownloadResponse with download information """ - data = self._client._get(self._path("instagram", "download"), params={"url": url}) + data = self._client._get( + self._path("instagram", "download"), params={"url": url} + ) return DownloadResponse.model_validate(data) def instagram_hashtag_check(self, hashtags: list[str]) -> HashtagCheckResponse: @@ -174,7 +176,9 @@ def facebook_download(self, url: str) -> DownloadResponse: Returns: DownloadResponse with download information """ - data = self._client._get(self._path("facebook", "download"), params={"url": url}) + data = self._client._get( + self._path("facebook", "download"), params={"url": url} + ) return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- @@ -191,7 +195,9 @@ def linkedin_download(self, url: str) -> DownloadResponse: Returns: DownloadResponse with download information """ - data = self._client._get(self._path("linkedin", "download"), params={"url": url}) + data = self._client._get( + self._path("linkedin", "download"), params={"url": url} + ) return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- @@ -253,7 +259,9 @@ async def ayoutube_download( ) -> DownloadResponse: """Download YouTube video asynchronously.""" params = self._build_params(url=url, format_id=format_id) - data = await 
self._client._aget(self._path("youtube", "download"), params=params) + data = await self._client._aget( + self._path("youtube", "download"), params=params + ) return DownloadResponse.model_validate(data) async def ayoutube_transcript( @@ -264,7 +272,9 @@ async def ayoutube_transcript( ) -> TranscriptResponse: """Get YouTube transcript asynchronously.""" params = self._build_params(url=url, lang=lang) - data = await self._client._aget(self._path("youtube", "transcript"), params=params) + data = await self._client._aget( + self._path("youtube", "transcript"), params=params + ) return TranscriptResponse.model_validate(data) async def ainstagram_download(self, url: str) -> DownloadResponse: @@ -274,7 +284,9 @@ async def ainstagram_download(self, url: str) -> DownloadResponse: ) return DownloadResponse.model_validate(data) - async def ainstagram_hashtag_check(self, hashtags: list[str]) -> HashtagCheckResponse: + async def ainstagram_hashtag_check( + self, hashtags: list[str] + ) -> HashtagCheckResponse: """Check Instagram hashtags asynchronously.""" data = await self._client._apost( self._path("instagram", "hashtag-checker"), diff --git a/src/late/upload/config.py b/src/late/upload/config.py index 481400c..bd3aa27 100644 --- a/src/late/upload/config.py +++ b/src/late/upload/config.py @@ -98,27 +98,33 @@ def default(cls) -> UploadConfig: # Supported content types for uploads -ALLOWED_IMAGE_TYPES: frozenset[str] = frozenset({ - "image/jpeg", - "image/jpg", - "image/png", - "image/webp", - "image/gif", -}) - -ALLOWED_VIDEO_TYPES: frozenset[str] = frozenset({ - "video/mp4", - "video/mpeg", - "video/quicktime", - "video/avi", - "video/x-msvideo", - "video/webm", - "video/x-m4v", -}) - -ALLOWED_DOCUMENT_TYPES: frozenset[str] = frozenset({ - "application/pdf", -}) +ALLOWED_IMAGE_TYPES: frozenset[str] = frozenset( + { + "image/jpeg", + "image/jpg", + "image/png", + "image/webp", + "image/gif", + } +) + +ALLOWED_VIDEO_TYPES: frozenset[str] = frozenset( + { + "video/mp4", + 
"video/mpeg", + "video/quicktime", + "video/avi", + "video/x-msvideo", + "video/webm", + "video/x-m4v", + } +) + +ALLOWED_DOCUMENT_TYPES: frozenset[str] = frozenset( + { + "application/pdf", + } +) ALLOWED_CONTENT_TYPES: frozenset[str] = ( ALLOWED_IMAGE_TYPES | ALLOWED_VIDEO_TYPES | ALLOWED_DOCUMENT_TYPES diff --git a/src/late/upload/direct.py b/src/late/upload/direct.py index af8a9d6..9bba7d7 100644 --- a/src/late/upload/direct.py +++ b/src/late/upload/direct.py @@ -90,8 +90,7 @@ def _build_multipart_files( ) -> list[tuple[str, tuple[str, bytes, str]]]: """Build multipart files list for httpx.""" return [ - ("files", (f.filename, self._read_content(f), f.mime_type)) - for f in files + ("files", (f.filename, self._read_content(f), f.mime_type)) for f in files ] def _parse_response(self, response: dict[str, Any]) -> list[UploadResult]: diff --git a/src/late/upload/protocols.py b/src/late/upload/protocols.py index ea972fd..901a371 100644 --- a/src/late/upload/protocols.py +++ b/src/late/upload/protocols.py @@ -9,7 +9,10 @@ from dataclasses import dataclass, field from pathlib import Path -from typing import BinaryIO, Protocol, runtime_checkable +from typing import TYPE_CHECKING, BinaryIO, Protocol, runtime_checkable + +if TYPE_CHECKING: + from collections.abc import Callable @dataclass @@ -162,7 +165,7 @@ class ProgressUploader(Protocol): def upload_with_progress( self, file: UploadFile, - on_progress: callable[[UploadProgress], None] | None = None, + on_progress: Callable[[UploadProgress], None] | None = None, ) -> UploadResult: """ Upload a file with progress tracking. 
@@ -253,9 +256,7 @@ class FileTooLargeError(UploadError): """Raised when file exceeds maximum allowed size.""" def __init__(self, size: int, max_size: int) -> None: - super().__init__( - f"File size {size:,} bytes exceeds maximum {max_size:,} bytes" - ) + super().__init__(f"File size {size:,} bytes exceeds maximum {max_size:,} bytes") self.size = size self.max_size = max_size diff --git a/src/late/upload/smart.py b/src/late/upload/smart.py index 8d3a98e..0e1cfc7 100644 --- a/src/late/upload/smart.py +++ b/src/late/upload/smart.py @@ -34,7 +34,7 @@ class LargeFileError(UploadError): def __init__(self, file_size: int, max_direct_size: int) -> None: message = ( f"File size ({file_size:,} bytes) exceeds direct upload limit " - f"({max_direct_size:,} bytes / {max_direct_size // (1024*1024)}MB).\n\n" + f"({max_direct_size:,} bytes / {max_direct_size // (1024 * 1024)}MB).\n\n" "For files larger than 4MB, provide a Vercel Blob token:\n\n" " from late.upload import SmartUploader\n\n" " uploader = SmartUploader(client, vercel_token='vercel_blob_rw_xxx')\n" @@ -86,6 +86,7 @@ def __init__( # Initialize Vercel uploader if token provided if vercel_token: from .vercel import VercelBlobUploader + self._vercel_uploader = VercelBlobUploader(vercel_token, self._config) @property diff --git a/src/late/upload/utils.py b/src/late/upload/utils.py index 04710ce..82dbc7c 100644 --- a/src/late/upload/utils.py +++ b/src/late/upload/utils.py @@ -8,6 +8,8 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: + from collections.abc import Generator + from .protocols import UploadFile @@ -32,7 +34,9 @@ def read_file_content(file: UploadFile) -> bytes: return data -def iter_file_chunks(file: UploadFile, chunk_size: int): +def iter_file_chunks( + file: UploadFile, chunk_size: int +) -> Generator[tuple[int, bytes], None, None]: """ Iterate over file content in chunks. 
diff --git a/src/late/upload/vercel/client.py b/src/late/upload/vercel/client.py index bb5e876..420d757 100644 --- a/src/late/upload/vercel/client.py +++ b/src/late/upload/vercel/client.py @@ -64,13 +64,16 @@ def upload( # Create progress wrapper progress_cb = None if on_progress: + def progress_cb(event: UploadProgressEvent) -> None: - on_progress(UploadProgress( - uploaded_bytes=event.loaded, - total_bytes=event.total, - part_number=None, - total_parts=None, - )) + on_progress( + UploadProgress( + uploaded_bytes=event.loaded, + total_bytes=event.total, + part_number=None, + total_parts=None, + ) + ) try: client = BlobClient(token=self._token) @@ -119,13 +122,16 @@ async def aupload( # Create progress wrapper progress_cb = None if on_progress: + def progress_cb(event: UploadProgressEvent) -> None: - on_progress(UploadProgress( - uploaded_bytes=event.loaded, - total_bytes=event.total, - part_number=None, - total_parts=None, - )) + on_progress( + UploadProgress( + uploaded_bytes=event.loaded, + total_bytes=event.total, + part_number=None, + total_parts=None, + ) + ) try: client = AsyncBlobClient(token=self._token) From 33f093373b0b4f1f19bc12beed0d583ba5f39b7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20Mart=C3=ADnez?= Date: Mon, 15 Dec 2025 16:51:47 +0100 Subject: [PATCH 13/13] feat(ai): Add model property to OpenAI provider --- src/late/ai/providers/openai.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/late/ai/providers/openai.py b/src/late/ai/providers/openai.py index 2ffc8f0..5c82996 100644 --- a/src/late/ai/providers/openai.py +++ b/src/late/ai/providers/openai.py @@ -57,8 +57,14 @@ def __init__( def name(self) -> str: return "openai" + @property + def model(self) -> str: + """Current model being used.""" + return self._model + @property def default_model(self) -> str: + """Default model if none specified.""" return "gpt-4o-mini" def _build_messages(self, request: GenerateRequest) -> list[dict[str, str]]: