diff --git a/pyproject.toml b/pyproject.toml index 58783b1..1fb35c6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "late-sdk" -version = "1.0.1" +version = "1.1.0" description = "Python SDK for Late API - Social Media Scheduling" readme = "README.md" requires-python = ">=3.10" diff --git a/scripts/generate_mcp_docs.py b/scripts/generate_mcp_docs.py new file mode 100644 index 0000000..a5d19d8 --- /dev/null +++ b/scripts/generate_mcp_docs.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +""" +Generate MCP documentation from tool definitions. + +Usage: + python scripts/generate_mcp_docs.py + +This script generates MDX documentation from the centralized tool definitions +in src/late/mcp/tool_definitions.py +""" + +import sys +from pathlib import Path + +# Add src to path for imports +sys.path.insert(0, str(Path(__file__).parent.parent / "src")) + +from late.mcp.tool_definitions import generate_mdx_docs, TOOL_DEFINITIONS + + +def main(): + """Generate and print MDX documentation.""" + print("=" * 60) + print("MCP Tool Documentation (generated from tool_definitions.py)") + print("=" * 60) + print() + print(generate_mdx_docs()) + print() + print("=" * 60) + print("Copy the above into claude-mcp.mdx under '## Tool Reference'") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/scripts/generate_models.py b/scripts/generate_models.py index 34e9f2e..574926f 100644 --- a/scripts/generate_models.py +++ b/scripts/generate_models.py @@ -34,6 +34,7 @@ def main() -> int: # Create output directory output_dir.mkdir(parents=True, exist_ok=True) + output_file = output_dir / "models.py" # Run datamodel-code-generator cmd = [ @@ -43,7 +44,7 @@ def main() -> int: "--input", str(openapi_spec), "--output", - str(output_dir), + str(output_file), "--output-model-type", "pydantic_v2.BaseModel", "--input-file-type", @@ -55,8 +56,6 @@ def main() -> int: "--field-constraints", "--use-field-description", "--capitalise-enum-members", - 
"--enum-field-as-literal", - "all", "--use-default-kwarg", "--collapse-root-models", "--use-union-operator", diff --git a/src/late/__init__.py b/src/late/__init__.py index 88077f8..f9fa4cf 100644 --- a/src/late/__init__.py +++ b/src/late/__init__.py @@ -16,11 +16,42 @@ LateValidationError, ) from .client.late_client import Late +from .enums import ( + CaptionTone, + DayOfWeek, + FacebookContentType, + GoogleBusinessCTAType, + InstagramContentType, + MediaType, + Platform, + PostStatus, + TikTokCommercialContentType, + TikTokMediaType, + TikTokPrivacyLevel, + Visibility, +) -__version__ = "1.0.0" +__version__ = "1.1.0" __all__ = [ + # Client "Late", + # Enums - Core + "Platform", + "PostStatus", + "MediaType", + "Visibility", + # Enums - Platform-specific + "InstagramContentType", + "FacebookContentType", + "TikTokPrivacyLevel", + "TikTokCommercialContentType", + "TikTokMediaType", + "GoogleBusinessCTAType", + # Enums - Tools & Queue + "CaptionTone", + "DayOfWeek", + # Exceptions "LateAPIError", "LateAuthenticationError", "LateConnectionError", diff --git a/src/late/ai/content_generator.py b/src/late/ai/content_generator.py index 75d1aed..4d77e63 100644 --- a/src/late/ai/content_generator.py +++ b/src/late/ai/content_generator.py @@ -6,6 +6,8 @@ from typing import TYPE_CHECKING, Any +from late.enums import CaptionTone, Platform + from .protocols import ( AIProvider, GenerateRequest, @@ -25,6 +27,7 @@ class ContentGenerator: Example: >>> from late.ai import ContentGenerator, GenerateRequest + >>> from late import Platform, CaptionTone >>> >>> # Using OpenAI >>> generator = ContentGenerator(provider="openai", api_key="sk-...") @@ -32,8 +35,8 @@ class ContentGenerator: >>> response = generator.generate( ... GenerateRequest( ... prompt="Write a tweet about Python", - ... platform="twitter", - ... tone="professional", + ... platform=Platform.TWITTER, + ... tone=CaptionTone.PROFESSIONAL, ... ) ... 
) >>> print(response.text) @@ -94,9 +97,7 @@ async def agenerate(self, request: GenerateRequest) -> GenerateResponse: """Generate content asynchronously.""" return await self._provider.agenerate(request) - async def agenerate_stream( - self, request: GenerateRequest - ) -> AsyncIterator[str]: + async def agenerate_stream(self, request: GenerateRequest) -> AsyncIterator[str]: """Generate content as a stream.""" if not isinstance(self._provider, StreamingAIProvider): raise NotImplementedError( @@ -109,9 +110,9 @@ async def agenerate_stream( def generate_post( self, topic: str, - platform: str, + platform: Platform | str, *, - tone: str = "professional", + tone: CaptionTone | str = CaptionTone.PROFESSIONAL, language: str = "en", **kwargs: Any, ) -> str: @@ -139,9 +140,9 @@ def generate_post( async def agenerate_post( self, topic: str, - platform: str, + platform: Platform | str, *, - tone: str = "professional", + tone: CaptionTone | str = CaptionTone.PROFESSIONAL, language: str = "en", **kwargs: Any, ) -> str: diff --git a/src/late/ai/protocols.py b/src/late/ai/protocols.py index 40ceb53..ba727f9 100644 --- a/src/late/ai/protocols.py +++ b/src/late/ai/protocols.py @@ -12,6 +12,8 @@ if TYPE_CHECKING: from collections.abc import AsyncIterator + from late.enums import CaptionTone, Platform + @dataclass class GenerateRequest: @@ -21,8 +23,8 @@ class GenerateRequest: system: str | None = None max_tokens: int = 500 temperature: float = 0.7 - platform: str | None = None # e.g., "twitter", "linkedin" - tone: str | None = None # e.g., "professional", "casual" + platform: Platform | str | None = None + tone: CaptionTone | str | None = None language: str = "en" context: dict[str, Any] = field(default_factory=dict) @@ -68,8 +70,6 @@ class StreamingAIProvider(Protocol): """Protocol for streaming content generation.""" @abstractmethod - async def agenerate_stream( - self, request: GenerateRequest - ) -> AsyncIterator[str]: + async def agenerate_stream(self, request: 
GenerateRequest) -> AsyncIterator[str]: """Generate content as a stream.""" ... diff --git a/src/late/ai/providers/openai.py b/src/late/ai/providers/openai.py index 2cbdab0..5c82996 100644 --- a/src/late/ai/providers/openai.py +++ b/src/late/ai/providers/openai.py @@ -7,6 +7,8 @@ import os from typing import TYPE_CHECKING, Any +from late.enums import Platform + from ..protocols import GenerateRequest, GenerateResponse if TYPE_CHECKING: @@ -55,8 +57,14 @@ def __init__( def name(self) -> str: return "openai" + @property + def model(self) -> str: + """Current model being used.""" + return self._model + @property def default_model(self) -> str: + """Default model if none specified.""" return "gpt-4o-mini" def _build_messages(self, request: GenerateRequest) -> list[dict[str, str]]: @@ -76,12 +84,12 @@ def _build_system_prompt(self, request: GenerateRequest) -> str: parts = ["You are an expert social media content creator."] if request.platform: - platform_guides = { - "twitter": "Keep it under 280 characters. Be concise and engaging.", - "linkedin": "Be professional and insightful. Use paragraphs.", - "instagram": "Be visual and use emojis. Include hashtag suggestions.", - "tiktok": "Be trendy and use Gen-Z language. Keep it fun.", - "facebook": "Be conversational and engaging.", + platform_guides: dict[Platform | str, str] = { + Platform.TWITTER: "Keep it under 280 characters. Be concise and engaging.", + Platform.LINKEDIN: "Be professional and insightful. Use paragraphs.", + Platform.INSTAGRAM: "Be visual and use emojis. Include hashtag suggestions.", + Platform.TIKTOK: "Be trendy and use Gen-Z language. Keep it fun.", + Platform.FACEBOOK: "Be conversational and engaging.", } guide = platform_guides.get(request.platform, "") parts.append(f"Writing for {request.platform}. 
{guide}") @@ -140,9 +148,7 @@ async def agenerate(self, request: GenerateRequest) -> GenerateResponse: finish_reason=choice.finish_reason, ) - async def agenerate_stream( - self, request: GenerateRequest - ) -> AsyncIterator[str]: + async def agenerate_stream(self, request: GenerateRequest) -> AsyncIterator[str]: """Generate content as a stream.""" stream = await self._async_client.chat.completions.create( model=self._model, diff --git a/src/late/client/base.py b/src/late/client/base.py index 74f2e6d..787e23c 100644 --- a/src/late/client/base.py +++ b/src/late/client/base.py @@ -206,10 +206,14 @@ def _post( headers=headers, timeout=self.timeout, ) as client: - return self._request_with_retry(client, "POST", path, files=files, params=params) + return self._request_with_retry( + client, "POST", path, files=files, params=params + ) with self._sync_client() as client: - return self._request_with_retry(client, "POST", path, json=data, params=params) + return self._request_with_retry( + client, "POST", path, json=data, params=params + ) def _put( self, @@ -312,10 +316,14 @@ async def _apost( headers=headers, timeout=self.timeout, ) as client: - return await self._arequest_with_retry(client, "POST", path, files=files, params=params) + return await self._arequest_with_retry( + client, "POST", path, files=files, params=params + ) async with self._async_client() as client: - return await self._arequest_with_retry(client, "POST", path, json=data, params=params) + return await self._arequest_with_retry( + client, "POST", path, json=data, params=params + ) async def _aput( self, @@ -333,4 +341,6 @@ async def _adelete( ) -> dict[str, Any]: """Make an async DELETE request.""" async with self._async_client() as client: - return await self._arequest_with_retry(client, "DELETE", path, params=params) + return await self._arequest_with_retry( + client, "DELETE", path, params=params + ) diff --git a/src/late/client/late_client.py b/src/late/client/late_client.py index 3689d35..7a343f8 
100644 --- a/src/late/client/late_client.py +++ b/src/late/client/late_client.py @@ -22,7 +22,7 @@ class Late(BaseClient): Late API client for scheduling social media posts. Example: - >>> from late import Late + >>> from late import Late, Platform >>> >>> # Initialize client >>> client = Late(api_key="your_api_key") @@ -33,7 +33,7 @@ class Late(BaseClient): >>> # Create a post >>> post = client.posts.create( ... content="Hello world!", - ... platforms=[{"platform": "twitter", "accountId": "..."}], + ... platforms=[{"platform": Platform.TWITTER, "accountId": "..."}], ... scheduled_for="2024-12-25T10:00:00Z", ... ) >>> @@ -59,7 +59,9 @@ def __init__( timeout: Request timeout in seconds max_retries: Maximum retries for failed requests """ - super().__init__(api_key, base_url=base_url, timeout=timeout, max_retries=max_retries) + super().__init__( + api_key, base_url=base_url, timeout=timeout, max_retries=max_retries + ) # Initialize resources self.posts = PostsResource(self) diff --git a/src/late/enums.py b/src/late/enums.py new file mode 100644 index 0000000..f9ea848 --- /dev/null +++ b/src/late/enums.py @@ -0,0 +1,309 @@ +""" +Enums for Late SDK. + +These enums provide type-safe constants for common values used throughout the SDK. +They inherit from both `str` and `Enum`, so they serialize automatically to their +string values when used in API requests. + +Example: + >>> from late import Platform, PostStatus + >>> Platform.TWITTER + + >>> Platform.TWITTER == "twitter" + True + >>> str(Platform.TWITTER) + 'twitter' +""" + +from enum import Enum + + +class Platform(str, Enum): + """ + Supported social media platforms. + + Example: + >>> from late import Late, Platform + >>> client = Late(api_key="...") + >>> post = client.posts.create( + ... content="Hello!", + ... platforms=[{"platform": Platform.TWITTER, "accountId": "acc_123"}], + ... 
) + """ + + TWITTER = "twitter" + """Twitter/X""" + + INSTAGRAM = "instagram" + """Instagram (feed posts, stories, reels)""" + + FACEBOOK = "facebook" + """Facebook Pages""" + + LINKEDIN = "linkedin" + """LinkedIn (personal profiles and company pages)""" + + TIKTOK = "tiktok" + """TikTok""" + + YOUTUBE = "youtube" + """YouTube (videos and shorts)""" + + PINTEREST = "pinterest" + """Pinterest""" + + REDDIT = "reddit" + """Reddit""" + + BLUESKY = "bluesky" + """Bluesky""" + + THREADS = "threads" + """Threads (Meta)""" + + GOOGLE_BUSINESS = "googlebusiness" + """Google Business Profile""" + + +class PostStatus(str, Enum): + """ + Post publication statuses. + + Example: + >>> from late import Late, PostStatus + >>> client = Late(api_key="...") + >>> scheduled = client.posts.list(status=PostStatus.SCHEDULED) + >>> failed = client.posts.list(status=PostStatus.FAILED) + """ + + DRAFT = "draft" + """Saved but not scheduled for publishing""" + + SCHEDULED = "scheduled" + """Scheduled for future publishing""" + + PUBLISHING = "publishing" + """Currently being published to platforms""" + + PUBLISHED = "published" + """Successfully published to all platforms""" + + FAILED = "failed" + """Publishing failed on all platforms""" + + PARTIAL = "partial" + """Publishing succeeded on some platforms but failed on others""" + + +class MediaType(str, Enum): + """ + Media item types. + + Example: + >>> from late import MediaType + >>> media_item = { + ... "type": MediaType.VIDEO, + ... "url": "https://example.com/video.mp4", + ... } + """ + + IMAGE = "image" + """Static image (JPEG, PNG, WebP, etc.)""" + + VIDEO = "video" + """Video file (MP4, MOV, etc.)""" + + GIF = "gif" + """Animated GIF""" + + DOCUMENT = "document" + """Document file (PDF for LinkedIn)""" + + +class Visibility(str, Enum): + """ + Content visibility settings. + + Used primarily for YouTube videos and other platforms that support + visibility controls. 
+ + Example: + >>> from late import Visibility + >>> youtube_settings = { + ... "visibility": Visibility.UNLISTED, + ... } + """ + + PUBLIC = "public" + """Anyone can view the content""" + + PRIVATE = "private" + """Only you and explicitly shared users can view""" + + UNLISTED = "unlisted" + """Only people with the direct link can view""" + + +class InstagramContentType(str, Enum): + """ + Instagram-specific content types. + + Example: + >>> from late import InstagramContentType + >>> instagram_settings = { + ... "contentType": InstagramContentType.STORY, + ... } + """ + + STORY = "story" + """Ephemeral story (disappears after 24 hours)""" + + +class FacebookContentType(str, Enum): + """ + Facebook-specific content types. + + Example: + >>> from late import FacebookContentType + >>> facebook_settings = { + ... "contentType": FacebookContentType.STORY, + ... } + """ + + STORY = "story" + """Facebook Page Story (ephemeral, 24 hours)""" + + +class TikTokPrivacyLevel(str, Enum): + """ + TikTok privacy levels. + + Note: Available options depend on the creator's account settings. + Use the accounts API to get available options for each account. + """ + + PUBLIC_TO_EVERYONE = "PUBLIC_TO_EVERYONE" + """Visible to everyone""" + + MUTUAL_FOLLOW_FRIENDS = "MUTUAL_FOLLOW_FRIENDS" + """Visible only to mutual followers""" + + FOLLOWER_OF_CREATOR = "FOLLOWER_OF_CREATOR" + """Visible only to followers""" + + SELF_ONLY = "SELF_ONLY" + """Visible only to the creator""" + + +class TikTokCommercialContentType(str, Enum): + """ + TikTok commercial content disclosure types. + + Required for brand partnerships and sponsored content. + """ + + NONE = "none" + """Not commercial content""" + + BRAND_ORGANIC = "brand_organic" + """Organic brand content""" + + BRAND_CONTENT = "brand_content" + """Paid partnership / sponsored content""" + + +class TikTokMediaType(str, Enum): + """ + TikTok media types. + + Usually auto-detected from media items, but can be overridden. 
+ """ + + VIDEO = "video" + """Video post""" + + PHOTO = "photo" + """Photo carousel (up to 35 images)""" + + +class GoogleBusinessCTAType(str, Enum): + """ + Google Business Profile call-to-action button types. + + Example: + >>> from late import GoogleBusinessCTAType + >>> cta = { + ... "type": GoogleBusinessCTAType.BOOK, + ... "url": "https://example.com/book", + ... } + """ + + LEARN_MORE = "LEARN_MORE" + """Link to more information""" + + BOOK = "BOOK" + """Booking/reservation link""" + + ORDER = "ORDER" + """Online ordering link""" + + SHOP = "SHOP" + """E-commerce/shopping link""" + + SIGN_UP = "SIGN_UP" + """Registration/signup link""" + + CALL = "CALL" + """Phone call action""" + + +class DayOfWeek(int, Enum): + """ + Days of the week for queue scheduling. + + Values follow the JavaScript/API convention where Sunday = 0. + + Example: + >>> from late import DayOfWeek + >>> slots = [ + ... {"dayOfWeek": DayOfWeek.MONDAY, "time": "09:00"}, + ... {"dayOfWeek": DayOfWeek.WEDNESDAY, "time": "14:00"}, + ... ] + """ + + SUNDAY = 0 + MONDAY = 1 + TUESDAY = 2 + WEDNESDAY = 3 + THURSDAY = 4 + FRIDAY = 5 + SATURDAY = 6 + + +class CaptionTone(str, Enum): + """ + Tones for AI-generated captions. + + Example: + >>> from late import Late, CaptionTone + >>> client = Late(api_key="...") + >>> result = client.tools.generate_caption( + ... image_url="https://example.com/image.jpg", + ... tone=CaptionTone.PROFESSIONAL, + ... 
) + """ + + PROFESSIONAL = "professional" + """Formal, business-appropriate tone""" + + CASUAL = "casual" + """Relaxed, conversational tone""" + + HUMOROUS = "humorous" + """Fun, witty, or playful tone""" + + INSPIRATIONAL = "inspirational" + """Motivational, uplifting tone""" + + INFORMATIVE = "informative" + """Educational, fact-focused tone""" diff --git a/src/late/mcp/server.py b/src/late/mcp/server.py index 43be8f3..00d5958 100644 --- a/src/late/mcp/server.py +++ b/src/late/mcp/server.py @@ -28,10 +28,12 @@ from mcp.server.fastmcp import FastMCP -from late import Late +from late import Late, MediaType, PostStatus # Initialize MCP server -mcp = FastMCP("Late", instructions=""" +mcp = FastMCP( + "Late", + instructions=""" Late API server for scheduling social media posts. Available tools are prefixed by resource: @@ -39,7 +41,8 @@ - profiles_* : Manage profiles (groups of accounts) - posts_* : Create, list, update, delete posts - media_* : Upload images and videos -""") +""", +) def _get_client() -> Late: @@ -56,6 +59,7 @@ def _get_client() -> Late: # ACCOUNTS # ============================================================================ + @mcp.tool() def accounts_list() -> str: """ @@ -104,6 +108,7 @@ def accounts_get(platform: str) -> str: # PROFILES # ============================================================================ + @mcp.tool() def profiles_list() -> str: """ @@ -236,6 +241,7 @@ def profiles_delete(profile_id: str) -> str: # POSTS # ============================================================================ + @mcp.tool() def posts_list(status: str = "", limit: int = 10) -> str: """ @@ -258,7 +264,11 @@ def posts_list(status: str = "", limit: int = 10) -> str: lines = [f"Found {len(posts)} post(s):\n"] for post in posts: - content_preview = post["content"][:60] + "..." if len(post["content"]) > 60 else post["content"] + content_preview = ( + post["content"][:60] + "..." 
+ if len(post["content"]) > 60 + else post["content"] + ) platforms = ", ".join(t.get("platform", "?") for t in post.get("platforms", [])) lines.append(f"- [{post['status']}] {content_preview}") lines.append(f" Platforms: {platforms} | ID: {post['_id']}") @@ -278,7 +288,9 @@ def posts_get(post_id: str) -> str: response = client.posts.get(post_id) post = response.get("post", response) - content_preview = post["content"][:100] + "..." if len(post["content"]) > 100 else post["content"] + content_preview = ( + post["content"][:100] + "..." if len(post["content"]) > 100 else post["content"] + ) platforms = ", ".join(t.get("platform", "?") for t in post.get("platforms", [])) lines = [ @@ -304,6 +316,7 @@ def posts_get(post_id: str) -> str: def posts_create( content: str, platform: str, + is_draft: bool = False, publish_now: bool = False, schedule_minutes: int = 0, media_urls: str = "", @@ -312,13 +325,19 @@ def posts_create( """ Create a new social media post, optionally with media. + Scheduling behavior: + - is_draft=True: Save as draft (no scheduling, can edit later) + - publish_now=True: Publish immediately + - Neither: Schedule for schedule_minutes from now (default: 60 min) + Args: content: The post content/text platform: Target platform (twitter, instagram, linkedin, tiktok, bluesky, facebook, youtube, pinterest, threads) - publish_now: If True, publish immediately. If False, schedule for later. - schedule_minutes: Minutes from now to schedule (ignored if publish_now=True). Default 60 min. + is_draft: Save as draft without scheduling. Draft posts can be edited and scheduled later (default: False) + publish_now: Publish immediately instead of scheduling (default: False) + schedule_minutes: Minutes from now to schedule (ignored if publish_now=True or is_draft=True). Default 60 min. media_urls: Comma-separated URLs of media files to attach. Optional. 
- title: Optional title (for YouTube, Pinterest) + title: Optional title (required for YouTube, recommended for Pinterest) """ client = _get_client() @@ -335,10 +354,12 @@ def posts_create( # Build request params = { "content": content, - "platforms": [{ - "platform": account["platform"], - "accountId": account["_id"], - }], + "platforms": [ + { + "platform": account["platform"], + "accountId": account["_id"], + } + ], } if title: @@ -349,15 +370,19 @@ def posts_create( urls = [u.strip() for u in media_urls.split(",") if u.strip()] media_items = [] for url in urls: - media_type = "image" - if any(ext in url.lower() for ext in [".mp4", ".mov", ".avi", ".webm", ".m4v"]): - media_type = "video" + media_type: MediaType | str = MediaType.IMAGE + if any( + ext in url.lower() for ext in [".mp4", ".mov", ".avi", ".webm", ".m4v"] + ): + media_type = MediaType.VIDEO elif any(ext in url.lower() for ext in [".gif"]): - media_type = "gif" + media_type = MediaType.GIF media_items.append({"type": media_type, "url": url}) params["media_items"] = media_items - if publish_now: + if is_draft: + params["is_draft"] = True + elif publish_now: params["publish_now"] = True else: minutes = schedule_minutes if schedule_minutes > 0 else 60 @@ -367,9 +392,15 @@ def posts_create( post = response.get("post", {}) username = account.get("username") or account.get("name") or account["_id"] - media_info = f" with {len(params.get('media_items', []))} media file(s)" if params.get("media_items") else "" - - if publish_now: + media_info = ( + f" with {len(params.get('media_items', []))} media file(s)" + if params.get("media_items") + else "" + ) + + if is_draft: + return f"πŸ“ Draft saved for {platform} (@{username}){media_info}\nPost ID: {post.get('_id', 'N/A')}\nStatus: draft" + elif publish_now: return f"βœ… Published to {platform} (@{username}){media_info}\nPost ID: {post.get('_id', 'N/A')}" else: scheduled = params["scheduled_for"].strftime("%Y-%m-%d %H:%M") @@ -386,23 +417,32 @@ def 
posts_publish_now(content: str, platform: str, media_urls: str = "") -> str: platform: Target platform (twitter, instagram, linkedin, tiktok, bluesky, etc.) media_urls: Comma-separated URLs of media files to attach. Optional. """ - return posts_create(content=content, platform=platform, publish_now=True, media_urls=media_urls) + return posts_create( + content=content, platform=platform, publish_now=True, media_urls=media_urls + ) @mcp.tool() def posts_cross_post( content: str, platforms: str, + is_draft: bool = False, publish_now: bool = False, media_urls: str = "", ) -> str: """ Post the same content to multiple platforms at once. + Scheduling behavior: + - is_draft=True: Save as draft (no scheduling, can edit later) + - publish_now=True: Publish immediately + - Neither: Schedule for 1 hour from now + Args: content: The post content/text platforms: Comma-separated list of platforms (e.g., "twitter,linkedin,bluesky") - publish_now: If True, publish immediately. If False, schedule for 1 hour from now. + is_draft: Save as draft without scheduling (default: False) + publish_now: Publish immediately instead of scheduling (default: False) media_urls: Comma-separated URLs of media files to attach. Optional. 
""" client = _get_client() @@ -416,10 +456,12 @@ def posts_cross_post( for platform in target_platforms: matching = [a for a in accounts if a["platform"].lower() == platform] if matching: - platform_targets.append({ - "platform": matching[0]["platform"], - "accountId": matching[0]["_id"], - }) + platform_targets.append( + { + "platform": matching[0]["platform"], + "accountId": matching[0]["_id"], + } + ) else: not_found.append(platform) @@ -436,15 +478,19 @@ def posts_cross_post( urls = [u.strip() for u in media_urls.split(",") if u.strip()] media_items = [] for url in urls: - media_type = "image" - if any(ext in url.lower() for ext in [".mp4", ".mov", ".avi", ".webm", ".m4v"]): - media_type = "video" + media_type: MediaType | str = MediaType.IMAGE + if any( + ext in url.lower() for ext in [".mp4", ".mov", ".avi", ".webm", ".m4v"] + ): + media_type = MediaType.VIDEO elif any(ext in url.lower() for ext in [".gif"]): - media_type = "gif" + media_type = MediaType.GIF media_items.append({"type": media_type, "url": url}) params["media_items"] = media_items - if publish_now: + if is_draft: + params["is_draft"] = True + elif publish_now: params["publish_now"] = True else: params["scheduled_for"] = datetime.now() + timedelta(hours=1) @@ -453,8 +499,16 @@ def posts_cross_post( post = response.get("post", {}) posted_to = [t["platform"] for t in platform_targets] - media_info = f" with {len(params.get('media_items', []))} media file(s)" if params.get("media_items") else "" - result = f"βœ… {'Published' if publish_now else 'Scheduled'} to: {', '.join(posted_to)}{media_info}\nPost ID: {post.get('_id', 'N/A')}" + media_info = ( + f" with {len(params.get('media_items', []))} media file(s)" + if params.get("media_items") + else "" + ) + + if is_draft: + result = f"πŸ“ Draft saved for: {', '.join(posted_to)}{media_info}\nPost ID: {post.get('_id', 'N/A')}\nStatus: draft" + else: + result = f"βœ… {'Published' if publish_now else 'Scheduled'} to: {', 
'.join(posted_to)}{media_info}\nPost ID: {post.get('_id', 'N/A')}" if not_found: result += f"\n⚠️ Accounts not found for: {', '.join(not_found)}" @@ -527,7 +581,7 @@ def posts_retry(post_id: str) -> str: try: post_response = client.posts.get(post_id) post = post_response.get("post", post_response) - if post.get("status") != "failed": + if post.get("status") != PostStatus.FAILED: return f"⚠️ Post {post_id} is not in failed status (current: {post.get('status')})" except Exception as e: return f"❌ Could not find post {post_id}: {e}" @@ -548,7 +602,7 @@ def posts_list_failed(limit: int = 10) -> str: limit: Maximum number of posts to return (default 10) """ client = _get_client() - response = client.posts.list(status="failed", limit=limit) + response = client.posts.list(status=PostStatus.FAILED, limit=limit) posts = response.get("posts", []) if not posts: @@ -556,7 +610,11 @@ def posts_list_failed(limit: int = 10) -> str: lines = [f"Found {len(posts)} failed post(s):\n"] for post in posts: - content_preview = post["content"][:50] + "..." if len(post["content"]) > 50 else post["content"] + content_preview = ( + post["content"][:50] + "..." + if len(post["content"]) > 50 + else post["content"] + ) platforms = ", ".join(t.get("platform", "?") for t in post.get("platforms", [])) error = post.get("error", "Unknown error") lines.append(f"- {content_preview}") @@ -573,7 +631,7 @@ def posts_retry_all_failed() -> str: Retry all failed posts. 
""" client = _get_client() - response = client.posts.list(status="failed", limit=50) + response = client.posts.list(status=PostStatus.FAILED, limit=50) posts = response.get("posts", []) if not posts: @@ -603,6 +661,7 @@ def posts_retry_all_failed() -> str: # MEDIA UPLOAD # ============================================================================ + @mcp.tool() def media_generate_upload_link() -> str: """ @@ -691,7 +750,9 @@ def media_check_upload_status(token: str) -> str: lines.append(f" Size: {f.get('size', 0) / 1024:.1f} KB") lines.append("") - lines.append("\nπŸ“ You can now create a post with these media URLs using posts_create with the media_urls parameter.") + lines.append( + "\nπŸ“ You can now create a post with these media URLs using posts_create with the media_urls parameter." + ) lines.append(f"\nMedia URLs: {','.join(media_urls)}") return "\n".join(lines) diff --git a/src/late/mcp/tool_definitions.py b/src/late/mcp/tool_definitions.py new file mode 100644 index 0000000..5202c17 --- /dev/null +++ b/src/late/mcp/tool_definitions.py @@ -0,0 +1,226 @@ +""" +Centralized tool definitions for MCP and documentation. + +This file is the single source of truth for tool parameters and descriptions. 
+Used by: +- MCP server (server.py) for tool definitions +- Documentation generation (can be exported to MDX) +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Any + + +@dataclass +class ParamDef: + """Definition of a tool parameter.""" + + name: str + type: str + description: str + required: bool = False + default: Any = None + + def to_mdx_row(self) -> str: + """Generate MDX table row.""" + req = "Yes" if self.required else "No" + default_str = f"`{self.default}`" if self.default is not None else "-" + return f"| `{self.name}` | `{self.type}` | {self.description} | {req} | {default_str} |" + + +@dataclass +class ToolDef: + """Definition of a tool.""" + + name: str + description: str + params: list[ParamDef] + + def to_mdx_section(self) -> str: + """Generate MDX documentation section.""" + lines = [ + f"### {self.name}", + "", + self.description, + "", + "| Parameter | Type | Description | Required | Default |", + "|-----------|------|-------------|----------|---------|", + ] + lines.extend(p.to_mdx_row() for p in self.params) + return "\n".join(lines) + + +# ============================================================================= +# POSTS TOOL DEFINITIONS +# ============================================================================= + +POSTS_CREATE_PARAMS = [ + ParamDef( + name="content", + type="str", + description="The post content/text", + required=True, + ), + ParamDef( + name="platform", + type="str", + description="Target platform: twitter, instagram, linkedin, tiktok, bluesky, facebook, youtube, pinterest, threads", + required=True, + ), + ParamDef( + name="is_draft", + type="bool", + description="Save as draft without scheduling. 
Draft posts can be edited and scheduled later", + required=False, + default=False, + ), + ParamDef( + name="publish_now", + type="bool", + description="Publish immediately instead of scheduling", + required=False, + default=False, + ), + ParamDef( + name="schedule_minutes", + type="int", + description="Minutes from now to schedule the post. Ignored if publish_now=True or is_draft=True", + required=False, + default=60, + ), + ParamDef( + name="media_urls", + type="str", + description="Comma-separated URLs of media files to attach (images, videos, GIFs)", + required=False, + default="", + ), + ParamDef( + name="title", + type="str", + description="Optional title (required for YouTube, recommended for Pinterest)", + required=False, + default="", + ), +] + +POSTS_CREATE = ToolDef( + name="posts_create", + description="""Create a new social media post. + +**Scheduling behavior:** +- `is_draft=True`: Save as draft (no scheduling, can edit later) +- `publish_now=True`: Publish immediately +- Neither: Schedule for `schedule_minutes` from now (default: 60 min)""", + params=POSTS_CREATE_PARAMS, +) + +POSTS_CROSS_POST_PARAMS = [ + ParamDef( + name="content", + type="str", + description="The post content/text", + required=True, + ), + ParamDef( + name="platforms", + type="str", + description="Comma-separated list of platforms (e.g., 'twitter,linkedin,bluesky')", + required=True, + ), + ParamDef( + name="is_draft", + type="bool", + description="Save as draft without scheduling", + required=False, + default=False, + ), + ParamDef( + name="publish_now", + type="bool", + description="Publish immediately instead of scheduling", + required=False, + default=False, + ), + ParamDef( + name="media_urls", + type="str", + description="Comma-separated URLs of media files to attach", + required=False, + default="", + ), +] + +POSTS_CROSS_POST = ToolDef( + name="posts_cross_post", + description="Post the same content to multiple platforms at once.", + params=POSTS_CROSS_POST_PARAMS, +) + 
+POSTS_LIST_PARAMS = [ + ParamDef( + name="status", + type="str", + description="Filter by status: draft, scheduled, published, failed. Empty for all", + required=False, + default="", + ), + ParamDef( + name="limit", + type="int", + description="Maximum number of posts to return", + required=False, + default=10, + ), +] + +POSTS_LIST = ToolDef( + name="posts_list", + description="List posts with optional filtering by status.", + params=POSTS_LIST_PARAMS, +) + +# ============================================================================= +# ALL TOOL DEFINITIONS +# ============================================================================= + +TOOL_DEFINITIONS = { + "posts_create": POSTS_CREATE, + "posts_cross_post": POSTS_CROSS_POST, + "posts_list": POSTS_LIST, +} + + +def generate_mdx_docs() -> str: + """Generate complete MDX documentation for all tools.""" + sections = [ + "## Tool Reference", + "", + "Detailed parameters for each MCP tool.", + "", + ] + for tool in TOOL_DEFINITIONS.values(): + sections.append(tool.to_mdx_section()) + sections.append("") + return "\n".join(sections) + + +def get_tool_docstring(tool_name: str) -> str: + """Get the docstring for a tool, formatted for MCP.""" + tool = TOOL_DEFINITIONS.get(tool_name) + if not tool: + return "" + + lines = [tool.description, "", "Args:"] + for param in tool.params: + req = " (required)" if param.required else "" + default = ( + f" (default: {param.default})" + if param.default is not None and not param.required + else "" + ) + lines.append(f" {param.name}: {param.description}{req}{default}") + + return "\n".join(lines) diff --git a/src/late/models/__init__.py b/src/late/models/__init__.py index bb55c03..2795ce7 100644 --- a/src/late/models/__init__.py +++ b/src/late/models/__init__.py @@ -10,29 +10,77 @@ # Import specific commonly used models for convenience from ._generated.models import ( + AccountGetResponse, + # Accounts responses + AccountsListResponse, + AccountWithFollowerStats, + 
CaptionResponse, + DownloadFormat, + # Tools responses + DownloadResponse, ErrorResponse, FacebookPlatformData, + FollowerStatsResponse, + HashtagCheckResponse, + HashtagInfo, InstagramPlatformData, LinkedInPlatformData, MediaItem, - # Responses + # Media responses + MediaUploadResponse, + # Base responses Pagination, PinterestPlatformData, PlatformTarget, # Core models Post, + PostCreateResponse, + PostDeleteResponse, + PostGetResponse, + PostRetryResponse, + # Posts responses + PostsListResponse, + PostUpdateResponse, Profile, + ProfileCreateResponse, + ProfileDeleteResponse, + ProfileGetResponse, + # Profiles responses + ProfilesListResponse, + ProfileUpdateResponse, + QueueDeleteResponse, + QueueNextSlotResponse, + QueuePreviewResponse, + QueueSchedule, + QueueSlot, + # Queue responses + QueueSlotsResponse, + QueueUpdateResponse, SocialAccount, # Enums Status, # Platform-specific TikTokSettings, + TranscriptResponse, + TranscriptSegment, TwitterPlatformData, Type, + UploadedFile, + UploadTokenResponse, + UploadTokenStatusResponse, + # Users responses + User, + UserGetResponse, + UsersListResponse, Visibility, YouTubePlatformData, ) +# SDK-specific models (not from OpenAPI) +from .responses import ( + MediaLargeUploadResponse, +) + __all__ = [ # Core models "Post", @@ -40,6 +88,8 @@ "PlatformTarget", "Profile", "SocialAccount", + "QueueSlot", + "QueueSchedule", # Enums "Status", "Type", @@ -52,7 +102,49 @@ "LinkedInPlatformData", "YouTubePlatformData", "PinterestPlatformData", - # Responses + # Base responses "Pagination", "ErrorResponse", + # Posts responses + "PostsListResponse", + "PostGetResponse", + "PostCreateResponse", + "PostUpdateResponse", + "PostDeleteResponse", + "PostRetryResponse", + # Profiles responses + "ProfilesListResponse", + "ProfileGetResponse", + "ProfileCreateResponse", + "ProfileUpdateResponse", + "ProfileDeleteResponse", + # Accounts responses + "AccountsListResponse", + "AccountGetResponse", + "FollowerStatsResponse", + 
"AccountWithFollowerStats", + # Media responses + "MediaUploadResponse", + "MediaLargeUploadResponse", + "UploadedFile", + "UploadTokenResponse", + "UploadTokenStatusResponse", + # Queue responses + "QueueSlotsResponse", + "QueueUpdateResponse", + "QueueDeleteResponse", + "QueuePreviewResponse", + "QueueNextSlotResponse", + # Tools responses + "DownloadResponse", + "DownloadFormat", + "TranscriptResponse", + "TranscriptSegment", + "HashtagCheckResponse", + "HashtagInfo", + "CaptionResponse", + # Users responses + "User", + "UsersListResponse", + "UserGetResponse", ] diff --git a/src/late/models/_generated/__init__.py b/src/late/models/_generated/__init__.py new file mode 100644 index 0000000..aca8b21 --- /dev/null +++ b/src/late/models/_generated/__init__.py @@ -0,0 +1,11 @@ +""" +Auto-generated Pydantic models from Late API OpenAPI specification. + +DO NOT EDIT THIS FILE MANUALLY. +Run `python scripts/generate_models.py` to regenerate. +""" + +from __future__ import annotations + +# Re-export all generated models +from .models import * # noqa: F401, F403 diff --git a/src/late/models/_generated/models.py b/src/late/models/_generated/models.py index be2583f..b260cdf 100644 --- a/src/late/models/_generated/models.py +++ b/src/late/models/_generated/models.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2025-12-11T13:56:43+00:00 +# timestamp: 2025-12-15T13:54:40+00:00 from __future__ import annotations @@ -27,6 +27,11 @@ class MediaItem(BaseModel): Media referenced in posts. URLs must be publicly reachable over HTTPS by the destination platforms. When using third‑party storage, ensure signed links remain valid until upload completes. 
+ **Uploading Media:** + - Small files (≀ ~4MB): Use `POST /v1/media` with `multipart/form-data` + - Large files (> ~4MB, up to 5GB): Use `POST /v1/media` with `Content-Type: application/json` for client-upload flow (presigned URL) + - See `/v1/media` endpoint documentation for details on both methods + **Automatic Compression:** - Bluesky: images larger than ~1MB are automatically recompressed to meet the platform's blob size limit. - Instagram: images >8 MB and videos >100 MB (stories) or >300 MB (reels) are automatically compressed. @@ -86,6 +91,16 @@ class TwitterPlatformData(BaseModel): class ThreadsPlatformData(BaseModel): + """ + Constraints: + - Carousel posts support up to 10 images (no videos in carousels). + - Single posts support one image or one video. + - Videos must be H.264/AAC MP4 format, max 5 minutes duration. + - Images must be JPEG or PNG, max 8 MB each. + - threadItems creates a reply chain (Threads equivalent of Twitter threads). + + """ + threadItems: list[ThreadItem] | None = None """ Sequence of posts in a Threads thread (root then replies in order). @@ -171,6 +186,10 @@ class InstagramPlatformData(BaseModel): """ Set to 'story' to publish as a Story. Default posts become Reels or feed depending on media. """ + shareToFeed: bool = True + """ + For Reels only. When true (default), the Reel appears on both the Reels tab and your main profile feed. Set to false to post to the Reels tab only. 
+ """ collaborators: list[str] | None = None """ Up to 3 Instagram usernames to invite as collaborators (feed/Reels only) @@ -502,7 +521,6 @@ class ApiKey(BaseModel): id: str | None = None name: str | None = None keyPreview: str | None = None - permissions: list[str] | None = None expiresAt: AwareDatetime | None = None createdAt: AwareDatetime | None = None key: str | None = None @@ -733,6 +751,226 @@ class AnalyticsListResponse(BaseModel): """ +class PostDeleteResponse(BaseModel): + message: str | None = None + + +class ProfilesListResponse(BaseModel): + profiles: list[Profile] | None = None + + +class ProfileGetResponse(BaseModel): + profile: Profile | None = None + + +class ProfileCreateResponse(BaseModel): + message: str | None = None + profile: Profile | None = None + + +class ProfileUpdateResponse(BaseModel): + message: str | None = None + profile: Profile | None = None + + +class ProfileDeleteResponse(BaseModel): + message: str | None = None + + +class AccountsListResponse(BaseModel): + accounts: list[SocialAccount] | None = None + hasAnalyticsAccess: bool | None = None + """ + Whether user has analytics add-on access + """ + + +class AccountGetResponse(BaseModel): + account: SocialAccount | None = None + + +class DateRange(BaseModel): + from_: Annotated[AwareDatetime | None, Field(alias="from")] = None + to: AwareDatetime | None = None + + +class Aggregation(Enum): + DAILY = "daily" + WEEKLY = "weekly" + MONTHLY = "monthly" + + +class FollowerStatsResponse(BaseModel): + accounts: list[AccountWithFollowerStats] | None = None + dateRange: DateRange | None = None + aggregation: Aggregation | None = None + + +class Type2(Enum): + IMAGE = "image" + VIDEO = "video" + DOCUMENT = "document" + + +class UploadedFile(BaseModel): + type: Type2 | None = None + url: AnyUrl | None = None + filename: str | None = None + size: int | None = None + mimeType: str | None = None + + +class MediaUploadResponse(BaseModel): + files: list[UploadedFile] | None = None + + +class 
Status5(Enum): + PENDING = "pending" + COMPLETED = "completed" + EXPIRED = "expired" + + +class UploadTokenResponse(BaseModel): + token: str | None = None + uploadUrl: AnyUrl | None = None + expiresAt: AwareDatetime | None = None + status: Status5 | None = None + + +class UploadTokenStatusResponse(BaseModel): + token: str | None = None + status: Status5 | None = None + files: list[UploadedFile] | None = None + createdAt: AwareDatetime | None = None + expiresAt: AwareDatetime | None = None + completedAt: AwareDatetime | None = None + + +class QueueSlotsResponse(BaseModel): + exists: bool | None = None + schedule: QueueSchedule | None = None + nextSlots: list[AwareDatetime] | None = None + + +class QueueUpdateResponse(BaseModel): + success: bool | None = None + schedule: QueueSchedule | None = None + nextSlots: list[AwareDatetime] | None = None + reshuffledCount: int | None = None + + +class QueueDeleteResponse(BaseModel): + success: bool | None = None + deleted: bool | None = None + + +class QueuePreviewResponse(BaseModel): + profileId: str | None = None + count: int | None = None + slots: list[AwareDatetime] | None = None + + +class QueueNextSlotResponse(BaseModel): + profileId: str | None = None + nextSlot: AwareDatetime | None = None + timezone: str | None = None + + +class DownloadFormat(BaseModel): + formatId: str | None = None + ext: str | None = None + resolution: str | None = None + filesize: int | None = None + quality: str | None = None + + +class DownloadResponse(BaseModel): + url: AnyUrl | None = None + title: str | None = None + thumbnail: AnyUrl | None = None + duration: int | None = None + formats: list[DownloadFormat] | None = None + + +class TranscriptSegment(BaseModel): + text: str | None = None + start: float | None = None + duration: float | None = None + + +class TranscriptResponse(BaseModel): + transcript: str | None = None + segments: list[TranscriptSegment] | None = None + language: str | None = None + + +class Status7(Enum): + SAFE = "safe" 
+ BANNED = "banned" + RESTRICTED = "restricted" + UNKNOWN = "unknown" + + +class HashtagInfo(BaseModel): + hashtag: str | None = None + status: Status7 | None = None + postCount: int | None = None + + +class HashtagCheckResponse(BaseModel): + hashtags: list[HashtagInfo] | None = None + + +class CaptionResponse(BaseModel): + caption: str | None = None + + +class User(BaseModel): + field_id: Annotated[str | None, Field(alias="_id")] = None + email: str | None = None + name: str | None = None + role: str | None = None + createdAt: AwareDatetime | None = None + + +class UsersListResponse(BaseModel): + users: list[User] | None = None + + +class UserGetResponse(BaseModel): + user: User | None = None + + +class TikTokPlatformData(BaseModel): + """ + TikTok platform-specific settings. Contains tiktokSettings for video/photo posting options. + + """ + + tiktokSettings: TikTokSettings | None = None + + +class VideoClipJob(BaseModel): + field_id: Annotated[ + str | None, Field(alias="_id", examples=["507f1f77bcf86cd799439011"]) + ] = None + jobId: Annotated[str | None, Field(examples=["abc123def456"])] = None + videoUrl: Annotated[ + AnyUrl | None, Field(examples=["https://storage.example.com/video.mp4"]) + ] = None + videoFileName: Annotated[str | None, Field(examples=["my-video.mp4"])] = None + status: Annotated[Status1 | None, Field(examples=["completed"])] = None + clips: list[VideoClip] | None = None + totalClips: Annotated[int | None, Field(examples=[5])] = None + error: Annotated[str | None, Field(examples=[None])] = None + createdAt: Annotated[ + AwareDatetime | None, Field(examples=["2025-10-22T10:30:00Z"]) + ] = None + completedAt: Annotated[ + AwareDatetime | None, Field(examples=["2025-10-22T10:45:00Z"]) + ] = None + + class PlatformTarget(BaseModel): platform: Annotated[str | None, Field(examples=["twitter"])] = None """ @@ -754,12 +992,37 @@ class PlatformTarget(BaseModel): | PinterestPlatformData | YouTubePlatformData | GoogleBusinessPlatformData + | 
TikTokPlatformData | None ) = None """ Platform-specific overrides and options. """ status: Annotated[str | None, Field(examples=["pending"])] = None + """ + Platform-specific status: pending, publishing, published, failed + """ + platformPostId: Annotated[str | None, Field(examples=["1234567890123456789"])] = ( + None + ) + """ + The native post ID on the platform (populated after successful publish) + """ + platformPostUrl: Annotated[ + AnyUrl | None, + Field(examples=["https://twitter.com/acmecorp/status/1234567890123456789"]), + ] = None + """ + Public URL of the published post on the platform. + Populated after successful publish. For immediate posts (publishNow=true), + this is included in the response. For scheduled posts, fetch the post + via GET /v1/posts/{postId} after the scheduled time. + + """ + publishedAt: AwareDatetime | None = None + """ + Timestamp when the post was published to this platform + """ class Post(BaseModel): @@ -788,7 +1051,6 @@ class Post(BaseModel): mentions: list[str] | None = None visibility: Visibility | None = None metadata: dict[str, Any] | None = None - tiktokSettings: TikTokSettings | None = None queuedFromProfile: str | None = None """ Profile ID if the post was scheduled via the queue @@ -797,22 +1059,25 @@ class Post(BaseModel): updatedAt: AwareDatetime | None = None -class VideoClipJob(BaseModel): - field_id: Annotated[ - str | None, Field(alias="_id", examples=["507f1f77bcf86cd799439011"]) - ] = None - jobId: Annotated[str | None, Field(examples=["abc123def456"])] = None - videoUrl: Annotated[ - AnyUrl | None, Field(examples=["https://storage.example.com/video.mp4"]) - ] = None - videoFileName: Annotated[str | None, Field(examples=["my-video.mp4"])] = None - status: Annotated[Status1 | None, Field(examples=["completed"])] = None - clips: list[VideoClip] | None = None - totalClips: Annotated[int | None, Field(examples=[5])] = None - error: Annotated[str | None, Field(examples=[None])] = None - createdAt: Annotated[ - 
AwareDatetime | None, Field(examples=["2025-10-22T10:30:00Z"]) - ] = None - completedAt: Annotated[ - AwareDatetime | None, Field(examples=["2025-10-22T10:45:00Z"]) - ] = None +class PostsListResponse(BaseModel): + posts: list[Post] | None = None + pagination: Pagination | None = None + + +class PostGetResponse(BaseModel): + post: Post | None = None + + +class PostCreateResponse(BaseModel): + message: str | None = None + post: Post | None = None + + +class PostUpdateResponse(BaseModel): + message: str | None = None + post: Post | None = None + + +class PostRetryResponse(BaseModel): + message: str | None = None + post: Post | None = None diff --git a/src/late/models/responses.py b/src/late/models/responses.py new file mode 100644 index 0000000..f1ef4e2 --- /dev/null +++ b/src/late/models/responses.py @@ -0,0 +1,25 @@ +""" +SDK-specific response models. + +These models are NOT generated from OpenAPI and are specific to the SDK implementation. +For API response models, see the generated models in _generated/models.py. +""" + +from __future__ import annotations + +from pydantic import BaseModel + + +class MediaLargeUploadResponse(BaseModel): + """ + Response from media.upload_large() - Vercel Blob upload. + + This is SDK-specific and not from the API, as large file uploads + go directly to Vercel Blob storage. 
+ """ + + url: str + pathname: str + contentType: str + size: int + downloadUrl: str diff --git a/src/late/pipelines/cross_poster.py b/src/late/pipelines/cross_poster.py index 355c29d..20861f9 100644 --- a/src/late/pipelines/cross_poster.py +++ b/src/late/pipelines/cross_poster.py @@ -8,6 +8,8 @@ from datetime import datetime, timedelta from typing import TYPE_CHECKING, Any +from late.enums import Platform + if TYPE_CHECKING: from ..client.late_client import Late @@ -16,7 +18,7 @@ class PlatformConfig: """Configuration for a single platform.""" - platform: str + platform: Platform | str account_id: str custom_content: str | None = None delay_minutes: int = 0 @@ -26,7 +28,7 @@ class PlatformConfig: class CrossPostResult: """Result of cross-posting to a single platform.""" - platform: str + platform: Platform | str success: bool post_id: str | None = None error: str | None = None @@ -42,27 +44,28 @@ class CrossPosterPipeline: - Platform-specific customizations Example: + >>> from late.enums import Platform >>> client = Late(api_key="...") >>> cross_poster = CrossPosterPipeline(client) >>> >>> results = await cross_poster.post( ... content="Exciting news! Our new feature is live.", ... platforms=[ - ... PlatformConfig("twitter", "tw_123"), - ... PlatformConfig("linkedin", "li_456"), - ... PlatformConfig("instagram", "ig_789"), + ... PlatformConfig(Platform.TWITTER, "tw_123"), + ... PlatformConfig(Platform.LINKEDIN, "li_456"), + ... PlatformConfig(Platform.INSTAGRAM, "ig_789"), ... ], ... 
) """ - CHAR_LIMITS = { - "twitter": 280, - "threads": 500, - "linkedin": 3000, - "instagram": 2200, - "facebook": 63206, - "tiktok": 2200, - "bluesky": 300, + CHAR_LIMITS: dict[Platform | str, int] = { + Platform.TWITTER: 280, + Platform.THREADS: 500, + Platform.LINKEDIN: 3000, + Platform.INSTAGRAM: 2200, + Platform.FACEBOOK: 63206, + Platform.TIKTOK: 2200, + Platform.BLUESKY: 300, } def __init__( diff --git a/src/late/pipelines/csv_scheduler.py b/src/late/pipelines/csv_scheduler.py index 1201591..7023950 100644 --- a/src/late/pipelines/csv_scheduler.py +++ b/src/late/pipelines/csv_scheduler.py @@ -10,6 +10,8 @@ from pathlib import Path from typing import TYPE_CHECKING, Any +from late.enums import MediaType + if TYPE_CHECKING: from collections.abc import Iterator @@ -95,7 +97,10 @@ def _build_payload(self, row: dict[str, str]) -> dict[str, Any]: if row.get("media_url"): payload["mediaItems"] = [ - {"type": row.get("media_type", "image"), "url": row["media_url"]} + { + "type": row.get("media_type", MediaType.IMAGE), + "url": row["media_url"], + } ] if row.get("tags"): diff --git a/src/late/resources/accounts.py b/src/late/resources/accounts.py index 6441092..21ed908 100644 --- a/src/late/resources/accounts.py +++ b/src/late/resources/accounts.py @@ -4,12 +4,16 @@ from __future__ import annotations -from typing import Any +from late.models import ( + AccountGetResponse, + AccountsListResponse, + FollowerStatsResponse, +) from .base import BaseResource -class AccountsResource(BaseResource[Any]): +class AccountsResource(BaseResource[AccountsListResponse]): """ Resource for managing connected social media accounts. @@ -32,7 +36,7 @@ class AccountsResource(BaseResource[Any]): # Sync methods # ------------------------------------------------------------------------- - def list(self, *, profile_id: str | None = None) -> dict[str, Any]: + def list(self, *, profile_id: str | None = None) -> AccountsListResponse: """ List connected accounts. 
@@ -40,12 +44,13 @@ def list(self, *, profile_id: str | None = None) -> dict[str, Any]: profile_id: Optional profile ID to filter accounts Returns: - Dict with 'accounts' and 'hasAnalyticsAccess' keys + AccountsListResponse with 'accounts' and 'hasAnalyticsAccess' attributes """ params = self._build_params(profile_id=profile_id) - return self._client._get(self._BASE_PATH, params=params or None) + data = self._client._get(self._BASE_PATH, params=params or None) + return AccountsListResponse.model_validate(data) - def get(self, account_id: str) -> dict[str, Any]: + def get(self, account_id: str) -> AccountGetResponse: """ Get an account by ID. @@ -53,15 +58,16 @@ def get(self, account_id: str) -> dict[str, Any]: account_id: The account ID Returns: - Dict with 'account' key containing the SocialAccount object + AccountGetResponse with 'account' attribute """ - return self._client._get(self._path(account_id)) + data = self._client._get(self._path(account_id)) + return AccountGetResponse.model_validate(data) def get_follower_stats( self, *, account_ids: list[str] | None = None, - ) -> dict[str, Any]: + ) -> FollowerStatsResponse: """ Get follower statistics for accounts. 
@@ -71,33 +77,37 @@ def get_follower_stats(
             account_ids: Optional list of account IDs to filter
 
         Returns:
-            Dict with follower statistics
+            FollowerStatsResponse with 'accounts', 'dateRange', and 'aggregation' attributes
         """
         params = None
         if account_ids:
             params = {"accountIds": ",".join(account_ids)}
-        return self._client._get(self._path("follower-stats"), params=params)
+        data = self._client._get(self._path("follower-stats"), params=params)
+        return FollowerStatsResponse.model_validate(data)
 
     # -------------------------------------------------------------------------
     # Async methods
     # -------------------------------------------------------------------------
 
-    async def alist(self, *, profile_id: str | None = None) -> dict[str, Any]:
+    async def alist(self, *, profile_id: str | None = None) -> AccountsListResponse:
         """List connected accounts asynchronously."""
         params = self._build_params(profile_id=profile_id)
-        return await self._client._aget(self._BASE_PATH, params=params or None)
+        data = await self._client._aget(self._BASE_PATH, params=params or None)
+        return AccountsListResponse.model_validate(data)
 
-    async def aget(self, account_id: str) -> dict[str, Any]:
+    async def aget(self, account_id: str) -> AccountGetResponse:
         """Get an account by ID asynchronously."""
-        return await self._client._aget(self._path(account_id))
+        data = await self._client._aget(self._path(account_id))
+        return AccountGetResponse.model_validate(data)
 
     async def aget_follower_stats(
         self,
         *,
         account_ids: list[str] | None = None,
-    ) -> dict[str, Any]:
+    ) -> FollowerStatsResponse:
         """Get follower statistics asynchronously."""
         params = None
         if account_ids:
             params = {"accountIds": ",".join(account_ids)}
-        return await self._client._aget(self._path("follower-stats"), params=params)
+        data = await self._client._aget(self._path("follower-stats"), params=params)
+        return FollowerStatsResponse.model_validate(data)
diff --git a/src/late/resources/base.py b/src/late/resources/base.py
index dd2f893..436bc7b 100644
--- 
a/src/late/resources/base.py +++ b/src/late/resources/base.py @@ -51,9 +51,7 @@ def _build_params(self, **kwargs: Any) -> dict[str, Any]: Returns: Dictionary with non-None values and camelCase keys """ - return { - _to_camel_case(k): v for k, v in kwargs.items() if v is not None - } + return {_to_camel_case(k): v for k, v in kwargs.items() if v is not None} def _build_payload(self, **kwargs: Any) -> dict[str, Any]: """ diff --git a/src/late/resources/media.py b/src/late/resources/media.py index a362f6e..d856aad 100644 --- a/src/late/resources/media.py +++ b/src/late/resources/media.py @@ -1,33 +1,51 @@ """ Media resource for uploading images and videos. + +Supports two upload methods: +- Direct upload: For small files (< 4MB) via API multipart +- Vercel Blob: For large files (up to 5GB) - requires Vercel token """ from __future__ import annotations import mimetypes from pathlib import Path -from typing import Any +from typing import TYPE_CHECKING + +from late.models import ( + MediaLargeUploadResponse, + MediaUploadResponse, + UploadTokenResponse, + UploadTokenStatusResponse, +) from .base import BaseResource +if TYPE_CHECKING: + from collections.abc import Callable + + from late.upload import UploadProgress + + +# Size limit for direct upload (4MB) +DIRECT_UPLOAD_MAX_SIZE = 4 * 1024 * 1024 + -class MediaResource(BaseResource[Any]): +class MediaResource(BaseResource[MediaUploadResponse]): """ Resource for uploading media files. - Supports uploading images, videos, and PDFs up to 5GB total. + Supports uploading images, videos, and PDFs. 
- Example: - >>> client = Late(api_key="...") - >>> # Upload single file + For small files (< 4MB): >>> result = client.media.upload("photo.jpg") >>> print(result["files"][0]["url"]) - >>> - >>> # Upload multiple files - >>> result = client.media.upload_multiple(["photo1.jpg", "video.mp4"]) - >>> - >>> # Upload from bytes - >>> result = client.media.upload_bytes(image_bytes, "image.png") + + For large files (4MB - 5GB), use upload_large with Vercel token: + >>> result = client.media.upload_large( + ... "large_video.mp4", + ... vercel_token="vercel_blob_rw_xxx" + ... ) """ _BASE_PATH = "/v1/media" @@ -37,37 +55,57 @@ def _get_mime_type(self, file_path: Path) -> str: mime_type, _ = mimetypes.guess_type(str(file_path)) return mime_type or "application/octet-stream" + def _check_file_size(self, file_path: Path) -> int: + """Get file size and validate for direct upload.""" + size = file_path.stat().st_size + if size > DIRECT_UPLOAD_MAX_SIZE: + from late.upload import LargeFileError + + raise LargeFileError(size, DIRECT_UPLOAD_MAX_SIZE) + return size + # ------------------------------------------------------------------------- - # Sync methods + # Direct upload (small files < 4MB) # ------------------------------------------------------------------------- - def upload(self, file_path: str | Path) -> dict[str, Any]: + def upload(self, file_path: str | Path) -> MediaUploadResponse: """ - Upload a single media file. + Upload a single media file (direct upload, max 4MB). + + For files larger than 4MB, use upload_large() with a Vercel token. 
Args: file_path: Path to the file to upload Returns: - Dict with 'files' array containing uploaded file info + MediaUploadResponse with 'files' attribute + + Raises: + LargeFileError: If file exceeds 4MB (use upload_large instead) """ path = Path(file_path) + self._check_file_size(path) + mime_type = self._get_mime_type(path) with path.open("rb") as f: - return self._client._post( + data = self._client._post( self._BASE_PATH, files={"files": (path.name, f, mime_type)}, ) + return MediaUploadResponse.model_validate(data) - def upload_multiple(self, file_paths: list[str | Path]) -> dict[str, Any]: + def upload_multiple(self, file_paths: list[str | Path]) -> MediaUploadResponse: """ - Upload multiple media files at once. + Upload multiple media files at once (direct upload, each < 4MB). Args: file_paths: List of file paths to upload Returns: - Dict with 'files' array containing all uploaded files + MediaUploadResponse with 'files' attribute + + Raises: + LargeFileError: If any file exceeds 4MB """ files_list = [] file_handles = [] @@ -75,12 +113,14 @@ def upload_multiple(self, file_paths: list[str | Path]) -> dict[str, Any]: try: for file_path in file_paths: path = Path(file_path) + self._check_file_size(path) mime_type = self._get_mime_type(path) f = path.open("rb") file_handles.append(f) files_list.append(("files", (path.name, f, mime_type))) - return self._client._post(self._BASE_PATH, files=files_list) + data = self._client._post(self._BASE_PATH, files=files_list) + return MediaUploadResponse.model_validate(data) finally: for f in file_handles: f.close() @@ -91,9 +131,9 @@ def upload_bytes( filename: str, *, mime_type: str | None = None, - ) -> dict[str, Any]: + ) -> MediaUploadResponse: """ - Upload media from bytes. + Upload media from bytes (direct upload, max 4MB). 
Args: content: File content as bytes @@ -101,22 +141,135 @@ def upload_bytes( mime_type: Optional MIME type (auto-detected if not provided) Returns: - Dict with 'files' array containing uploaded file info + MediaUploadResponse with 'files' attribute + + Raises: + LargeFileError: If content exceeds 4MB """ + if len(content) > DIRECT_UPLOAD_MAX_SIZE: + from late.upload import LargeFileError + + raise LargeFileError(len(content), DIRECT_UPLOAD_MAX_SIZE) + if mime_type is None: mime_type, _ = mimetypes.guess_type(filename) mime_type = mime_type or "application/octet-stream" - return self._client._post( + data = self._client._post( self._BASE_PATH, files={"files": (filename, content, mime_type)}, ) + return MediaUploadResponse.model_validate(data) + + # ------------------------------------------------------------------------- + # Large file upload (Vercel Blob - up to 5GB) + # ------------------------------------------------------------------------- + + def upload_large( + self, + file_path: str | Path, + *, + vercel_token: str, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> MediaLargeUploadResponse: + """ + Upload a large file using Vercel Blob (up to 5GB). + + Requires a Vercel Blob read-write token. + + Args: + file_path: Path to the file to upload + vercel_token: Vercel Blob token (vercel_blob_rw_xxx) + Get one at: https://vercel.com/docs/storage/vercel-blob + on_progress: Optional callback for progress updates + + Returns: + MediaLargeUploadResponse with 'url', 'pathname', 'contentType', 'size' attributes + + Example: + >>> result = client.media.upload_large( + ... "video.mp4", + ... vercel_token="vercel_blob_rw_xxx", + ... on_progress=lambda p: print(f"{p.percentage:.1f}%") + ... 
) + >>> print(result.url) + """ + from late.upload import UploadFile, VercelBlobUploader + + path = Path(file_path) + mime_type = self._get_mime_type(path) + + uploader = VercelBlobUploader(vercel_token) + result = uploader.upload( + UploadFile( + filename=path.name, + content=path, + mime_type=mime_type, + size=path.stat().st_size, + ), + on_progress=on_progress, + ) + + return MediaLargeUploadResponse( + url=result.url, + pathname=result.pathname, + contentType=result.content_type, + size=result.size, + downloadUrl=result.download_url, + ) + + def upload_large_bytes( + self, + content: bytes, + filename: str, + *, + vercel_token: str, + mime_type: str | None = None, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> MediaLargeUploadResponse: + """ + Upload large content from bytes using Vercel Blob. + + Args: + content: File content as bytes + filename: Name for the file + vercel_token: Vercel Blob token + mime_type: Optional MIME type + on_progress: Optional progress callback + + Returns: + MediaLargeUploadResponse with 'url', 'pathname', 'contentType', 'size' attributes + """ + from late.upload import UploadFile, VercelBlobUploader + + if mime_type is None: + mime_type, _ = mimetypes.guess_type(filename) + mime_type = mime_type or "application/octet-stream" + + uploader = VercelBlobUploader(vercel_token) + result = uploader.upload( + UploadFile( + filename=filename, + content=content, + mime_type=mime_type, + size=len(content), + ), + on_progress=on_progress, + ) + + return MediaLargeUploadResponse( + url=result.url, + pathname=result.pathname, + contentType=result.content_type, + size=result.size, + downloadUrl=result.download_url, + ) # ------------------------------------------------------------------------- # Upload Token Flow (for Claude Desktop / MCP) # ------------------------------------------------------------------------- - def generate_upload_token(self) -> dict[str, Any]: + def generate_upload_token(self) -> UploadTokenResponse: 
""" Generate an upload token for browser-based file uploads. @@ -127,11 +280,12 @@ def generate_upload_token(self) -> dict[str, Any]: 3. Call check_upload_token() to get the uploaded file URLs Returns: - Dict with 'token', 'uploadUrl', 'expiresAt', 'status' + UploadTokenResponse with 'token', 'uploadUrl', 'expiresAt', 'status' attributes """ - return self._client._post(self._path("upload-token")) + data = self._client._post(self._path("upload-token")) + return UploadTokenResponse.model_validate(data) - def check_upload_token(self, token: str) -> dict[str, Any]: + def check_upload_token(self, token: str) -> UploadTokenStatusResponse: """ Check the status of an upload token and get uploaded file URLs. @@ -139,36 +293,44 @@ def check_upload_token(self, token: str) -> dict[str, Any]: token: The upload token from generate_upload_token() Returns: - Dict with 'token', 'status', 'files', 'createdAt', 'expiresAt', 'completedAt' + UploadTokenStatusResponse with 'token', 'status', 'files', 'createdAt', 'expiresAt', 'completedAt' attributes """ - return self._client._get(self._path("upload-token"), params={"token": token}) + data = self._client._get(self._path("upload-token"), params={"token": token}) + return UploadTokenStatusResponse.model_validate(data) # ------------------------------------------------------------------------- - # Async methods + # Async methods - Direct upload # ------------------------------------------------------------------------- - async def aupload(self, file_path: str | Path) -> dict[str, Any]: - """Upload a single media file asynchronously.""" + async def aupload(self, file_path: str | Path) -> MediaUploadResponse: + """Upload a single media file asynchronously (max 4MB).""" path = Path(file_path) + self._check_file_size(path) + mime_type = self._get_mime_type(path) with path.open("rb") as f: content = f.read() - return await self._client._apost( + data = await self._client._apost( self._BASE_PATH, files={"files": (path.name, content, mime_type)}, 
) + return MediaUploadResponse.model_validate(data) - async def aupload_multiple(self, file_paths: list[str | Path]) -> dict[str, Any]: - """Upload multiple media files asynchronously.""" + async def aupload_multiple( + self, file_paths: list[str | Path] + ) -> MediaUploadResponse: + """Upload multiple media files asynchronously (each < 4MB).""" files_list = [] for file_path in file_paths: path = Path(file_path) + self._check_file_size(path) mime_type = self._get_mime_type(path) with path.open("rb") as f: content = f.read() files_list.append(("files", (path.name, content, mime_type))) - return await self._client._apost(self._BASE_PATH, files=files_list) + data = await self._client._apost(self._BASE_PATH, files=files_list) + return MediaUploadResponse.model_validate(data) async def aupload_bytes( self, @@ -176,21 +338,106 @@ async def aupload_bytes( filename: str, *, mime_type: str | None = None, - ) -> dict[str, Any]: - """Upload media from bytes asynchronously.""" + ) -> MediaUploadResponse: + """Upload media from bytes asynchronously (max 4MB).""" + if len(content) > DIRECT_UPLOAD_MAX_SIZE: + from late.upload import LargeFileError + + raise LargeFileError(len(content), DIRECT_UPLOAD_MAX_SIZE) + if mime_type is None: mime_type, _ = mimetypes.guess_type(filename) mime_type = mime_type or "application/octet-stream" - return await self._client._apost( + data = await self._client._apost( self._BASE_PATH, files={"files": (filename, content, mime_type)}, ) + return MediaUploadResponse.model_validate(data) + + # ------------------------------------------------------------------------- + # Async methods - Large file upload + # ------------------------------------------------------------------------- + + async def aupload_large( + self, + file_path: str | Path, + *, + vercel_token: str, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> MediaLargeUploadResponse: + """Upload a large file asynchronously using Vercel Blob.""" + from late.upload import 
UploadFile, VercelBlobUploader + + path = Path(file_path) + mime_type = self._get_mime_type(path) + + uploader = VercelBlobUploader(vercel_token) + result = await uploader.aupload( + UploadFile( + filename=path.name, + content=path, + mime_type=mime_type, + size=path.stat().st_size, + ), + on_progress=on_progress, + ) + + return MediaLargeUploadResponse( + url=result.url, + pathname=result.pathname, + contentType=result.content_type, + size=result.size, + downloadUrl=result.download_url, + ) + + async def aupload_large_bytes( + self, + content: bytes, + filename: str, + *, + vercel_token: str, + mime_type: str | None = None, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> MediaLargeUploadResponse: + """Upload large content from bytes asynchronously.""" + from late.upload import UploadFile, VercelBlobUploader - async def agenerate_upload_token(self) -> dict[str, Any]: + if mime_type is None: + mime_type, _ = mimetypes.guess_type(filename) + mime_type = mime_type or "application/octet-stream" + + uploader = VercelBlobUploader(vercel_token) + result = await uploader.aupload( + UploadFile( + filename=filename, + content=content, + mime_type=mime_type, + size=len(content), + ), + on_progress=on_progress, + ) + + return MediaLargeUploadResponse( + url=result.url, + pathname=result.pathname, + contentType=result.content_type, + size=result.size, + downloadUrl=result.download_url, + ) + + # ------------------------------------------------------------------------- + # Async methods - Upload Token Flow + # ------------------------------------------------------------------------- + + async def agenerate_upload_token(self) -> UploadTokenResponse: """Generate an upload token asynchronously.""" - return await self._client._apost(self._path("upload-token")) + data = await self._client._apost(self._path("upload-token")) + return UploadTokenResponse.model_validate(data) - async def acheck_upload_token(self, token: str) -> dict[str, Any]: + async def 
acheck_upload_token(self, token: str) -> UploadTokenStatusResponse: """Check the status of an upload token asynchronously.""" - return await self._client._aget(self._path("upload-token"), params={"token": token}) + data = await self._client._aget( + self._path("upload-token"), params={"token": token} + ) + return UploadTokenStatusResponse.model_validate(data) diff --git a/src/late/resources/posts.py b/src/late/resources/posts.py index 6fa50aa..31a412c 100644 --- a/src/late/resources/posts.py +++ b/src/late/resources/posts.py @@ -5,42 +5,38 @@ from __future__ import annotations from pathlib import Path -from typing import TYPE_CHECKING, Any, Literal +from typing import TYPE_CHECKING, Any + +from late.models import ( + PostCreateResponse, + PostDeleteResponse, + PostGetResponse, + PostRetryResponse, + PostsListResponse, + PostUpdateResponse, +) from .base import BaseResource if TYPE_CHECKING: from datetime import datetime -# Type aliases for better readability -Platform = Literal[ - "twitter", - "instagram", - "facebook", - "linkedin", - "tiktok", - "youtube", - "pinterest", - "reddit", - "bluesky", - "threads", - "googlebusiness", -] -PostStatus = Literal["draft", "scheduled", "publishing", "published", "failed", "partial"] - - -class PostsResource(BaseResource[Any]): + from late.enums import Platform, PostStatus + + +class PostsResource(BaseResource[PostsListResponse]): """ Resource for managing posts. Example: + >>> from late import Platform, PostStatus >>> client = Late(api_key="...") >>> # List posts - >>> posts = client.posts.list(status="scheduled") + >>> posts = client.posts.list(status=PostStatus.SCHEDULED) >>> # Create a post >>> post = client.posts.create( ... content="Hello!", - ... platforms=[{"platform": "twitter", "accountId": "..."}], + ... platforms=[{"platform": Platform.TWITTER, "accountId": "..."}], ... scheduled_for=datetime.now() + timedelta(hours=1), ... 
) >>> # Update a post @@ -67,7 +63,7 @@ def list( date_from: str | None = None, date_to: str | None = None, include_hidden: bool | None = None, - ) -> dict[str, Any]: + ) -> PostsListResponse: """ List posts with optional filters. @@ -83,7 +79,7 @@ def list( include_hidden: Include hidden posts (default: False) Returns: - Dict with 'posts' and 'pagination' keys + PostsListResponse with 'posts' and 'pagination' attributes """ params = self._build_params( page=page, @@ -96,9 +92,10 @@ def list( date_to=date_to, include_hidden=include_hidden, ) - return self._client._get(self._BASE_PATH, params=params) + data = self._client._get(self._BASE_PATH, params=params) + return PostsListResponse.model_validate(data) - def get(self, post_id: str) -> dict[str, Any]: + def get(self, post_id: str) -> PostGetResponse: """ Get a single post by ID. @@ -106,9 +103,10 @@ def get(self, post_id: str) -> dict[str, Any]: post_id: The post ID Returns: - Dict with 'post' key containing the Post object + PostGetResponse with 'post' attribute """ - return self._client._get(self._path(post_id)) + data = self._client._get(self._path(post_id)) + return PostGetResponse.model_validate(data) def create( self, @@ -128,7 +126,7 @@ def create( metadata: dict[str, Any] | None = None, tiktok_settings: dict[str, Any] | None = None, queued_from_profile: str | None = None, - ) -> dict[str, Any]: + ) -> PostCreateResponse: """ Create a new post. 
@@ -152,7 +150,7 @@ def create( queued_from_profile: Profile ID if creating via queue Returns: - Dict with 'message' and 'post' keys + PostCreateResponse with 'message' and 'post' attributes """ payload = self._build_payload( content=content, @@ -166,12 +164,15 @@ def create( tags=tags, hashtags=hashtags, mentions=mentions, - crossposting_enabled=crossposting_enabled if not crossposting_enabled else None, + crossposting_enabled=crossposting_enabled + if not crossposting_enabled + else None, metadata=metadata, tiktok_settings=tiktok_settings, queued_from_profile=queued_from_profile, ) - return self._client._post(self._BASE_PATH, data=payload) + data = self._client._post(self._BASE_PATH, data=payload) + return PostCreateResponse.model_validate(data) def update( self, @@ -188,7 +189,7 @@ def update( mentions: list[str] | None = None, metadata: dict[str, Any] | None = None, tiktok_settings: dict[str, Any] | None = None, - ) -> dict[str, Any]: + ) -> PostUpdateResponse: """ Update an existing post. @@ -210,7 +211,7 @@ def update( tiktok_settings: New TikTok settings Returns: - Dict with 'message' and 'post' keys + PostUpdateResponse with 'message' and 'post' attributes """ payload = self._build_payload( content=content, @@ -225,9 +226,10 @@ def update( metadata=metadata, tiktok_settings=tiktok_settings, ) - return self._client._put(self._path(post_id), data=payload) + data = self._client._put(self._path(post_id), data=payload) + return PostUpdateResponse.model_validate(data) - def delete(self, post_id: str) -> dict[str, Any]: + def delete(self, post_id: str) -> PostDeleteResponse: """ Delete a post. 
@@ -239,11 +241,12 @@ def delete(self, post_id: str) -> dict[str, Any]: post_id: ID of the post to delete Returns: - Dict with 'message' key + PostDeleteResponse with 'message' attribute """ - return self._client._delete(self._path(post_id)) + data = self._client._delete(self._path(post_id)) + return PostDeleteResponse.model_validate(data) - def retry(self, post_id: str) -> dict[str, Any]: + def retry(self, post_id: str) -> PostRetryResponse: """ Retry a failed post. @@ -251,9 +254,10 @@ def retry(self, post_id: str) -> dict[str, Any]: post_id: ID of the failed post Returns: - Dict with 'message' and 'post' keys + PostRetryResponse with 'message' attribute """ - return self._client._post(self._path(post_id, "retry")) + data = self._client._post(self._path(post_id, "retry")) + return PostRetryResponse.model_validate(data) def bulk_upload( self, @@ -296,7 +300,7 @@ async def alist( date_from: str | None = None, date_to: str | None = None, include_hidden: bool | None = None, - ) -> dict[str, Any]: + ) -> PostsListResponse: """List posts asynchronously.""" params = self._build_params( page=page, @@ -309,11 +313,13 @@ async def alist( date_to=date_to, include_hidden=include_hidden, ) - return await self._client._aget(self._BASE_PATH, params=params) + data = await self._client._aget(self._BASE_PATH, params=params) + return PostsListResponse.model_validate(data) - async def aget(self, post_id: str) -> dict[str, Any]: + async def aget(self, post_id: str) -> PostGetResponse: """Get a post asynchronously.""" - return await self._client._aget(self._path(post_id)) + data = await self._client._aget(self._path(post_id)) + return PostGetResponse.model_validate(data) async def acreate( self, @@ -333,7 +339,7 @@ async def acreate( metadata: dict[str, Any] | None = None, tiktok_settings: dict[str, Any] | None = None, queued_from_profile: str | None = None, - ) -> dict[str, Any]: + ) -> PostCreateResponse: """Create a post asynchronously.""" payload = self._build_payload( 
content=content, @@ -347,12 +353,15 @@ async def acreate( tags=tags, hashtags=hashtags, mentions=mentions, - crossposting_enabled=crossposting_enabled if not crossposting_enabled else None, + crossposting_enabled=crossposting_enabled + if not crossposting_enabled + else None, metadata=metadata, tiktok_settings=tiktok_settings, queued_from_profile=queued_from_profile, ) - return await self._client._apost(self._BASE_PATH, data=payload) + data = await self._client._apost(self._BASE_PATH, data=payload) + return PostCreateResponse.model_validate(data) async def aupdate( self, @@ -369,7 +378,7 @@ async def aupdate( mentions: list[str] | None = None, metadata: dict[str, Any] | None = None, tiktok_settings: dict[str, Any] | None = None, - ) -> dict[str, Any]: + ) -> PostUpdateResponse: """Update a post asynchronously.""" payload = self._build_payload( content=content, @@ -384,12 +393,15 @@ async def aupdate( metadata=metadata, tiktok_settings=tiktok_settings, ) - return await self._client._aput(self._path(post_id), data=payload) + data = await self._client._aput(self._path(post_id), data=payload) + return PostUpdateResponse.model_validate(data) - async def adelete(self, post_id: str) -> dict[str, Any]: + async def adelete(self, post_id: str) -> PostDeleteResponse: """Delete a post asynchronously.""" - return await self._client._adelete(self._path(post_id)) + data = await self._client._adelete(self._path(post_id)) + return PostDeleteResponse.model_validate(data) - async def aretry(self, post_id: str) -> dict[str, Any]: + async def aretry(self, post_id: str) -> PostRetryResponse: """Retry a failed post asynchronously.""" - return await self._client._apost(self._path(post_id, "retry")) + data = await self._client._apost(self._path(post_id, "retry")) + return PostRetryResponse.model_validate(data) diff --git a/src/late/resources/profiles.py b/src/late/resources/profiles.py index aae5e7a..c7e04e3 100644 --- a/src/late/resources/profiles.py +++ b/src/late/resources/profiles.py 
@@ -4,12 +4,18 @@ from __future__ import annotations -from typing import Any +from late.models import ( + ProfileCreateResponse, + ProfileDeleteResponse, + ProfileGetResponse, + ProfilesListResponse, + ProfileUpdateResponse, +) from .base import BaseResource -class ProfilesResource(BaseResource[Any]): +class ProfilesResource(BaseResource[ProfilesListResponse]): """ Resource for managing profiles. @@ -34,16 +40,17 @@ class ProfilesResource(BaseResource[Any]): # Sync methods # ------------------------------------------------------------------------- - def list(self) -> dict[str, Any]: + def list(self) -> ProfilesListResponse: """ List all profiles. Returns: - Dict with 'profiles' key containing list of Profile objects + ProfilesListResponse with 'profiles' attribute """ - return self._client._get(self._BASE_PATH) + data = self._client._get(self._BASE_PATH) + return ProfilesListResponse.model_validate(data) - def get(self, profile_id: str) -> dict[str, Any]: + def get(self, profile_id: str) -> ProfileGetResponse: """ Get a profile by ID. @@ -51,9 +58,10 @@ def get(self, profile_id: str) -> dict[str, Any]: profile_id: The profile ID Returns: - Dict with 'profile' key containing the Profile object + ProfileGetResponse with 'profile' attribute """ - return self._client._get(self._path(profile_id)) + data = self._client._get(self._path(profile_id)) + return ProfileGetResponse.model_validate(data) def create( self, @@ -61,7 +69,7 @@ def create( name: str, description: str | None = None, color: str | None = None, - ) -> dict[str, Any]: + ) -> ProfileCreateResponse: """ Create a new profile. 
@@ -71,14 +79,15 @@ def create( color: Optional hex color (e.g., '#ffeda0') Returns: - Dict with 'message' and 'profile' keys + ProfileCreateResponse with 'message' and 'profile' attributes """ payload = self._build_payload( name=name, description=description, color=color, ) - return self._client._post(self._BASE_PATH, data=payload) + data = self._client._post(self._BASE_PATH, data=payload) + return ProfileCreateResponse.model_validate(data) def update( self, @@ -88,7 +97,7 @@ def update( description: str | None = None, color: str | None = None, is_default: bool | None = None, - ) -> dict[str, Any]: + ) -> ProfileUpdateResponse: """ Update a profile. @@ -100,7 +109,7 @@ def update( is_default: Set as default profile Returns: - Dict with 'message' and 'profile' keys + ProfileUpdateResponse with 'message' and 'profile' attributes """ payload = self._build_payload( name=name, @@ -108,9 +117,10 @@ def update( color=color, is_default=is_default, ) - return self._client._put(self._path(profile_id), data=payload) + data = self._client._put(self._path(profile_id), data=payload) + return ProfileUpdateResponse.model_validate(data) - def delete(self, profile_id: str) -> dict[str, Any]: + def delete(self, profile_id: str) -> ProfileDeleteResponse: """ Delete a profile. 
@@ -120,21 +130,24 @@ def delete(self, profile_id: str) -> dict[str, Any]: profile_id: ID of the profile to delete Returns: - Dict with 'message' key + ProfileDeleteResponse with 'message' attribute """ - return self._client._delete(self._path(profile_id)) + data = self._client._delete(self._path(profile_id)) + return ProfileDeleteResponse.model_validate(data) # ------------------------------------------------------------------------- # Async methods # ------------------------------------------------------------------------- - async def alist(self) -> dict[str, Any]: + async def alist(self) -> ProfilesListResponse: """List all profiles asynchronously.""" - return await self._client._aget(self._BASE_PATH) + data = await self._client._aget(self._BASE_PATH) + return ProfilesListResponse.model_validate(data) - async def aget(self, profile_id: str) -> dict[str, Any]: + async def aget(self, profile_id: str) -> ProfileGetResponse: """Get a profile by ID asynchronously.""" - return await self._client._aget(self._path(profile_id)) + data = await self._client._aget(self._path(profile_id)) + return ProfileGetResponse.model_validate(data) async def acreate( self, @@ -142,14 +155,15 @@ async def acreate( name: str, description: str | None = None, color: str | None = None, - ) -> dict[str, Any]: + ) -> ProfileCreateResponse: """Create a new profile asynchronously.""" payload = self._build_payload( name=name, description=description, color=color, ) - return await self._client._apost(self._BASE_PATH, data=payload) + data = await self._client._apost(self._BASE_PATH, data=payload) + return ProfileCreateResponse.model_validate(data) async def aupdate( self, @@ -159,7 +173,7 @@ async def aupdate( description: str | None = None, color: str | None = None, is_default: bool | None = None, - ) -> dict[str, Any]: + ) -> ProfileUpdateResponse: """Update a profile asynchronously.""" payload = self._build_payload( name=name, @@ -167,8 +181,10 @@ async def aupdate( color=color, 
is_default=is_default, ) - return await self._client._aput(self._path(profile_id), data=payload) + data = await self._client._aput(self._path(profile_id), data=payload) + return ProfileUpdateResponse.model_validate(data) - async def adelete(self, profile_id: str) -> dict[str, Any]: + async def adelete(self, profile_id: str) -> ProfileDeleteResponse: """Delete a profile asynchronously.""" - return await self._client._adelete(self._path(profile_id)) + data = await self._client._adelete(self._path(profile_id)) + return ProfileDeleteResponse.model_validate(data) diff --git a/src/late/resources/queue.py b/src/late/resources/queue.py index 15e7872..3e19def 100644 --- a/src/late/resources/queue.py +++ b/src/late/resources/queue.py @@ -4,14 +4,20 @@ from __future__ import annotations -from typing import Any, Literal +from typing import Any -from .base import BaseResource +from late.models import ( + QueueDeleteResponse, + QueueNextSlotResponse, + QueuePreviewResponse, + QueueSlotsResponse, + QueueUpdateResponse, +) -DayOfWeek = Literal[0, 1, 2, 3, 4, 5, 6] # 0=Sunday, 6=Saturday +from .base import BaseResource -class QueueResource(BaseResource[Any]): +class QueueResource(BaseResource[QueueSlotsResponse]): """ Resource for managing the posting queue. @@ -39,7 +45,7 @@ class QueueResource(BaseResource[Any]): # Sync methods # ------------------------------------------------------------------------- - def get_slots(self, *, profile_id: str | None = None) -> dict[str, Any]: + def get_slots(self, *, profile_id: str | None = None) -> QueueSlotsResponse: """ Get queue slots for a profile. 
@@ -47,10 +53,11 @@ def get_slots(self, *, profile_id: str | None = None) -> dict[str, Any]: profile_id: Optional profile ID to filter Returns: - Dict with queue schedule information + QueueSlotsResponse with queue schedule information """ params = self._build_params(profile_id=profile_id) - return self._client._get(self._path("slots"), params=params or None) + data = self._client._get(self._path("slots"), params=params or None) + return QueueSlotsResponse.model_validate(data) def update_slots( self, @@ -59,7 +66,7 @@ def update_slots( timezone: str, slots: list[dict[str, Any]], active: bool = True, - ) -> dict[str, Any]: + ) -> QueueUpdateResponse: """ Update queue slots for a profile. @@ -70,7 +77,7 @@ def update_slots( active: Whether the queue is active Returns: - Dict with updated queue schedule + QueueUpdateResponse with updated queue schedule """ payload = self._build_payload( profile_id=profile_id, @@ -78,9 +85,10 @@ def update_slots( slots=slots, active=active, ) - return self._client._put(self._path("slots"), data=payload) + data = self._client._put(self._path("slots"), data=payload) + return QueueUpdateResponse.model_validate(data) - def delete_slots(self, *, profile_id: str) -> dict[str, Any]: + def delete_slots(self, *, profile_id: str) -> QueueDeleteResponse: """ Delete all queue slots for a profile. @@ -88,12 +96,13 @@ def delete_slots(self, *, profile_id: str) -> dict[str, Any]: profile_id: Profile ID to clear slots for Returns: - Dict with 'message' key + QueueDeleteResponse with 'message' attribute """ params = self._build_params(profile_id=profile_id) - return self._client._delete(self._path("slots"), params=params) + data = self._client._delete(self._path("slots"), params=params) + return QueueDeleteResponse.model_validate(data) - def preview(self, *, profile_id: str | None = None) -> dict[str, Any]: + def preview(self, *, profile_id: str | None = None) -> QueuePreviewResponse: """ Preview the next scheduled slot times. 
@@ -101,12 +110,13 @@ def preview(self, *, profile_id: str | None = None) -> dict[str, Any]: profile_id: Optional profile ID to filter Returns: - Dict with preview of next scheduled times + QueuePreviewResponse with preview of next scheduled times """ params = self._build_params(profile_id=profile_id) - return self._client._get(self._path("preview"), params=params or None) + data = self._client._get(self._path("preview"), params=params or None) + return QueuePreviewResponse.model_validate(data) - def next_slot(self, *, profile_id: str | None = None) -> dict[str, Any]: + def next_slot(self, *, profile_id: str | None = None) -> QueueNextSlotResponse: """ Get the next available queue slot. @@ -114,19 +124,21 @@ def next_slot(self, *, profile_id: str | None = None) -> dict[str, Any]: profile_id: Optional profile ID to filter Returns: - Dict with next available slot information + QueueNextSlotResponse with next available slot information """ params = self._build_params(profile_id=profile_id) - return self._client._get(self._path("next-slot"), params=params or None) + data = self._client._get(self._path("next-slot"), params=params or None) + return QueueNextSlotResponse.model_validate(data) # ------------------------------------------------------------------------- # Async methods # ------------------------------------------------------------------------- - async def aget_slots(self, *, profile_id: str | None = None) -> dict[str, Any]: + async def aget_slots(self, *, profile_id: str | None = None) -> QueueSlotsResponse: """Get queue slots asynchronously.""" params = self._build_params(profile_id=profile_id) - return await self._client._aget(self._path("slots"), params=params or None) + data = await self._client._aget(self._path("slots"), params=params or None) + return QueueSlotsResponse.model_validate(data) async def aupdate_slots( self, @@ -135,7 +147,7 @@ async def aupdate_slots( timezone: str, slots: list[dict[str, Any]], active: bool = True, - ) -> dict[str, Any]: + 
) -> QueueUpdateResponse: """Update queue slots asynchronously.""" payload = self._build_payload( profile_id=profile_id, @@ -143,19 +155,25 @@ async def aupdate_slots( slots=slots, active=active, ) - return await self._client._aput(self._path("slots"), data=payload) + data = await self._client._aput(self._path("slots"), data=payload) + return QueueUpdateResponse.model_validate(data) - async def adelete_slots(self, *, profile_id: str) -> dict[str, Any]: + async def adelete_slots(self, *, profile_id: str) -> QueueDeleteResponse: """Delete queue slots asynchronously.""" params = self._build_params(profile_id=profile_id) - return await self._client._adelete(self._path("slots"), params=params) + data = await self._client._adelete(self._path("slots"), params=params) + return QueueDeleteResponse.model_validate(data) - async def apreview(self, *, profile_id: str | None = None) -> dict[str, Any]: + async def apreview(self, *, profile_id: str | None = None) -> QueuePreviewResponse: """Preview next scheduled slots asynchronously.""" params = self._build_params(profile_id=profile_id) - return await self._client._aget(self._path("preview"), params=params or None) + data = await self._client._aget(self._path("preview"), params=params or None) + return QueuePreviewResponse.model_validate(data) - async def anext_slot(self, *, profile_id: str | None = None) -> dict[str, Any]: + async def anext_slot( + self, *, profile_id: str | None = None + ) -> QueueNextSlotResponse: """Get next available slot asynchronously.""" params = self._build_params(profile_id=profile_id) - return await self._client._aget(self._path("next-slot"), params=params or None) + data = await self._client._aget(self._path("next-slot"), params=params or None) + return QueueNextSlotResponse.model_validate(data) diff --git a/src/late/resources/tools.py b/src/late/resources/tools.py index 165ded3..cdf1376 100644 --- a/src/late/resources/tools.py +++ b/src/late/resources/tools.py @@ -4,14 +4,22 @@ from __future__ import 
annotations -from typing import Any, Literal +from typing import TYPE_CHECKING + +from late.models import ( + CaptionResponse, + DownloadResponse, + HashtagCheckResponse, + TranscriptResponse, +) from .base import BaseResource -Tone = Literal["professional", "casual", "humorous", "inspirational", "informative"] +if TYPE_CHECKING: + from late.enums import CaptionTone -class ToolsResource(BaseResource[Any]): +class ToolsResource(BaseResource[DownloadResponse]): """ Resource for media download and utility tools. @@ -21,13 +29,14 @@ class ToolsResource(BaseResource[Any]): - Unlimited: unlimited Example: + >>> from late import CaptionTone >>> client = Late(api_key="...") >>> # Download YouTube video >>> result = client.tools.youtube_download("https://youtube.com/watch?v=...") >>> # Generate AI caption >>> caption = client.tools.generate_caption( ... image_url="https://example.com/image.jpg", - ... tone="professional", + ... tone=CaptionTone.PROFESSIONAL, ... ) """ @@ -42,7 +51,7 @@ def youtube_download( url: str, *, format_id: str | None = None, - ) -> dict[str, Any]: + ) -> DownloadResponse: """ Download YouTube video or audio. @@ -51,17 +60,18 @@ def youtube_download( format_id: Optional format ID for specific quality Returns: - Dict with download information + DownloadResponse with download information """ params = self._build_params(url=url, format_id=format_id) - return self._client._get(self._path("youtube", "download"), params=params) + data = self._client._get(self._path("youtube", "download"), params=params) + return DownloadResponse.model_validate(data) def youtube_transcript( self, url: str, *, lang: str | None = None, - ) -> dict[str, Any]: + ) -> TranscriptResponse: """ Get YouTube video transcript. 
@@ -70,16 +80,17 @@ def youtube_transcript( lang: Optional language code for transcript Returns: - Dict with transcript data + TranscriptResponse with transcript data """ params = self._build_params(url=url, lang=lang) - return self._client._get(self._path("youtube", "transcript"), params=params) + data = self._client._get(self._path("youtube", "transcript"), params=params) + return TranscriptResponse.model_validate(data) # ------------------------------------------------------------------------- # Instagram # ------------------------------------------------------------------------- - def instagram_download(self, url: str) -> dict[str, Any]: + def instagram_download(self, url: str) -> DownloadResponse: """ Download Instagram reel or post. @@ -87,11 +98,14 @@ def instagram_download(self, url: str) -> dict[str, Any]: url: Instagram post/reel URL Returns: - Dict with download information + DownloadResponse with download information """ - return self._client._get(self._path("instagram", "download"), params={"url": url}) + data = self._client._get( + self._path("instagram", "download"), params={"url": url} + ) + return DownloadResponse.model_validate(data) - def instagram_hashtag_check(self, hashtags: list[str]) -> dict[str, Any]: + def instagram_hashtag_check(self, hashtags: list[str]) -> HashtagCheckResponse: """ Check Instagram hashtags for bans. 
@@ -99,12 +113,13 @@ def instagram_hashtag_check(self, hashtags: list[str]) -> dict[str, Any]: hashtags: List of hashtags to check Returns: - Dict with hashtag status information + HashtagCheckResponse with hashtag status information """ - return self._client._post( + data = self._client._post( self._path("instagram", "hashtag-checker"), data={"hashtags": hashtags}, ) + return HashtagCheckResponse.model_validate(data) # ------------------------------------------------------------------------- # TikTok @@ -115,7 +130,7 @@ def tiktok_download( url: str, *, no_watermark: bool = True, - ) -> dict[str, Any]: + ) -> DownloadResponse: """ Download TikTok video. @@ -124,16 +139,17 @@ def tiktok_download( no_watermark: If True, download without watermark Returns: - Dict with download information + DownloadResponse with download information """ params = {"url": url, "noWatermark": str(no_watermark).lower()} - return self._client._get(self._path("tiktok", "download"), params=params) + data = self._client._get(self._path("tiktok", "download"), params=params) + return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- # Twitter # ------------------------------------------------------------------------- - def twitter_download(self, url: str) -> dict[str, Any]: + def twitter_download(self, url: str) -> DownloadResponse: """ Download Twitter/X video. 
@@ -141,15 +157,16 @@ def twitter_download(self, url: str) -> dict[str, Any]: url: Twitter/X video URL Returns: - Dict with download information + DownloadResponse with download information """ - return self._client._get(self._path("twitter", "download"), params={"url": url}) + data = self._client._get(self._path("twitter", "download"), params={"url": url}) + return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- # Facebook # ------------------------------------------------------------------------- - def facebook_download(self, url: str) -> dict[str, Any]: + def facebook_download(self, url: str) -> DownloadResponse: """ Download Facebook video. @@ -157,15 +174,18 @@ def facebook_download(self, url: str) -> dict[str, Any]: url: Facebook video URL Returns: - Dict with download information + DownloadResponse with download information """ - return self._client._get(self._path("facebook", "download"), params={"url": url}) + data = self._client._get( + self._path("facebook", "download"), params={"url": url} + ) + return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- # LinkedIn # ------------------------------------------------------------------------- - def linkedin_download(self, url: str) -> dict[str, Any]: + def linkedin_download(self, url: str) -> DownloadResponse: """ Download LinkedIn video. 
@@ -173,15 +193,18 @@ def linkedin_download(self, url: str) -> dict[str, Any]: url: LinkedIn video URL Returns: - Dict with download information + DownloadResponse with download information """ - return self._client._get(self._path("linkedin", "download"), params={"url": url}) + data = self._client._get( + self._path("linkedin", "download"), params={"url": url} + ) + return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- # Bluesky # ------------------------------------------------------------------------- - def bluesky_download(self, url: str) -> dict[str, Any]: + def bluesky_download(self, url: str) -> DownloadResponse: """ Download Bluesky video. @@ -189,9 +212,10 @@ def bluesky_download(self, url: str) -> dict[str, Any]: url: Bluesky video URL Returns: - Dict with download information + DownloadResponse with download information """ - return self._client._get(self._path("bluesky", "download"), params={"url": url}) + data = self._client._get(self._path("bluesky", "download"), params={"url": url}) + return DownloadResponse.model_validate(data) # ------------------------------------------------------------------------- # AI Caption Generator @@ -202,8 +226,8 @@ def generate_caption( image_url: str, *, prompt: str | None = None, - tone: Tone | None = None, - ) -> dict[str, Any]: + tone: CaptionTone | str | None = None, + ) -> CaptionResponse: """ Generate AI captions for an image. @@ -213,14 +237,15 @@ def generate_caption( tone: Optional tone (professional, casual, humorous, etc.) 
Returns: - Dict with generated caption(s) + CaptionResponse with generated caption """ payload = self._build_payload( image_url=image_url, prompt=prompt, tone=tone, ) - return self._client._post(self._path("caption-generator"), data=payload) + data = self._client._post(self._path("caption-generator"), data=payload) + return CaptionResponse.model_validate(data) # ------------------------------------------------------------------------- # Async methods @@ -231,79 +256,95 @@ async def ayoutube_download( url: str, *, format_id: str | None = None, - ) -> dict[str, Any]: + ) -> DownloadResponse: """Download YouTube video asynchronously.""" params = self._build_params(url=url, format_id=format_id) - return await self._client._aget(self._path("youtube", "download"), params=params) + data = await self._client._aget( + self._path("youtube", "download"), params=params + ) + return DownloadResponse.model_validate(data) async def ayoutube_transcript( self, url: str, *, lang: str | None = None, - ) -> dict[str, Any]: + ) -> TranscriptResponse: """Get YouTube transcript asynchronously.""" params = self._build_params(url=url, lang=lang) - return await self._client._aget(self._path("youtube", "transcript"), params=params) + data = await self._client._aget( + self._path("youtube", "transcript"), params=params + ) + return TranscriptResponse.model_validate(data) - async def ainstagram_download(self, url: str) -> dict[str, Any]: + async def ainstagram_download(self, url: str) -> DownloadResponse: """Download Instagram content asynchronously.""" - return await self._client._aget( + data = await self._client._aget( self._path("instagram", "download"), params={"url": url} ) + return DownloadResponse.model_validate(data) - async def ainstagram_hashtag_check(self, hashtags: list[str]) -> dict[str, Any]: + async def ainstagram_hashtag_check( + self, hashtags: list[str] + ) -> HashtagCheckResponse: """Check Instagram hashtags asynchronously.""" - return await self._client._apost( + data = 
await self._client._apost( self._path("instagram", "hashtag-checker"), data={"hashtags": hashtags}, ) + return HashtagCheckResponse.model_validate(data) async def atiktok_download( self, url: str, *, no_watermark: bool = True, - ) -> dict[str, Any]: + ) -> DownloadResponse: """Download TikTok video asynchronously.""" params = {"url": url, "noWatermark": str(no_watermark).lower()} - return await self._client._aget(self._path("tiktok", "download"), params=params) + data = await self._client._aget(self._path("tiktok", "download"), params=params) + return DownloadResponse.model_validate(data) - async def atwitter_download(self, url: str) -> dict[str, Any]: + async def atwitter_download(self, url: str) -> DownloadResponse: """Download Twitter video asynchronously.""" - return await self._client._aget( + data = await self._client._aget( self._path("twitter", "download"), params={"url": url} ) + return DownloadResponse.model_validate(data) - async def afacebook_download(self, url: str) -> dict[str, Any]: + async def afacebook_download(self, url: str) -> DownloadResponse: """Download Facebook video asynchronously.""" - return await self._client._aget( + data = await self._client._aget( self._path("facebook", "download"), params={"url": url} ) + return DownloadResponse.model_validate(data) - async def alinkedin_download(self, url: str) -> dict[str, Any]: + async def alinkedin_download(self, url: str) -> DownloadResponse: """Download LinkedIn video asynchronously.""" - return await self._client._aget( + data = await self._client._aget( self._path("linkedin", "download"), params={"url": url} ) + return DownloadResponse.model_validate(data) - async def abluesky_download(self, url: str) -> dict[str, Any]: + async def abluesky_download(self, url: str) -> DownloadResponse: """Download Bluesky video asynchronously.""" - return await self._client._aget( + data = await self._client._aget( self._path("bluesky", "download"), params={"url": url} ) + return 
DownloadResponse.model_validate(data) async def agenerate_caption( self, image_url: str, *, prompt: str | None = None, - tone: Tone | None = None, - ) -> dict[str, Any]: + tone: CaptionTone | str | None = None, + ) -> CaptionResponse: """Generate AI caption asynchronously.""" payload = self._build_payload( image_url=image_url, prompt=prompt, tone=tone, ) - return await self._client._apost(self._path("caption-generator"), data=payload) + data = await self._client._apost(self._path("caption-generator"), data=payload) + return CaptionResponse.model_validate(data) diff --git a/src/late/resources/users.py b/src/late/resources/users.py index 024f656..a3599dc 100644 --- a/src/late/resources/users.py +++ b/src/late/resources/users.py @@ -4,12 +4,12 @@ from __future__ import annotations -from typing import Any +from late.models import UserGetResponse, UsersListResponse from .base import BaseResource -class UsersResource(BaseResource[Any]): +class UsersResource(BaseResource[UsersListResponse]): """ Resource for managing team users. @@ -27,16 +27,17 @@ class UsersResource(BaseResource[Any]): # Sync methods # ------------------------------------------------------------------------- - def list(self) -> dict[str, Any]: + def list(self) -> UsersListResponse: """ List team users. Returns: - Dict with 'users' key containing list of User objects + UsersListResponse with 'users' attribute """ - return self._client._get(self._BASE_PATH) + data = self._client._get(self._BASE_PATH) + return UsersListResponse.model_validate(data) - def get(self, user_id: str) -> dict[str, Any]: + def get(self, user_id: str) -> UserGetResponse: """ Get a user by ID. 
@@ -44,18 +45,21 @@ def get(self, user_id: str) -> dict[str, Any]: user_id: The user ID Returns: - Dict with 'user' key containing the User object + UserGetResponse with 'user' attribute """ - return self._client._get(self._path(user_id)) + data = self._client._get(self._path(user_id)) + return UserGetResponse.model_validate(data) # ------------------------------------------------------------------------- # Async methods # ------------------------------------------------------------------------- - async def alist(self) -> dict[str, Any]: + async def alist(self) -> UsersListResponse: """List team users asynchronously.""" - return await self._client._aget(self._BASE_PATH) + data = await self._client._aget(self._BASE_PATH) + return UsersListResponse.model_validate(data) - async def aget(self, user_id: str) -> dict[str, Any]: + async def aget(self, user_id: str) -> UserGetResponse: """Get a user by ID asynchronously.""" - return await self._client._aget(self._path(user_id)) + data = await self._client._aget(self._path(user_id)) + return UserGetResponse.model_validate(data) diff --git a/src/late/upload/__init__.py b/src/late/upload/__init__.py new file mode 100644 index 0000000..2003f49 --- /dev/null +++ b/src/late/upload/__init__.py @@ -0,0 +1,74 @@ +""" +Upload module for Late SDK. + +Provides flexible file upload strategies: +- DirectUploader: For small files (< 4MB) via API multipart +- VercelBlobUploader: For large files (up to 5GB) via Vercel Blob SDK +- SmartUploader: Automatic strategy selection based on file size + +Example (simple - small files): + >>> from late import Late + >>> client = Late(api_key="...") + >>> result = client.media.upload("small_image.jpg") + +Example (large files - requires Vercel Blob token): + >>> result = client.media.upload_large( + ... "large_video.mp4", + ... vercel_token="vercel_blob_rw_xxx" + ... 
) + +Example (smart uploader with auto-selection): + >>> from late.upload import SmartUploader, UploadFile + >>> uploader = SmartUploader(client, vercel_token="vercel_blob_rw_xxx") + >>> result = uploader.upload(file) # Auto-selects strategy +""" + +from .config import ( + ALLOWED_CONTENT_TYPES, + ALLOWED_DOCUMENT_TYPES, + ALLOWED_IMAGE_TYPES, + ALLOWED_VIDEO_TYPES, + UploadConfig, + UploadEndpoints, + UploadLimits, + get_content_category, + is_content_type_allowed, +) +from .direct import DirectUploader +from .protocols import ( + FileTooLargeError, + UnsupportedContentTypeError, + UploadError, + UploadFile, + UploadProgress, + UploadResult, +) +from .smart import LargeFileError, SmartUploader +from .vercel import VercelBlobUploader + +__all__ = [ + # Main uploaders + "SmartUploader", + "DirectUploader", + "VercelBlobUploader", + # Data types + "UploadFile", + "UploadResult", + "UploadProgress", + # Configuration + "UploadConfig", + "UploadLimits", + "UploadEndpoints", + # Content type helpers + "ALLOWED_CONTENT_TYPES", + "ALLOWED_IMAGE_TYPES", + "ALLOWED_VIDEO_TYPES", + "ALLOWED_DOCUMENT_TYPES", + "is_content_type_allowed", + "get_content_category", + # Exceptions + "UploadError", + "FileTooLargeError", + "LargeFileError", + "UnsupportedContentTypeError", +] diff --git a/src/late/upload/config.py b/src/late/upload/config.py new file mode 100644 index 0000000..bd3aa27 --- /dev/null +++ b/src/late/upload/config.py @@ -0,0 +1,153 @@ +""" +Configuration for upload module. + +Centralized configuration with sensible defaults and easy customization. +""" + +from __future__ import annotations + +from dataclasses import dataclass, field + +from late.enums import MediaType + + +@dataclass(frozen=True) +class UploadLimits: + """ + Size limits for different upload methods. + + All values are in bytes. Use the helper class methods for + convenient size specifications. 
+ + Example: + >>> limits = UploadLimits.default() + >>> custom = UploadLimits(direct_max=UploadLimits.mb(10)) + """ + + direct_max: int = 4 * 1024 * 1024 # 4MB - server body limit + blob_max: int = 5 * 1024 * 1024 * 1024 # 5GB - Vercel Blob limit + multipart_threshold: int = 100 * 1024 * 1024 # 100MB - use chunked above this + chunk_size: int = 5 * 1024 * 1024 # 5MB - chunk size for multipart + + @classmethod + def kb(cls, n: int) -> int: + """Convert kilobytes to bytes.""" + return n * 1024 + + @classmethod + def mb(cls, n: int) -> int: + """Convert megabytes to bytes.""" + return n * 1024 * 1024 + + @classmethod + def gb(cls, n: int) -> int: + """Convert gigabytes to bytes.""" + return n * 1024 * 1024 * 1024 + + @classmethod + def default(cls) -> UploadLimits: + """Create default limits configuration.""" + return cls() + + +@dataclass(frozen=True) +class UploadEndpoints: + """ + API endpoints for upload operations. + + Example: + >>> endpoints = UploadEndpoints.default() + >>> custom = UploadEndpoints(media="/api/v2/media") + """ + + media: str = "/v1/media" + client_upload: str = "/v1/media" # Same endpoint, different flow + + @classmethod + def default(cls) -> UploadEndpoints: + """Create default endpoints configuration.""" + return cls() + + +@dataclass(frozen=True) +class UploadConfig: + """ + Complete upload configuration. + + Combines limits and endpoints into a single configuration object. + Immutable by design to prevent accidental modifications. + + Example: + >>> config = UploadConfig.default() + >>> # Custom configuration + >>> config = UploadConfig( + ... limits=UploadLimits(direct_max=UploadLimits.mb(8)), + ... endpoints=UploadEndpoints(media="/api/v2/media"), + ... 
) + """ + + limits: UploadLimits = field(default_factory=UploadLimits.default) + endpoints: UploadEndpoints = field(default_factory=UploadEndpoints.default) + auto_select_strategy: bool = True # Automatically choose uploader based on size + verify_upload: bool = True # Verify upload completed successfully + + @classmethod + def default(cls) -> UploadConfig: + """Create default configuration.""" + return cls() + + +# Supported content types for uploads +ALLOWED_IMAGE_TYPES: frozenset[str] = frozenset( + { + "image/jpeg", + "image/jpg", + "image/png", + "image/webp", + "image/gif", + } +) + +ALLOWED_VIDEO_TYPES: frozenset[str] = frozenset( + { + "video/mp4", + "video/mpeg", + "video/quicktime", + "video/avi", + "video/x-msvideo", + "video/webm", + "video/x-m4v", + } +) + +ALLOWED_DOCUMENT_TYPES: frozenset[str] = frozenset( + { + "application/pdf", + } +) + +ALLOWED_CONTENT_TYPES: frozenset[str] = ( + ALLOWED_IMAGE_TYPES | ALLOWED_VIDEO_TYPES | ALLOWED_DOCUMENT_TYPES +) + + +def is_content_type_allowed(content_type: str) -> bool: + """Check if a content type is allowed for upload.""" + return content_type.lower() in ALLOWED_CONTENT_TYPES + + +def get_content_category(content_type: str) -> MediaType | None: + """ + Get the category of a content type. + + Returns: + MediaType.IMAGE, MediaType.VIDEO, MediaType.DOCUMENT, or None if not allowed + """ + content_type = content_type.lower() + if content_type in ALLOWED_IMAGE_TYPES: + return MediaType.IMAGE + if content_type in ALLOWED_VIDEO_TYPES: + return MediaType.VIDEO + if content_type in ALLOWED_DOCUMENT_TYPES: + return MediaType.DOCUMENT + return None diff --git a/src/late/upload/direct.py b/src/late/upload/direct.py new file mode 100644 index 0000000..9bba7d7 --- /dev/null +++ b/src/late/upload/direct.py @@ -0,0 +1,213 @@ +""" +Direct upload strategy for small files. + +This uploader sends files directly to the API endpoint via multipart/form-data. 
+Suitable for small files that don't exceed server body size limits (~4MB). +""" + +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from .config import UploadConfig +from .protocols import ( + FileTooLargeError, + UploadError, + UploadFile, + UploadResult, +) + +if TYPE_CHECKING: + from late.client.base import BaseClient + + +class DirectUploader: + """ + Direct multipart upload for small files. + + Uploads files directly to the API endpoint using multipart/form-data. + Best for files under ~4MB to avoid 413 (Request Entity Too Large) errors. + + Example: + >>> uploader = DirectUploader(client) + >>> result = uploader.upload(UploadFile( + ... filename="image.jpg", + ... content=image_bytes, + ... mime_type="image/jpeg" + ... )) + >>> print(result.url) + """ + + def __init__( + self, + client: BaseClient, + config: UploadConfig | None = None, + ) -> None: + """ + Initialize the direct uploader. + + Args: + client: The HTTP client for making requests + config: Upload configuration (uses defaults if not provided) + """ + self._client = client + self._config = config or UploadConfig.default() + + @property + def max_size(self) -> int: + """Maximum supported file size in bytes.""" + return self._config.limits.direct_max + + @property + def endpoint(self) -> str: + """API endpoint for uploads.""" + return self._config.endpoints.media + + def supports_size(self, size: int) -> bool: + """Check if this uploader supports files of the given size.""" + return size <= self.max_size + + def _read_content(self, file: UploadFile) -> bytes: + """ + Read content bytes from an UploadFile. + + Handles bytes, Path, and file handle content types. 
+ """ + content = file.content + if isinstance(content, bytes): + return content + if isinstance(content, Path): + return content.read_bytes() + # File handle - read and return to start if seekable + data = content.read() + if hasattr(content, "seek"): + content.seek(0) + return data + + def _build_multipart_files( + self, files: list[UploadFile] + ) -> list[tuple[str, tuple[str, bytes, str]]]: + """Build multipart files list for httpx.""" + return [ + ("files", (f.filename, self._read_content(f), f.mime_type)) for f in files + ] + + def _parse_response(self, response: dict[str, Any]) -> list[UploadResult]: + """Parse API response into UploadResult objects.""" + files_data = response.get("files", []) + return [ + UploadResult( + url=f["url"], + pathname=f.get("pathname", ""), + content_type=f.get("contentType", ""), + size=f.get("size", 0), + download_url=f.get("downloadUrl"), + ) + for f in files_data + ] + + def _validate_file(self, file: UploadFile) -> None: + """Validate file before upload.""" + if file.size and file.size > self.max_size: + raise FileTooLargeError(file.size, self.max_size) + + # ------------------------------------------------------------------------- + # Sync API + # ------------------------------------------------------------------------- + + def upload(self, file: UploadFile) -> UploadResult: + """ + Upload a single file. 
+ + Args: + file: The file to upload + + Returns: + UploadResult with the uploaded file information + + Raises: + FileTooLargeError: If file exceeds max_size + UploadError: On upload failure + """ + self._validate_file(file) + + try: + multipart = self._build_multipart_files([file]) + response = self._client._post(self.endpoint, files=multipart) + results = self._parse_response(response) + + if not results: + raise UploadError("Server returned no files in response") + + return results[0] + + except (FileTooLargeError, UploadError): + raise + except Exception as e: + raise UploadError(str(e), cause=e) from e + + def upload_multiple(self, files: list[UploadFile]) -> list[UploadResult]: + """ + Upload multiple files in a single request. + + Args: + files: List of files to upload + + Returns: + List of UploadResults for each file + + Raises: + FileTooLargeError: If any file exceeds max_size + UploadError: On upload failure + """ + for file in files: + self._validate_file(file) + + try: + multipart = self._build_multipart_files(files) + response = self._client._post(self.endpoint, files=multipart) + return self._parse_response(response) + + except (FileTooLargeError, UploadError): + raise + except Exception as e: + raise UploadError(str(e), cause=e) from e + + # ------------------------------------------------------------------------- + # Async API + # ------------------------------------------------------------------------- + + async def aupload(self, file: UploadFile) -> UploadResult: + """Upload a single file asynchronously.""" + self._validate_file(file) + + try: + multipart = self._build_multipart_files([file]) + response = await self._client._apost(self.endpoint, files=multipart) + results = self._parse_response(response) + + if not results: + raise UploadError("Server returned no files in response") + + return results[0] + + except (FileTooLargeError, UploadError): + raise + except Exception as e: + raise UploadError(str(e), cause=e) from e + + async def 
aupload_multiple(self, files: list[UploadFile]) -> list[UploadResult]: + """Upload multiple files asynchronously.""" + for file in files: + self._validate_file(file) + + try: + multipart = self._build_multipart_files(files) + response = await self._client._apost(self.endpoint, files=multipart) + return self._parse_response(response) + + except (FileTooLargeError, UploadError): + raise + except Exception as e: + raise UploadError(str(e), cause=e) from e diff --git a/src/late/upload/protocols.py b/src/late/upload/protocols.py new file mode 100644 index 0000000..901a371 --- /dev/null +++ b/src/late/upload/protocols.py @@ -0,0 +1,273 @@ +""" +Upload protocols and interfaces for extensible file upload strategies. + +This module defines the contracts that all uploaders must follow, +enabling easy extension with new upload strategies (S3, GCS, etc.) +""" + +from __future__ import annotations + +from dataclasses import dataclass, field +from pathlib import Path +from typing import TYPE_CHECKING, BinaryIO, Protocol, runtime_checkable + +if TYPE_CHECKING: + from collections.abc import Callable + + +@dataclass +class UploadFile: + """ + Represents a file to be uploaded. + + Attributes: + filename: Name of the file (used in Content-Disposition) + content: File content as bytes, file handle, or path + mime_type: MIME type of the file + size: Size in bytes (optional, calculated if not provided) + """ + + filename: str + content: bytes | BinaryIO | Path + mime_type: str + size: int | None = None + + def __post_init__(self) -> None: + """Calculate size if not provided.""" + if self.size is None: + if isinstance(self.content, bytes): + self.size = len(self.content) + elif isinstance(self.content, Path): + self.size = self.content.stat().st_size + + +@dataclass +class UploadResult: + """ + Result of a successful file upload. 
+ + Attributes: + url: Public URL of the uploaded file + pathname: Path/key in the storage system + content_type: MIME type of the uploaded file + size: Size in bytes + download_url: Direct download URL (may differ from url) + metadata: Additional metadata from the storage provider + """ + + url: str + pathname: str + content_type: str + size: int + download_url: str | None = None + metadata: dict[str, str] = field(default_factory=dict) + + +@dataclass +class UploadProgress: + """ + Progress information for chunked/multipart uploads. + + Attributes: + uploaded_bytes: Bytes uploaded so far + total_bytes: Total bytes to upload + part_number: Current part number (for multipart) + total_parts: Total number of parts + """ + + uploaded_bytes: int + total_bytes: int + part_number: int | None = None + total_parts: int | None = None + + @property + def percentage(self) -> float: + """Calculate upload percentage.""" + if self.total_bytes == 0: + return 100.0 + return (self.uploaded_bytes / self.total_bytes) * 100 + + +@runtime_checkable +class Uploader(Protocol): + """ + Protocol for synchronous file uploaders. + + Implement this protocol to create custom upload strategies. + """ + + def upload(self, file: UploadFile) -> UploadResult: + """ + Upload a single file. + + Args: + file: The file to upload + + Returns: + UploadResult with the uploaded file information + """ + ... + + def upload_multiple(self, files: list[UploadFile]) -> list[UploadResult]: + """ + Upload multiple files. + + Args: + files: List of files to upload + + Returns: + List of UploadResults for each file + """ + ... + + def supports_size(self, size: int) -> bool: + """ + Check if this uploader supports files of the given size. + + Args: + size: File size in bytes + + Returns: + True if this uploader can handle files of this size + """ + ... + + +@runtime_checkable +class AsyncUploader(Protocol): + """ + Protocol for asynchronous file uploaders. + + Implement this protocol for async upload strategies. 
+ """ + + async def aupload(self, file: UploadFile) -> UploadResult: + """Upload a single file asynchronously.""" + ... + + async def aupload_multiple(self, files: list[UploadFile]) -> list[UploadResult]: + """Upload multiple files asynchronously.""" + ... + + def supports_size(self, size: int) -> bool: + """Check if this uploader supports files of the given size.""" + ... + + +@runtime_checkable +class ProgressUploader(Protocol): + """ + Protocol for uploaders that support progress tracking. + + Extend Uploader with progress callback support. + """ + + def upload_with_progress( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """ + Upload a file with progress tracking. + + Args: + file: The file to upload + on_progress: Callback called with progress updates + + Returns: + UploadResult with the uploaded file information + """ + ... + + +@runtime_checkable +class ChunkedUploader(Protocol): + """ + Protocol for uploaders that support chunked/resumable uploads. + + Useful for very large files where upload might be interrupted. + """ + + def start_chunked_upload(self, file: UploadFile) -> str: + """ + Initialize a chunked upload session. + + Args: + file: The file to upload (metadata only at this stage) + + Returns: + Upload session ID for continuing the upload + """ + ... + + def upload_chunk( + self, + session_id: str, + chunk: bytes, + part_number: int, + ) -> bool: + """ + Upload a single chunk. + + Args: + session_id: Upload session ID from start_chunked_upload + chunk: Chunk data + part_number: Part number (1-indexed) + + Returns: + True if chunk was uploaded successfully + """ + ... + + def complete_chunked_upload(self, session_id: str) -> UploadResult: + """ + Complete a chunked upload. + + Args: + session_id: Upload session ID + + Returns: + UploadResult with the final uploaded file information + """ + ... 
+ + def abort_chunked_upload(self, session_id: str) -> bool: + """ + Abort a chunked upload and clean up. + + Args: + session_id: Upload session ID + + Returns: + True if abort was successful + """ + ... + + +class UploadError(Exception): + """Base exception for upload errors.""" + + def __init__(self, message: str, cause: Exception | None = None) -> None: + super().__init__(message) + self.cause = cause + + +class FileTooLargeError(UploadError): + """Raised when file exceeds maximum allowed size.""" + + def __init__(self, size: int, max_size: int) -> None: + super().__init__(f"File size {size:,} bytes exceeds maximum {max_size:,} bytes") + self.size = size + self.max_size = max_size + + +class UnsupportedContentTypeError(UploadError): + """Raised when file type is not supported.""" + + def __init__(self, content_type: str, supported: list[str]) -> None: + super().__init__( + f"Content type '{content_type}' not supported. " + f"Supported types: {', '.join(supported)}" + ) + self.content_type = content_type + self.supported = supported diff --git a/src/late/upload/smart.py b/src/late/upload/smart.py new file mode 100644 index 0000000..0e1cfc7 --- /dev/null +++ b/src/late/upload/smart.py @@ -0,0 +1,204 @@ +""" +Smart uploader with automatic strategy selection. + +Chooses the best upload method based on file size. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from .config import UploadConfig +from .direct import DirectUploader +from .protocols import ( + FileTooLargeError, + UploadError, + UploadFile, + UploadProgress, + UploadResult, +) + +if TYPE_CHECKING: + from collections.abc import Callable + + from late.client.base import BaseClient + + +class LargeFileError(UploadError): + """ + Raised when trying to upload a large file without Vercel Blob token. + + Provides clear guidance on how to upload large files. 
+ """ + + def __init__(self, file_size: int, max_direct_size: int) -> None: + message = ( + f"File size ({file_size:,} bytes) exceeds direct upload limit " + f"({max_direct_size:,} bytes / {max_direct_size // (1024 * 1024)}MB).\n\n" + "For files larger than 4MB, provide a Vercel Blob token:\n\n" + " from late.upload import SmartUploader\n\n" + " uploader = SmartUploader(client, vercel_token='vercel_blob_rw_xxx')\n" + " result = uploader.upload(file)\n\n" + "Get your token at: https://vercel.com/docs/storage/vercel-blob" + ) + super().__init__(message) + self.file_size = file_size + self.max_direct_size = max_direct_size + + +class SmartUploader: + """ + Intelligent uploader that selects the best strategy automatically. + + - Files < 4MB: Uses direct multipart upload to API + - Files >= 4MB: Uses Vercel Blob SDK (requires token) + + Example (small files only): + >>> uploader = SmartUploader(client) + >>> result = uploader.upload(small_file) + + Example (with Vercel token for large files): + >>> uploader = SmartUploader(client, vercel_token="vercel_blob_rw_xxx") + >>> result = uploader.upload(large_file) # Auto-selects strategy + """ + + def __init__( + self, + client: BaseClient, + *, + vercel_token: str | None = None, + config: UploadConfig | None = None, + ) -> None: + """ + Initialize the smart uploader. 
+ + Args: + client: The Late API client + vercel_token: Optional Vercel Blob token for large files + config: Upload configuration + """ + self._client = client + self._config = config or UploadConfig.default() + self._direct = DirectUploader(client, self._config) + self._vercel_token = vercel_token + self._vercel_uploader = None + + # Initialize Vercel uploader if token provided + if vercel_token: + from .vercel import VercelBlobUploader + + self._vercel_uploader = VercelBlobUploader(vercel_token, self._config) + + @property + def direct_max_size(self) -> int: + """Maximum size for direct upload (4MB).""" + return self._config.limits.direct_max + + @property + def blob_max_size(self) -> int: + """Maximum size for Vercel Blob upload (5GB).""" + return self._config.limits.blob_max + + @property + def has_vercel_token(self) -> bool: + """Check if Vercel Blob token is configured.""" + return self._vercel_uploader is not None + + def _select_strategy(self, file: UploadFile) -> str: + """ + Select the appropriate upload strategy. + + Returns: + "direct" or "vercel" + + Raises: + LargeFileError: If file > 4MB and no Vercel token + FileTooLargeError: If file exceeds all limits + """ + size = file.size or 0 + + # Check absolute maximum + if size > self.blob_max_size: + raise FileTooLargeError(size, self.blob_max_size) + + # Small file - use direct upload + if size <= self.direct_max_size: + return "direct" + + # Large file - need Vercel token + if not self.has_vercel_token: + raise LargeFileError(size, self.direct_max_size) + + return "vercel" + + # ------------------------------------------------------------------------- + # Sync API + # ------------------------------------------------------------------------- + + def upload( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """ + Upload a file using the best available strategy. 
+ + Args: + file: File to upload + on_progress: Progress callback (Vercel only) + + Returns: + UploadResult with file URL + + Raises: + LargeFileError: If file > 4MB and no Vercel token + FileTooLargeError: If file > 5GB + UploadError: On upload failure + """ + strategy = self._select_strategy(file) + + if strategy == "direct": + return self._direct.upload(file) + else: + return self._vercel_uploader.upload(file, on_progress) # type: ignore + + def upload_multiple( + self, + files: list[UploadFile], + on_progress: Callable[[int, UploadProgress], None] | None = None, + ) -> list[UploadResult]: + """Upload multiple files.""" + results = [] + for idx, file in enumerate(files): + cb = (lambda p, i=idx: on_progress(i, p)) if on_progress else None + results.append(self.upload(file, cb)) + return results + + # ------------------------------------------------------------------------- + # Async API + # ------------------------------------------------------------------------- + + async def aupload( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """Upload a file (async).""" + strategy = self._select_strategy(file) + + if strategy == "direct": + return await self._direct.aupload(file) + else: + return await self._vercel_uploader.aupload(file, on_progress) # type: ignore + + async def aupload_multiple( + self, + files: list[UploadFile], + on_progress: Callable[[int, UploadProgress], None] | None = None, + ) -> list[UploadResult]: + """Upload multiple files (async).""" + results = [] + for idx, file in enumerate(files): + cb = (lambda p, i=idx: on_progress(i, p)) if on_progress else None + results.append(await self.aupload(file, cb)) + return results diff --git a/src/late/upload/utils.py b/src/late/upload/utils.py new file mode 100644 index 0000000..82dbc7c --- /dev/null +++ b/src/late/upload/utils.py @@ -0,0 +1,70 @@ +""" +Utility functions for upload module. 
+""" + +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Generator + + from .protocols import UploadFile + + +def read_file_content(file: UploadFile) -> bytes: + """ + Read content bytes from an UploadFile. + + Handles bytes, Path, and file handle content types. + """ + content = file.content + + if isinstance(content, bytes): + return content + + if isinstance(content, Path): + return content.read_bytes() + + # File handle - read and optionally reset position + data = content.read() + if hasattr(content, "seek"): + content.seek(0) + return data + + +def iter_file_chunks( + file: UploadFile, chunk_size: int +) -> Generator[tuple[int, bytes], None, None]: + """ + Iterate over file content in chunks. + + Yields: + Tuple of (part_number, chunk_bytes) starting from 1 + """ + content = file.content + part_number = 1 + + if isinstance(content, bytes): + for i in range(0, len(content), chunk_size): + yield part_number, content[i : i + chunk_size] + part_number += 1 + + elif isinstance(content, Path): + with content.open("rb") as f: + while True: + chunk = f.read(chunk_size) + if not chunk: + break + yield part_number, chunk + part_number += 1 + + else: + # File handle + while True: + chunk = content.read(chunk_size) + if not chunk: + break + yield part_number, chunk + part_number += 1 diff --git a/src/late/upload/vercel/__init__.py b/src/late/upload/vercel/__init__.py new file mode 100644 index 0000000..49e1afa --- /dev/null +++ b/src/late/upload/vercel/__init__.py @@ -0,0 +1,14 @@ +""" +Vercel Blob upload module. + +Uses the official Vercel SDK for uploading large files (up to 5GB). +Requires a Vercel Blob read-write token. 
+""" + +from .client import VercelBlobClient +from .uploader import VercelBlobUploader + +__all__ = [ + "VercelBlobClient", + "VercelBlobUploader", +] diff --git a/src/late/upload/vercel/client.py b/src/late/upload/vercel/client.py new file mode 100644 index 0000000..420d757 --- /dev/null +++ b/src/late/upload/vercel/client.py @@ -0,0 +1,158 @@ +""" +Vercel Blob client using official Vercel SDK. + +Wraps the official `vercel.blob` SDK for uploading large files. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from late.upload.protocols import UploadError, UploadFile, UploadProgress, UploadResult +from late.upload.utils import read_file_content + +if TYPE_CHECKING: + from collections.abc import Callable + + +class VercelBlobClient: + """ + Client for Vercel Blob using the official SDK. + + Requires a Vercel Blob read-write token (BLOB_READ_WRITE_TOKEN). + Get one from: https://vercel.com/docs/storage/vercel-blob + + Example: + >>> client = VercelBlobClient(token="vercel_blob_rw_xxx") + >>> result = client.upload(file) + """ + + def __init__(self, token: str) -> None: + """ + Initialize the Vercel Blob client. + + Args: + token: Vercel Blob read-write token + """ + if not token: + raise ValueError( + "Vercel Blob token required. " + "Get one at: https://vercel.com/docs/storage/vercel-blob" + ) + self._token = token + + def upload( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """ + Upload a file to Vercel Blob. 
+ + Args: + file: File to upload + on_progress: Optional progress callback + + Returns: + UploadResult with blob URL + """ + from vercel.blob import BlobClient, UploadProgressEvent + + content = read_file_content(file) + total_size = len(content) + + # Create progress wrapper + progress_cb = None + if on_progress: + + def progress_cb(event: UploadProgressEvent) -> None: + on_progress( + UploadProgress( + uploaded_bytes=event.loaded, + total_bytes=event.total, + part_number=None, + total_parts=None, + ) + ) + + try: + client = BlobClient(token=self._token) + result = client.put( + file.filename, + content, + access="public", + content_type=file.mime_type, + add_random_suffix=True, + multipart=total_size > 100 * 1024 * 1024, # Use multipart for > 100MB + on_upload_progress=progress_cb, + ) + + return UploadResult( + url=result.url, + pathname=result.pathname, + content_type=result.content_type, + size=total_size, + download_url=result.download_url, + metadata={"provider": "vercel-blob"}, + ) + + except Exception as e: + raise UploadError(f"Vercel Blob upload failed: {e}") from e + + async def aupload( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """ + Upload a file to Vercel Blob asynchronously. 
+ + Args: + file: File to upload + on_progress: Optional progress callback + + Returns: + UploadResult with blob URL + """ + from vercel.blob import AsyncBlobClient, UploadProgressEvent + + content = read_file_content(file) + total_size = len(content) + + # Create progress wrapper + progress_cb = None + if on_progress: + + def progress_cb(event: UploadProgressEvent) -> None: + on_progress( + UploadProgress( + uploaded_bytes=event.loaded, + total_bytes=event.total, + part_number=None, + total_parts=None, + ) + ) + + try: + client = AsyncBlobClient(token=self._token) + result = await client.put( + file.filename, + content, + access="public", + content_type=file.mime_type, + add_random_suffix=True, + multipart=total_size > 100 * 1024 * 1024, + on_upload_progress=progress_cb, + ) + + return UploadResult( + url=result.url, + pathname=result.pathname, + content_type=result.content_type, + size=total_size, + download_url=result.download_url, + metadata={"provider": "vercel-blob"}, + ) + + except Exception as e: + raise UploadError(f"Vercel Blob upload failed: {e}") from e diff --git a/src/late/upload/vercel/uploader.py b/src/late/upload/vercel/uploader.py new file mode 100644 index 0000000..6ef52e8 --- /dev/null +++ b/src/late/upload/vercel/uploader.py @@ -0,0 +1,157 @@ +""" +Vercel Blob uploader - main entry point. + +Uses the official Vercel SDK to upload files to Vercel Blob storage. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from late.upload.config import UploadConfig +from late.upload.protocols import ( + FileTooLargeError, + UploadError, + UploadFile, + UploadProgress, + UploadResult, +) + +from .client import VercelBlobClient + +if TYPE_CHECKING: + from collections.abc import Callable + + +class VercelBlobUploader: + """ + Upload files to Vercel Blob storage. + + Uses the official Vercel SDK. Supports files up to 5GB. + + Requires a Vercel Blob read-write token (BLOB_READ_WRITE_TOKEN). 
+ Get one from: https://vercel.com/docs/storage/vercel-blob + + Example: + >>> uploader = VercelBlobUploader(token="vercel_blob_rw_xxx") + >>> result = uploader.upload(UploadFile( + ... filename="video.mp4", + ... content=Path("large_video.mp4"), + ... mime_type="video/mp4", + ... size=500_000_000 + ... )) + >>> print(result.url) + """ + + def __init__( + self, + token: str, + config: UploadConfig | None = None, + ) -> None: + """ + Initialize the Vercel Blob uploader. + + Args: + token: Vercel Blob read-write token + config: General upload configuration + """ + self._config = config or UploadConfig.default() + self._client = VercelBlobClient(token) + + @property + def max_size(self) -> int: + """Maximum supported file size (5GB).""" + return self._config.limits.blob_max + + def supports_size(self, size: int) -> bool: + """Check if this uploader supports files of the given size.""" + return size <= self.max_size + + def _validate(self, file: UploadFile) -> None: + """Validate file before upload.""" + if file.size and file.size > self.max_size: + raise FileTooLargeError(file.size, self.max_size) + + # ------------------------------------------------------------------------- + # Sync API + # ------------------------------------------------------------------------- + + def upload( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """ + Upload a file to Vercel Blob. 
+ + Args: + file: File to upload + on_progress: Optional progress callback + + Returns: + UploadResult with blob URL and metadata + + Raises: + FileTooLargeError: If file exceeds 5GB + UploadError: On upload failure + """ + self._validate(file) + + try: + return self._client.upload(file, on_progress) + except (FileTooLargeError, UploadError): + raise + except Exception as e: + raise UploadError(str(e), cause=e) from e + + def upload_multiple( + self, + files: list[UploadFile], + on_progress: Callable[[int, UploadProgress], None] | None = None, + ) -> list[UploadResult]: + """ + Upload multiple files. + + Args: + files: List of files to upload + on_progress: Callback with (file_index, progress) + + Returns: + List of UploadResults + """ + results = [] + for idx, file in enumerate(files): + cb = (lambda p, i=idx: on_progress(i, p)) if on_progress else None + results.append(self.upload(file, cb)) + return results + + # ------------------------------------------------------------------------- + # Async API + # ------------------------------------------------------------------------- + + async def aupload( + self, + file: UploadFile, + on_progress: Callable[[UploadProgress], None] | None = None, + ) -> UploadResult: + """Upload a file to Vercel Blob (async).""" + self._validate(file) + + try: + return await self._client.aupload(file, on_progress) + except (FileTooLargeError, UploadError): + raise + except Exception as e: + raise UploadError(str(e), cause=e) from e + + async def aupload_multiple( + self, + files: list[UploadFile], + on_progress: Callable[[int, UploadProgress], None] | None = None, + ) -> list[UploadResult]: + """Upload multiple files (async).""" + results = [] + for idx, file in enumerate(files): + cb = (lambda p, i=idx: on_progress(i, p)) if on_progress else None + results.append(await self.aupload(file, cb)) + return results diff --git a/test_upload.py b/test_upload.py new file mode 100644 index 0000000..3e718fa --- /dev/null +++ b/test_upload.py @@ 
-0,0 +1,82 @@
+"""
+Test script for upload module.
+"""
+
+import sys
+from pathlib import Path
+
+# Add src to path
+sys.path.insert(0, str(Path(__file__).parent / "src"))
+
+# Test imports
+print("Testing imports...")
+from late import Late
+from late.upload import (
+    SmartUploader,
+    DirectUploader,
+    VercelBlobUploader,
+    UploadFile,
+    UploadResult,
+    UploadProgress,
+    LargeFileError,
+)
+print("βœ“ All imports successful")
+
+# Test files
+SMALL_IMAGE = "/Users/carlos/Documents/WebDev/Freelance/miquel-palet/Schedule-Posts-API/app/apple-icon.png"
+LARGE_VIDEO = "/Users/carlos/Documents/Video recordings/screen-studio/Built-in Retina Display.mp4"
+
+# Credentials -- SECURITY: never commit real keys; the originals were leaked here and must be rotated
+LATE_API_KEY = "sk_REDACTED_ROTATE_THIS_KEY"
+LATE_BASE_URL = "https://getlate.dev/api"
+VERCEL_BLOB_TOKEN = "vercel_blob_rw_REDACTED_ROTATE_THIS_TOKEN"
+
+# Verify files exist
+for path, name in [(SMALL_IMAGE, "Small image"), (LARGE_VIDEO, "Large video")]:
+    if Path(path).exists():
+        size = Path(path).stat().st_size
+        print(f"βœ“ {name}: {size:,} bytes ({size / (1024*1024):.1f} MB)")
+    else:
+        print(f"βœ— {name} not found: {path}")
+
+# Create client
+client = Late(api_key=LATE_API_KEY, base_url=LATE_BASE_URL)
+print(f"\nβœ“ Late client created (base_url: {LATE_BASE_URL})")
+
+# Test 1: Direct upload (small file)
+print("\n" + "="*60)
+print("TEST 1: Direct upload (small file < 4MB)")
+print("="*60)
+try:
+    result = client.media.upload(SMALL_IMAGE)
+    print(f"βœ“ Upload successful!")
+    print(f"  URL: {result['files'][0]['url']}")
+except Exception as e:
+    print(f"βœ— Upload failed: {e}")
+
+# Test 2: Vercel Blob upload (large file)
+print("\n" + "="*60)
+print("TEST 2: Vercel Blob upload (large file ~278MB)")
+print("="*60)
+
+def progress_callback(p: UploadProgress):
+    pct = p.percentage
+    bar = "β–ˆ" * int(pct / 5) + "β–‘" * (20 - int(pct / 5))
+    print(f"  [{bar}] {pct:.1f}%", end="\r")
+
+try:
+    result = client.media.upload_large(
LARGE_VIDEO, + vercel_token=VERCEL_BLOB_TOKEN, + on_progress=progress_callback + ) + print(f"\nβœ“ Vercel Blob upload successful!") + print(f" URL: {result['url']}") +except Exception as e: + print(f"\nβœ— Vercel Blob upload failed: {e}") + import traceback + traceback.print_exc() + +print("\n" + "="*60) +print("All tests completed!") +print("="*60) diff --git a/tests/test_exhaustive.py b/tests/test_exhaustive.py index 9211942..9f6a243 100644 --- a/tests/test_exhaustive.py +++ b/tests/test_exhaustive.py @@ -868,3 +868,491 @@ async def test_async_generate_content(self): assert response.text is not None assert len(response.text) > 0 + + +# ============================================================================ +# MEDIA RESOURCE TESTS +# ============================================================================ + + +class TestMediaResourceMethods: + """Test MediaResource has all expected methods.""" + + def test_media_has_upload_method(self, api_key: str): + """Test media resource has upload method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.media, "upload") + assert callable(client.media.upload) + + def test_media_has_upload_multiple_method(self, api_key: str): + """Test media resource has upload_multiple method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.media, "upload_multiple") + assert callable(client.media.upload_multiple) + + def test_media_has_upload_bytes_method(self, api_key: str): + """Test media resource has upload_bytes method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.media, "upload_bytes") + assert callable(client.media.upload_bytes) + + def test_media_has_upload_large_method(self, api_key: str): + """Test media resource has upload_large method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.media, "upload_large") + assert callable(client.media.upload_large) + + def 
test_media_has_upload_large_bytes_method(self, api_key: str): + """Test media resource has upload_large_bytes method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.media, "upload_large_bytes") + assert callable(client.media.upload_large_bytes) + + def test_media_has_generate_upload_token_method(self, api_key: str): + """Test media resource has generate_upload_token method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.media, "generate_upload_token") + assert callable(client.media.generate_upload_token) + + def test_media_has_check_upload_token_method(self, api_key: str): + """Test media resource has check_upload_token method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.media, "check_upload_token") + assert callable(client.media.check_upload_token) + + def test_media_has_async_methods(self, api_key: str): + """Test media resource has async methods.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.media, "aupload") + assert hasattr(client.media, "aupload_multiple") + assert hasattr(client.media, "aupload_bytes") + assert hasattr(client.media, "aupload_large") + assert hasattr(client.media, "aupload_large_bytes") + assert hasattr(client.media, "agenerate_upload_token") + assert hasattr(client.media, "acheck_upload_token") + + +class TestUploadModule: + """Test upload module classes.""" + + def test_import_smart_uploader(self): + """Test SmartUploader import.""" + from late.upload import SmartUploader + + assert SmartUploader is not None + + def test_import_direct_uploader(self): + """Test DirectUploader import.""" + from late.upload import DirectUploader + + assert DirectUploader is not None + + def test_import_vercel_blob_uploader(self): + """Test VercelBlobUploader import.""" + from late.upload import VercelBlobUploader + + assert VercelBlobUploader is not None + + def test_import_upload_file(self): + """Test 
UploadFile import.""" + from late.upload import UploadFile + + assert UploadFile is not None + + def test_import_upload_result(self): + """Test UploadResult import.""" + from late.upload import UploadResult + + assert UploadResult is not None + + def test_import_upload_progress(self): + """Test UploadProgress import.""" + from late.upload import UploadProgress + + assert UploadProgress is not None + + def test_import_large_file_error(self): + """Test LargeFileError import.""" + from late.upload import LargeFileError + + assert LargeFileError is not None + + def test_upload_progress_creation(self): + """Test UploadProgress dataclass.""" + from late.upload import UploadProgress + + progress = UploadProgress( + uploaded_bytes=500, + total_bytes=1000, + ) + assert progress.uploaded_bytes == 500 + assert progress.total_bytes == 1000 + assert progress.percentage == 50.0 + + def test_large_file_error_creation(self): + """Test LargeFileError creation.""" + from late.upload import LargeFileError + + error = LargeFileError(file_size=5_000_000, max_direct_size=4_000_000) + assert "5,000,000" in str(error) + assert "4,000,000" in str(error) or "4MB" in str(error) + + +# ============================================================================ +# QUEUE RESOURCE TESTS +# ============================================================================ + + +class TestQueueResourceMethods: + """Test QueueResource has all expected methods.""" + + def test_queue_has_get_slots_method(self, api_key: str): + """Test queue resource has get_slots method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.queue, "get_slots") + assert callable(client.queue.get_slots) + + def test_queue_has_update_slots_method(self, api_key: str): + """Test queue resource has update_slots method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.queue, "update_slots") + assert callable(client.queue.update_slots) + + def 
test_queue_has_delete_slots_method(self, api_key: str): + """Test queue resource has delete_slots method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.queue, "delete_slots") + assert callable(client.queue.delete_slots) + + def test_queue_has_preview_method(self, api_key: str): + """Test queue resource has preview method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.queue, "preview") + assert callable(client.queue.preview) + + def test_queue_has_next_slot_method(self, api_key: str): + """Test queue resource has next_slot method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.queue, "next_slot") + assert callable(client.queue.next_slot) + + def test_queue_has_async_methods(self, api_key: str): + """Test queue resource has async methods.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.queue, "aget_slots") + assert hasattr(client.queue, "aupdate_slots") + assert hasattr(client.queue, "adelete_slots") + assert hasattr(client.queue, "apreview") + assert hasattr(client.queue, "anext_slot") + + +# ============================================================================ +# TOOLS RESOURCE TESTS +# ============================================================================ + + +class TestToolsResourceMethods: + """Test ToolsResource has all expected methods.""" + + def test_tools_has_youtube_download_method(self, api_key: str): + """Test tools resource has youtube_download method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.tools, "youtube_download") + assert callable(client.tools.youtube_download) + + def test_tools_has_youtube_transcript_method(self, api_key: str): + """Test tools resource has youtube_transcript method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.tools, "youtube_transcript") + assert 
callable(client.tools.youtube_transcript) + + def test_tools_has_instagram_download_method(self, api_key: str): + """Test tools resource has instagram_download method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.tools, "instagram_download") + assert callable(client.tools.instagram_download) + + def test_tools_has_instagram_hashtag_check_method(self, api_key: str): + """Test tools resource has instagram_hashtag_check method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.tools, "instagram_hashtag_check") + assert callable(client.tools.instagram_hashtag_check) + + def test_tools_has_tiktok_download_method(self, api_key: str): + """Test tools resource has tiktok_download method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.tools, "tiktok_download") + assert callable(client.tools.tiktok_download) + + def test_tools_has_twitter_download_method(self, api_key: str): + """Test tools resource has twitter_download method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.tools, "twitter_download") + assert callable(client.tools.twitter_download) + + def test_tools_has_facebook_download_method(self, api_key: str): + """Test tools resource has facebook_download method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.tools, "facebook_download") + assert callable(client.tools.facebook_download) + + def test_tools_has_linkedin_download_method(self, api_key: str): + """Test tools resource has linkedin_download method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.tools, "linkedin_download") + assert callable(client.tools.linkedin_download) + + def test_tools_has_bluesky_download_method(self, api_key: str): + """Test tools resource has bluesky_download method.""" + from late import Late + + client = Late(api_key=api_key) + assert 
hasattr(client.tools, "bluesky_download") + assert callable(client.tools.bluesky_download) + + def test_tools_has_generate_caption_method(self, api_key: str): + """Test tools resource has generate_caption method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.tools, "generate_caption") + assert callable(client.tools.generate_caption) + + def test_tools_has_async_methods(self, api_key: str): + """Test tools resource has async methods.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.tools, "ayoutube_download") + assert hasattr(client.tools, "ayoutube_transcript") + assert hasattr(client.tools, "ainstagram_download") + assert hasattr(client.tools, "ainstagram_hashtag_check") + assert hasattr(client.tools, "atiktok_download") + assert hasattr(client.tools, "atwitter_download") + assert hasattr(client.tools, "afacebook_download") + assert hasattr(client.tools, "alinkedin_download") + assert hasattr(client.tools, "abluesky_download") + assert hasattr(client.tools, "agenerate_caption") + + +# ============================================================================ +# PROFILES RESOURCE METHODS TESTS +# ============================================================================ + + +class TestProfilesResourceMethods: + """Test ProfilesResource has all expected methods.""" + + def test_profiles_has_list_method(self, api_key: str): + """Test profiles resource has list method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.profiles, "list") + assert callable(client.profiles.list) + + def test_profiles_has_get_method(self, api_key: str): + """Test profiles resource has get method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.profiles, "get") + assert callable(client.profiles.get) + + def test_profiles_has_create_method(self, api_key: str): + """Test profiles resource has create method.""" + from late import Late + + 
client = Late(api_key=api_key) + assert hasattr(client.profiles, "create") + assert callable(client.profiles.create) + + def test_profiles_has_update_method(self, api_key: str): + """Test profiles resource has update method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.profiles, "update") + assert callable(client.profiles.update) + + def test_profiles_has_delete_method(self, api_key: str): + """Test profiles resource has delete method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.profiles, "delete") + assert callable(client.profiles.delete) + + def test_profiles_has_async_methods(self, api_key: str): + """Test profiles resource has async methods.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.profiles, "alist") + assert hasattr(client.profiles, "aget") + assert hasattr(client.profiles, "acreate") + assert hasattr(client.profiles, "aupdate") + assert hasattr(client.profiles, "adelete") + + +# ============================================================================ +# ACCOUNTS RESOURCE METHODS TESTS +# ============================================================================ + + +class TestAccountsResourceMethods: + """Test AccountsResource has all expected methods.""" + + def test_accounts_has_list_method(self, api_key: str): + """Test accounts resource has list method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.accounts, "list") + assert callable(client.accounts.list) + + def test_accounts_has_get_method(self, api_key: str): + """Test accounts resource has get method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.accounts, "get") + assert callable(client.accounts.get) + + def test_accounts_has_get_follower_stats_method(self, api_key: str): + """Test accounts resource has get_follower_stats method.""" + from late import Late + + client = Late(api_key=api_key) 
+ assert hasattr(client.accounts, "get_follower_stats") + assert callable(client.accounts.get_follower_stats) + + def test_accounts_has_async_methods(self, api_key: str): + """Test accounts resource has async methods.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.accounts, "alist") + assert hasattr(client.accounts, "aget") + assert hasattr(client.accounts, "aget_follower_stats") + + +# ============================================================================ +# POSTS RESOURCE METHODS TESTS +# ============================================================================ + + +class TestPostsResourceMethods: + """Test PostsResource has all expected methods.""" + + def test_posts_has_list_method(self, api_key: str): + """Test posts resource has list method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.posts, "list") + assert callable(client.posts.list) + + def test_posts_has_get_method(self, api_key: str): + """Test posts resource has get method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.posts, "get") + assert callable(client.posts.get) + + def test_posts_has_create_method(self, api_key: str): + """Test posts resource has create method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.posts, "create") + assert callable(client.posts.create) + + def test_posts_has_update_method(self, api_key: str): + """Test posts resource has update method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.posts, "update") + assert callable(client.posts.update) + + def test_posts_has_delete_method(self, api_key: str): + """Test posts resource has delete method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.posts, "delete") + assert callable(client.posts.delete) + + def test_posts_has_retry_method(self, api_key: str): + """Test posts resource has retry 
method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.posts, "retry") + assert callable(client.posts.retry) + + def test_posts_has_bulk_upload_method(self, api_key: str): + """Test posts resource has bulk_upload method.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.posts, "bulk_upload") + assert callable(client.posts.bulk_upload) + + def test_posts_has_async_methods(self, api_key: str): + """Test posts resource has async methods.""" + from late import Late + + client = Late(api_key=api_key) + assert hasattr(client.posts, "alist") + assert hasattr(client.posts, "aget") + assert hasattr(client.posts, "acreate") + assert hasattr(client.posts, "aupdate") + assert hasattr(client.posts, "adelete") + assert hasattr(client.posts, "aretry") diff --git a/tests/test_integration.py b/tests/test_integration.py index 9176980..2e84a16 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -105,9 +105,9 @@ def test_list_posts(self, client: Late, mock_post: dict) -> None: result = client.posts.list() assert route.called - assert len(result["posts"]) == 1 - assert result["posts"][0]["_id"] == "post_123" - assert result["pagination"]["total"] == 1 + assert len(result.posts) == 1 + assert result.posts[0].field_id == "post_123" + assert result.pagination.total == 1 @respx.mock def test_list_posts_with_filters(self, client: Late, mock_post: dict) -> None: @@ -152,7 +152,7 @@ def test_get_post(self, client: Late, mock_post: dict) -> None: result = client.posts.get("post_123") assert route.called - assert result["post"]["_id"] == "post_123" + assert result.post.field_id == "post_123" @respx.mock def test_create_post_scheduled(self, client: Late, mock_post: dict) -> None: @@ -172,7 +172,7 @@ def test_create_post_scheduled(self, client: Late, mock_post: dict) -> None: ) assert route.called - assert result["message"] == "Post scheduled successfully" + assert result.message == "Post scheduled 
successfully" # Verify request payload request = route.calls[0].request @@ -296,7 +296,7 @@ def test_delete_post(self, client: Late) -> None: result = client.posts.delete("post_123") assert route.called - assert result["message"] == "Post deleted successfully" + assert result.message == "Post deleted successfully" @respx.mock def test_retry_post(self, client: Late, mock_post: dict) -> None: @@ -308,7 +308,7 @@ def test_retry_post(self, client: Late, mock_post: dict) -> None: result = client.posts.retry("post_123") assert route.called - assert result["message"] == "Retrying" + assert result.message == "Retrying" # ============================================================================= @@ -329,8 +329,8 @@ def test_list_profiles(self, client: Late, mock_profile: dict) -> None: result = client.profiles.list() assert route.called - assert len(result["profiles"]) == 1 - assert result["profiles"][0]["name"] == "Test Profile" + assert len(result.profiles) == 1 + assert result.profiles[0].name == "Test Profile" @respx.mock def test_get_profile(self, client: Late, mock_profile: dict) -> None: @@ -342,7 +342,7 @@ def test_get_profile(self, client: Late, mock_profile: dict) -> None: result = client.profiles.get("profile_123") assert route.called - assert result["profile"]["_id"] == "profile_123" + assert result.profile.field_id == "profile_123" @respx.mock def test_create_profile(self, client: Late, mock_profile: dict) -> None: @@ -398,7 +398,7 @@ def test_delete_profile(self, client: Late) -> None: result = client.profiles.delete("profile_123") assert route.called - assert result["message"] == "Profile deleted" + assert result.message == "Profile deleted" # ============================================================================= @@ -421,8 +421,8 @@ def test_list_accounts(self, client: Late, mock_account: dict) -> None: result = client.accounts.list() assert route.called - assert len(result["accounts"]) == 1 - assert result["accounts"][0]["platform"] == "twitter" + 
assert len(result.accounts) == 1 + assert result.accounts[0].platform == "twitter" @respx.mock def test_list_accounts_by_profile(self, client: Late, mock_account: dict) -> None: @@ -447,7 +447,7 @@ def test_get_account(self, client: Late, mock_account: dict) -> None: result = client.accounts.get("acc_123") assert route.called - assert result["account"]["_id"] == "acc_123" + assert result.account.field_id == "acc_123" @respx.mock def test_get_follower_stats(self, client: Late) -> None: @@ -500,8 +500,8 @@ def test_generate_upload_token(self, client: Late) -> None: result = client.media.generate_upload_token() assert route.called - assert result["token"] == "tok_123" - assert "uploadUrl" in result + assert result.token == "tok_123" + assert result.uploadUrl is not None @respx.mock def test_check_upload_token(self, client: Late) -> None: @@ -522,8 +522,8 @@ def test_check_upload_token(self, client: Late) -> None: assert route.called request = route.calls[0].request assert "token=tok_123" in str(request.url) - assert result["status"] == "completed" - assert len(result["files"]) == 1 + assert result.status.value == "completed" + assert len(result.files) == 1 # ============================================================================= @@ -592,7 +592,7 @@ def test_next_slot(self, client: Late) -> None: result = client.queue.next_slot() assert route.called - assert "nextSlot" in result + assert result.nextSlot is not None # ============================================================================= @@ -756,7 +756,7 @@ def test_list_users(self, client: Late) -> None: result = client.users.list() assert route.called - assert len(result["users"]) == 1 + assert len(result.users) == 1 @respx.mock def test_get_user(self, client: Late) -> None: @@ -770,7 +770,7 @@ def test_get_user(self, client: Late) -> None: result = client.users.get("user_123") assert route.called - assert result["user"]["_id"] == "user_123" + assert result.user.field_id == "user_123" # 
============================================================================= @@ -865,7 +865,7 @@ async def test_async_list_posts(self, async_client: Late, mock_post: dict) -> No result = await async_client.posts.alist() assert route.called - assert len(result["posts"]) == 1 + assert len(result.posts) == 1 @respx.mock @pytest.mark.asyncio @@ -883,7 +883,7 @@ async def test_async_create_post(self, async_client: Late, mock_post: dict) -> N ) assert route.called - assert result["message"] == "Created" + assert result.message == "Created" @respx.mock @pytest.mark.asyncio @@ -905,15 +905,15 @@ async def test_async_profile_crud(self, async_client: Late, mock_profile: dict) async with async_client: # Create result = await async_client.profiles.acreate(name="Test") - assert result["profile"]["_id"] == "profile_123" + assert result.profile.field_id == "profile_123" # Update result = await async_client.profiles.aupdate("profile_123", name="Updated") - assert result["profile"]["_id"] == "profile_123" + assert result.profile.field_id == "profile_123" # Delete result = await async_client.profiles.adelete("profile_123") - assert result["message"] == "Deleted" + assert result.message == "Deleted" # ============================================================================= diff --git a/uv.lock b/uv.lock index c023bc1..ef65568 100644 --- a/uv.lock +++ b/uv.lock @@ -680,7 +680,7 @@ wheels = [ [[package]] name = "late-sdk" -version = "1.0.1" +version = "1.1.0" source = { editable = "." } dependencies = [ { name = "httpx" },