From 6f45e037e2d8469ec9fc4bd7110152df28f9afdc Mon Sep 17 00:00:00 2001 From: ptaindia Date: Mon, 8 Dec 2025 00:07:01 +0530 Subject: [PATCH] Fix critical bugs and add missing storage module Critical fixes: - Created missing storage module with backends: - storage/base.py: Abstract StorageBackend base class - storage/factory.py: Backend factory function - storage/local.py: Local filesystem backend - storage/s3.py: S3-compatible backend - storage/azure.py: Azure Blob Storage placeholder - storage/gcs.py: Google Cloud Storage placeholder - Fixed api/models/api_key.py: - Corrected generate_key() return type annotation to tuple[str, str, str] - Fixed api/models/job.py: - Added missing columns: current_stage, status_message, updated_at, processing_stats - Fixed api/services/job_service.py: - Changed input_url/output_url to input_path/output_path to match Job model - Added null check for job.operations --- api/models/api_key.py | 6 +- api/models/job.py | 4 + api/services/job_service.py | 6 +- storage/__init__.py | 9 ++ storage/azure.py | 80 +++++++++++++ storage/base.py | 147 +++++++++++++++++++++++ storage/factory.py | 51 ++++++++ storage/gcs.py | 79 +++++++++++++ storage/local.py | 215 +++++++++++++++++++++++++++++++++ storage/s3.py | 229 ++++++++++++++++++++++++++++++++++++ 10 files changed, 820 insertions(+), 6 deletions(-) create mode 100644 storage/__init__.py create mode 100644 storage/azure.py create mode 100644 storage/base.py create mode 100644 storage/factory.py create mode 100644 storage/gcs.py create mode 100644 storage/local.py create mode 100644 storage/s3.py diff --git a/api/models/api_key.py b/api/models/api_key.py index 01bd166..00e8ff4 100644 --- a/api/models/api_key.py +++ b/api/models/api_key.py @@ -47,12 +47,12 @@ class APIKey(Base): created_by = Column(String(255), nullable=True) @classmethod - def generate_key(cls) -> tuple[str, str]: + def generate_key(cls) -> tuple[str, str, str]: """ Generate a new API key. 
- + Returns: - tuple: (raw_key, key_hash) where raw_key should be shown to user only once + tuple: (raw_key, key_hash, key_prefix) where raw_key should be shown to user only once """ # Generate 32 random bytes (256 bits) raw_key = secrets.token_urlsafe(32) diff --git a/api/models/job.py b/api/models/job.py index 3471ca0..3e537a7 100644 --- a/api/models/job.py +++ b/api/models/job.py @@ -81,8 +81,12 @@ class Job(Base): # Progress tracking progress = Column(Float, default=0.0) stage = Column(String, default="queued") + current_stage = Column(String, default="queued") # Alias for compatibility + status_message = Column(String, nullable=True) fps = Column(Float, nullable=True) eta_seconds = Column(Integer, nullable=True) + updated_at = Column(DateTime, nullable=True) + processing_stats = Column(JSON, nullable=True) # Quality metrics vmaf_score = Column(Float, nullable=True) diff --git a/api/services/job_service.py b/api/services/job_service.py index 05708b5..f44add6 100644 --- a/api/services/job_service.py +++ b/api/services/job_service.py @@ -43,8 +43,8 @@ async def get_job_logs( # Job creation logs.append(f"[{job.created_at.isoformat()}] Job created: {job_id}") logs.append(f"[{job.created_at.isoformat()}] Status: QUEUED") - logs.append(f"[{job.created_at.isoformat()}] Input URL: {job.input_url}") - logs.append(f"[{job.created_at.isoformat()}] Operations: {len(job.operations)} operations requested") + logs.append(f"[{job.created_at.isoformat()}] Input: {job.input_path}") + logs.append(f"[{job.created_at.isoformat()}] Operations: {len(job.operations) if job.operations else 0} operations requested") # Job parameters if job.options: @@ -85,7 +85,7 @@ async def get_job_logs( if job.completed_at: if job.status == JobStatus.COMPLETED: logs.append(f"[{job.completed_at.isoformat()}] Status: COMPLETED") - logs.append(f"[{job.completed_at.isoformat()}] Output URL: {job.output_url}") + logs.append(f"[{job.completed_at.isoformat()}] Output: {job.output_path}") logs.append(f"[{job.completed_at.isoformat()}] Processing completed successfully") # Calculate processing time diff --git a/storage/__init__.py b/storage/__init__.py new file mode 100644 index 0000000..ded8386 --- /dev/null +++ b/storage/__init__.py @@ -0,0 +1,9 @@ +""" +Storage module for managing multiple storage backends. + +Supports local filesystem, S3-compatible storage, and other backends. +""" +from storage.base import StorageBackend +from storage.factory import create_storage_backend + +__all__ = ["StorageBackend", "create_storage_backend"] diff --git a/storage/azure.py b/storage/azure.py new file mode 100644 index 0000000..9aad8c8 --- /dev/null +++ b/storage/azure.py @@ -0,0 +1,80 @@ +""" +Azure Blob Storage backend. + +Placeholder implementation - full implementation requires azure-storage-blob. +""" +from typing import Any, AsyncIterator, Dict, List, Optional, Union + +from storage.base import StorageBackend + + +class AzureStorageBackend(StorageBackend): + """Azure Blob Storage backend.""" + + def __init__(self, config: Dict[str, Any]): + """ + Initialize Azure storage backend. 
+ + Args: + config: Configuration with: + - container: Azure container name + - connection_string: Azure connection string + - account_name: Storage account name (alternative to connection_string) + - account_key: Storage account key (alternative to connection_string) + """ + super().__init__(config) + self.container = config.get("container") + + if not self.container: + raise ValueError("Azure backend requires 'container' in configuration") + + # Check for azure-storage-blob + try: + from azure.storage.blob.aio import BlobServiceClient + self._available = True + except ImportError: + self._available = False + + async def exists(self, path: str) -> bool: + """Check if blob exists.""" + if not self._available: + raise ImportError("Azure storage requires azure-storage-blob. Install with: pip install azure-storage-blob") + raise NotImplementedError("Azure storage backend not fully implemented") + + async def read(self, path: str) -> AsyncIterator[bytes]: + """Read blob as async iterator.""" + if not self._available: + raise ImportError("Azure storage requires azure-storage-blob") + raise NotImplementedError("Azure storage backend not fully implemented") + + async def write(self, path: str, data: Union[bytes, AsyncIterator[bytes]]) -> int: + """Write data to blob.""" + if not self._available: + raise ImportError("Azure storage requires azure-storage-blob") + raise NotImplementedError("Azure storage backend not fully implemented") + + async def delete(self, path: str) -> bool: + """Delete a blob.""" + if not self._available: + raise ImportError("Azure storage requires azure-storage-blob") + raise NotImplementedError("Azure storage backend not fully implemented") + + async def list(self, path: str = "", recursive: bool = False) -> List[str]: + """List blobs in container.""" + if not self._available: + raise ImportError("Azure storage requires azure-storage-blob") + raise NotImplementedError("Azure storage backend not fully implemented") + + async def ensure_dir(self, path: str) -> None: + """Azure doesn't need directory creation.""" + pass + + async def get_status(self) -> Dict[str, Any]: + """Get backend status.""" + return { + "name": self.name, + "type": "azure", + "container": self.container, + "available": self._available, + "implemented": False, + } diff --git a/storage/base.py b/storage/base.py new file mode 100644 index 0000000..6c4cb86 --- /dev/null +++ b/storage/base.py @@ -0,0 +1,147 @@ +""" +Abstract base class for storage backends. +""" +from abc import ABC, abstractmethod +from typing import Any, AsyncIterator, Dict, List, Optional, Union +from pathlib import Path + + +class StorageBackend(ABC): + """Abstract base class for storage backends.""" + + def __init__(self, config: Dict[str, Any]): + """ + Initialize storage backend. + + Args: + config: Backend configuration dictionary + """ + self.config = config + self.name = config.get("name", "unknown") + + @abstractmethod + async def exists(self, path: str) -> bool: + """ + Check if a file exists. + + Args: + path: File path relative to backend root + + Returns: + True if file exists, False otherwise + """ + pass + + @abstractmethod + async def read(self, path: str) -> AsyncIterator[bytes]: + """ + Read file contents as an async iterator of chunks. + + Args: + path: File path relative to backend root + + Yields: + File content chunks as bytes + """ + pass + + @abstractmethod + async def write(self, path: str, data: Union[bytes, AsyncIterator[bytes]]) -> int: + """ + Write data to a file. 
+ + Args: + path: File path relative to backend root + data: File content as bytes or async iterator of chunks + + Returns: + Number of bytes written + """ + pass + + @abstractmethod + async def delete(self, path: str) -> bool: + """ + Delete a file. + + Args: + path: File path relative to backend root + + Returns: + True if deleted, False if not found + """ + pass + + @abstractmethod + async def list(self, path: str = "", recursive: bool = False) -> List[str]: + """ + List files in a directory. + + Args: + path: Directory path relative to backend root + recursive: Whether to list recursively + + Returns: + List of file paths + """ + pass + + @abstractmethod + async def ensure_dir(self, path: str) -> None: + """ + Ensure a directory exists, creating it if necessary. + + Args: + path: Directory path relative to backend root + """ + pass + + async def get_file_info(self, path: str) -> Optional[Dict[str, Any]]: + """ + Get file metadata. + + Args: + path: File path relative to backend root + + Returns: + Dictionary with file info or None if not found + """ + if not await self.exists(path): + return None + return { + "path": path, + "exists": True, + } + + async def get_size(self, path: str) -> int: + """ + Get file size in bytes. + + Args: + path: File path relative to backend root + + Returns: + File size in bytes + """ + info = await self.get_file_info(path) + return info.get("size", 0) if info else 0 + + async def get_status(self) -> Dict[str, Any]: + """ + Get backend status. + + Returns: + Dictionary with backend status information + """ + return { + "name": self.name, + "type": self.__class__.__name__, + "available": True, + } + + async def cleanup(self) -> None: + """Clean up backend resources.""" + pass + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} name={self.name}>" diff --git a/storage/factory.py b/storage/factory.py new file mode 100644 index 0000000..a98d9d0 --- /dev/null +++ b/storage/factory.py @@ -0,0 +1,51 @@ +""" +Factory for creating storage backends. +""" +from typing import Any, Dict + +from storage.base import StorageBackend + + +def create_storage_backend(config: Dict[str, Any]) -> StorageBackend: + """ + Create a storage backend from configuration. 
+ + Args: + config: Backend configuration dictionary with at least: + - type: Backend type (filesystem, s3, azure, gcs) + - name: Backend name for identification + + Returns: + Configured StorageBackend instance + + Raises: + ValueError: If backend type is unknown or config is invalid + """ + backend_type = config.get("type", "").lower() + + if not backend_type: + raise ValueError("Backend configuration must include 'type'") + + if backend_type in ("filesystem", "local", "file"): + from storage.local import LocalStorageBackend + return LocalStorageBackend(config) + + elif backend_type in ("s3", "aws", "minio"): + from storage.s3 import S3StorageBackend + return S3StorageBackend(config) + + elif backend_type in ("azure", "blob", "azure_blob"): + from storage.azure import AzureStorageBackend + return AzureStorageBackend(config) + + elif backend_type in ("gcs", "google", "google_cloud"): + from storage.gcs import GCSStorageBackend + return GCSStorageBackend(config) + + elif backend_type in ("nfs", "smb", "cifs", "network"): + # Network storage uses local backend with network path + from storage.local import LocalStorageBackend + return LocalStorageBackend(config) + + else: + raise ValueError(f"Unknown storage backend type: {backend_type}") diff --git a/storage/gcs.py b/storage/gcs.py new file mode 100644 index 0000000..f0cc788 --- /dev/null +++ b/storage/gcs.py @@ -0,0 +1,79 @@ +""" +Google Cloud Storage backend. + +Placeholder implementation - full implementation requires google-cloud-storage. +""" +from typing import Any, AsyncIterator, Dict, List, Optional, Union + +from storage.base import StorageBackend + + +class GCSStorageBackend(StorageBackend): + """Google Cloud Storage backend.""" + + def __init__(self, config: Dict[str, Any]): + """ + Initialize GCS storage backend. + + Args: + config: Configuration with: + - bucket: GCS bucket name + - project: GCP project ID (optional) + - credentials: Path to service account JSON (optional) + """ + super().__init__(config) + self.bucket = config.get("bucket") + + if not self.bucket: + raise ValueError("GCS backend requires 'bucket' in configuration") + + # Check for google-cloud-storage + try: + from google.cloud import storage + self._available = True + except ImportError: + self._available = False + + async def exists(self, path: str) -> bool: + """Check if object exists.""" + if not self._available: + raise ImportError("GCS storage requires google-cloud-storage. 
Install with: pip install google-cloud-storage") + raise NotImplementedError("GCS storage backend not fully implemented") + + async def read(self, path: str) -> AsyncIterator[bytes]: + """Read object as async iterator.""" + if not self._available: + raise ImportError("GCS storage requires google-cloud-storage") + raise NotImplementedError("GCS storage backend not fully implemented") + + async def write(self, path: str, data: Union[bytes, AsyncIterator[bytes]]) -> int: + """Write data to object.""" + if not self._available: + raise ImportError("GCS storage requires google-cloud-storage") + raise NotImplementedError("GCS storage backend not fully implemented") + + async def delete(self, path: str) -> bool: + """Delete an object.""" + if not self._available: + raise ImportError("GCS storage requires google-cloud-storage") + raise NotImplementedError("GCS storage backend not fully implemented") + + async def list(self, path: str = "", recursive: bool = False) -> List[str]: + """List objects in bucket.""" + if not self._available: + raise ImportError("GCS storage requires google-cloud-storage") + raise NotImplementedError("GCS storage backend not fully implemented") + + async def ensure_dir(self, path: str) -> None: + """GCS doesn't need directory creation.""" + pass + + async def get_status(self) -> Dict[str, Any]: + """Get backend status.""" + return { + "name": self.name, + "type": "gcs", + "bucket": self.bucket, + "available": self._available, + "implemented": False, + } diff --git a/storage/local.py b/storage/local.py new file mode 100644 index 0000000..628fea9 --- /dev/null +++ b/storage/local.py @@ -0,0 +1,215 @@ +""" +Local filesystem storage backend. +""" +import os +import asyncio +import aiofiles +import aiofiles.os +from pathlib import Path +from typing import Any, AsyncIterator, Dict, List, Optional, Union + +from storage.base import StorageBackend + + +class LocalStorageBackend(StorageBackend): + """Local filesystem storage backend.""" + + def __init__(self, config: Dict[str, Any]): + """ + Initialize local storage backend. + + Args: + config: Configuration with: + - base_path: Root directory for storage + - name: Backend name (optional) + """ + super().__init__(config) + self.base_path = Path(config.get("base_path", "/storage")).resolve() + + # Ensure base path exists + self.base_path.mkdir(parents=True, exist_ok=True) + + def _resolve_path(self, path: str) -> Path: + """ + Resolve and validate a path. 
+ + Args: + path: Relative path within the storage + + Returns: + Absolute Path object + + Raises: + ValueError: If path would escape base directory + """ + # Handle empty path + if not path: + return self.base_path + + # Resolve the full path + full_path = (self.base_path / path).resolve() + + # Security check: ensure path is within base_path + try: + full_path.relative_to(self.base_path) + except ValueError: + raise ValueError(f"Path '{path}' would escape storage directory") + + return full_path + + async def exists(self, path: str) -> bool: + """Check if file exists.""" + try: + full_path = self._resolve_path(path) + return await aiofiles.os.path.exists(full_path) + except ValueError: + return False + + async def read(self, path: str) -> AsyncIterator[bytes]: + """Read file as async iterator of chunks.""" + full_path = self._resolve_path(path) + + if not await aiofiles.os.path.exists(full_path): + raise FileNotFoundError(f"File not found: {path}") + + chunk_size = 8192 # 8KB chunks + + async with aiofiles.open(full_path, 'rb') as f: + while True: + chunk = await f.read(chunk_size) + if not chunk: + break + yield chunk + + async def write(self, path: str, data: Union[bytes, AsyncIterator[bytes]]) -> int: + """Write data to file.""" + full_path = self._resolve_path(path) + + # Ensure parent directory exists + await self.ensure_dir(str(full_path.parent.relative_to(self.base_path))) + + bytes_written = 0 + + async with aiofiles.open(full_path, 'wb') as f: + if isinstance(data, bytes): + await f.write(data) + bytes_written = len(data) + else: + # Handle async iterator + async for chunk in data: + await f.write(chunk) + bytes_written += len(chunk) + + return bytes_written + + async def delete(self, path: str) -> bool: + """Delete a file.""" + try: + full_path = self._resolve_path(path) + + if not await aiofiles.os.path.exists(full_path): + return False + + if await aiofiles.os.path.isdir(full_path): + # Remove directory recursively + import shutil + await asyncio.to_thread(shutil.rmtree, full_path) + else: + await aiofiles.os.remove(full_path) + + return True + except (OSError, ValueError): + return False + + async def list(self, path: str = "", recursive: bool = False) -> List[str]: + """List files in directory.""" + full_path = self._resolve_path(path) + + if not await aiofiles.os.path.exists(full_path): + return [] + + if not await aiofiles.os.path.isdir(full_path): + return [path] if path else [] + + files = [] + + if recursive: + # Walk directory tree + for root, dirs, filenames in os.walk(full_path): + root_path = Path(root) + for filename in filenames: + file_path = root_path / filename + rel_path = str(file_path.relative_to(self.base_path)) + files.append(rel_path) + else: + # List immediate children + entries = await aiofiles.os.listdir(full_path) + for entry in entries: + entry_path = full_path / entry + if path: + rel_path = f"{path}/{entry}" + else: + rel_path = entry + files.append(rel_path) + + return sorted(files) + + async def ensure_dir(self, path: str) -> None: + """Ensure directory exists.""" + if not path: + return + + full_path = self._resolve_path(path) + + if not await aiofiles.os.path.exists(full_path): + await aiofiles.os.makedirs(full_path, exist_ok=True) + + async def get_file_info(self, path: str) -> Optional[Dict[str, Any]]: + """Get file metadata.""" + try: + full_path = self._resolve_path(path) + + if not await aiofiles.os.path.exists(full_path): + return None + + stat = await aiofiles.os.stat(full_path) + + return { + "path": path, + "exists": True, + "size": 
stat.st_size, + "modified": stat.st_mtime, + "created": stat.st_ctime, + "is_dir": await aiofiles.os.path.isdir(full_path), + } + except (OSError, ValueError): + return None + + async def get_size(self, path: str) -> int: + """Get file size in bytes.""" + info = await self.get_file_info(path) + return info.get("size", 0) if info else 0 + + async def get_status(self) -> Dict[str, Any]: + """Get backend status.""" + import shutil + + # Get disk usage + try: + usage = await asyncio.to_thread(shutil.disk_usage, self.base_path) + disk_info = { + "total": usage.total, + "used": usage.used, + "free": usage.free, + "percent_used": round((usage.used / usage.total) * 100, 2), + } + except OSError: + disk_info = {"error": "Unable to get disk usage"} + + return { + "name": self.name, + "type": "filesystem", + "base_path": str(self.base_path), + "available": self.base_path.exists(), + "disk": disk_info, + } diff --git a/storage/s3.py b/storage/s3.py new file mode 100644 index 0000000..e4e2020 --- /dev/null +++ b/storage/s3.py @@ -0,0 +1,229 @@ +""" +S3-compatible storage backend. + +Supports AWS S3, MinIO, and other S3-compatible object stores. +""" +import asyncio +from io import BytesIO +from typing import Any, AsyncIterator, Dict, List, Optional, Union + +from storage.base import StorageBackend + + +class S3StorageBackend(StorageBackend): + """S3-compatible storage backend.""" + + def __init__(self, config: Dict[str, Any]): + """ + Initialize S3 storage backend. + + Args: + config: Configuration with: + - bucket: S3 bucket name + - region: AWS region (optional) + - endpoint_url: Custom endpoint for MinIO/compatible stores + - access_key: AWS access key (optional, uses default credentials) + - secret_key: AWS secret key (optional, uses default credentials) + - prefix: Path prefix within bucket (optional) + """ + super().__init__(config) + self.bucket = config.get("bucket") + self.region = config.get("region", "us-east-1") + self.endpoint_url = config.get("endpoint_url") + self.prefix = config.get("prefix", "").strip("/") + + if not self.bucket: + raise ValueError("S3 backend requires 'bucket' in configuration") + + self._client = None + + async def _get_client(self): + """Get or create S3 client.""" + if self._client is None: + try: + import aioboto3 + except ImportError: + raise ImportError( + "S3 storage requires aioboto3. 
Install with: pip install aioboto3" + ) + + session = aioboto3.Session() + + client_kwargs = { + "region_name": self.region, + } + + if self.endpoint_url: + client_kwargs["endpoint_url"] = self.endpoint_url + + # Check for explicit credentials in config + if self.config.get("access_key") and self.config.get("secret_key"): + client_kwargs["aws_access_key_id"] = self.config["access_key"] + client_kwargs["aws_secret_access_key"] = self.config["secret_key"] + + self._session = session + self._client_kwargs = client_kwargs + + return self._session.client("s3", **self._client_kwargs) + + def _full_path(self, path: str) -> str: + """Get full path including prefix.""" + if not path: + return self.prefix + if self.prefix: + return f"{self.prefix}/{path.lstrip('/')}" + return path.lstrip("/") + + async def exists(self, path: str) -> bool: + """Check if object exists.""" + try: + async with await self._get_client() as client: + await client.head_object(Bucket=self.bucket, Key=self._full_path(path)) + return True + except Exception: + return False + + async def read(self, path: str) -> AsyncIterator[bytes]: + """Read object as async iterator of chunks.""" + async with await self._get_client() as client: + response = await client.get_object( + Bucket=self.bucket, + Key=self._full_path(path) + ) + + async with response["Body"] as stream: + chunk_size = 8192 + while True: + chunk = await stream.read(chunk_size) + if not chunk: + break + yield chunk + + async def write(self, path: str, data: Union[bytes, AsyncIterator[bytes]]) -> int: + """Write data to object.""" + # Collect data if it's an iterator + if isinstance(data, bytes): + content = data + else: + chunks = [] + async for chunk in data: + chunks.append(chunk) + content = b"".join(chunks) + + async with await self._get_client() as client: + await client.put_object( + Bucket=self.bucket, + Key=self._full_path(path), + Body=content + ) + + return len(content) + + async def delete(self, path: str) -> bool: + """Delete an object.""" + try: + async with await self._get_client() as client: + await client.delete_object( + Bucket=self.bucket, + Key=self._full_path(path) + ) + return True + except Exception: + return False + + async def list(self, path: str = "", recursive: bool = False) -> List[str]: + """List objects in prefix.""" + prefix = self._full_path(path) + if prefix and not prefix.endswith("/"): + prefix += "/" + + files = [] + delimiter = "" if recursive else "/" + + async with await self._get_client() as client: + paginator = client.get_paginator("list_objects_v2") + + async for page in paginator.paginate( + Bucket=self.bucket, + Prefix=prefix, + Delimiter=delimiter + ): + # Add files + for obj in page.get("Contents", []): + key = obj["Key"] + # Remove prefix to get relative path + if self.prefix: + rel_path = key[len(self.prefix):].lstrip("/") + else: + rel_path = key + files.append(rel_path) + + # Add "directories" (common prefixes) if not recursive + if not recursive: + for prefix_obj in page.get("CommonPrefixes", []): + pref = prefix_obj["Prefix"] + if self.prefix: + rel_path = pref[len(self.prefix):].strip("/") + else: + rel_path = pref.strip("/") + if rel_path: + files.append(rel_path + "/") + + return sorted(files) + + async def ensure_dir(self, path: str) -> None: + """ + Ensure "directory" exists in S3. + + Note: S3 doesn't have real directories, but we create a placeholder + object to simulate directory structure. 
+ """ + # S3 doesn't need explicit directory creation + pass + + async def get_file_info(self, path: str) -> Optional[Dict[str, Any]]: + """Get object metadata.""" + try: + async with await self._get_client() as client: + response = await client.head_object( + Bucket=self.bucket, + Key=self._full_path(path) + ) + + return { + "path": path, + "exists": True, + "size": response.get("ContentLength", 0), + "modified": response.get("LastModified"), + "etag": response.get("ETag", "").strip('"'), + "content_type": response.get("ContentType"), + } + except Exception: + return None + + async def get_status(self) -> Dict[str, Any]: + """Get backend status.""" + try: + async with await self._get_client() as client: + # Test access by listing bucket (limited to 1 object) + await client.list_objects_v2( + Bucket=self.bucket, + MaxKeys=1 + ) + available = True + except Exception as e: + available = False + + return { + "name": self.name, + "type": "s3", + "bucket": self.bucket, + "region": self.region, + "endpoint_url": self.endpoint_url, + "prefix": self.prefix, + "available": available, + } + + async def cleanup(self) -> None: + """Clean up S3 client.""" + self._client = None