From 1e7d12662bc81ebd197e10af46f60f3b2f345c6f Mon Sep 17 00:00:00 2001
From: Chibi Vikram
Date: Fri, 9 Jan 2026 14:25:28 -0800
Subject: [PATCH 1/6] feat: Add suspend/resume support for RPA invocations in evaluations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This change implements the suspend/resume pattern for RPA process invocations
within the evaluation runtime, ensuring evaluations can pause when waiting for
external job completion and resume after the job finishes.

Key Changes:

1. Functions Runtime (src/uipath/functions/runtime.py):
   - Added _detect_langgraph_interrupt() method to detect LangGraph's
     __interrupt__ field and extract trigger information
   - Modified execute() to check for interrupts and return SUSPENDED status
   - Converts InvokeProcess objects to UiPathResumeTrigger for serverless executor

2. Evaluation Runtime (src/uipath/_cli/_evals/_runtime.py):
   - Added SUSPENDED status detection after agent execution
   - Populates agentExecutionOutput with trigger data when suspended
   - Skips evaluator execution for suspended runs
   - Publishes EvalRunUpdatedEvent with suspended status to event bus

3. Test Agent (samples/event-trigger/test_suspend_resume_agent.py):
   - Updated to use MemorySaver checkpointer (required for LangGraph interrupts)
   - Returns raw dict to preserve __interrupt__ field for runtime detection

The implementation follows the established pattern from UiPathResumableRuntime
and ensures proper trigger extraction and status handling throughout the
evaluation lifecycle.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5
---
 .../test_suspend_resume_agent.py   | 109 ++++++++++++++++++
 src/uipath/_cli/_evals/_runtime.py |  60 ++++++++++
 src/uipath/functions/runtime.py    |  85 ++++++++++++++
 3 files changed, 254 insertions(+)
 create mode 100644 samples/event-trigger/test_suspend_resume_agent.py

diff --git a/samples/event-trigger/test_suspend_resume_agent.py b/samples/event-trigger/test_suspend_resume_agent.py
new file mode 100644
index 000000000..14a72b50e
--- /dev/null
+++ b/samples/event-trigger/test_suspend_resume_agent.py
@@ -0,0 +1,109 @@
+"""Test agent that demonstrates suspend/resume pattern with RPA process invocation."""
+
+from typing import TypedDict
+
+from langgraph.graph import END, START, StateGraph
+from langgraph.types import interrupt
+from pydantic import BaseModel
+
+from uipath.platform.common import InvokeProcess
+
+
+class Input(BaseModel):
+    """Input for the test agent."""
+
+    query: str
+
+
+class Output(BaseModel):
+    """Output from the test agent."""
+
+    result: str
+    process_output: dict | None = None
+
+
+class State(TypedDict):
+    """Agent state."""
+
+    query: str
+    process_result: dict | None
+    final_result: str
+
+
+def prepare_input(state: State) -> State:
+    """Prepare input for RPA process."""
+    print(f"Preparing to call RPA process with query: {state['query']}")
+    return state
+
+
+def call_rpa_process(state: State) -> State:
+    """Call RPA process - this will suspend execution."""
+    print("Calling RPA process - execution will suspend here")
+
+    # This interrupt() call will cause the runtime to suspend
+    # The serverless executor will detect SUSPENDED status, poll the job,
+    # and then resume execution once the job completes
+    process_result = interrupt(
+        InvokeProcess(
+            name="TestProcess",  # Replace with actual process name
+            input_arguments={"query": state["query"], "timestamp": "2024-01-08"},
+            process_folder_path="Shared",  # Replace with actual folder
+
process_folder_key=None, + ) + ) + + print(f"RPA process completed with result: {process_result}") + + return {**state, "process_result": process_result} + + +def format_output(state: State) -> State: + """Format final output after RPA process completes.""" + process_result = state.get("process_result", {}) + + final_result = ( + f"Processed query '{state['query']}' via RPA. Result: {process_result}" + ) + + return {**state, "final_result": final_result} + + +# Build the graph +builder = StateGraph(State) +builder.add_node("prepare", prepare_input) +builder.add_node("call_rpa", call_rpa_process) +builder.add_node("format", format_output) + +builder.add_edge(START, "prepare") +builder.add_edge("prepare", "call_rpa") +builder.add_edge("call_rpa", "format") +builder.add_edge("format", END) + + +def main(input_data: Input): + """Main entry point for the agent. + + Returns raw dict to preserve __interrupt__ field for suspend/resume. + When execution suspends, the dict will contain __interrupt__ field with trigger data. + When execution completes, the dict will contain final_result. + """ + from langgraph.checkpoint.memory import MemorySaver + + # IMPORTANT: Must use checkpointer for interrupt() to work + checkpointer = MemorySaver() + graph = builder.compile(checkpointer=checkpointer) + + # Generate unique thread ID for this execution + import uuid + + thread_id = f"agent-{uuid.uuid4()}" + + config = {"configurable": {"thread_id": thread_id}} + + result = graph.invoke( + {"query": input_data.query, "process_result": None, "final_result": ""}, config + ) + + # Return raw dict - preserves __interrupt__ field if suspended + # Runtime will detect __interrupt__ and create UiPath trigger + return result diff --git a/src/uipath/_cli/_evals/_runtime.py b/src/uipath/_cli/_evals/_runtime.py index 60202d12d..abe3efbde 100644 --- a/src/uipath/_cli/_evals/_runtime.py +++ b/src/uipath/_cli/_evals/_runtime.py @@ -448,6 +448,14 @@ async def _execute_eval( agent_execution_output = await self.execute_runtime( eval_item, execution_id, runtime ) + + logger.info( + f"DEBUG: Agent execution result status: {agent_execution_output.result.status}" + ) + logger.info( + f"DEBUG: Agent execution result trigger: {agent_execution_output.result.trigger}" + ) + except Exception as e: if self.context.verbose: if isinstance(e, EvaluationRuntimeException): @@ -483,6 +491,58 @@ async def _execute_eval( ) raise + # Check if execution was suspended (e.g., waiting for RPA job completion) + if ( + agent_execution_output.result.status + == UiPathRuntimeStatus.SUSPENDED + ): + # For suspended executions, we don't run evaluators yet + # The serverless executor should save the triggers and resume later + logger.info( + f"Evaluation execution suspended for eval '{eval_item.name}' (id: {eval_item.id})" + ) + + # Extract triggers from result + triggers = [] + if agent_execution_output.result.trigger: + triggers.append(agent_execution_output.result.trigger) + if agent_execution_output.result.triggers: + triggers.extend(agent_execution_output.result.triggers) + + # IMPORTANT: Always include execution output with triggers when suspended + # This ensures triggers are visible in the output JSON for serverless executor + evaluation_run_results.agent_execution_output = ( + convert_eval_execution_output_to_serializable( + agent_execution_output + ) + ) + + # Publish suspended status event + await self.event_bus.publish( + EvaluationEvents.UPDATE_EVAL_RUN, + EvalRunUpdatedEvent( + execution_id=execution_id, + eval_item=eval_item, + eval_results=[], 
+ success=True, # Not failed, just suspended + agent_output={ + "status": "suspended", + "triggers": [ + t.model_dump(by_alias=True) for t in triggers + ], + }, + agent_execution_time=agent_execution_output.execution_time, + spans=agent_execution_output.spans, + logs=agent_execution_output.logs, + exception_details=None, + ), + wait_for_completion=False, + ) + + # Return partial results with trigger information + # The evaluation will be completed when resumed + return evaluation_run_results + if self.context.verbose: evaluation_run_results.agent_execution_output = ( convert_eval_execution_output_to_serializable( diff --git a/src/uipath/functions/runtime.py b/src/uipath/functions/runtime.py index 3267216fd..a6b701c08 100644 --- a/src/uipath/functions/runtime.py +++ b/src/uipath/functions/runtime.py @@ -23,6 +23,11 @@ UiPathRuntimeError, ) from uipath.runtime.schema import UiPathRuntimeSchema +from uipath.runtime.resumable.trigger import ( + UiPathResumeTrigger, + UiPathResumeTriggerType, + UiPathResumeTriggerName, +) from .schema_gen import get_type_schema from .type_conversion import ( @@ -124,6 +129,71 @@ async def _execute_function( return convert_from_class(result) if result is not None else {} + def _detect_langgraph_interrupt( + self, output: dict[str, Any] + ) -> UiPathResumeTrigger | None: + """Detect LangGraph __interrupt__ field and extract InvokeProcess trigger. + + LangGraph's interrupt() creates an __interrupt__ field in the output dict: + { + "query": "...", + "final_result": "", + "__interrupt__": [Interrupt(value=InvokeProcess(...), id="...")] + } + + We extract the InvokeProcess from the interrupt and convert it to a UiPath trigger. + """ + try: + if not isinstance(output, dict): + return None + + # Check for LangGraph's __interrupt__ field + if "__interrupt__" not in output: + return None + + interrupts = output["__interrupt__"] + if not interrupts or not isinstance(interrupts, list): + logger.warning("__interrupt__ field exists but is not a list") + return None + + # Extract first interrupt + interrupt_obj = interrupts[0] + if not hasattr(interrupt_obj, "value"): + logger.warning("Interrupt object missing 'value' attribute") + return None + + invoke_process = interrupt_obj.value + + # Check if it's an InvokeProcess object (has name and input_arguments) + if not ( + hasattr(invoke_process, "name") + and hasattr(invoke_process, "input_arguments") + ): + logger.warning( + f"Interrupt value is not InvokeProcess (type: {type(invoke_process)})" + ) + return None + + logger.info( + f"Detected LangGraph interrupt - suspending execution for process: {invoke_process.name}" + ) + + # Convert InvokeProcess to UiPath trigger + return UiPathResumeTrigger( + trigger_type=UiPathResumeTriggerType.JOB, + trigger_name=UiPathResumeTriggerName.JOB, + item_key=f"job-{uuid.uuid4()}", # Generate unique job key + folder_path=getattr(invoke_process, "process_folder_path", "Shared"), + payload={ + "process_name": invoke_process.name, + "input_arguments": invoke_process.input_arguments or {}, + "folder_key": getattr(invoke_process, "process_folder_key", None), + }, + ) + except Exception as e: + logger.warning(f"Failed to detect LangGraph interrupt: {e}") + return None + async def execute( self, input: dict[str, Any] | None = None, @@ -134,6 +204,21 @@ async def execute( func = self._load_function() output = await self._execute_function(func, input or {}) + logger.info(f"Output type: {type(output)}, has __interrupt__: {'__interrupt__' in output if isinstance(output, dict) else False}") + + # Check 
if output represents a LangGraph interrupt (suspend) + trigger = self._detect_langgraph_interrupt(output) + logger.info(f"Trigger detected: {trigger}") + if trigger: + logger.info( + f"Detected LangGraph interrupt - suspending execution with trigger: {trigger.item_key}" + ) + return UiPathRuntimeResult( + output=None, # No final output yet (suspended) + status=UiPathRuntimeStatus.SUSPENDED, + trigger=trigger, + ) + return UiPathRuntimeResult( output=output, status=UiPathRuntimeStatus.SUCCESSFUL, From 9013c4f967da4c55dda4deef5f8c14fa57f072da Mon Sep 17 00:00:00 2001 From: Chibi Vikram Date: Mon, 12 Jan 2026 05:47:19 -0800 Subject: [PATCH 2/6] feat: Add --resume option and trigger pass-through to eval runtime This commit adds three key features to support suspend/resume in evaluations: 1. Add --resume option to uipath eval command - Added resume flag to CLI arguments - Passed through to UiPathEvalContext - Enables resuming suspended evaluations 2. Use known execution_id for single runtime runs - Added job_id field to UiPathEvalContext - Passed ctx.job_id from UiPathRuntimeContext to eval context - UiPathEvalRuntime now uses job_id as execution_id when available - Falls back to UUID generation when job_id is None 3. Return inner runtime triggers to eval runtime result - Collect all triggers from evaluation run results - Pass triggers through to top-level UiPathRuntimeResult - Enables serverless executor to detect and process suspend triggers - Preserves trigger information from nested runtime executions These changes work with the suspend/resume detection already implemented to enable full suspend/resume flow for RPA process invocations in evaluations. Testing will be done with the test agent in uipath-langchain-python repo. --- src/uipath/_cli/_evals/_runtime.py | 21 ++++++++++++++++++++- src/uipath/_cli/cli_eval.py | 12 ++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/src/uipath/_cli/_evals/_runtime.py b/src/uipath/_cli/_evals/_runtime.py index abe3efbde..a2d5fc2c9 100644 --- a/src/uipath/_cli/_evals/_runtime.py +++ b/src/uipath/_cli/_evals/_runtime.py @@ -192,6 +192,8 @@ class UiPathEvalContext: enable_mocker_cache: bool = False report_coverage: bool = False model_settings_id: str = "default" + resume: bool = False + job_id: str | None = None class UiPathEvalRuntime: @@ -218,7 +220,8 @@ def __init__( self.trace_manager.tracer_provider.add_span_processor(span_processor) self.logs_exporter: ExecutionLogsExporter = ExecutionLogsExporter() - self.execution_id = str(uuid.uuid4()) + # Use job_id if available (for single runtime runs), otherwise generate UUID + self.execution_id = context.job_id or str(uuid.uuid4()) self.coverage = coverage.Coverage(branch=True) async def __aenter__(self) -> "UiPathEvalRuntime": @@ -381,9 +384,25 @@ async def execute(self) -> UiPathRuntimeResult: wait_for_completion=False, ) + # Collect triggers from all evaluation runs (pass-through from inner runtime) + all_triggers = [] + for eval_run_result in results.evaluation_set_results: + if ( + eval_run_result.agent_execution_output + and eval_run_result.agent_execution_output.result + ): + runtime_result = ( + eval_run_result.agent_execution_output.result + ) + if runtime_result.trigger: + all_triggers.append(runtime_result.trigger) + if runtime_result.triggers: + all_triggers.extend(runtime_result.triggers) + result = UiPathRuntimeResult( output={**results.model_dump(by_alias=True)}, status=UiPathRuntimeStatus.SUCCESSFUL, + triggers=all_triggers if all_triggers else None, ) return result 
except Exception as e: diff --git a/src/uipath/_cli/cli_eval.py b/src/uipath/_cli/cli_eval.py index 570832b47..b7e667337 100644 --- a/src/uipath/_cli/cli_eval.py +++ b/src/uipath/_cli/cli_eval.py @@ -106,6 +106,12 @@ def setup_reporting_prereq(no_report: bool) -> bool: type=click.Path(exists=False), help="File path where traces will be written in JSONL format", ) +@click.option( + "--resume", + is_flag=True, + default=False, + help="Resume execution from a previous suspended state", +) def eval( entrypoint: str | None, eval_set: str | None, @@ -118,6 +124,7 @@ def eval( report_coverage: bool, model_settings_id: str, trace_file: str | None, + resume: bool, ) -> None: """Run an evaluation set against the agent. @@ -131,6 +138,7 @@ def eval( enable_mocker_cache: Enable caching for LLM mocker responses report_coverage: Report evaluation coverage model_settings_id: Model settings ID to override agent settings + resume: Resume execution from a previous suspended state """ should_register_progress_reporter = setup_reporting_prereq(no_report) @@ -166,6 +174,7 @@ def eval( eval_context.eval_ids = eval_ids eval_context.report_coverage = report_coverage eval_context.model_settings_id = model_settings_id + eval_context.resume = resume try: @@ -189,6 +198,9 @@ async def execute_eval(): trace_manager=trace_manager, command="eval", ) as ctx: + # Set job_id in eval context for single runtime runs + eval_context.job_id = ctx.job_id + if ctx.job_id: trace_manager.add_span_exporter(LlmOpsHttpExporter()) From 2f213ef0da7e83f5a73646656535369317e2b4f3 Mon Sep 17 00:00:00 2001 From: Chibi Vikram Date: Mon, 12 Jan 2026 05:55:41 -0800 Subject: [PATCH 3/6] feat: Add comprehensive logging for suspend/resume detection in eval runtime Added detailed logging to track the suspend/resume flow: Execution Start: - Log execution ID, job ID, and resume mode status - Clear indication when resume mode is enabled Suspension Detection: - Log when SUSPENDED status is detected - Show number of triggers extracted - Display each trigger's details Trigger Pass-through: - Log trigger collection from all evaluation runs - Show count and details of triggers being passed through - Clear confirmation when triggers reach top-level result This makes the suspend/resume mechanism transparent and easy to debug for both development and production troubleshooting. 
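Note: the following is a minimal, illustrative Python sketch (not part of these patches) of how a caller such as the serverless executor might consume the trigger pass-through and the uipath eval --resume flag introduced in PATCH 2/6. The output-file location, its JSON shape, and the wait_for_job() helper are assumptions for illustration; only the triggers pass-through on the top-level result, the reuse of job_id as execution_id, and the --resume invocation come from this patch series.

# resume_loop_sketch.py -- illustrative only; file paths and helpers are assumed.
import json
import subprocess
import time


def run_eval(resume: bool = False) -> dict:
    """Run `uipath eval`, optionally with --resume, and read its JSON result."""
    cmd = ["uipath", "eval"] + (["--resume"] if resume else [])
    subprocess.run(cmd, check=True)
    with open("eval-output.json") as f:  # assumed output location
        return json.load(f)


def wait_for_job(trigger: dict) -> None:
    """Placeholder for polling the external RPA job referenced by the trigger."""
    print(f"waiting on trigger: {trigger}")
    time.sleep(5)  # a real executor would poll until the job completes


result = run_eval()
if result.get("triggers"):
    for trigger in result["triggers"]:
        wait_for_job(trigger)
    # Re-run once the external job(s) finish; because job_id is reused as the
    # execution_id, the resumed run maps back to the same evaluation execution.
    result = run_eval(resume=True)
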
--- src/uipath/_cli/_evals/_runtime.py | 40 +++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/src/uipath/_cli/_evals/_runtime.py b/src/uipath/_cli/_evals/_runtime.py index a2d5fc2c9..0a06c9d8e 100644 --- a/src/uipath/_cli/_evals/_runtime.py +++ b/src/uipath/_cli/_evals/_runtime.py @@ -299,6 +299,17 @@ async def initiate_evaluation( ) async def execute(self) -> UiPathRuntimeResult: + logger.info("=" * 80) + logger.info("EVAL RUNTIME: Starting evaluation execution") + logger.info(f"EVAL RUNTIME: Execution ID: {self.execution_id}") + logger.info(f"EVAL RUNTIME: Job ID: {self.context.job_id}") + logger.info(f"EVAL RUNTIME: Resume mode: {self.context.resume}") + if self.context.resume: + logger.info( + "🟢 EVAL RUNTIME: RESUME MODE ENABLED - Will resume from suspended state" + ) + logger.info("=" * 80) + # Configure model settings override before creating runtime await self._configure_model_settings_override() @@ -385,6 +396,10 @@ async def execute(self) -> UiPathRuntimeResult: ) # Collect triggers from all evaluation runs (pass-through from inner runtime) + logger.info("=" * 80) + logger.info( + "EVAL RUNTIME: Collecting triggers from all evaluation runs" + ) all_triggers = [] for eval_run_result in results.evaluation_set_results: if ( @@ -399,6 +414,18 @@ async def execute(self) -> UiPathRuntimeResult: if runtime_result.triggers: all_triggers.extend(runtime_result.triggers) + if all_triggers: + logger.info( + f"EVAL RUNTIME: ✅ Passing through {len(all_triggers)} trigger(s) to top-level result" + ) + for i, trigger in enumerate(all_triggers, 1): + logger.info( + f"EVAL RUNTIME: Pass-through trigger {i}: {trigger.model_dump(by_alias=True)}" + ) + else: + logger.info("EVAL RUNTIME: No triggers to pass through") + logger.info("=" * 80) + result = UiPathRuntimeResult( output={**results.model_dump(by_alias=True)}, status=UiPathRuntimeStatus.SUCCESSFUL, @@ -517,9 +544,11 @@ async def _execute_eval( ): # For suspended executions, we don't run evaluators yet # The serverless executor should save the triggers and resume later + logger.info("=" * 80) logger.info( - f"Evaluation execution suspended for eval '{eval_item.name}' (id: {eval_item.id})" + f"🔴 EVAL RUNTIME: DETECTED SUSPENSION for eval '{eval_item.name}' (id: {eval_item.id})" ) + logger.info("EVAL RUNTIME: Agent returned SUSPENDED status") # Extract triggers from result triggers = [] @@ -528,6 +557,15 @@ async def _execute_eval( if agent_execution_output.result.triggers: triggers.extend(agent_execution_output.result.triggers) + logger.info( + f"EVAL RUNTIME: Extracted {len(triggers)} trigger(s) from suspended execution" + ) + for i, trigger in enumerate(triggers, 1): + logger.info( + f"EVAL RUNTIME: Trigger {i}: {trigger.model_dump(by_alias=True)}" + ) + logger.info("=" * 80) + # IMPORTANT: Always include execution output with triggers when suspended # This ensures triggers are visible in the output JSON for serverless executor evaluation_run_results.agent_execution_output = ( From 617ffba0f9fc62e8ab88d6643627fc354876953b Mon Sep 17 00:00:00 2001 From: Chibi Vikram Date: Mon, 12 Jan 2026 07:04:12 -0800 Subject: [PATCH 4/6] docs: add suspend/resume eval runtime architecture diagram --- eval-suspend-resume-architecture.drawio | 96 +++++++++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 eval-suspend-resume-architecture.drawio diff --git a/eval-suspend-resume-architecture.drawio b/eval-suspend-resume-architecture.drawio new file mode 100644 index 000000000..f75696e9f --- /dev/null +++ 
b/eval-suspend-resume-architecture.drawio @@ -0,0 +1,96 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 3722a6987391f5638bf8fc10bcd39328f4cad9ab Mon Sep 17 00:00:00 2001 From: Chibi Vikram Date: Mon, 12 Jan 2026 07:07:39 -0800 Subject: [PATCH 5/6] chore: remove draw.io diagram and event-trigger test agent The draw.io diagram is not working properly. Testing will be done in the uipath-langchain-python repo sample instead. --- eval-suspend-resume-architecture.drawio | 96 --------------- .../test_suspend_resume_agent.py | 109 ------------------ 2 files changed, 205 deletions(-) delete mode 100644 eval-suspend-resume-architecture.drawio delete mode 100644 samples/event-trigger/test_suspend_resume_agent.py diff --git a/eval-suspend-resume-architecture.drawio b/eval-suspend-resume-architecture.drawio deleted file mode 100644 index f75696e9f..000000000 --- a/eval-suspend-resume-architecture.drawio +++ /dev/null @@ -1,96 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/samples/event-trigger/test_suspend_resume_agent.py b/samples/event-trigger/test_suspend_resume_agent.py deleted file mode 100644 index 14a72b50e..000000000 --- a/samples/event-trigger/test_suspend_resume_agent.py +++ /dev/null @@ -1,109 +0,0 @@ -"""Test agent that demonstrates suspend/resume pattern with RPA process invocation.""" - -from typing import TypedDict - -from langgraph.graph import END, START, StateGraph -from langgraph.types import interrupt -from pydantic import BaseModel - -from uipath.platform.common import InvokeProcess - - -class Input(BaseModel): - """Input for the test agent.""" - - query: str - - -class Output(BaseModel): - """Output from the test agent.""" - - result: str - process_output: dict | None = None - - -class State(TypedDict): - """Agent state.""" - - query: str - process_result: dict | None - final_result: str - - -def prepare_input(state: State) -> State: - """Prepare input for RPA process.""" - print(f"Preparing to call RPA process with query: {state['query']}") - return state - - -def call_rpa_process(state: State) -> State: - """Call RPA process - this will suspend execution.""" - print("Calling RPA process - execution will suspend here") - - # This interrupt() call will cause the runtime to suspend - # The serverless executor will detect SUSPENDED status, poll the job, - # and then resume execution once the job completes - process_result = interrupt( - InvokeProcess( - name="TestProcess", # Replace with actual process name - input_arguments={"query": state["query"], "timestamp": "2024-01-08"}, - process_folder_path="Shared", # Replace with actual folder - process_folder_key=None, - ) - ) - - print(f"RPA process completed with result: {process_result}") - - return {**state, "process_result": process_result} - - -def format_output(state: State) -> State: - """Format final output after RPA process completes.""" - process_result = state.get("process_result", {}) - - final_result = ( - f"Processed query '{state['query']}' via RPA. 
Result: {process_result}" - ) - - return {**state, "final_result": final_result} - - -# Build the graph -builder = StateGraph(State) -builder.add_node("prepare", prepare_input) -builder.add_node("call_rpa", call_rpa_process) -builder.add_node("format", format_output) - -builder.add_edge(START, "prepare") -builder.add_edge("prepare", "call_rpa") -builder.add_edge("call_rpa", "format") -builder.add_edge("format", END) - - -def main(input_data: Input): - """Main entry point for the agent. - - Returns raw dict to preserve __interrupt__ field for suspend/resume. - When execution suspends, the dict will contain __interrupt__ field with trigger data. - When execution completes, the dict will contain final_result. - """ - from langgraph.checkpoint.memory import MemorySaver - - # IMPORTANT: Must use checkpointer for interrupt() to work - checkpointer = MemorySaver() - graph = builder.compile(checkpointer=checkpointer) - - # Generate unique thread ID for this execution - import uuid - - thread_id = f"agent-{uuid.uuid4()}" - - config = {"configurable": {"thread_id": thread_id}} - - result = graph.invoke( - {"query": input_data.query, "process_result": None, "final_result": ""}, config - ) - - # Return raw dict - preserves __interrupt__ field if suspended - # Runtime will detect __interrupt__ and create UiPath trigger - return result From a35d2a5e0b736634b5298d02b2cb00efadf4dfc3 Mon Sep 17 00:00:00 2001 From: Chibi Vikram Date: Mon, 12 Jan 2026 07:19:31 -0800 Subject: [PATCH 6/6] style: fix linting issues in functions runtime --- src/uipath/functions/runtime.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/uipath/functions/runtime.py b/src/uipath/functions/runtime.py index a6b701c08..3867fde8f 100644 --- a/src/uipath/functions/runtime.py +++ b/src/uipath/functions/runtime.py @@ -22,12 +22,12 @@ UiPathErrorContract, UiPathRuntimeError, ) -from uipath.runtime.schema import UiPathRuntimeSchema from uipath.runtime.resumable.trigger import ( UiPathResumeTrigger, - UiPathResumeTriggerType, UiPathResumeTriggerName, + UiPathResumeTriggerType, ) +from uipath.runtime.schema import UiPathRuntimeSchema from .schema_gen import get_type_schema from .type_conversion import ( @@ -204,7 +204,9 @@ async def execute( func = self._load_function() output = await self._execute_function(func, input or {}) - logger.info(f"Output type: {type(output)}, has __interrupt__: {'__interrupt__' in output if isinstance(output, dict) else False}") + logger.info( + f"Output type: {type(output)}, has __interrupt__: {'__interrupt__' in output if isinstance(output, dict) else False}" + ) # Check if output represents a LangGraph interrupt (suspend) trigger = self._detect_langgraph_interrupt(output)
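
Note: the following is a standalone Python sketch, not part of the patches above. It mirrors the duck-typed checks that _detect_langgraph_interrupt() performs on a LangGraph output dict, using stand-in classes in place of langgraph.types.Interrupt and uipath.platform.common.InvokeProcess so the contract can be exercised without either dependency. The stand-in classes and the plain-dict trigger are illustrative assumptions; only the detection logic itself reflects PATCH 1/6 and PATCH 6/6.

# suspend_detection_sketch.py -- illustrative only, not part of this patch series.
import uuid
from dataclasses import dataclass
from typing import Any


@dataclass
class Interrupt:
    """Stand-in for langgraph.types.Interrupt (only the fields used here)."""

    value: Any
    id: str = "interrupt-1"


@dataclass
class InvokeProcess:
    """Stand-in for uipath.platform.common.InvokeProcess (assumed field names)."""

    name: str
    input_arguments: dict | None = None
    process_folder_path: str = "Shared"
    process_folder_key: str | None = None


def detect(output: Any) -> dict | None:
    """Mirror the duck-typed checks in _detect_langgraph_interrupt().

    Returns a plain dict where the real runtime builds a UiPathResumeTrigger.
    """
    if not isinstance(output, dict):
        return None
    interrupts = output.get("__interrupt__")
    if not interrupts or not isinstance(interrupts, list):
        return None
    value = getattr(interrupts[0], "value", None)
    if not (hasattr(value, "name") and hasattr(value, "input_arguments")):
        return None
    return {
        "item_key": f"job-{uuid.uuid4()}",
        "folder_path": getattr(value, "process_folder_path", "Shared"),
        "payload": {
            "process_name": value.name,
            "input_arguments": value.input_arguments or {},
            "folder_key": getattr(value, "process_folder_key", None),
        },
    }


# Shape of a suspended LangGraph result, as produced by the test agent above:
suspended = {
    "query": "hello",
    "final_result": "",
    "__interrupt__": [Interrupt(value=InvokeProcess("TestProcess", {"query": "hello"}))],
}
print(detect(suspended))               # -> trigger dict; runtime would report SUSPENDED
print(detect({"final_result": "ok"}))  # -> None; runtime would report SUCCESSFUL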