From dcd40d3e59f3b81b0d53c02051cb420299e921f5 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 6 Feb 2026 15:27:23 -0800 Subject: [PATCH 01/21] adds draft of coupled block generator and example --- examples/coupled_trial_generator.py | 19 ++ .../coupled_trial_generator.py | 272 ++++++++++-------- .../integration_test_trial_generator.py | 26 +- 3 files changed, 186 insertions(+), 131 deletions(-) create mode 100644 examples/coupled_trial_generator.py diff --git a/examples/coupled_trial_generator.py b/examples/coupled_trial_generator.py new file mode 100644 index 0000000..7a782ca --- /dev/null +++ b/examples/coupled_trial_generator.py @@ -0,0 +1,19 @@ +import os +import random + +from aind_behavior_dynamic_foraging.task_logic.trial_generators.coupled_trial_generator import CoupledTrialGeneratorSpec +from aind_behavior_dynamic_foraging.task_logic.trial_models import TrialOutcome, Trial + +def main(): + coupled_trial_generator = CoupledTrialGeneratorSpec().create_generator() + + for i in range(100): + trial_outcome = TrialOutcome(trial=Trial(), + is_right_choice=random.choice([True, False, None]), + is_rewarded=random.choice([True, False])) + coupled_trial_generator.update(trial_outcome) + coupled_trial_generator.next() + + +if __name__ == "__main__": + main() diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index 2ba06c2..deb04b5 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -13,38 +13,44 @@ from ..trial_models import Trial, TrialOutcome from ._base import ITrialGenerator, _BaseTrialGeneratorSpecModel +from datetime import datetime, timedelta -AutoWaterModes = Literal["Natural", "Both", "High pro"] BlockBehaviorEvaluationMode = Literal[ - "ignore", # do not take 
behavior into account when switching blocks - "end", # behavior must be stable at end of block to allow switching - "anytime", -] # behavior can be stable anytime in block to allow switching + "end", # behavior stable at end of block to allow switching + "anytime", # behavior stable anytime in block to allow switching +] -class RewardProbability(BaseModel): - base_reward_sum: float = Field(default=0.8, title="Sum of p_reward") - family: int = Field(default=1, title="Reward family") - pairs_n: int = Field(default=1, title="Number of pairs") - - -class AutoWater(BaseModel): - auto_water_type: AutoWaterModes = Field(default="Natural", title="Auto water mode") - multiplier: float = Field(default=0.8, title="Multiplier for auto reward") - unrewarded: int = Field(default=200, title="Number of unrewarded trials before auto water") - ignored: int = Field(default=100, title="Number of ignored trials before auto water") +class TrialGenerationEndConditions(BaseModel): + ignore_win: int = Field(default=30, title="Window of trials to check ignored responses") + ignore_ratio_threshold: float = Field( + default=0.8, title="Threshold for acceptable ignored trials within window.", ge=0, le=1 + ) + max_trial: int = Field(default=1000, title="Maximal number of trials") + max_time: timedelta = Field(timedelta(minutes=75), title="Maximal session time (min)") + min_time: timedelta = Field(default=timedelta(minutes=30), title="Minimum session time (min)") -class Warmup(BaseModel): - min_trial: int = Field(default=50, title="Warmup finish criteria: minimal trials") - max_choice_ratio_bias: float = Field( - default=0.1, title="Warmup finish criteria: maximal choice ratio bias from 0.5" +class BehaviorStabilityParameters(BaseModel): + behavior_evaluation_mode: BlockBehaviorEvaluationMode = Field( + default="end", title="Mode to evaluate behavior stability.", validate_default=True ) - min_finish_ratio: float = Field(default=0.8, title="Warmup finish criteria: minimal finish ratio") - 
windowsize: int = Field( - default=20, - title="Warmup finish criteria: window size to compute the bias and ratio", + behavior_stability_fraction: float = Field( + default=0.5, + ge=0, + le=1, + description="Fraction scaling reward-probability difference for behavior.", ) + min_consecutive_stable_trials: int = Field( + default=5, + description="Minimum number of consecutive trials satisfying the behavioral stability fraction.", + ) + + +class RewardProbabilityParameters(BaseModel): + base_reward_sum: float = Field(default=0.8, title="Sum of p_reward") + family: int = Field(default=1, title="Reward family") + pairs_n: int = Field(default=1, title="Number of pairs") class Block(BaseModel): @@ -56,46 +62,49 @@ class Block(BaseModel): class CoupledTrialGeneratorSpec(_BaseTrialGeneratorSpecModel): type: Literal["CoupledTrialGenerator"] = "CoupledTrialGenerator" - iti: Union[UniformDistribution, ExponentialDistribution] = Field( - default=ExponentialDistribution( - distribution_parameters=ExponentialDistributionParameters(rate=1 / 2), - truncation_parameters=TruncationParameters(min=1, max=8), - ) - ) - quiescent_period: Union[UniformDistribution, ExponentialDistribution] = Field( + quiescent_duration_distribution: Union[UniformDistribution, ExponentialDistribution] = Field( default=ExponentialDistribution( distribution_parameters=ExponentialDistributionParameters(rate=1), truncation_parameters=TruncationParameters(min=0, max=1), - ) + ), + description="Duration of the quiescence period before trial starts (in seconds). 
Each lick resets the timer.", ) - response_time: float = Field(default=1.0, title="Response time") - reward_consume_time: float = Field( + response_duration: float = Field(default=1.0, description="Duration after go cue for animal response.") + + reward_consumption_duration: float = Field( default=3.0, - title="Reward consume time", - description="Time of the no-lick period before trial end", + description="Duration of reward consumption before transition to ITI (in seconds).", ) - block_parameters: Union[UniformDistribution, ExponentialDistribution] = Field( + + inter_trial_interval_duration_distribution: Union[UniformDistribution, ExponentialDistribution] = Field( + default=ExponentialDistribution( + distribution_parameters=ExponentialDistributionParameters(rate=1 / 2), + truncation_parameters=TruncationParameters(min=1, max=8), + ), + description="Duration of the inter-trial interval (in seconds).", + ) + + block_len_distribution: Union[UniformDistribution, ExponentialDistribution] = Field( default=ExponentialDistribution( distribution_parameters=ExponentialDistributionParameters(rate=1 / 20), truncation_parameters=TruncationParameters(min=20, max=60), ) ) - min_reward: int = Field(default=1, title="Minimal rewards in a block to switch") - auto_water: Optional[AutoWater] = Field(default=None, description="Parameters describing auto water.") - behavior_evaluation_mode: BlockBehaviorEvaluationMode = Field( - default="ignore", title="Auto block mode", validate_default=True + trial_generation_end_parameters: TrialGenerationEndConditions = Field( + default=TrialGenerationEndConditions(), description="Conditions to end trial generation." + ) + min_block_reward: int = Field(default=1, title="Minimal rewards in a block to switch") + behavior_stability_parameters: Optional[BehaviorStabilityParameters] = Field( + default=None, description="Parameters describing behavior stability required to switch blocks." 
) - switch_thr: float = Field(default=0.5, title="Switch threshold for auto block") - points_in_a_row: int = Field(default=5, title="Points in a row for auto block") - warmup: Optional[Warmup] = Field(default=None, description="Parameters describing warmup.") - no_response_trial_addition: bool = Field( + extend_block_on_no_response: bool = Field( default=True, - description="Add one trial to the block length on both lickspouts.", + description="Add one trial to the min block length.", ) - kernel_size: int - reward_probability_specs: RewardProbability = Field(default=RewardProbability()) + kernel_size: int = Field(default=2, description="Kernel to evaluate choice fraction.") + reward_probability_parameters: RewardProbabilityParameters = Field(default=RewardProbabilityParameters()) reward_family: list = [ [[8, 1], [6, 1], [3, 1], [1, 1]], [[8, 1], [1, 1]], @@ -122,39 +131,79 @@ def __init__(self, spec: CoupledTrialGeneratorSpec) -> None: self.is_right_choice_history: list[bool | None] = [] self.reward_history: list[bool] = [] self.block_history: list[Block] = [] - self.block: Block = Block() + self.block: Block = self.generate_next_block( + reward_families=self.spec.reward_family, + reward_family_index=self.spec.reward_probability_parameters.family, + reward_pairs_n=self.spec.reward_probability_parameters.pairs_n, + base_reward_sum=self.spec.reward_probability_parameters.base_reward_sum, + block_len_distribution=self.spec.block_len_distribution, + ) self.trials_in_block = 0 + self.start_time = datetime.now() def next(self) -> Trial | None: """ - generate next trial + Generate next trial - :param self: Description - :return: Description - :rtype: Trial | None """ - iti = self.evaluate_distribution(self.spec.iti) - quiescent = self.evaluate_distribution(self.spec.quiescent_period) + # check end conditions + if not self.are_end_conditions_met( + self.spec.trial_generation_end_parameters, self.is_right_choice_history, self.start_time + ): + return + + # determine iti and 
quiescent period duration + iti = self.evaluate_distribution(self.spec.inter_trial_interval_duration_distribution) + quiescent = self.evaluate_distribution(self.spec.quiescent_duration_distribution) + # iterate trials in block self.trials_in_block += 1 return Trial( p_reward_left=self.block.left_reward_prob, p_reward_right=self.block.right_reward_prob, - reward_consumption_duration=self.spec.reward_consume_time, - response_deadline_duration=self.spec.response_time, + reward_consumption_duration=self.spec.reward_consumption_duration, + response_deadline_duration=self.spec.response_duration, quiescence_period_duration=quiescent, inter_trial_interval_duration=iti, ) + @staticmethod + def are_end_conditions_met( + end_conditions: TrialGenerationEndConditions, choice_history: list[bool | None], start_time: datetime + ) -> bool: + """ + + Check if end conditions are met to stop session + + :param end_conditons: conditions to be met for trial generation to stop + + """ + time_elapsed = datetime.now() - start_time + if time_elapsed < end_conditions.min_time: + return True + + if end_conditions.max_trial < len(choice_history): + return False + + if end_conditions.max_time < time_elapsed: + return False + + frac = end_conditions.ignore_ratio_threshold + win = end_conditions.ignore_win + if choice_history[-win:].count(None) > frac * win: + return False + + return True + @staticmethod def evaluate_distribution( distribution: Union[UniformDistribution, ExponentialDistribution], - ) -> Union[UniformDistribution, ExponentialDistribution]: + ) -> float: if distribution.family == DistributionFamily.EXPONENTIAL: return ( - np.random.exponential(1 / distribution.distribution_parameters.rate, 1) + np.random.exponential(1 / distribution.distribution_parameters.rate) + distribution.truncation_parameters.min ) elif distribution.family == DistributionFamily.UNIFORM: @@ -170,51 +219,49 @@ def update(self, outcome: TrialOutcome) -> None: """ Check if block should switch, generate next block 
if necessary, and generate next trial - :param self: Description - :param outcome: Description - :type outcome: TrialOutcome + :param outcome: trial outcome of previous trial """ - - self.is_right_choice_history.append[outcome.is_right_choice] - self.reward_history.append[outcome.is_rewarded] + + self.is_right_choice_history.append(outcome.is_right_choice) + self.reward_history.append(outcome.is_rewarded) self.trials_in_block += 1 + if self.spec.extend_block_on_no_response and outcome.is_right_choice == None: + self.block.min_length += 1 + switch_block = self.switch_block( trials_in_block=self.trials_in_block, - min_block_reward=self.spec.min_reward, + min_block_reward=self.spec.min_block_reward, block_left_rewards=self.reward_history.count(False), block_right_rewards=self.reward_history.count(True), choice_history=self.is_right_choice_history, right_reward_prob=self.block.right_reward_prob, left_reward_prob=self.block.left_reward_prob, - beh_eval_mode=self.spec.behavior_evaluation_mode, + beh_stability_params=self.spec.behavior_stability_parameters, block_length=self.block.min_length, - points_in_a_row=self.spec.points_in_a_row, - switch_thr=self.spec.switch_thr, kernel_size=self.spec.kernel_size, ) - + if switch_block: + print("block switch") self.trials_in_block = 0 - self.block = self.generate_block( + self.block = self.generate_next_block( reward_families=self.spec.reward_family, - reward_family_index=self.spec.reward_probability_specs.family, - reward_pairs_n=self.spec.reward_probability_specs.pairs_n, - base_reward_sum=self.spec.reward_probability_specs.base_reward_sum, - block_history=self.block_history, - block_distribution=self.spec.block_parameters, + reward_family_index=self.spec.reward_probability_parameters.family, + reward_pairs_n=self.spec.reward_probability_parameters.pairs_n, + base_reward_sum=self.spec.reward_probability_parameters.base_reward_sum, + current_block=self.block, + block_len_distribution=self.spec.block_len_distribution, ) 
self.block_history.append(self.block) def is_behavior_stable( self, - choice_history: np.ndarray, + choice_history: list, right_reward_prob: float, left_reward_prob: float, - beh_eval_mode: BlockBehaviorEvaluationMode, + beh_stability_params: BehaviorStabilityParameters, trials_in_block: int, - points_in_a_row: int = 3, - switch_thr: float = 0.8, kernel_size: int = 2, ) -> Optional[bool]: """ @@ -224,16 +271,14 @@ def is_behavior_stable( choice_history: 1D array with 0: left, 1: right and None: ignored entries. right_reward_prob: reward probability for right side left_reward_prob: reward probability for left side - beh_eval_mode: mode to evaluate behavior + beh_stability_params: Parameters to evaluate behavior trials_in_block: number of trials in current block. In couple trials, both sides have same block length so block length is int. - points_in_a_row: number of consecutive trials above threshold required - switch_thr: fraction threshold to define stable behavior kernel_size: kernel to evaluate choice fraction """ # do not prohibit block transition if does not rely on behavior or not enough trials to evaluate or reward probs are the same. - if beh_eval_mode == "ignore" or left_reward_prob == right_reward_prob or len(choice_history) < kernel_size: + if not beh_stability_params or left_reward_prob == right_reward_prob or len(choice_history) < kernel_size: return True # compute fraction of right choices with running average using a sliding window @@ -241,7 +286,7 @@ def is_behavior_stable( block_choice_frac = self.compute_choice_fraction(kernel_size, block_history) # margin based on right and left probabilities and scaled by switch threshold. 
Window for evaluating behavior - delta = abs((left_reward_prob - right_reward_prob) * float(switch_thr)) + delta = abs((left_reward_prob - right_reward_prob) * float(beh_stability_params.behavior_stability_fraction)) threshold = ( [0, left_reward_prob - delta] if left_reward_prob > right_reward_prob else [left_reward_prob + delta, 1] ) @@ -252,34 +297,33 @@ def is_behavior_stable( block_choice_frac <= threshold[1], ) - # check consecutive pts above threshold - if points_in_a_row <= 0: - return True - - if beh_eval_mode == "end": + if beh_stability_params.behavior_evaluation_mode == "end": # requires consecutive trials ending on the last trial # check if the current trial occurs at the end of a long enough consecutive run above threshold - if len(points_above_threshold) < points_in_a_row: + if len(points_above_threshold) < beh_stability_params.min_consecutive_stable_trials: return False - return np.all(points_above_threshold[-points_in_a_row:]) + return np.all(points_above_threshold[-beh_stability_params.min_consecutive_stable_trials :]) - elif beh_eval_mode == "anytime": + elif beh_stability_params.behavior_evaluation_mode == "anytime": # allows consecutive trials any time in the behavior run_len = 0 for v in points_above_threshold: if v: run_len += 1 else: - if run_len >= points_in_a_row: + if run_len >= beh_stability_params.min_consecutive_stable_trials: return True else: run_len = 0 - return run_len >= points_in_a_row + return run_len >= beh_stability_params.min_consecutive_stable_trials else: - raise ValueError(f"Behavior evaluation mode {beh_eval_mode} not recognized.") + raise ValueError( + f"Behavior evaluation mode {beh_stability_params.behavior_evaluation_mode} not recognized." 
+ ) - def compute_choice_fraction(self, kernel_size: int, choice_history: list[int | None]): + @staticmethod + def compute_choice_fraction(kernel_size: int, choice_history: list[int | None]): """ Compute fraction of right choices with running average using a sliding window @@ -300,13 +344,11 @@ def switch_block( min_block_reward: int, block_left_rewards: int, block_right_rewards: int, - choice_history: np.ndarray, + choice_history: list, right_reward_prob: float, left_reward_prob: float, - beh_eval_mode: BlockBehaviorEvaluationMode, + beh_stability_params: BehaviorStabilityParameters, block_length: int, - points_in_a_row: int = 3, - switch_thr: float = 0.8, kernel_size: int = 2, ) -> bool: """ @@ -317,10 +359,8 @@ def switch_block( choice_history: 2D array (rows = sides, columns = trials) with 0: left, 1: right and 2: ignored entries. right_reward_prob: reward probability for right side left_reward_prob: reward probability for left side - beh_eval_mode: mode to evaluate behavior + beh_stability_params: parameters to evaluate behavior block_length: planned number of trials in current block. In couple trials, both sides have same block length so block length is int. 
- points_in_a_row: number of consecutive trials above threshold required - switch_thr: fraction threshold to define stable behavior kernel_size: kernel to evaluate choice fraction """ @@ -332,10 +372,8 @@ def switch_block( choice_history, right_reward_prob, left_reward_prob, - beh_eval_mode, + beh_stability_params, trials_in_block, - points_in_a_row, - switch_thr, kernel_size, ) @@ -349,14 +387,14 @@ def switch_block( return block_length_ok and reward_ok and behavior_ok - def generate_block( + def generate_next_block( self, reward_families: list, reward_family_index: int, reward_pairs_n: int, base_reward_sum: float, - block_history: list[Block], - block_distribution: Union[UniformDistribution, ExponentialDistribution], + block_len_distribution: Union[UniformDistribution, ExponentialDistribution], + current_block: Optional[None] = None, ) -> Block: """ Generate the next block for a coupled task. @@ -365,8 +403,8 @@ def generate_block( :param reward_family_index: Description :param reward_pairs_n: Description :param base_reward_sum: Description - :param reward_prob_history: Description - :param block_distribution: Description + :param current_block: Description + :param block_len_distribution: Description """ # determine candidate reward pairs @@ -378,10 +416,9 @@ def generate_block( # create pool including all reward probabiliteis and mirrored pairs reward_prob_pool = np.vstack([reward_prob, np.fliplr(reward_prob)]) - if block_history: # exclude previous block if history exists - reward_prob_history = [[block.right_reward_prob, block.left_reward_prob] for block in block_history] - last_block_reward_prob = reward_prob_history[:, -1] - + if current_block: # exclude previous block if history exists + last_block_reward_prob = [current_block.right_reward_prob, current_block.left_reward_prob] + # remove blocks identical to last block reward_prob_pool = reward_prob_pool[np.any(reward_prob_pool != last_block_reward_prob, axis=1)] @@ -398,8 +435,7 @@ def generate_block( 
right_reward_prob, left_reward_prob = reward_prob_pool[random.choice(range(reward_prob_pool.shape[0]))] # randomly pick block length - next_block_len = self.evaluate_distribution(block_distribution) - + next_block_len = round(self.evaluate_distribution(block_len_distribution)) return Block( right_reward_prob=right_reward_prob, left_reward_prob=left_reward_prob, diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/integration_test_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/integration_test_trial_generator.py index 947a262..66ae28d 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/integration_test_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/integration_test_trial_generator.py @@ -31,37 +31,37 @@ def __init__(self, spec: IntegrationTestTrialGeneratorSpec) -> None: ), # 6: right reward, auto response right Trial( p_reward_left=1.0, p_reward_right=1.0, is_auto_response_right=True - ), # 9: both reward, auto response right, + ), # 7: both reward, auto response right, Trial( p_reward_left=0.0, p_reward_right=0.0, is_auto_response_right=True - ), # 10: no reward, auto response right, + ), # 8: no reward, auto response right, # auto response left Trial( p_reward_left=1.0, p_reward_right=0.0, is_auto_response_right=False - ), # 11: left reward, auto response left, + ), # 9: left reward, auto response left, Trial( p_reward_left=0.0, p_reward_right=1.0, is_auto_response_right=False - ), # 12: right reward, auto response left, + ), # 10: right reward, auto response left, Trial( p_reward_left=0.0, p_reward_right=1.0, is_auto_response_right=False - ), # 13: both reward, auto response left, + ), # 11: both reward, auto response left, Trial( p_reward_left=0.0, p_reward_right=0.0, is_auto_response_right=False - ), # 14: no reward, auto response left, + ), # 12: no reward, auto response left, # fast retract - Trial(enable_fast_retract=True), # 15: 
enable fast retract + Trial(enable_fast_retract=True), # 13: enable fast retract # secondary reinforcer - # Trial(secondary_reinforcer=SecondaryReinforcer()), # 16: enable secondary reinforcer + # Trial(secondary_reinforcer=SecondaryReinforcer()), # 14: enable secondary reinforcer # no reward consumption duration - Trial(reward_consumption_duration=0), # 17: no reward consumption duration + Trial(reward_consumption_duration=0), # 15: no reward consumption duration # no reward delay - Trial(reward_delay_duration=0), # 18: no reward delay duration + Trial(reward_delay_duration=0), # 16: no reward delay duration # no response deadline duration - Trial(response_deadline_duration=0), # 19: no response deadline duration + Trial(response_deadline_duration=0), # 17: no response deadline duration # no quiescence period duration - Trial(quiescence_period_duration=0), # 20: no quiescence period duration + Trial(quiescence_period_duration=0), # 18: no quiescence period duration # no inter trial interval - Trial(inter_trial_interval_duration=0.5), # 21: no inter trial interval duration + Trial(inter_trial_interval_duration=0.5), # 19: no inter trial interval duration ] def next(self) -> Trial | None: From df5dc7fb7ed0fed8172aad52a84d40b4e5f5e3b6 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 6 Feb 2026 15:28:28 -0800 Subject: [PATCH 02/21] regenerates schemas --- schema/aind_behavior_dynamic_foraging.json | 307 ++-- .../AindBehaviorDynamicForaging.Generated.cs | 1281 ++++++++++------- 2 files changed, 918 insertions(+), 670 deletions(-) diff --git a/schema/aind_behavior_dynamic_foraging.json b/schema/aind_behavior_dynamic_foraging.json index 14035b8..0a51576 100644 --- a/schema/aind_behavior_dynamic_foraging.json +++ b/schema/aind_behavior_dynamic_foraging.json @@ -253,7 +253,7 @@ "description": "Parameters describing auto water.", "oneOf": [ { - "$ref": "#/$defs/aind_behavior_dynamic_foraging__task_logic__AutoWater" + "$ref": "#/$defs/AutoWater" }, { "type": "null" @@ 
-589,6 +589,43 @@ "title": "AutoBlock", "type": "object" }, + "AutoWater": { + "properties": { + "auto_water_type": { + "default": "Natural", + "enum": [ + "Natural", + "Both", + "High pro" + ], + "title": "Auto water mode", + "type": "string" + }, + "multiplier": { + "default": 0.8, + "title": "Multiplier for auto reward", + "type": "number" + }, + "unrewarded": { + "default": 200, + "title": "Number of unrewarded trials before auto water", + "type": "integer" + }, + "ignored": { + "default": 100, + "title": "Number of ignored trials before auto water", + "type": "integer" + }, + "include_reward": { + "default": false, + "description": "Include auto water in total rewards.", + "title": "Include Reward", + "type": "boolean" + } + }, + "title": "AutoWater", + "type": "object" + }, "Axis": { "description": "Motor axis available", "enum": [ @@ -670,6 +707,35 @@ "title": "BaseModel", "type": "object" }, + "BehaviorStabilityParameters": { + "properties": { + "behavior_evaluation_mode": { + "default": "end", + "enum": [ + "end", + "anytime" + ], + "title": "Mode to evaluate behavior stability.", + "type": "string" + }, + "behavior_stability_fraction": { + "default": 0.5, + "description": "Fraction scaling reward-probability difference for behavior.", + "maximum": 1, + "minimum": 0, + "title": "Behavior Stability Fraction", + "type": "number" + }, + "min_consecutive_stable_trials": { + "default": 5, + "description": "Minimum number of consecutive trials satisfying the behavioral stability fraction.", + "title": "Min Consecutive Stable Trials", + "type": "integer" + } + }, + "title": "BehaviorStabilityParameters", + "type": "object" + }, "BlockParameters": { "properties": { "min": { @@ -830,20 +896,21 @@ "title": "Type", "type": "string" }, - "iti": { + "quiescent_duration_distribution": { "default": { "family": "Exponential", "distribution_parameters": { "family": "Exponential", - "rate": 0.5 + "rate": 1.0 }, "truncation_parameters": { - "max": 8.0, - "min": 1.0, + "max": 
1.0, + "min": 0.0, "truncation_mode": "exclude" }, "scaling_parameters": null }, + "description": "Duration of the quiescence period before trial starts (in seconds). Each lick resets the timer.", "oneOf": [ { "$ref": "#/$defs/UniformDistribution" @@ -852,22 +919,35 @@ "$ref": "#/$defs/ExponentialDistribution" } ], - "title": "Iti" + "title": "Quiescent Duration Distribution" + }, + "response_duration": { + "default": 1.0, + "description": "Duration after go cue for animal response.", + "title": "Response Duration", + "type": "number" }, - "quiescent_period": { + "reward_consumption_duration": { + "default": 3.0, + "description": "Duration of reward consumption before transition to ITI (in seconds).", + "title": "Reward Consumption Duration", + "type": "number" + }, + "inter_trial_interval_duration_distribution": { "default": { "family": "Exponential", "distribution_parameters": { "family": "Exponential", - "rate": 1.0 + "rate": 0.5 }, "truncation_parameters": { - "max": 1.0, - "min": 0.0, + "max": 8.0, + "min": 1.0, "truncation_mode": "exclude" }, "scaling_parameters": null }, + "description": "Duration of the inter-trial interval (in seconds).", "oneOf": [ { "$ref": "#/$defs/UniformDistribution" @@ -876,20 +956,9 @@ "$ref": "#/$defs/ExponentialDistribution" } ], - "title": "Quiescent Period" - }, - "response_time": { - "default": 1.0, - "title": "Response time", - "type": "number" + "title": "Inter Trial Interval Duration Distribution" }, - "reward_consume_time": { - "default": 3.0, - "description": "Time of the no-lick period before trial end", - "title": "Reward consume time", - "type": "number" - }, - "block_parameters": { + "block_len_distribution": { "default": { "family": "Exponential", "distribution_parameters": { @@ -911,69 +980,50 @@ "$ref": "#/$defs/ExponentialDistribution" } ], - "title": "Block Parameters" + "title": "Block Len Distribution" }, - "min_reward": { + "trial_generation_end_parameters": { + "$ref": "#/$defs/TrialGenerationEndConditions", + 
"default": { + "ignore_win": 30, + "ignore_ratio_threshold": 0.8, + "max_trial": 1000, + "max_time": "PT1H15M", + "min_time": "PT30M" + }, + "description": "Conditions to end trial generation." + }, + "min_block_reward": { "default": 1, "title": "Minimal rewards in a block to switch", "type": "integer" }, - "auto_water": { + "behavior_stability_parameters": { "default": null, - "description": "Parameters describing auto water.", + "description": "Parameters describing behavior stability required to switch blocks.", "oneOf": [ { - "$ref": "#/$defs/aind_behavior_dynamic_foraging__task_logic__trial_generators__coupled_trial_generator__AutoWater" + "$ref": "#/$defs/BehaviorStabilityParameters" }, { "type": "null" } ] }, - "behavior_evaluation_mode": { - "default": "ignore", - "enum": [ - "ignore", - "end", - "anytime" - ], - "title": "Auto block mode", - "type": "string" - }, - "switch_thr": { - "default": 0.5, - "title": "Switch threshold for auto block", - "type": "number" - }, - "points_in_a_row": { - "default": 5, - "title": "Points in a row for auto block", - "type": "integer" - }, - "warmup": { - "default": null, - "description": "Parameters describing warmup.", - "oneOf": [ - { - "$ref": "#/$defs/Warmup" - }, - { - "type": "null" - } - ] - }, - "no_response_trial_addition": { + "extend_block_on_no_response": { "default": true, - "description": "Add one trial to the block length on both lickspouts.", - "title": "No Response Trial Addition", + "description": "Add one trial to the min block length.", + "title": "Extend Block On No Response", "type": "boolean" }, "kernel_size": { + "default": 2, + "description": "Kernel to evaluate choice fraction.", "title": "Kernel Size", "type": "integer" }, - "reward_probability_specs": { - "$ref": "#/$defs/RewardProbability", + "reward_probability_parameters": { + "$ref": "#/$defs/RewardProbabilityParameters", "default": { "base_reward_sum": 0.8, "family": 1, @@ -1056,9 +1106,6 @@ "type": "array" } }, - "required": [ - 
"kernel_size" - ], "title": "CoupledTrialGeneratorSpec", "type": "object" }, @@ -1686,6 +1733,27 @@ "title": "RewardProbability", "type": "object" }, + "RewardProbabilityParameters": { + "properties": { + "base_reward_sum": { + "default": 0.8, + "title": "Sum of p_reward", + "type": "number" + }, + "family": { + "default": 1, + "title": "Reward family", + "type": "integer" + }, + "pairs_n": { + "default": 1, + "title": "Number of pairs", + "type": "integer" + } + }, + "title": "RewardProbabilityParameters", + "type": "object" + }, "RewardSize": { "properties": { "right_value_volume": { @@ -2401,6 +2469,41 @@ "title": "Trial", "type": "object" }, + "TrialGenerationEndConditions": { + "properties": { + "ignore_win": { + "default": 30, + "title": "Window of trials to check ignored responses", + "type": "integer" + }, + "ignore_ratio_threshold": { + "default": 0.8, + "maximum": 1, + "minimum": 0, + "title": "Threshold for acceptable ignored trials within window.", + "type": "number" + }, + "max_trial": { + "default": 1000, + "title": "Maximal number of trials", + "type": "integer" + }, + "max_time": { + "default": "PT1H15M", + "format": "duration", + "title": "Maximal session time (min)", + "type": "string" + }, + "min_time": { + "default": "PT30M", + "format": "duration", + "title": "Minimum session time (min)", + "type": "string" + } + }, + "title": "TrialGenerationEndConditions", + "type": "object" + }, "TrialGeneratorSpec": { "description": "Type of trial generator", "discriminator": { @@ -2794,74 +2897,6 @@ }, "title": "WebCamera", "type": "object" - }, - "aind_behavior_dynamic_foraging__task_logic__AutoWater": { - "properties": { - "auto_water_type": { - "default": "Natural", - "enum": [ - "Natural", - "Both", - "High pro" - ], - "title": "Auto water mode", - "type": "string" - }, - "multiplier": { - "default": 0.8, - "title": "Multiplier for auto reward", - "type": "number" - }, - "unrewarded": { - "default": 200, - "title": "Number of unrewarded trials before 
auto water", - "type": "integer" - }, - "ignored": { - "default": 100, - "title": "Number of ignored trials before auto water", - "type": "integer" - }, - "include_reward": { - "default": false, - "description": "Include auto water in total rewards.", - "title": "Include Reward", - "type": "boolean" - } - }, - "title": "AutoWater", - "type": "object" - }, - "aind_behavior_dynamic_foraging__task_logic__trial_generators__coupled_trial_generator__AutoWater": { - "properties": { - "auto_water_type": { - "default": "Natural", - "enum": [ - "Natural", - "Both", - "High pro" - ], - "title": "Auto water mode", - "type": "string" - }, - "multiplier": { - "default": 0.8, - "title": "Multiplier for auto reward", - "type": "number" - }, - "unrewarded": { - "default": 200, - "title": "Number of unrewarded trials before auto water", - "type": "integer" - }, - "ignored": { - "default": 100, - "title": "Number of ignored trials before auto water", - "type": "integer" - } - }, - "title": "AutoWater", - "type": "object" } } } \ No newline at end of file diff --git a/src/Extensions/AindBehaviorDynamicForaging.Generated.cs b/src/Extensions/AindBehaviorDynamicForaging.Generated.cs index 6dcd18c..32b97b8 100644 --- a/src/Extensions/AindBehaviorDynamicForaging.Generated.cs +++ b/src/Extensions/AindBehaviorDynamicForaging.Generated.cs @@ -598,7 +598,7 @@ public partial class AindDynamicForagingTaskParameters private double _rewardDelay; - private AindBehaviorDynamicForagingTaskLogicAutoWater _autoWater; + private AutoWater _autoWater; private InterTrialInterval _interTrialInterval; @@ -785,7 +785,7 @@ public double RewardDelay [System.Xml.Serialization.XmlIgnoreAttribute()] [Newtonsoft.Json.JsonPropertyAttribute("auto_water")] [System.ComponentModel.DescriptionAttribute("Parameters describing auto water.")] - public AindBehaviorDynamicForagingTaskLogicAutoWater AutoWater + public AutoWater AutoWater { get { @@ -1474,6 +1474,144 @@ public override string ToString() } + 
[System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] + [Bonsai.CombinatorAttribute(MethodName="Generate")] + public partial class AutoWater + { + + private AutoWaterType _autoWaterType; + + private double _multiplier; + + private int _unrewarded; + + private int _ignored; + + private bool _includeReward; + + public AutoWater() + { + _autoWaterType = AutoWaterType.Natural; + _multiplier = 0.8D; + _unrewarded = 200; + _ignored = 100; + _includeReward = false; + } + + protected AutoWater(AutoWater other) + { + _autoWaterType = other._autoWaterType; + _multiplier = other._multiplier; + _unrewarded = other._unrewarded; + _ignored = other._ignored; + _includeReward = other._includeReward; + } + + [Newtonsoft.Json.JsonPropertyAttribute("auto_water_type")] + public AutoWaterType AutoWaterType + { + get + { + return _autoWaterType; + } + set + { + _autoWaterType = value; + } + } + + [Newtonsoft.Json.JsonPropertyAttribute("multiplier")] + public double Multiplier + { + get + { + return _multiplier; + } + set + { + _multiplier = value; + } + } + + [Newtonsoft.Json.JsonPropertyAttribute("unrewarded")] + public int Unrewarded + { + get + { + return _unrewarded; + } + set + { + _unrewarded = value; + } + } + + [Newtonsoft.Json.JsonPropertyAttribute("ignored")] + public int Ignored + { + get + { + return _ignored; + } + set + { + _ignored = value; + } + } + + /// + /// Include auto water in total rewards. 
+ /// + [Newtonsoft.Json.JsonPropertyAttribute("include_reward")] + [System.ComponentModel.DescriptionAttribute("Include auto water in total rewards.")] + public bool IncludeReward + { + get + { + return _includeReward; + } + set + { + _includeReward = value; + } + } + + public System.IObservable Generate() + { + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new AutoWater(this))); + } + + public System.IObservable Generate(System.IObservable source) + { + return System.Reactive.Linq.Observable.Select(source, _ => new AutoWater(this)); + } + + protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) + { + stringBuilder.Append("AutoWaterType = " + _autoWaterType + ", "); + stringBuilder.Append("Multiplier = " + _multiplier + ", "); + stringBuilder.Append("Unrewarded = " + _unrewarded + ", "); + stringBuilder.Append("Ignored = " + _ignored + ", "); + stringBuilder.Append("IncludeReward = " + _includeReward); + return true; + } + + public override string ToString() + { + System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); + stringBuilder.Append(GetType().Name); + stringBuilder.Append(" { "); + if (PrintMembers(stringBuilder)) + { + stringBuilder.Append(" "); + } + stringBuilder.Append("}"); + return stringBuilder.ToString(); + } + } + + /// /// Motor axis available /// @@ -1741,6 +1879,113 @@ public override string ToString() } + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] + [Bonsai.CombinatorAttribute(MethodName="Generate")] + public partial class BehaviorStabilityParameters + { + + private BehaviorStabilityParametersBehaviorEvaluationMode _behaviorEvaluationMode; + + private double _behaviorStabilityFraction; + + private int _minConsecutiveStableTrials; + + public BehaviorStabilityParameters() + { + _behaviorEvaluationMode = 
BehaviorStabilityParametersBehaviorEvaluationMode.End; + _behaviorStabilityFraction = 0.5D; + _minConsecutiveStableTrials = 5; + } + + protected BehaviorStabilityParameters(BehaviorStabilityParameters other) + { + _behaviorEvaluationMode = other._behaviorEvaluationMode; + _behaviorStabilityFraction = other._behaviorStabilityFraction; + _minConsecutiveStableTrials = other._minConsecutiveStableTrials; + } + + [Newtonsoft.Json.JsonPropertyAttribute("behavior_evaluation_mode")] + public BehaviorStabilityParametersBehaviorEvaluationMode BehaviorEvaluationMode + { + get + { + return _behaviorEvaluationMode; + } + set + { + _behaviorEvaluationMode = value; + } + } + + /// + /// Fraction scaling reward-probability difference for behavior. + /// + [Newtonsoft.Json.JsonPropertyAttribute("behavior_stability_fraction")] + [System.ComponentModel.DescriptionAttribute("Fraction scaling reward-probability difference for behavior.")] + public double BehaviorStabilityFraction + { + get + { + return _behaviorStabilityFraction; + } + set + { + _behaviorStabilityFraction = value; + } + } + + /// + /// Minimum number of consecutive trials satisfying the behavioral stability fraction. 
+ /// + [Newtonsoft.Json.JsonPropertyAttribute("min_consecutive_stable_trials")] + [System.ComponentModel.DescriptionAttribute("Minimum number of consecutive trials satisfying the behavioral stability fraction" + + ".")] + public int MinConsecutiveStableTrials + { + get + { + return _minConsecutiveStableTrials; + } + set + { + _minConsecutiveStableTrials = value; + } + } + + public System.IObservable Generate() + { + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new BehaviorStabilityParameters(this))); + } + + public System.IObservable Generate(System.IObservable source) + { + return System.Reactive.Linq.Observable.Select(source, _ => new BehaviorStabilityParameters(this)); + } + + protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) + { + stringBuilder.Append("BehaviorEvaluationMode = " + _behaviorEvaluationMode + ", "); + stringBuilder.Append("BehaviorStabilityFraction = " + _behaviorStabilityFraction + ", "); + stringBuilder.Append("MinConsecutiveStableTrials = " + _minConsecutiveStableTrials); + return true; + } + + public override string ToString() + { + System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); + stringBuilder.Append(GetType().Name); + stringBuilder.Append(" { "); + if (PrintMembers(stringBuilder)) + { + stringBuilder.Append(" "); + } + stringBuilder.Append("}"); + return stringBuilder.ToString(); + } + } + + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] [Bonsai.CombinatorAttribute(MethodName="Generate")] @@ -2207,250 +2452,218 @@ public override string ToString() public partial class CoupledTrialGeneratorSpec : TrialGeneratorSpec { - private object _iti; - - private object _quiescentPeriod; - - private double _responseTime; - - private double _rewardConsumeTime; + private object _quiescentDurationDistribution; - private object 
_blockParameters; + private double _responseDuration; - private int _minReward; + private double _rewardConsumptionDuration; - private AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater _autoWater; + private object _interTrialIntervalDurationDistribution; - private CoupledTrialGeneratorSpecBehaviorEvaluationMode _behaviorEvaluationMode; + private object _blockLenDistribution; - private double _switchThr; + private TrialGenerationEndConditions _trialGenerationEndParameters; - private int _pointsInARow; + private int _minBlockReward; - private Warmup _warmup; + private BehaviorStabilityParameters _behaviorStabilityParameters; - private bool _noResponseTrialAddition; + private bool _extendBlockOnNoResponse; private int _kernelSize; - private RewardProbability _rewardProbabilitySpecs; + private RewardProbabilityParameters _rewardProbabilityParameters; private System.Collections.Generic.List _rewardFamily; public CoupledTrialGeneratorSpec() { - _iti = new object(); - _quiescentPeriod = new object(); - _responseTime = 1D; - _rewardConsumeTime = 3D; - _blockParameters = new object(); - _minReward = 1; - _behaviorEvaluationMode = CoupledTrialGeneratorSpecBehaviorEvaluationMode.Ignore; - _switchThr = 0.5D; - _pointsInARow = 5; - _noResponseTrialAddition = true; - _rewardProbabilitySpecs = new RewardProbability(); + _quiescentDurationDistribution = new object(); + _responseDuration = 1D; + _rewardConsumptionDuration = 3D; + _interTrialIntervalDurationDistribution = new object(); + _blockLenDistribution = new object(); + _trialGenerationEndParameters = new TrialGenerationEndConditions(); + _minBlockReward = 1; + _extendBlockOnNoResponse = true; + _kernelSize = 2; + _rewardProbabilityParameters = new RewardProbabilityParameters(); _rewardFamily = new System.Collections.Generic.List(); } protected CoupledTrialGeneratorSpec(CoupledTrialGeneratorSpec other) : base(other) { - _iti = other._iti; - _quiescentPeriod = other._quiescentPeriod; - 
_responseTime = other._responseTime; - _rewardConsumeTime = other._rewardConsumeTime; - _blockParameters = other._blockParameters; - _minReward = other._minReward; - _autoWater = other._autoWater; - _behaviorEvaluationMode = other._behaviorEvaluationMode; - _switchThr = other._switchThr; - _pointsInARow = other._pointsInARow; - _warmup = other._warmup; - _noResponseTrialAddition = other._noResponseTrialAddition; + _quiescentDurationDistribution = other._quiescentDurationDistribution; + _responseDuration = other._responseDuration; + _rewardConsumptionDuration = other._rewardConsumptionDuration; + _interTrialIntervalDurationDistribution = other._interTrialIntervalDurationDistribution; + _blockLenDistribution = other._blockLenDistribution; + _trialGenerationEndParameters = other._trialGenerationEndParameters; + _minBlockReward = other._minBlockReward; + _behaviorStabilityParameters = other._behaviorStabilityParameters; + _extendBlockOnNoResponse = other._extendBlockOnNoResponse; _kernelSize = other._kernelSize; - _rewardProbabilitySpecs = other._rewardProbabilitySpecs; + _rewardProbabilityParameters = other._rewardProbabilityParameters; _rewardFamily = other._rewardFamily; } + /// + /// Duration of the quiescence period before trial starts (in seconds). Each lick resets the timer. + /// [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("iti")] - public object Iti + [Newtonsoft.Json.JsonPropertyAttribute("quiescent_duration_distribution")] + [System.ComponentModel.DescriptionAttribute("Duration of the quiescence period before trial starts (in seconds). 
Each lick res" + + "ets the timer.")] + public object QuiescentDurationDistribution { get { - return _iti; + return _quiescentDurationDistribution; } set { - _iti = value; + _quiescentDurationDistribution = value; } } - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("quiescent_period")] - public object QuiescentPeriod + /// + /// Duration after go cue for animal response. + /// + [Newtonsoft.Json.JsonPropertyAttribute("response_duration")] + [System.ComponentModel.DescriptionAttribute("Duration after go cue for animal response.")] + public double ResponseDuration { get { - return _quiescentPeriod; + return _responseDuration; } set { - _quiescentPeriod = value; + _responseDuration = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("response_time")] - public double ResponseTime + /// + /// Duration of reward consumption before transition to ITI (in seconds). + /// + [Newtonsoft.Json.JsonPropertyAttribute("reward_consumption_duration")] + [System.ComponentModel.DescriptionAttribute("Duration of reward consumption before transition to ITI (in seconds).")] + public double RewardConsumptionDuration { get { - return _responseTime; + return _rewardConsumptionDuration; } set { - _responseTime = value; + _rewardConsumptionDuration = value; } } /// - /// Time of the no-lick period before trial end + /// Duration of the inter-trial interval (in seconds). 
/// - [Newtonsoft.Json.JsonPropertyAttribute("reward_consume_time")] - [System.ComponentModel.DescriptionAttribute("Time of the no-lick period before trial end")] - public double RewardConsumeTime - { - get - { - return _rewardConsumeTime; - } - set - { - _rewardConsumeTime = value; - } - } - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("block_parameters")] - public object BlockParameters + [Newtonsoft.Json.JsonPropertyAttribute("inter_trial_interval_duration_distribution")] + [System.ComponentModel.DescriptionAttribute("Duration of the inter-trial interval (in seconds).")] + public object InterTrialIntervalDurationDistribution { get { - return _blockParameters; + return _interTrialIntervalDurationDistribution; } set { - _blockParameters = value; + _interTrialIntervalDurationDistribution = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("min_reward")] - public int MinReward + [System.Xml.Serialization.XmlIgnoreAttribute()] + [Newtonsoft.Json.JsonPropertyAttribute("block_len_distribution")] + public object BlockLenDistribution { get { - return _minReward; + return _blockLenDistribution; } set { - _minReward = value; + _blockLenDistribution = value; } } /// - /// Parameters describing auto water. + /// Conditions to end trial generation. 
/// [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("auto_water")] - [System.ComponentModel.DescriptionAttribute("Parameters describing auto water.")] - public AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater AutoWater - { - get - { - return _autoWater; - } - set - { - _autoWater = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("behavior_evaluation_mode")] - public CoupledTrialGeneratorSpecBehaviorEvaluationMode BehaviorEvaluationMode - { - get - { - return _behaviorEvaluationMode; - } - set - { - _behaviorEvaluationMode = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("switch_thr")] - public double SwitchThr + [Newtonsoft.Json.JsonPropertyAttribute("trial_generation_end_parameters")] + [System.ComponentModel.DescriptionAttribute("Conditions to end trial generation.")] + public TrialGenerationEndConditions TrialGenerationEndParameters { get { - return _switchThr; + return _trialGenerationEndParameters; } set { - _switchThr = value; + _trialGenerationEndParameters = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("points_in_a_row")] - public int PointsInARow + [Newtonsoft.Json.JsonPropertyAttribute("min_block_reward")] + public int MinBlockReward { get { - return _pointsInARow; + return _minBlockReward; } set { - _pointsInARow = value; + _minBlockReward = value; } } /// - /// Parameters describing warmup. + /// Parameters describing behavior stability required to switch blocks. 
/// [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("warmup")] - [System.ComponentModel.DescriptionAttribute("Parameters describing warmup.")] - public Warmup Warmup + [Newtonsoft.Json.JsonPropertyAttribute("behavior_stability_parameters")] + [System.ComponentModel.DescriptionAttribute("Parameters describing behavior stability required to switch blocks.")] + public BehaviorStabilityParameters BehaviorStabilityParameters { get { - return _warmup; + return _behaviorStabilityParameters; } set { - _warmup = value; + _behaviorStabilityParameters = value; } } /// - /// Add one trial to the block length on both lickspouts. + /// Add one trial to the min block length. /// - [Newtonsoft.Json.JsonPropertyAttribute("no_response_trial_addition")] - [System.ComponentModel.DescriptionAttribute("Add one trial to the block length on both lickspouts.")] - public bool NoResponseTrialAddition + [Newtonsoft.Json.JsonPropertyAttribute("extend_block_on_no_response")] + [System.ComponentModel.DescriptionAttribute("Add one trial to the min block length.")] + public bool ExtendBlockOnNoResponse { get { - return _noResponseTrialAddition; + return _extendBlockOnNoResponse; } set { - _noResponseTrialAddition = value; + _extendBlockOnNoResponse = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("kernel_size", Required=Newtonsoft.Json.Required.Always)] + /// + /// Kernel to evaluate choice fraction. 
+ /// + [Newtonsoft.Json.JsonPropertyAttribute("kernel_size")] + [System.ComponentModel.DescriptionAttribute("Kernel to evaluate choice fraction.")] public int KernelSize { get @@ -2464,16 +2677,16 @@ public int KernelSize } [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("reward_probability_specs")] - public RewardProbability RewardProbabilitySpecs + [Newtonsoft.Json.JsonPropertyAttribute("reward_probability_parameters")] + public RewardProbabilityParameters RewardProbabilityParameters { get { - return _rewardProbabilitySpecs; + return _rewardProbabilityParameters; } set { - _rewardProbabilitySpecs = value; + _rewardProbabilityParameters = value; } } @@ -2507,20 +2720,17 @@ protected override bool PrintMembers(System.Text.StringBuilder stringBuilder) { stringBuilder.Append(", "); } - stringBuilder.Append("Iti = " + _iti + ", "); - stringBuilder.Append("QuiescentPeriod = " + _quiescentPeriod + ", "); - stringBuilder.Append("ResponseTime = " + _responseTime + ", "); - stringBuilder.Append("RewardConsumeTime = " + _rewardConsumeTime + ", "); - stringBuilder.Append("BlockParameters = " + _blockParameters + ", "); - stringBuilder.Append("MinReward = " + _minReward + ", "); - stringBuilder.Append("AutoWater = " + _autoWater + ", "); - stringBuilder.Append("BehaviorEvaluationMode = " + _behaviorEvaluationMode + ", "); - stringBuilder.Append("SwitchThr = " + _switchThr + ", "); - stringBuilder.Append("PointsInARow = " + _pointsInARow + ", "); - stringBuilder.Append("Warmup = " + _warmup + ", "); - stringBuilder.Append("NoResponseTrialAddition = " + _noResponseTrialAddition + ", "); + stringBuilder.Append("QuiescentDurationDistribution = " + _quiescentDurationDistribution + ", "); + stringBuilder.Append("ResponseDuration = " + _responseDuration + ", "); + stringBuilder.Append("RewardConsumptionDuration = " + _rewardConsumptionDuration + ", "); + stringBuilder.Append("InterTrialIntervalDurationDistribution = " + 
_interTrialIntervalDurationDistribution + ", "); + stringBuilder.Append("BlockLenDistribution = " + _blockLenDistribution + ", "); + stringBuilder.Append("TrialGenerationEndParameters = " + _trialGenerationEndParameters + ", "); + stringBuilder.Append("MinBlockReward = " + _minBlockReward + ", "); + stringBuilder.Append("BehaviorStabilityParameters = " + _behaviorStabilityParameters + ", "); + stringBuilder.Append("ExtendBlockOnNoResponse = " + _extendBlockOnNoResponse + ", "); stringBuilder.Append("KernelSize = " + _kernelSize + ", "); - stringBuilder.Append("RewardProbabilitySpecs = " + _rewardProbabilitySpecs + ", "); + stringBuilder.Append("RewardProbabilityParameters = " + _rewardProbabilityParameters + ", "); stringBuilder.Append("RewardFamily = " + _rewardFamily); return true; } @@ -4558,6 +4768,104 @@ public override string ToString() } + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] + [Bonsai.CombinatorAttribute(MethodName="Generate")] + public partial class RewardProbabilityParameters + { + + private double _baseRewardSum; + + private int _family; + + private int _pairsN; + + public RewardProbabilityParameters() + { + _baseRewardSum = 0.8D; + _family = 1; + _pairsN = 1; + } + + protected RewardProbabilityParameters(RewardProbabilityParameters other) + { + _baseRewardSum = other._baseRewardSum; + _family = other._family; + _pairsN = other._pairsN; + } + + [Newtonsoft.Json.JsonPropertyAttribute("base_reward_sum")] + public double BaseRewardSum + { + get + { + return _baseRewardSum; + } + set + { + _baseRewardSum = value; + } + } + + [Newtonsoft.Json.JsonPropertyAttribute("family")] + public int Family + { + get + { + return _family; + } + set + { + _family = value; + } + } + + [Newtonsoft.Json.JsonPropertyAttribute("pairs_n")] + public int PairsN + { + get + { + return _pairsN; + } + set + { + _pairsN = value; + } + } + + 
public System.IObservable Generate() + { + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new RewardProbabilityParameters(this))); + } + + public System.IObservable Generate(System.IObservable source) + { + return System.Reactive.Linq.Observable.Select(source, _ => new RewardProbabilityParameters(this)); + } + + protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) + { + stringBuilder.Append("BaseRewardSum = " + _baseRewardSum + ", "); + stringBuilder.Append("Family = " + _family + ", "); + stringBuilder.Append("PairsN = " + _pairsN); + return true; + } + + public override string ToString() + { + System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); + stringBuilder.Append(GetType().Name); + stringBuilder.Append(" { "); + if (PrintMembers(stringBuilder)) + { + stringBuilder.Append(" "); + } + stringBuilder.Append("}"); + return stringBuilder.ToString(); + } + } + + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] [Bonsai.CombinatorAttribute(MethodName="Generate")] @@ -6203,136 +6511,302 @@ public override string ToString() [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Newtonsoft.Json.JsonConverter(typeof(JsonInheritanceConverter), "type")] - [JsonInheritanceAttribute("CoupledTrialGenerator", typeof(CoupledTrialGeneratorSpec))] - [JsonInheritanceAttribute("IntegrationTestTrialGenerator", typeof(IntegrationTestTrialGeneratorSpec))] [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class TrialGeneratorSpec + public partial class TrialGenerationEndConditions { - public TrialGeneratorSpec() - { - } - - protected TrialGeneratorSpec(TrialGeneratorSpec other) - { - } - - public System.IObservable Generate() - { - 
return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new TrialGeneratorSpec(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new TrialGeneratorSpec(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - return false; - } + private int _ignoreWin; - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - - /// - /// Represents the outcome of a single trial. - /// - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [System.ComponentModel.DescriptionAttribute("Represents the outcome of a single trial.")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class TrialOutcome - { + private double _ignoreRatioThreshold; - private Trial _trial; + private int _maxTrial; - private bool? 
_isRightChoice; + private System.TimeSpan _maxTime; - private bool _isRewarded; + private System.TimeSpan _minTime; - public TrialOutcome() + public TrialGenerationEndConditions() { - _trial = new Trial(); + _ignoreWin = 30; + _ignoreRatioThreshold = 0.8D; + _maxTrial = 1000; } - protected TrialOutcome(TrialOutcome other) + protected TrialGenerationEndConditions(TrialGenerationEndConditions other) { - _trial = other._trial; - _isRightChoice = other._isRightChoice; - _isRewarded = other._isRewarded; + _ignoreWin = other._ignoreWin; + _ignoreRatioThreshold = other._ignoreRatioThreshold; + _maxTrial = other._maxTrial; + _maxTime = other._maxTime; + _minTime = other._minTime; } - /// - /// The trial associated with this outcome. - /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("trial", Required=Newtonsoft.Json.Required.Always)] - [System.ComponentModel.DescriptionAttribute("The trial associated with this outcome.")] - public Trial Trial + [Newtonsoft.Json.JsonPropertyAttribute("ignore_win")] + public int IgnoreWin { get { - return _trial; + return _ignoreWin; } set { - _trial = value; + _ignoreWin = value; } } - /// - /// Reports the choice made by the subject. True for right, False for left, None for no choice. - /// - [Newtonsoft.Json.JsonPropertyAttribute("is_right_choice", Required=Newtonsoft.Json.Required.AllowNull)] - [System.ComponentModel.DescriptionAttribute("Reports the choice made by the subject. True for right, False for left, None for " + - "no choice.")] - public bool? IsRightChoice + [Newtonsoft.Json.JsonPropertyAttribute("ignore_ratio_threshold")] + public double IgnoreRatioThreshold { get { - return _isRightChoice; + return _ignoreRatioThreshold; } set { - _isRightChoice = value; + _ignoreRatioThreshold = value; } } - /// - /// Indicates whether the subject received a reward on this trial. 
- /// - [Newtonsoft.Json.JsonPropertyAttribute("is_rewarded", Required=Newtonsoft.Json.Required.Always)] - [System.ComponentModel.DescriptionAttribute("Indicates whether the subject received a reward on this trial.")] - public bool IsRewarded + [Newtonsoft.Json.JsonPropertyAttribute("max_trial")] + public int MaxTrial { get { - return _isRewarded; + return _maxTrial; } set { - _isRewarded = value; + _maxTrial = value; } } - public System.IObservable Generate() + [System.Xml.Serialization.XmlIgnoreAttribute()] + [Newtonsoft.Json.JsonPropertyAttribute("max_time")] + public System.TimeSpan MaxTime { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new TrialOutcome(this))); + get + { + return _maxTime; + } + set + { + _maxTime = value; + } + } + + [Newtonsoft.Json.JsonIgnoreAttribute()] + [System.ComponentModel.BrowsableAttribute(false)] + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + [System.Xml.Serialization.XmlElementAttribute("MaxTime")] + public string MaxTimeXml + { + get + { + return System.Xml.XmlConvert.ToString(_maxTime); + } + set + { + _maxTime = System.Xml.XmlConvert.ToTimeSpan(value); + } + } + + [System.Xml.Serialization.XmlIgnoreAttribute()] + [Newtonsoft.Json.JsonPropertyAttribute("min_time")] + public System.TimeSpan MinTime + { + get + { + return _minTime; + } + set + { + _minTime = value; + } + } + + [Newtonsoft.Json.JsonIgnoreAttribute()] + [System.ComponentModel.BrowsableAttribute(false)] + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + [System.Xml.Serialization.XmlElementAttribute("MinTime")] + public string MinTimeXml + { + get + { + return System.Xml.XmlConvert.ToString(_minTime); + } + set + { + _minTime = System.Xml.XmlConvert.ToTimeSpan(value); + } + } + + public System.IObservable Generate() + { + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new 
TrialGenerationEndConditions(this))); + } + + public System.IObservable Generate(System.IObservable source) + { + return System.Reactive.Linq.Observable.Select(source, _ => new TrialGenerationEndConditions(this)); + } + + protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) + { + stringBuilder.Append("IgnoreWin = " + _ignoreWin + ", "); + stringBuilder.Append("IgnoreRatioThreshold = " + _ignoreRatioThreshold + ", "); + stringBuilder.Append("MaxTrial = " + _maxTrial + ", "); + stringBuilder.Append("MaxTime = " + _maxTime + ", "); + stringBuilder.Append("MinTime = " + _minTime); + return true; + } + + public override string ToString() + { + System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); + stringBuilder.Append(GetType().Name); + stringBuilder.Append(" { "); + if (PrintMembers(stringBuilder)) + { + stringBuilder.Append(" "); + } + stringBuilder.Append("}"); + return stringBuilder.ToString(); + } + } + + + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + [Newtonsoft.Json.JsonConverter(typeof(JsonInheritanceConverter), "type")] + [JsonInheritanceAttribute("CoupledTrialGenerator", typeof(CoupledTrialGeneratorSpec))] + [JsonInheritanceAttribute("IntegrationTestTrialGenerator", typeof(IntegrationTestTrialGeneratorSpec))] + [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] + [Bonsai.CombinatorAttribute(MethodName="Generate")] + public partial class TrialGeneratorSpec + { + + public TrialGeneratorSpec() + { + } + + protected TrialGeneratorSpec(TrialGeneratorSpec other) + { + } + + public System.IObservable Generate() + { + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new TrialGeneratorSpec(this))); + } + + public System.IObservable Generate(System.IObservable source) + { + return System.Reactive.Linq.Observable.Select(source, _ => new TrialGeneratorSpec(this)); + } + + protected virtual bool 
PrintMembers(System.Text.StringBuilder stringBuilder) + { + return false; + } + + public override string ToString() + { + System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); + stringBuilder.Append(GetType().Name); + stringBuilder.Append(" { "); + if (PrintMembers(stringBuilder)) + { + stringBuilder.Append(" "); + } + stringBuilder.Append("}"); + return stringBuilder.ToString(); + } + } + + + /// + /// Represents the outcome of a single trial. + /// + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + [System.ComponentModel.DescriptionAttribute("Represents the outcome of a single trial.")] + [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] + [Bonsai.CombinatorAttribute(MethodName="Generate")] + public partial class TrialOutcome + { + + private Trial _trial; + + private bool? _isRightChoice; + + private bool _isRewarded; + + public TrialOutcome() + { + _trial = new Trial(); + } + + protected TrialOutcome(TrialOutcome other) + { + _trial = other._trial; + _isRightChoice = other._isRightChoice; + _isRewarded = other._isRewarded; + } + + /// + /// The trial associated with this outcome. + /// + [System.Xml.Serialization.XmlIgnoreAttribute()] + [Newtonsoft.Json.JsonPropertyAttribute("trial", Required=Newtonsoft.Json.Required.Always)] + [System.ComponentModel.DescriptionAttribute("The trial associated with this outcome.")] + public Trial Trial + { + get + { + return _trial; + } + set + { + _trial = value; + } + } + + /// + /// Reports the choice made by the subject. True for right, False for left, None for no choice. + /// + [Newtonsoft.Json.JsonPropertyAttribute("is_right_choice", Required=Newtonsoft.Json.Required.AllowNull)] + [System.ComponentModel.DescriptionAttribute("Reports the choice made by the subject. True for right, False for left, None for " + + "no choice.")] + public bool? 
IsRightChoice + { + get + { + return _isRightChoice; + } + set + { + _isRightChoice = value; + } + } + + /// + /// Indicates whether the subject received a reward on this trial. + /// + [Newtonsoft.Json.JsonPropertyAttribute("is_rewarded", Required=Newtonsoft.Json.Required.Always)] + [System.ComponentModel.DescriptionAttribute("Indicates whether the subject received a reward on this trial.")] + public bool IsRewarded + { + get + { + return _isRewarded; + } + set + { + _isRewarded = value; + } + } + + public System.IObservable Generate() + { + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new TrialOutcome(this))); } public System.IObservable Generate(System.IObservable source) @@ -7492,260 +7966,6 @@ public override string ToString() } - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class AindBehaviorDynamicForagingTaskLogicAutoWater - { - - private AindBehaviorDynamicForagingTaskLogicAutoWaterAutoWaterType _autoWaterType; - - private double _multiplier; - - private int _unrewarded; - - private int _ignored; - - private bool _includeReward; - - public AindBehaviorDynamicForagingTaskLogicAutoWater() - { - _autoWaterType = AindBehaviorDynamicForagingTaskLogicAutoWaterAutoWaterType.Natural; - _multiplier = 0.8D; - _unrewarded = 200; - _ignored = 100; - _includeReward = false; - } - - protected AindBehaviorDynamicForagingTaskLogicAutoWater(AindBehaviorDynamicForagingTaskLogicAutoWater other) - { - _autoWaterType = other._autoWaterType; - _multiplier = other._multiplier; - _unrewarded = other._unrewarded; - _ignored = other._ignored; - _includeReward = other._includeReward; - } - - [Newtonsoft.Json.JsonPropertyAttribute("auto_water_type")] - public AindBehaviorDynamicForagingTaskLogicAutoWaterAutoWaterType AutoWaterType - { - 
get - { - return _autoWaterType; - } - set - { - _autoWaterType = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("multiplier")] - public double Multiplier - { - get - { - return _multiplier; - } - set - { - _multiplier = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("unrewarded")] - public int Unrewarded - { - get - { - return _unrewarded; - } - set - { - _unrewarded = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("ignored")] - public int Ignored - { - get - { - return _ignored; - } - set - { - _ignored = value; - } - } - - /// - /// Include auto water in total rewards. - /// - [Newtonsoft.Json.JsonPropertyAttribute("include_reward")] - [System.ComponentModel.DescriptionAttribute("Include auto water in total rewards.")] - public bool IncludeReward - { - get - { - return _includeReward; - } - set - { - _includeReward = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new AindBehaviorDynamicForagingTaskLogicAutoWater(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new AindBehaviorDynamicForagingTaskLogicAutoWater(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("AutoWaterType = " + _autoWaterType + ", "); - stringBuilder.Append("Multiplier = " + _multiplier + ", "); - stringBuilder.Append("Unrewarded = " + _unrewarded + ", "); - stringBuilder.Append("Ignored = " + _ignored + ", "); - stringBuilder.Append("IncludeReward = " + _includeReward); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return 
stringBuilder.ToString(); - } - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater - { - - private AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWaterAutoWaterType _autoWaterType; - - private double _multiplier; - - private int _unrewarded; - - private int _ignored; - - public AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater() - { - _autoWaterType = AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWaterAutoWaterType.Natural; - _multiplier = 0.8D; - _unrewarded = 200; - _ignored = 100; - } - - protected AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater(AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater other) - { - _autoWaterType = other._autoWaterType; - _multiplier = other._multiplier; - _unrewarded = other._unrewarded; - _ignored = other._ignored; - } - - [Newtonsoft.Json.JsonPropertyAttribute("auto_water_type")] - public AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWaterAutoWaterType AutoWaterType - { - get - { - return _autoWaterType; - } - set - { - _autoWaterType = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("multiplier")] - public double Multiplier - { - get - { - return _multiplier; - } - set - { - _multiplier = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("unrewarded")] - public int Unrewarded - { - get - { - return _unrewarded; - } - set - { - _unrewarded = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("ignored")] - public int Ignored - { - get - { - return _ignored; - } - set - { - _ignored = value; - } - } - - public System.IObservable Generate() 
- { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("AutoWaterType = " + _autoWaterType + ", "); - stringBuilder.Append("Multiplier = " + _multiplier + ", "); - stringBuilder.Append("Unrewarded = " + _unrewarded + ", "); - stringBuilder.Append("Ignored = " + _ignored); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] public enum AindDynamicForagingTaskParametersRandomness @@ -7774,17 +7994,30 @@ public enum AutoBlockAdvancedBlockAuto [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public enum CoupledTrialGeneratorSpecBehaviorEvaluationMode + public enum AutoWaterType { - [System.Runtime.Serialization.EnumMemberAttribute(Value="ignore")] - Ignore = 0, + [System.Runtime.Serialization.EnumMemberAttribute(Value="Natural")] + Natural = 0, + + [System.Runtime.Serialization.EnumMemberAttribute(Value="Both")] + Both = 1, + + 
[System.Runtime.Serialization.EnumMemberAttribute(Value="High pro")] + HighPro = 2, + } + + + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] + public enum BehaviorStabilityParametersBehaviorEvaluationMode + { [System.Runtime.Serialization.EnumMemberAttribute(Value="end")] - End = 1, + End = 0, [System.Runtime.Serialization.EnumMemberAttribute(Value="anytime")] - Anytime = 2, + Anytime = 1, } @@ -7814,38 +8047,6 @@ public enum TruncationParametersTruncationMode } - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public enum AindBehaviorDynamicForagingTaskLogicAutoWaterAutoWaterType - { - - [System.Runtime.Serialization.EnumMemberAttribute(Value="Natural")] - Natural = 0, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="Both")] - Both = 1, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="High pro")] - HighPro = 2, - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public enum AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWaterAutoWaterType - { - - [System.Runtime.Serialization.EnumMemberAttribute(Value="Natural")] - Natural = 0, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="Both")] - Both = 1, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="High pro")] - HighPro = 2, - } - - [System.CodeDom.Compiler.GeneratedCode("NJsonSchema", "10.9.0.0 (Newtonsoft.Json v13.0.0.0)")] [System.AttributeUsage(System.AttributeTargets.Class | System.AttributeTargets.Interface, AllowMultiple = true)] internal class JsonInheritanceAttribute : System.Attribute @@ -8118,6 
+8319,11 @@ public System.IObservable Process(System.IObservable source) return Process(source); } + public System.IObservable Process(System.IObservable source) + { + return Process(source); + } + public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8128,6 +8334,11 @@ public System.IObservable Process(System.IObservable source) return Process(source); } + public System.IObservable Process(System.IObservable source) + { + return Process(source); + } + public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8238,6 +8449,11 @@ public System.IObservable Process(System.IObservable return Process(source); } + public System.IObservable Process(System.IObservable source) + { + return Process(source); + } + public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8268,6 +8484,11 @@ public System.IObservable Process(System.IObservable source) return Process(source); } + public System.IObservable Process(System.IObservable source) + { + return Process(source); + } + public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8322,16 +8543,6 @@ public System.IObservable Process(System.IObservable source) { return Process(source); } - - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } } @@ -8349,8 +8560,10 @@ public System.IObservable Process(System.IObservable))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] + [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] + 
[System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] @@ -8373,12 +8586,14 @@ public System.IObservable Process(System.IObservable))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] + [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] + [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] @@ -8390,8 +8605,6 @@ public System.IObservable Process(System.IObservable))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] public 
partial class DeserializeFromJson : Bonsai.Expressions.SingleArgumentExpressionBuilder { From 18d8ff88e66126b78d178a5ca0aee294c2da964e Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 6 Feb 2026 16:13:10 -0800 Subject: [PATCH 03/21] lints and adds logs --- examples/coupled_trial_generator.py | 11 ++- .../coupled_trial_generator.py | 88 ++++++++++++++----- 2 files changed, 71 insertions(+), 28 deletions(-) diff --git a/examples/coupled_trial_generator.py b/examples/coupled_trial_generator.py index 7a782ca..e9befde 100644 --- a/examples/coupled_trial_generator.py +++ b/examples/coupled_trial_generator.py @@ -1,19 +1,22 @@ import os import random +import logging from aind_behavior_dynamic_foraging.task_logic.trial_generators.coupled_trial_generator import CoupledTrialGeneratorSpec from aind_behavior_dynamic_foraging.task_logic.trial_models import TrialOutcome, Trial + def main(): coupled_trial_generator = CoupledTrialGeneratorSpec().create_generator() - + for i in range(100): - trial_outcome = TrialOutcome(trial=Trial(), - is_right_choice=random.choice([True, False, None]), - is_rewarded=random.choice([True, False])) + trial_outcome = TrialOutcome( + trial=Trial(), is_right_choice=random.choice([True, False, None]), is_rewarded=random.choice([True, False]) + ) coupled_trial_generator.update(trial_outcome) coupled_trial_generator.next() if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) main() diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index deb04b5..f71f84f 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -1,5 +1,6 @@ import random from typing import Literal, Optional, Union +import logging import numpy as np from 
aind_behavior_services.task.distributions import ( @@ -97,7 +98,8 @@ class CoupledTrialGeneratorSpec(_BaseTrialGeneratorSpecModel): ) min_block_reward: int = Field(default=1, title="Minimal rewards in a block to switch") behavior_stability_parameters: Optional[BehaviorStabilityParameters] = Field( - default=None, description="Parameters describing behavior stability required to switch blocks." + default=BehaviorStabilityParameters(), + description="Parameters describing behavior stability required to switch blocks.", ) extend_block_on_no_response: bool = Field( default=True, @@ -126,7 +128,7 @@ def create_generator(self) -> "CoupledTrialGenerator": class CoupledTrialGenerator(ITrialGenerator): def __init__(self, spec: CoupledTrialGeneratorSpec) -> None: """""" - + self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__) self.spec = spec self.is_right_choice_history: list[bool | None] = [] self.reward_history: list[bool] = [] @@ -146,11 +148,13 @@ def next(self) -> Trial | None: Generate next trial """ + self.logger.info("Generating next trial.") # check end conditions if not self.are_end_conditions_met( self.spec.trial_generation_end_parameters, self.is_right_choice_history, self.start_time ): + self.logger.info("Trial generator end conditons met.") return # determine iti and quiescent period duration @@ -221,15 +225,18 @@ def update(self, outcome: TrialOutcome) -> None: :param outcome: trial outcome of previous trial """ - + + self.logger.info(f"Updating coupled trial generator with trial outcome of {outcome}") + self.is_right_choice_history.append(outcome.is_right_choice) self.reward_history.append(outcome.is_rewarded) self.trials_in_block += 1 if self.spec.extend_block_on_no_response and outcome.is_right_choice == None: + self.logger.info(f"Extending minimum block length due to ignored trial.") self.block.min_length += 1 - switch_block = self.switch_block( + switch_block = self.is_block_switch_allowed( trials_in_block=self.trials_in_block, 
min_block_reward=self.spec.min_block_reward, block_left_rewards=self.reward_history.count(False), @@ -241,9 +248,9 @@ def update(self, outcome: TrialOutcome) -> None: block_length=self.block.min_length, kernel_size=self.spec.kernel_size, ) - + if switch_block: - print("block switch") + self.logger.info(f"Switching block.") self.trials_in_block = 0 self.block = self.generate_next_block( reward_families=self.spec.reward_family, @@ -277,19 +284,29 @@ def is_behavior_stable( """ + self.logger.info("Evaluating block behavior.") + # do not prohibit block transition if does not rely on behavior or not enough trials to evaluate or reward probs are the same. if not beh_stability_params or left_reward_prob == right_reward_prob or len(choice_history) < kernel_size: + self.logger.debug( + "Behavior stability evaluation skipped: " + f"parameters_missing={not bool(beh_stability_params)}, " + f"rewards_equal={left_reward_prob == right_reward_prob}, " + f"trials_available={len(choice_history)} < kernel_size({kernel_size})" + ) return True # compute fraction of right choices with running average using a sliding window block_history = choice_history[-(trials_in_block + kernel_size - 1) :] block_choice_frac = self.compute_choice_fraction(kernel_size, block_history) + self.logger.debug(f"Choice fraction of block is {block_choice_frac}.") # margin based on right and left probabilities and scaled by switch threshold. 
Window for evaluating behavior delta = abs((left_reward_prob - right_reward_prob) * float(beh_stability_params.behavior_stability_fraction)) threshold = ( [0, left_reward_prob - delta] if left_reward_prob > right_reward_prob else [left_reward_prob + delta, 1] ) + self.logger.debug(f"Behavior stability threshold applied: {threshold}") # block_choice_fractions above threshold points_above_threshold = np.logical_and( @@ -297,30 +314,36 @@ def is_behavior_stable( block_choice_frac <= threshold[1], ) - if beh_stability_params.behavior_evaluation_mode == "end": - # requires consecutive trials ending on the last trial - # check if the current trial occurs at the end of a long enough consecutive run above threshold - if len(points_above_threshold) < beh_stability_params.min_consecutive_stable_trials: + # evaluate stability based on mode + min_stable = beh_stability_params.min_consecutive_stable_trials + mode = beh_stability_params.behavior_evaluation_mode + if mode == "end": + # requires consecutive trials at end of trial + self.logger.info(f"Evaluating last {min_stable} trials for end-of-block stability.") + if len(points_above_threshold) < min_stable: + self.logger.info("Not enough trials to evaluate stability at block end.") return False - return np.all(points_above_threshold[-beh_stability_params.min_consecutive_stable_trials :]) + stable = np.all(points_above_threshold[-min_stable:]) + self.logger.info(f"Behavior stable at block end: {stable}") + return stable - elif beh_stability_params.behavior_evaluation_mode == "anytime": + elif mode == "anytime": # allows consecutive trials any time in the behavior + self.logger.info(f"Evaluating block for stability anytime over {min_stable} consecutive trials.") run_len = 0 - for v in points_above_threshold: + for i, v in enumerate(points_above_threshold): if v: run_len += 1 else: - if run_len >= beh_stability_params.min_consecutive_stable_trials: - return True - else: - run_len = 0 - return run_len >= 
beh_stability_params.min_consecutive_stable_trials + run_len = 0 + if run_len >= min_stable: + self.logger.info(f"Behavior stable at trial index {i}.") + return True + self.logger.info("Behavior not stable in block anytime evaluation.") + return False else: - raise ValueError( - f"Behavior evaluation mode {beh_stability_params.behavior_evaluation_mode} not recognized." - ) + raise ValueError(f"Behavior evaluation mode {mode} not recognized.") @staticmethod def compute_choice_fraction(kernel_size: int, choice_history: list[int | None]): @@ -334,11 +357,11 @@ def compute_choice_fraction(kernel_size: int, choice_history: list[int | None]): n_windows = len(choice_history) - kernel_size + 1 choice_fraction = np.empty(n_windows, dtype=float) # create empty array to store running averages for i in range(n_windows): - window = choice_history[i : i + kernel_size].astype(float) + window = np.array(choice_history[i : i + kernel_size], dtype=float) choice_fraction[i] = np.nanmean(window) return choice_fraction - def switch_block( + def is_block_switch_allowed( self, trials_in_block: int, min_block_reward: int, @@ -364,8 +387,11 @@ def switch_block( kernel_size: kernel to evaluate choice fraction """ + self.logger.info("Evaluating block switch.") + # has planned block length been reached? block_length_ok = trials_in_block >= block_length + self.logger.debug(f"Planned block length reached: {block_length_ok}") # is behavior qualified to switch? behavior_ok = self.is_behavior_stable( @@ -376,9 +402,11 @@ def switch_block( trials_in_block, kernel_size, ) + self.logger.debug(f"Behavior meets stability criteria: {behavior_ok}") # has reward criteria been met? 
reward_ok = block_left_rewards + block_right_rewards >= min_block_reward + self.logger.debug(f"Reward criterion satisfied: {reward_ok}") # conditions to switch: # - planned block length reached @@ -407,35 +435,47 @@ def generate_next_block( :param block_len_distribution: Description """ + self.logger.info("Generating next block.") + # determine candidate reward pairs reward_pairs = reward_families[reward_family_index][:reward_pairs_n] reward_prob = np.array(reward_pairs, dtype=float) reward_prob /= reward_prob.sum(axis=1, keepdims=True) reward_prob *= float(base_reward_sum) + self.logger.info(f"Candidate reward pairs normalized and scaled: {reward_prob.tolist()}") # create pool including all reward probabiliteis and mirrored pairs reward_prob_pool = np.vstack([reward_prob, np.fliplr(reward_prob)]) if current_block: # exclude previous block if history exists + self.logger.info(f"Excluding previous block reward probability.") last_block_reward_prob = [current_block.right_reward_prob, current_block.left_reward_prob] - + # remove blocks identical to last block reward_prob_pool = reward_prob_pool[np.any(reward_prob_pool != last_block_reward_prob, axis=1)] + self.logger.debug(f"Pool after removing identical to last block: {reward_prob_pool.tolist()}") # remove blocks with same high-reward side (if last block had a clear high side) if last_block_reward_prob[0] != last_block_reward_prob[1]: high_side_last = last_block_reward_prob[0] > last_block_reward_prob[1] high_side_pool = reward_prob_pool[:, 0] > reward_prob_pool[:, 1] reward_prob_pool = reward_prob_pool[high_side_pool != high_side_last] + self.logger.debug(f"Pool after removing same high-reward side: {reward_prob_pool.tolist()}") # remove duplicates reward_prob_pool = np.unique(reward_prob_pool, axis=0) + self.logger.debug(f"Final reward probability pool after removing duplicates: {reward_prob_pool.tolist()}") # randomly pick next block reward probability right_reward_prob, left_reward_prob = 
reward_prob_pool[random.choice(range(reward_prob_pool.shape[0]))] + self.logger.info( + f"Selected next block reward probabilities: right={right_reward_prob}, left={left_reward_prob}" + ) # randomly pick block length next_block_len = round(self.evaluate_distribution(block_len_distribution)) + self.logger.info(f"Selected next block length: {next_block_len}") + return Block( right_reward_prob=right_reward_prob, left_reward_prob=left_reward_prob, From 363c20efb5febf551417c81d1dd4c59f890fc45d Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 6 Feb 2026 16:18:03 -0800 Subject: [PATCH 04/21] lints --- examples/coupled_trial_generator.py | 5 ++--- .../trial_generators/coupled_trial_generator.py | 12 ++++++------ 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/examples/coupled_trial_generator.py b/examples/coupled_trial_generator.py index e9befde..4908580 100644 --- a/examples/coupled_trial_generator.py +++ b/examples/coupled_trial_generator.py @@ -1,9 +1,8 @@ -import os -import random import logging +import random from aind_behavior_dynamic_foraging.task_logic.trial_generators.coupled_trial_generator import CoupledTrialGeneratorSpec -from aind_behavior_dynamic_foraging.task_logic.trial_models import TrialOutcome, Trial +from aind_behavior_dynamic_foraging.task_logic.trial_models import Trial, TrialOutcome def main(): diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index f71f84f..5e0d053 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -1,6 +1,7 @@ +import logging import random +from datetime import datetime, timedelta from typing import Literal, Optional, Union -import logging import numpy as np from aind_behavior_services.task.distributions import ( @@ 
-14,7 +15,6 @@ from ..trial_models import Trial, TrialOutcome from ._base import ITrialGenerator, _BaseTrialGeneratorSpecModel -from datetime import datetime, timedelta BlockBehaviorEvaluationMode = Literal[ "end", # behavior stable at end of block to allow switching @@ -232,8 +232,8 @@ def update(self, outcome: TrialOutcome) -> None: self.reward_history.append(outcome.is_rewarded) self.trials_in_block += 1 - if self.spec.extend_block_on_no_response and outcome.is_right_choice == None: - self.logger.info(f"Extending minimum block length due to ignored trial.") + if self.spec.extend_block_on_no_response and outcome.is_right_choice is None: + self.logger.info("Extending minimum block length due to ignored trial.") self.block.min_length += 1 switch_block = self.is_block_switch_allowed( @@ -250,7 +250,7 @@ def update(self, outcome: TrialOutcome) -> None: ) if switch_block: - self.logger.info(f"Switching block.") + self.logger.info("Switching block.") self.trials_in_block = 0 self.block = self.generate_next_block( reward_families=self.spec.reward_family, @@ -448,7 +448,7 @@ def generate_next_block( reward_prob_pool = np.vstack([reward_prob, np.fliplr(reward_prob)]) if current_block: # exclude previous block if history exists - self.logger.info(f"Excluding previous block reward probability.") + self.logger.info("Excluding previous block reward probability.") last_block_reward_prob = [current_block.right_reward_prob, current_block.left_reward_prob] # remove blocks identical to last block From 13d85ab4363a823e6d3cce090b5ebb4a934d2529 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 6 Feb 2026 16:22:10 -0800 Subject: [PATCH 05/21] fixes typos --- .../task_logic/trial_generators/coupled_trial_generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index 
5e0d053..c088de5 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -154,7 +154,7 @@ def next(self) -> Trial | None: if not self.are_end_conditions_met( self.spec.trial_generation_end_parameters, self.is_right_choice_history, self.start_time ): - self.logger.info("Trial generator end conditons met.") + self.logger.info("Trial generator end conditions met.") return # determine iti and quiescent period duration From 852d4a0a860ab6b1ce28352d54e2ba88d77ba5a8 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 18 Feb 2026 11:38:42 -0800 Subject: [PATCH 06/21] uses draw_sample for distributions --- bonsai/Bonsai.config | 3 +++ examples/coupled_trial_generator.py | 8 +++--- pyproject.toml | 2 +- .../coupled_trial_generator.py | 26 ++++--------------- uv.lock | 8 ++---- 5 files changed, 15 insertions(+), 32 deletions(-) diff --git a/bonsai/Bonsai.config b/bonsai/Bonsai.config index 1f46300..44af239 100644 --- a/bonsai/Bonsai.config +++ b/bonsai/Bonsai.config @@ -34,6 +34,7 @@ + @@ -107,6 +108,7 @@ + @@ -149,6 +151,7 @@ + diff --git a/examples/coupled_trial_generator.py b/examples/coupled_trial_generator.py index 4908580..1c65cc7 100644 --- a/examples/coupled_trial_generator.py +++ b/examples/coupled_trial_generator.py @@ -7,14 +7,14 @@ def main(): coupled_trial_generator = CoupledTrialGeneratorSpec().create_generator() - + trial = Trial() for i in range(100): trial_outcome = TrialOutcome( - trial=Trial(), is_right_choice=random.choice([True, False, None]), is_rewarded=random.choice([True, False]) + trial=trial, is_right_choice=random.choice([True, False, None]), is_rewarded=random.choice([True, False]) ) coupled_trial_generator.update(trial_outcome) - coupled_trial_generator.next() - + trial = coupled_trial_generator.next() + print(f"Next trial: {trial}") if __name__ == "__main__": logging.basicConfig(level=logging.INFO) 
diff --git a/pyproject.toml b/pyproject.toml index 91151bb..8ab3b21 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ version = "0.0.2rc8" readme = {file = "README.md", content-type = "text/markdown"} dependencies = [ - "aind_behavior_services<0.14", + "aind_behavior_services@git+https://github.com/AllenNeuralDynamics/Aind.Behavior.Services.git@feat-add-utilities-for-distribution-generation", "pydantic-settings", ] diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index 81e496a..6a91222 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -4,6 +4,7 @@ from typing import Literal, Optional, Union import numpy as np +from aind_behavior_services.task.distributions_utils import draw_sample from aind_behavior_services.task.distributions import ( DistributionFamily, ExponentialDistribution, @@ -158,8 +159,8 @@ def next(self) -> Trial | None: return # determine iti and quiescent period duration - iti = self.evaluate_distribution(self.spec.inter_trial_interval_duration_distribution) - quiescent = self.evaluate_distribution(self.spec.quiescent_duration_distribution) + iti = draw_sample(self.spec.inter_trial_interval_duration_distribution) + quiescent = draw_sample(self.spec.quiescent_duration_distribution) # iterate trials in block self.trials_in_block += 1 @@ -201,24 +202,6 @@ def are_end_conditions_met( return True - @staticmethod - def evaluate_distribution( - distribution: Union[UniformDistribution, ExponentialDistribution], - ) -> float: - if distribution.family == DistributionFamily.EXPONENTIAL: - return ( - np.random.exponential(1 / distribution.distribution_parameters.rate) - + distribution.truncation_parameters.min - ) - elif distribution.family == 
DistributionFamily.UNIFORM: - return random.uniform( - distribution.distribution_parameters.min, - distribution.distribution_parameters.max, - ) - - else: - raise ValueError(f"Distribution {distribution.family} not recognized.") - def update(self, outcome: TrialOutcome) -> None: """ Check if block should switch, generate next block if necessary, and generate next trial @@ -439,6 +422,7 @@ def generate_next_block( # determine candidate reward pairs reward_pairs = reward_families[reward_family_index][:reward_pairs_n] + print("reward pairs", reward_pairs) reward_prob = np.array(reward_pairs, dtype=float) reward_prob /= reward_prob.sum(axis=1, keepdims=True) reward_prob *= float(base_reward_sum) @@ -473,7 +457,7 @@ def generate_next_block( ) # randomly pick block length - next_block_len = round(self.evaluate_distribution(block_len_distribution)) + next_block_len = round(draw_sample(block_len_distribution)) self.logger.info(f"Selected next block length: {next_block_len}") return Block( diff --git a/uv.lock b/uv.lock index cb46c7b..b1c0dc4 100644 --- a/uv.lock +++ b/uv.lock @@ -71,7 +71,7 @@ docs = [ [package.metadata] requires-dist = [ - { name = "aind-behavior-services", specifier = "<0.14" }, + { name = "aind-behavior-services", git = "https://github.com/AllenNeuralDynamics/Aind.Behavior.Services.git?rev=feat-add-utilities-for-distribution-generation" }, { name = "contraqctor", marker = "extra == 'data'", specifier = ">=0.5.3" }, { name = "pydantic-settings" }, ] @@ -95,7 +95,7 @@ docs = [ [[package]] name = "aind-behavior-services" version = "0.13.0" -source = { registry = "https://pypi.org/simple" } +source = { git = "https://github.com/AllenNeuralDynamics/Aind.Behavior.Services.git?rev=feat-add-utilities-for-distribution-generation#ee375c265eaeab79ced09717af9963586b50bd58" } dependencies = [ { name = "aind-behavior-curriculum" }, { name = "gitpython" }, @@ -103,10 +103,6 @@ dependencies = [ { name = "pydantic" }, { name = "semver" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/e8/52/169fe25fb69cb249ac86408cad77bcce0950e60cc6491e0117c26f206e91/aind_behavior_services-0.13.0.tar.gz", hash = "sha256:21874c1a4555ab2a3c2afef895f279dda48ea0cc3dbc6a7a2ee6fbd7139dc544", size = 25839, upload-time = "2026-01-29T17:13:41.928Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/9b/35/6df8230287d3747ff04192b16bd61aec4bbf4867271c619960f5f66ea222/aind_behavior_services-0.13.0-py3-none-any.whl", hash = "sha256:9b0fe77c0a4ecddf9fcf1b1d8e791de2e16ad36425d053fc587f195f88bb7286", size = 35478, upload-time = "2026-01-29T17:13:40.979Z" }, -] [[package]] name = "alabaster" From 6b22100f2671d47b3e06cce7be90f176b36914c5 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 18 Feb 2026 11:43:30 -0800 Subject: [PATCH 07/21] lints --- examples/coupled_trial_generator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/coupled_trial_generator.py b/examples/coupled_trial_generator.py index 1c65cc7..2cc96cf 100644 --- a/examples/coupled_trial_generator.py +++ b/examples/coupled_trial_generator.py @@ -16,6 +16,7 @@ def main(): trial = coupled_trial_generator.next() print(f"Next trial: {trial}") + if __name__ == "__main__": logging.basicConfig(level=logging.INFO) main() From 61a3dcadd5d6acbe88889eb7accb8d13bfac46ea Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 18 Feb 2026 11:45:28 -0800 Subject: [PATCH 08/21] lints --- .../task_logic/trial_generators/coupled_trial_generator.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index 6a91222..303efb6 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -4,14 +4,13 @@ from typing import Literal, Optional, 
Union import numpy as np -from aind_behavior_services.task.distributions_utils import draw_sample from aind_behavior_services.task.distributions import ( - DistributionFamily, ExponentialDistribution, ExponentialDistributionParameters, TruncationParameters, UniformDistribution, ) +from aind_behavior_services.task.distributions_utils import draw_sample from pydantic import BaseModel, Field from ..trial_models import Trial, TrialOutcome From fd4d57388a3e2d43d0f6c327dd38126f11a304c5 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 18 Feb 2026 11:49:06 -0800 Subject: [PATCH 09/21] regenerates schema --- schema/aind_behavior_dynamic_foraging.json | 41 +- .../AindBehaviorDynamicForaging.Generated.cs | 439 +++++++----------- 2 files changed, 213 insertions(+), 267 deletions(-) diff --git a/schema/aind_behavior_dynamic_foraging.json b/schema/aind_behavior_dynamic_foraging.json index 68b5313..7d2a8c6 100644 --- a/schema/aind_behavior_dynamic_foraging.json +++ b/schema/aind_behavior_dynamic_foraging.json @@ -1013,7 +1013,11 @@ "type": "integer" }, "behavior_stability_parameters": { - "default": null, + "default": { + "behavior_evaluation_mode": "end", + "behavior_stability_fraction": 0.5, + "min_consecutive_stable_trials": 5 + }, "description": "Parameters describing behavior stability required to switch blocks.", "oneOf": [ { @@ -2483,6 +2487,41 @@ "title": "Trial", "type": "object" }, + "TrialGenerationEndConditions": { + "properties": { + "ignore_win": { + "default": 30, + "title": "Window of trials to check ignored responses", + "type": "integer" + }, + "ignore_ratio_threshold": { + "default": 0.8, + "maximum": 1, + "minimum": 0, + "title": "Threshold for acceptable ignored trials within window.", + "type": "number" + }, + "max_trial": { + "default": 1000, + "title": "Maximal number of trials", + "type": "integer" + }, + "max_time": { + "default": "PT1H15M", + "format": "duration", + "title": "Maximal session time (min)", + "type": "string" + }, + "min_time": { + 
"default": "PT30M", + "format": "duration", + "title": "Minimum session time (min)", + "type": "string" + } + }, + "title": "TrialGenerationEndConditions", + "type": "object" + }, "TrialGeneratorCompositeSpec_TrialGeneratorSpec_": { "properties": { "type": { diff --git a/src/Extensions/AindBehaviorDynamicForaging.Generated.cs b/src/Extensions/AindBehaviorDynamicForaging.Generated.cs index f1f853a..67ef050 100644 --- a/src/Extensions/AindBehaviorDynamicForaging.Generated.cs +++ b/src/Extensions/AindBehaviorDynamicForaging.Generated.cs @@ -2485,6 +2485,7 @@ public CoupledTrialGeneratorSpec() _blockLenDistribution = new object(); _trialGenerationEndParameters = new TrialGenerationEndConditions(); _minBlockReward = 1; + _behaviorStabilityParameters = new BehaviorStabilityParameters(); _extendBlockOnNoResponse = true; _kernelSize = 2; _rewardProbabilityParameters = new RewardProbabilityParameters(); @@ -6510,6 +6511,172 @@ public override string ToString() } + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] + [Bonsai.CombinatorAttribute(MethodName="Generate")] + public partial class TrialGenerationEndConditions + { + + private int _ignoreWin; + + private double _ignoreRatioThreshold; + + private int _maxTrial; + + private System.TimeSpan _maxTime; + + private System.TimeSpan _minTime; + + public TrialGenerationEndConditions() + { + _ignoreWin = 30; + _ignoreRatioThreshold = 0.8D; + _maxTrial = 1000; + } + + protected TrialGenerationEndConditions(TrialGenerationEndConditions other) + { + _ignoreWin = other._ignoreWin; + _ignoreRatioThreshold = other._ignoreRatioThreshold; + _maxTrial = other._maxTrial; + _maxTime = other._maxTime; + _minTime = other._minTime; + } + + [Newtonsoft.Json.JsonPropertyAttribute("ignore_win")] + public int IgnoreWin + { + get + { + return _ignoreWin; + } + set + { + _ignoreWin = value; + } + } + + 
[Newtonsoft.Json.JsonPropertyAttribute("ignore_ratio_threshold")] + public double IgnoreRatioThreshold + { + get + { + return _ignoreRatioThreshold; + } + set + { + _ignoreRatioThreshold = value; + } + } + + [Newtonsoft.Json.JsonPropertyAttribute("max_trial")] + public int MaxTrial + { + get + { + return _maxTrial; + } + set + { + _maxTrial = value; + } + } + + [System.Xml.Serialization.XmlIgnoreAttribute()] + [Newtonsoft.Json.JsonPropertyAttribute("max_time")] + public System.TimeSpan MaxTime + { + get + { + return _maxTime; + } + set + { + _maxTime = value; + } + } + + [Newtonsoft.Json.JsonIgnoreAttribute()] + [System.ComponentModel.BrowsableAttribute(false)] + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + [System.Xml.Serialization.XmlElementAttribute("MaxTime")] + public string MaxTimeXml + { + get + { + return System.Xml.XmlConvert.ToString(_maxTime); + } + set + { + _maxTime = System.Xml.XmlConvert.ToTimeSpan(value); + } + } + + [System.Xml.Serialization.XmlIgnoreAttribute()] + [Newtonsoft.Json.JsonPropertyAttribute("min_time")] + public System.TimeSpan MinTime + { + get + { + return _minTime; + } + set + { + _minTime = value; + } + } + + [Newtonsoft.Json.JsonIgnoreAttribute()] + [System.ComponentModel.BrowsableAttribute(false)] + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + [System.Xml.Serialization.XmlElementAttribute("MinTime")] + public string MinTimeXml + { + get + { + return System.Xml.XmlConvert.ToString(_minTime); + } + set + { + _minTime = System.Xml.XmlConvert.ToTimeSpan(value); + } + } + + public System.IObservable Generate() + { + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new TrialGenerationEndConditions(this))); + } + + public System.IObservable Generate(System.IObservable source) + { + return System.Reactive.Linq.Observable.Select(source, _ => new TrialGenerationEndConditions(this)); + } + 
+ protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) + { + stringBuilder.Append("IgnoreWin = " + _ignoreWin + ", "); + stringBuilder.Append("IgnoreRatioThreshold = " + _ignoreRatioThreshold + ", "); + stringBuilder.Append("MaxTrial = " + _maxTrial + ", "); + stringBuilder.Append("MaxTime = " + _maxTime + ", "); + stringBuilder.Append("MinTime = " + _minTime); + return true; + } + + public override string ToString() + { + System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); + stringBuilder.Append(GetType().Name); + stringBuilder.Append(" { "); + if (PrintMembers(stringBuilder)) + { + stringBuilder.Append(" "); + } + stringBuilder.Append("}"); + return stringBuilder.ToString(); + } + } + + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] [Bonsai.CombinatorAttribute(MethodName="Generate")] @@ -7861,260 +8028,6 @@ public override string ToString() } - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class AindBehaviorDynamicForagingTaskLogicAutoWater - { - - private AindBehaviorDynamicForagingTaskLogicAutoWaterAutoWaterType _autoWaterType; - - private double _multiplier; - - private int _unrewarded; - - private int _ignored; - - private bool _includeReward; - - public AindBehaviorDynamicForagingTaskLogicAutoWater() - { - _autoWaterType = AindBehaviorDynamicForagingTaskLogicAutoWaterAutoWaterType.Natural; - _multiplier = 0.8D; - _unrewarded = 200; - _ignored = 100; - _includeReward = false; - } - - protected AindBehaviorDynamicForagingTaskLogicAutoWater(AindBehaviorDynamicForagingTaskLogicAutoWater other) - { - _autoWaterType = other._autoWaterType; - _multiplier = other._multiplier; - _unrewarded = 
other._unrewarded; - _ignored = other._ignored; - _includeReward = other._includeReward; - } - - [Newtonsoft.Json.JsonPropertyAttribute("auto_water_type")] - public AindBehaviorDynamicForagingTaskLogicAutoWaterAutoWaterType AutoWaterType - { - get - { - return _autoWaterType; - } - set - { - _autoWaterType = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("multiplier")] - public double Multiplier - { - get - { - return _multiplier; - } - set - { - _multiplier = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("unrewarded")] - public int Unrewarded - { - get - { - return _unrewarded; - } - set - { - _unrewarded = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("ignored")] - public int Ignored - { - get - { - return _ignored; - } - set - { - _ignored = value; - } - } - - /// - /// Include auto water in total rewards. - /// - [Newtonsoft.Json.JsonPropertyAttribute("include_reward")] - [System.ComponentModel.DescriptionAttribute("Include auto water in total rewards.")] - public bool IncludeReward - { - get - { - return _includeReward; - } - set - { - _includeReward = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new AindBehaviorDynamicForagingTaskLogicAutoWater(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new AindBehaviorDynamicForagingTaskLogicAutoWater(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("AutoWaterType = " + _autoWaterType + ", "); - stringBuilder.Append("Multiplier = " + _multiplier + ", "); - stringBuilder.Append("Unrewarded = " + _unrewarded + ", "); - stringBuilder.Append("Ignored = " + _ignored + ", "); - stringBuilder.Append("IncludeReward = " + _includeReward); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder 
stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater - { - - private AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWaterAutoWaterType _autoWaterType; - - private double _multiplier; - - private int _unrewarded; - - private int _ignored; - - public AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater() - { - _autoWaterType = AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWaterAutoWaterType.Natural; - _multiplier = 0.8D; - _unrewarded = 200; - _ignored = 100; - } - - protected AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater(AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater other) - { - _autoWaterType = other._autoWaterType; - _multiplier = other._multiplier; - _unrewarded = other._unrewarded; - _ignored = other._ignored; - } - - [Newtonsoft.Json.JsonPropertyAttribute("auto_water_type")] - public AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWaterAutoWaterType AutoWaterType - { - get - { - return _autoWaterType; - } - set - { - _autoWaterType = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("multiplier")] - public double Multiplier - { - get - { - return _multiplier; - } - set - { - _multiplier = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("unrewarded")] - public int Unrewarded - { - get - { - return _unrewarded; - } - 
set - { - _unrewarded = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("ignored")] - public int Ignored - { - get - { - return _ignored; - } - set - { - _ignored = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new AindBehaviorDynamicForagingTaskLogicTrialGeneratorsCoupledTrialGeneratorAutoWater(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("AutoWaterType = " + _autoWaterType + ", "); - stringBuilder.Append("Multiplier = " + _multiplier + ", "); - stringBuilder.Append("Unrewarded = " + _unrewarded + ", "); - stringBuilder.Append("Ignored = " + _ignored); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] [Newtonsoft.Json.JsonConverter(typeof(JsonInheritanceConverter), "type")] [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] @@ -8718,6 +8631,11 @@ public System.IObservable Process(System.IObservable source) return Process(source); } + public System.IObservable Process(System.IObservable source) + { + return Process(source); + } + public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8778,16 +8696,6 @@ public System.IObservable Process(System.IObservable source) return Process(source); } - 
public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8842,6 +8750,7 @@ public System.IObservable Process(System.IObservable source) [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] + [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] @@ -8854,8 +8763,6 @@ public System.IObservable Process(System.IObservable source) [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] public partial class DeserializeFromJson : Bonsai.Expressions.SingleArgumentExpressionBuilder { From 7c258f41f5e7c29a70028b7ac231396169f5e0dd Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 25 Feb 2026 10:59:26 -0800 Subject: [PATCH 10/21] adds unit tests for couple task --- pyproject.toml | 2 +- .../coupled_trial_generator.py | 4 - .../test_couple_trial_generator.py | 454 ++++++++++++++++++ uv.lock | 
10 +- 4 files changed, 462 insertions(+), 8 deletions(-) create mode 100644 tests/trial_generators/test_couple_trial_generator.py diff --git a/pyproject.toml b/pyproject.toml index 8ab3b21..25fb6bc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ version = "0.0.2rc8" readme = {file = "README.md", content-type = "text/markdown"} dependencies = [ - "aind_behavior_services@git+https://github.com/AllenNeuralDynamics/Aind.Behavior.Services.git@feat-add-utilities-for-distribution-generation", + "aind_behavior_services>=0.13.1", "pydantic-settings", ] diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index 303efb6..8300424 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -161,9 +161,6 @@ def next(self) -> Trial | None: iti = draw_sample(self.spec.inter_trial_interval_duration_distribution) quiescent = draw_sample(self.spec.quiescent_duration_distribution) - # iterate trials in block - self.trials_in_block += 1 - return Trial( p_reward_left=self.block.left_reward_prob, p_reward_right=self.block.right_reward_prob, @@ -421,7 +418,6 @@ def generate_next_block( # determine candidate reward pairs reward_pairs = reward_families[reward_family_index][:reward_pairs_n] - print("reward pairs", reward_pairs) reward_prob = np.array(reward_pairs, dtype=float) reward_prob /= reward_prob.sum(axis=1, keepdims=True) reward_prob *= float(base_reward_sum) diff --git a/tests/trial_generators/test_couple_trial_generator.py b/tests/trial_generators/test_couple_trial_generator.py new file mode 100644 index 0000000..714b8d2 --- /dev/null +++ b/tests/trial_generators/test_couple_trial_generator.py @@ -0,0 +1,454 @@ +import logging +import unittest + +from 
aind_behavior_dynamic_foraging.task_logic.trial_generators import CoupledTrialGeneratorSpec +from aind_behavior_dynamic_foraging.task_logic.trial_generators.coupled_trial_generator import ( + RewardProbabilityParameters, +) +from aind_behavior_dynamic_foraging.task_logic.trial_models import Trial, TrialOutcome + +logging.basicConfig(level=logging.DEBUG) + + +class TestCoupledTrialGenerator(unittest.TestCase): + ##### Tests is_behavior_stable ##### + + def test_behavior_stable_end(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + beh_params = generator.spec.behavior_stability_parameters + right_prob = generator.block.right_reward_prob + left_prob = generator.block.left_reward_prob + kernel_size = generator.spec.kernel_size + min_stable = beh_params.min_consecutive_stable_trials + + high_reward_is_right = right_prob > left_prob + + beh_params.behavior_evaluation_mode = "end" + + # stable at end: wrong side early, correct side at end + choices = [not high_reward_is_right] * 10 + [high_reward_is_right] * (min_stable + kernel_size - 1) + self.assertTrue( + generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + def test_behavior_not_stable_end(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + beh_params = generator.spec.behavior_stability_parameters + right_prob = generator.block.right_reward_prob + left_prob = generator.block.left_reward_prob + kernel_size = generator.spec.kernel_size + min_stable = beh_params.min_consecutive_stable_trials + + high_reward_is_right = right_prob > left_prob + + beh_params.behavior_evaluation_mode = "end" + + # unstable at end: correct side early, wrong side at end + choices = [high_reward_is_right] * 10 + [not high_reward_is_right] * (min_stable + kernel_size - 1) + self.assertFalse( + generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + def 
test_behavior_stable_anytime(self):
+        spec = CoupledTrialGeneratorSpec()
+        generator = spec.create_generator()
+
+        beh_params = generator.spec.behavior_stability_parameters
+        right_prob = generator.block.right_reward_prob
+        left_prob = generator.block.left_reward_prob
+        kernel_size = generator.spec.kernel_size
+        min_stable = beh_params.min_consecutive_stable_trials
+
+        high_reward_is_right = right_prob > left_prob
+
+        beh_params.behavior_evaluation_mode = "anytime"
+
+        # stable run early, then drifts off — should still pass
+        choices = [high_reward_is_right] * (min_stable + kernel_size - 1) + [not high_reward_is_right] * 10
+        self.assertTrue(
+            generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size)
+        )
+
+        # stable at end: wrong side early, correct side at end
+        choices = [not high_reward_is_right] * 10 + [high_reward_is_right] * (min_stable + kernel_size - 1)
+        self.assertTrue(
+            generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size)
+        )
+
+    def test_alternating_choices_behavior_not_stable(self):
+        spec = CoupledTrialGeneratorSpec()
+        generator = spec.create_generator()
+
+        # IMPORTANT: Need to force right probability lower than left since
+        # the stability threshold lower bound is anchored to left_prob + delta.
+        # With very asymmetric probabilities (e.g. left=0.08) the threshold can be
+        # permissive enough that an alternating animal (choice fraction ~0.5) is considered
+        # stable. This is inherited behavior from the original implementation https://github.com/AllenNeuralDynamics/dynamic-foraging-task/blob/653293091179fa284c22c6dccff4f0bd49848b1e/src/foraging_gui/MyFunctions.py#L639
+        # is this right? 
+        beh_params = generator.spec.behavior_stability_parameters
+        left_prob = 0.7111111111111111
+        right_prob = 0.08888888888888889
+        kernel_size = generator.spec.kernel_size
+
+        # never stable: alternating throughout
+        choices = [True, False] * 15
+
+        beh_params.behavior_evaluation_mode = "anytime"
+        self.assertFalse(
+            generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size)
+        )
+
+        beh_params.behavior_evaluation_mode = "end"
+        self.assertFalse(
+            generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size)
+        )
+
+    def test_alternating_choices_behavior_stable(self):
+        spec = CoupledTrialGeneratorSpec()
+        generator = spec.create_generator()
+
+        # IMPORTANT: Force right probability higher than left since
+        # the stability threshold lower bound is anchored to left_prob + delta.
+        # With very asymmetric probabilities (e.g. left=0.08) the threshold can be
+        # permissive enough that an alternating animal (choice fraction ~0.5) is considered
+        # stable. This is inherited behavior from the original implementation https://github.com/AllenNeuralDynamics/dynamic-foraging-task/blob/653293091179fa284c22c6dccff4f0bd49848b1e/src/foraging_gui/MyFunctions.py#L639
+        # is this right? 
+        beh_params = generator.spec.behavior_stability_parameters
+        right_prob = 0.7111111111111111
+        left_prob = 0.08888888888888889
+        kernel_size = generator.spec.kernel_size
+
+        # judged stable despite alternating: permissive threshold with high right_prob
+        choices = [True, False] * 15
+
+        beh_params.behavior_evaluation_mode = "anytime"
+        self.assertTrue(
+            generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size)
+        )
+
+        beh_params.behavior_evaluation_mode = "end"
+        self.assertTrue(
+            generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size)
+        )
+
+    def test_behavior_stable_equal_reward_prob(self):
+        spec = CoupledTrialGeneratorSpec()
+        generator = spec.create_generator()
+
+        beh_params = generator.spec.behavior_stability_parameters
+        right_prob = 0.5
+        left_prob = 0.5
+        kernel_size = generator.spec.kernel_size
+
+        choices = [True] * 15
+        self.assertTrue(
+            generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size)
+        )
+
+    def test_behavior_stable_choice_len_less_than_kernel(self):
+        spec = CoupledTrialGeneratorSpec()
+        generator = spec.create_generator()
+
+        beh_params = generator.spec.behavior_stability_parameters
+        right_prob = generator.block.right_reward_prob
+        left_prob = generator.block.left_reward_prob
+        kernel_size = generator.spec.kernel_size
+
+        choices = [True]
+        self.assertTrue(
+            generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size)
+        )
+
+    def test_behavior_stable_no_beh_stability_params(self):
+        spec = CoupledTrialGeneratorSpec(behavior_stability_parameters=None)
+        generator = spec.create_generator()
+
+        beh_params = generator.spec.behavior_stability_parameters
+        right_prob = generator.block.right_reward_prob
+        left_prob = generator.block.left_reward_prob
+        kernel_size = generator.spec.kernel_size
+
+        choices = [True]
+        self.assertTrue(
+            generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, 
len(choices), kernel_size) + ) + + #### Test is_block_switch_allowed #### + + def test_block_switch_all_conditions_met_switches(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + generator.block.right_reward_prob = 0.8 + generator.block.left_reward_prob = 0.2 + generator.block.min_length = 20 + generator.trials_in_block = 20 + + result = generator.is_block_switch_allowed( + trials_in_block=generator.trials_in_block, + min_block_reward=1, + block_left_rewards=0, + block_right_rewards=5, + choice_history=[True] * 20, + right_reward_prob=generator.block.right_reward_prob, + left_reward_prob=generator.block.left_reward_prob, + beh_stability_params=generator.spec.behavior_stability_parameters, + block_length=generator.block.min_length, + kernel_size=generator.spec.kernel_size, + ) + self.assertTrue(result) + + def test_block_switch_block_length_not_reached(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + generator.block.right_reward_prob = 0.8 + generator.block.left_reward_prob = 0.2 + generator.block.min_length = 20 + + result = generator.is_block_switch_allowed( + trials_in_block=10, # below min_length + min_block_reward=1, + block_left_rewards=0, + block_right_rewards=5, + choice_history=[True] * 10, + right_reward_prob=generator.block.right_reward_prob, + left_reward_prob=generator.block.left_reward_prob, + beh_stability_params=generator.spec.behavior_stability_parameters, + block_length=generator.block.min_length, + kernel_size=generator.spec.kernel_size, + ) + self.assertFalse(result) + + def test_block_switch_reward_not_met(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + generator.block.right_reward_prob = 0.8 + generator.block.left_reward_prob = 0.2 + generator.block.min_length = 20 + + result = generator.is_block_switch_allowed( + trials_in_block=20, + min_block_reward=5, + block_left_rewards=0, + block_right_rewards=0, # no rewards + 
choice_history=[True] * 20, + right_reward_prob=generator.block.right_reward_prob, + left_reward_prob=generator.block.left_reward_prob, + beh_stability_params=generator.spec.behavior_stability_parameters, + block_length=generator.block.min_length, + kernel_size=generator.spec.kernel_size, + ) + self.assertFalse(result) + + def test_block_switch_behavior_not_stable(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + generator.block.right_reward_prob = 0.8 + generator.block.left_reward_prob = 0.2 + generator.block.min_length = 20 + + result = generator.is_block_switch_allowed( + trials_in_block=20, + min_block_reward=1, + block_left_rewards=5, + block_right_rewards=0, + choice_history=[False] * 20, # always choosing low-reward side + right_reward_prob=generator.block.right_reward_prob, + left_reward_prob=generator.block.left_reward_prob, + beh_stability_params=generator.spec.behavior_stability_parameters, + block_length=generator.block.min_length, + kernel_size=generator.spec.kernel_size, + ) + self.assertFalse(result) + + #### Test generate_next_block #### + + def test_next_block_differs_from_current(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + current = generator.block + next_block = generator.generate_next_block( + reward_families=spec.reward_family, + reward_family_index=spec.reward_probability_parameters.family, + reward_pairs_n=spec.reward_probability_parameters.pairs_n, + base_reward_sum=spec.reward_probability_parameters.base_reward_sum, + block_len_distribution=spec.block_len_distribution, + current_block=current, + ) + self.assertNotEqual( + (next_block.right_reward_prob, next_block.left_reward_prob), + (current.right_reward_prob, current.left_reward_prob), + ) + + def test_next_block_switches_high_reward_side(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + current = generator.block + next_block = generator.generate_next_block( + 
reward_families=spec.reward_family, + reward_family_index=spec.reward_probability_parameters.family, + reward_pairs_n=spec.reward_probability_parameters.pairs_n, + base_reward_sum=spec.reward_probability_parameters.base_reward_sum, + block_len_distribution=spec.block_len_distribution, + current_block=current, + ) + current_high_is_right = current.right_reward_prob > current.left_reward_prob + next_high_is_right = next_block.right_reward_prob > next_block.left_reward_prob + self.assertNotEqual(current_high_is_right, next_high_is_right) + + def test_next_block_switches_high_reward_side_multiple_pairs(self): + spec = CoupledTrialGeneratorSpec( + reward_probability_parameters=RewardProbabilityParameters( + family=0, # [[8,1],[6,1],[3,1],[1,1]] - 4 pairs + pairs_n=3, + ) + ) + generator = spec.create_generator() + + current = generator.block + next_block = generator.generate_next_block( + reward_families=spec.reward_family, + reward_family_index=spec.reward_probability_parameters.family, + reward_pairs_n=spec.reward_probability_parameters.pairs_n, + base_reward_sum=spec.reward_probability_parameters.base_reward_sum, + block_len_distribution=spec.block_len_distribution, + current_block=current, + ) + + current_high_is_right = current.right_reward_prob > current.left_reward_prob + next_high_is_right = next_block.right_reward_prob > next_block.left_reward_prob + self.assertNotEqual(current_high_is_right, next_high_is_right) + + def test_next_block_never_repeats_current_multiple_pairs(self): + spec = CoupledTrialGeneratorSpec( + reward_probability_parameters=RewardProbabilityParameters( + family=0, + pairs_n=3, + ) + ) + generator = spec.create_generator() + + current = generator.block + for _ in range(50): + next_block = generator.generate_next_block( + reward_families=spec.reward_family, + reward_family_index=spec.reward_probability_parameters.family, + reward_pairs_n=spec.reward_probability_parameters.pairs_n, + 
base_reward_sum=spec.reward_probability_parameters.base_reward_sum, + block_len_distribution=spec.block_len_distribution, + current_block=current, + ) + self.assertNotEqual( + (next_block.right_reward_prob, next_block.left_reward_prob), + (current.right_reward_prob, current.left_reward_prob), + ) + self.assertNotEqual( + next_block.right_reward_prob > next_block.left_reward_prob, + current.right_reward_prob > current.left_reward_prob, + ) + current = next_block + + #### Test update #### + + def _make_outcome(self, is_right_choice, is_rewarded): + return TrialOutcome(trial=Trial(), is_right_choice=is_right_choice, is_rewarded=is_rewarded) + + def test_update_appends_to_history(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + generator.update(self._make_outcome(True, True)) + self.assertEqual(len(generator.is_right_choice_history), 1) + self.assertEqual(len(generator.reward_history), 1) + + def test_update_ignored_trial_extends_block_length(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + original_length = generator.block.min_length + generator.update(self._make_outcome(None, False)) + self.assertEqual(generator.block.min_length, original_length + 1) + + def test_update_non_ignored_trial_does_not_extend_block(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + original_length = generator.block.min_length + generator.update(self._make_outcome(True, True)) + self.assertEqual(generator.block.min_length, original_length) + + def test_update_block_switches_after_conditions_met(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + generator.block.right_reward_prob = 0.8 + generator.block.left_reward_prob = 0.2 + generator.block.min_length = 5 + generator.trials_in_block = 0 + + initial_block = generator.block + + min_stable = generator.spec.behavior_stability_parameters.min_consecutive_stable_trials + kernel_size = 
generator.spec.kernel_size + for _ in range(min_stable + kernel_size - 1): + generator.update(self._make_outcome(True, True)) + + self.assertIsNot(generator.block, initial_block) + + def test_update_block_does_not_switch_before_min_length(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + generator.block.right_reward_prob = 0.8 + generator.block.left_reward_prob = 0.2 + generator.block.min_length = 100 + generator.trials_in_block = 0 + + initial_block = generator.block + + for _ in range(5): + generator.update(self._make_outcome(True, True)) + + self.assertIs(generator.block, initial_block) + + #### Test next #### + + def test_next_returns_trial(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + trial = generator.next() + self.assertIsInstance(trial, Trial) + + def test_next_returns_correct_reward_probs(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + trial = generator.next() + self.assertEqual(trial.p_reward_left, generator.block.left_reward_prob) + self.assertEqual(trial.p_reward_right, generator.block.right_reward_prob) + + def test_next_returns_none_after_max_trials(self): + spec = CoupledTrialGeneratorSpec() + generator = spec.create_generator() + + # exhaust the trial limit + generator.is_right_choice_history = [True] * (spec.trial_generation_end_parameters.max_trial + 1) + # bypass min_time + generator.start_time = generator.start_time - spec.trial_generation_end_parameters.min_time + + trial = generator.next() + self.assertIsNone(trial) + + +if __name__ == "__main__": + unittest.main() diff --git a/uv.lock b/uv.lock index b1c0dc4..9cef4e0 100644 --- a/uv.lock +++ b/uv.lock @@ -71,7 +71,7 @@ docs = [ [package.metadata] requires-dist = [ - { name = "aind-behavior-services", git = "https://github.com/AllenNeuralDynamics/Aind.Behavior.Services.git?rev=feat-add-utilities-for-distribution-generation" }, + { name = "aind-behavior-services", specifier = 
">=0.13.1" }, { name = "contraqctor", marker = "extra == 'data'", specifier = ">=0.5.3" }, { name = "pydantic-settings" }, ] @@ -94,8 +94,8 @@ docs = [ [[package]] name = "aind-behavior-services" -version = "0.13.0" -source = { git = "https://github.com/AllenNeuralDynamics/Aind.Behavior.Services.git?rev=feat-add-utilities-for-distribution-generation#ee375c265eaeab79ced09717af9963586b50bd58" } +version = "0.13.1" +source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aind-behavior-curriculum" }, { name = "gitpython" }, @@ -103,6 +103,10 @@ dependencies = [ { name = "pydantic" }, { name = "semver" }, ] +sdist = { url = "https://files.pythonhosted.org/packages/40/0c/e72979c641d8d32fd4e00591c16f20a852fde10014385cf9f338ac57cb7b/aind_behavior_services-0.13.1.tar.gz", hash = "sha256:8c5db2a1694b17ba2ff8552d1331ee77114753de540b7c42edfe24930183c04a", size = 27466, upload-time = "2026-02-23T02:54:51.953Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/93/d53dc7c9f603ec87b7d5aa521f3ff09ff2376385f8a82ae9a62db6719373/aind_behavior_services-0.13.1-py3-none-any.whl", hash = "sha256:01a77ab3cc16849d3eaeaf3c541f5126d19d2a8df7fdec614dc4c895e21379f5", size = 37453, upload-time = "2026-02-23T02:54:50.823Z" }, +] [[package]] name = "alabaster" From 196abcaa6130e2b5362eac28cfc99be1c7322e7f Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 25 Feb 2026 13:46:47 -0800 Subject: [PATCH 11/21] removes unneeded fields from task logic --- examples/task_logic.py | 19 +- schema/aind_behavior_dynamic_foraging.json | 354 +--- .../AindBehaviorDynamicForaging.Generated.cs | 1788 +++-------------- .../task_logic/__init__.py | 91 - src/test_python.bonsai | 37 +- 5 files changed, 285 insertions(+), 2004 deletions(-) diff --git a/examples/task_logic.py b/examples/task_logic.py index a7e13d1..2990aad 100644 --- a/examples/task_logic.py +++ b/examples/task_logic.py @@ -8,27 +8,12 @@ task_logic = AindDynamicForagingTaskLogic( 
task_parameters=AindDynamicForagingTaskParameters( rng_seed=42, - warmup=df_task_logic.Warmup(min_trial=50, max_choice_ratio_bias=0.1, min_finish_ratio=0.8, windowsize=20), - reward_probability=df_task_logic.RewardProbability(base_reward_sum=0.8, family=3, pairs_n=1), - block_parameters=df_task_logic.BlockParameters(min=10, max=30, beta=10, min_reward=0), - inter_trial_interval=df_task_logic.InterTrialInterval(min=1, max=7, beta=3), - delay_period=df_task_logic.DelayPeriod(min=0, max=0, beta=0), - reward_delay=0.1, reward_size=df_task_logic.RewardSize(right_value_volume=4.0, left_value_volume=4.0), - auto_water=df_task_logic.AutoWater( - auto_water_type="Natural", - multiplier=0.5, - unrewarded=3, - ignored=3, - ), - auto_block=df_task_logic.AutoBlock(advanced_block_auto="now", switch_thr=0.5, points_in_a_row=5), - response_time=df_task_logic.Response(response_time=5, reward_consume_time=1), - uncoupled_reward=[0.1, 0.3, 0.7], - ) + ) ) -def main(path_seed: str = "./local/PatchForaging_{schema}.json"): +def main(path_seed: str = "./local/DynamicForaging_{schema}.json"): example_task_logic = task_logic example_trainer_state = TrainerState( stage=Stage(name="example_stage", task=example_task_logic), curriculum=None, is_on_curriculum=False diff --git a/schema/aind_behavior_dynamic_foraging.json b/schema/aind_behavior_dynamic_foraging.json index 7d2a8c6..49da17c 100644 --- a/schema/aind_behavior_dynamic_foraging.json +++ b/schema/aind_behavior_dynamic_foraging.json @@ -3,7 +3,7 @@ "AindDynamicForagingRig": { "properties": { "aind_behavior_services_pkg_version": { - "default": "0.13.0", + "default": "0.13.1", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$", "title": "aind_behavior_services package version", "type": "string" @@ -194,110 +194,11 @@ "title": "Rng Seed" }, "aind_behavior_services_pkg_version": { - 
"default": "0.13.0", + "default": "0.13.1", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$", "title": "aind_behavior_services package version", "type": "string" }, - "block_parameters": { - "$ref": "#/$defs/BlockParameters", - "default": { - "min": 20, - "max": 60, - "beta": 20, - "min_reward": 1 - }, - "description": "Parameters describing block conditions." - }, - "reward_probability": { - "$ref": "#/$defs/RewardProbability", - "default": { - "base_reward_sum": 0.8, - "family": 1, - "pairs_n": 1 - }, - "description": "Parameters describing reward_probability." - }, - "uncoupled_reward": { - "default": [ - 0.1, - 0.3, - 0.7 - ], - "oneOf": [ - { - "items": { - "type": "number" - }, - "maxItems": 3, - "minItems": 3, - "type": "array" - }, - { - "type": "null" - } - ], - "title": "Uncoupled reward" - }, - "randomness": { - "default": "Exponential", - "enum": [ - "Exponential", - "Even" - ], - "title": "Randomness mode", - "type": "string" - }, - "delay_period": { - "$ref": "#/$defs/DelayPeriod", - "default": { - "min": 0.0, - "max": 1.0, - "beta": 1.0 - }, - "description": "Parameters describing delay period." - }, - "reward_delay": { - "default": 0, - "title": "Reward delay (sec)", - "type": "number" - }, - "auto_water": { - "default": null, - "description": "Parameters describing auto water.", - "oneOf": [ - { - "$ref": "#/$defs/AutoWater" - }, - { - "type": "null" - } - ] - }, - "inter_trial_interval": { - "$ref": "#/$defs/InterTrialInterval", - "description": "Parameters describing iti." - }, - "response_time": { - "$ref": "#/$defs/Response", - "default": { - "response_time": 1.0, - "reward_consume_time": 3.0 - }, - "description": "Parameters describing response time." 
- }, - "auto_block": { - "default": null, - "description": "Parameters describing auto advancement to next block.", - "oneOf": [ - { - "$ref": "#/$defs/AutoBlock" - }, - { - "type": "null" - } - ] - }, "reward_size": { "$ref": "#/$defs/RewardSize", "default": { @@ -306,35 +207,6 @@ }, "description": "Parameters describing reward size." }, - "warmup": { - "default": null, - "description": "Parameters describing warmup.", - "oneOf": [ - { - "$ref": "#/$defs/Warmup" - }, - { - "type": "null" - } - ] - }, - "no_response_trial_addition": { - "default": true, - "description": "Add one trial to the block length on both lickspouts.", - "title": "No Response Trial Addition", - "type": "boolean" - }, - "reward_n": { - "default": null, - "oneOf": [ - { - "$ref": "#/$defs/RewardN" - }, - { - "type": "null" - } - ] - }, "lick_spout_retraction": { "default": false, "description": "Lick spout retraction enabled.", @@ -578,68 +450,6 @@ "title": "AuditorySecondaryReinforcer", "type": "object" }, - "AutoBlock": { - "properties": { - "advanced_block_auto": { - "default": "now", - "enum": [ - "now", - "once" - ], - "title": "Auto block mode", - "type": "string" - }, - "switch_thr": { - "default": 0.5, - "title": "Switch threshold for auto block", - "type": "number" - }, - "points_in_a_row": { - "default": 5, - "title": "Points in a row for auto block", - "type": "integer" - } - }, - "title": "AutoBlock", - "type": "object" - }, - "AutoWater": { - "properties": { - "auto_water_type": { - "default": "Natural", - "enum": [ - "Natural", - "Both", - "High pro" - ], - "title": "Auto water mode", - "type": "string" - }, - "multiplier": { - "default": 0.8, - "title": "Multiplier for auto reward", - "type": "number" - }, - "unrewarded": { - "default": 200, - "title": "Number of unrewarded trials before auto water", - "type": "integer" - }, - "ignored": { - "default": 100, - "title": "Number of ignored trials before auto water", - "type": "integer" - }, - "include_reward": { - "default": false, 
- "description": "Include auto water in total rewards.", - "title": "Include Reward", - "type": "boolean" - } - }, - "title": "AutoWater", - "type": "object" - }, "Axis": { "description": "Motor axis available", "enum": [ @@ -750,32 +560,6 @@ "title": "BehaviorStabilityParameters", "type": "object" }, - "BlockParameters": { - "properties": { - "min": { - "default": 20, - "title": "Block length (min)", - "type": "integer" - }, - "max": { - "default": 60, - "title": "Block length (max)", - "type": "integer" - }, - "beta": { - "default": 20, - "title": "Block length (beta)", - "type": "integer" - }, - "min_reward": { - "default": 1, - "title": "Minimal rewards in a block to switch", - "type": "integer" - } - }, - "title": "BlockParameters", - "type": "object" - }, "CameraController_SpinnakerCamera_": { "properties": { "device_type": { @@ -1127,27 +911,6 @@ "title": "CoupledTrialGeneratorSpec", "type": "object" }, - "DelayPeriod": { - "properties": { - "min": { - "default": 0.0, - "title": "Delay period (min) ", - "type": "number" - }, - "max": { - "default": 1.0, - "title": "Delay period (max) ", - "type": "number" - }, - "beta": { - "default": 1.0, - "title": "Delay period (beta)", - "type": "number" - } - }, - "title": "DelayPeriod", - "type": "object" - }, "ExponentialDistribution": { "description": "An exponential probability distribution.\n\nModels time between events in a Poisson process. 
Commonly used\nfor wait times and inter-event intervals.", "properties": { @@ -1540,32 +1303,6 @@ "title": "IntegrationTestTrialGeneratorSpec", "type": "object" }, - "InterTrialInterval": { - "properties": { - "min": { - "default": 1.0, - "title": "ITI (min)", - "type": "number" - }, - "max": { - "default": 8.0, - "title": "ITI (max)", - "type": "number" - }, - "beta": { - "default": 2.0, - "title": "ITI (beta)", - "type": "number" - }, - "increase": { - "default": 0.0, - "title": "ITI increase", - "type": "number" - } - }, - "title": "InterTrialInterval", - "type": "object" - }, "ManipulatorPosition": { "description": "Represents a position in the manipulator coordinate system", "properties": { @@ -1701,56 +1438,6 @@ "title": "Rect", "type": "object" }, - "Response": { - "properties": { - "response_time": { - "default": 1.0, - "title": "Response time", - "type": "number" - }, - "reward_consume_time": { - "default": 3.0, - "description": "Time of the no-lick period before trial end", - "title": "Reward consume time", - "type": "number" - } - }, - "title": "Response", - "type": "object" - }, - "RewardN": { - "properties": { - "initial_inactive_trials": { - "default": 2, - "description": "Initial N trials of the active side where no bait will be be given.", - "title": "Initial Inactive Trials", - "type": "integer" - } - }, - "title": "RewardN", - "type": "object" - }, - "RewardProbability": { - "properties": { - "base_reward_sum": { - "default": 0.8, - "title": "Sum of p_reward", - "type": "number" - }, - "family": { - "default": 1, - "title": "Reward family", - "type": "integer" - }, - "pairs_n": { - "default": 1, - "title": "Number of pairs", - "type": "integer" - } - }, - "title": "RewardProbability", - "type": "object" - }, "RewardProbabilityParameters": { "properties": { "base_reward_sum": { @@ -1843,14 +1530,14 @@ "Session": { "properties": { "aind_behavior_services_pkg_version": { - "default": "0.13.0", + "default": "0.13.1", "pattern": 
"^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$", "title": "aind_behavior_services package version", "type": "string" }, "version": { - "const": "0.13.0", - "default": "0.13.0", + "const": "0.13.1", + "default": "0.13.1", "title": "Version", "type": "string" }, @@ -2735,13 +2422,13 @@ "type": "integer" }, "container_extension": { - "default": "mp4", + "default": "mkv", "description": "Container extension", "title": "Container Extension", "type": "string" }, "output_arguments": { - "default": "-vf \"scale=out_color_matrix=bt709:out_range=full,format=bgr24,scale=out_range=full\" -c:v h264_nvenc -pix_fmt yuv420p -color_range full -colorspace bt709 -color_trc linear -tune hq -preset p4 -rc vbr -cq 12 -b:v 0M -metadata author=\"Allen Institute for Neural Dynamics\" -maxrate 700M -bufsize 350M", + "default": "-vf \"scale=out_range=full,setparams=range=full:colorspace=bt709:color_primaries=bt709:color_trc=linear\" -c:v h264_nvenc -pix_fmt yuv420p -color_range full -colorspace bt709 -color_trc linear -tune hq -preset p3 -rc vbr -cq 18 -b:v 0M -metadata author=\"Allen Institute for Neural Dynamics\" -maxrate 700M -bufsize 350M -f matroska -write_crc32 0", "description": "Output arguments", "title": "Output Arguments", "type": "string" @@ -2788,32 +2475,6 @@ "title": "VideoWriterOpenCv", "type": "object" }, - "Warmup": { - "properties": { - "min_trial": { - "default": 50, - "title": "Warmup finish criteria: minimal trials", - "type": "integer" - }, - "max_choice_ratio_bias": { - "default": 0.1, - "title": "Warmup finish criteria: maximal choice ratio bias from 0.5", - "type": "number" - }, - "min_finish_ratio": { - "default": 0.8, - "title": "Warmup finish criteria: minimal finish ratio", - "type": "number" - }, - "windowsize": { - "default": 20, - "title": "Warmup finish criteria: window size to compute the bias and ratio", - 
"type": "integer" - } - }, - "title": "Warmup", - "type": "object" - }, "WaterValveCalibration": { "description": "Represents a water valve calibration.", "properties": { @@ -2894,6 +2555,7 @@ } }, "required": [ + "date", "slope", "offset" ], diff --git a/src/Extensions/AindBehaviorDynamicForaging.Generated.cs b/src/Extensions/AindBehaviorDynamicForaging.Generated.cs index 67ef050..4646ba1 100644 --- a/src/Extensions/AindBehaviorDynamicForaging.Generated.cs +++ b/src/Extensions/AindBehaviorDynamicForaging.Generated.cs @@ -49,7 +49,7 @@ public partial class AindDynamicForagingRig public AindDynamicForagingRig() { - _aindBehaviorServicesPkgVersion = "0.13.0"; + _aindBehaviorServicesPkgVersion = "0.13.1"; _version = "0.0.2-rc8"; _triggeredCameraController = new CameraControllerSpinnakerCamera(); _harpBehavior = new HarpBehavior(); @@ -586,49 +586,16 @@ public partial class AindDynamicForagingTaskParameters private string _aindBehaviorServicesPkgVersion; - private BlockParameters _blockParameters; - - private RewardProbability _rewardProbability; - - private System.Collections.Generic.List _uncoupledReward; - - private AindDynamicForagingTaskParametersRandomness _randomness; - - private DelayPeriod _delayPeriod; - - private double _rewardDelay; - - private AutoWater _autoWater; - - private InterTrialInterval _interTrialInterval; - - private Response _responseTime; - - private AutoBlock _autoBlock; - private RewardSize _rewardSize; - private Warmup _warmup; - - private bool _noResponseTrialAddition; - - private RewardN _rewardN; - private bool? 
_lickSpoutRetraction; private TrialGeneratorSpec _trialGenerator; public AindDynamicForagingTaskParameters() { - _aindBehaviorServicesPkgVersion = "0.13.0"; - _blockParameters = new BlockParameters(); - _rewardProbability = new RewardProbability(); - _randomness = AindDynamicForagingTaskParametersRandomness.Exponential; - _delayPeriod = new DelayPeriod(); - _rewardDelay = 0D; - _responseTime = new Response(); + _aindBehaviorServicesPkgVersion = "0.13.1"; _rewardSize = new RewardSize(); - _noResponseTrialAddition = true; _lickSpoutRetraction = false; _trialGenerator = new TrialGeneratorSpec(); } @@ -637,20 +604,7 @@ protected AindDynamicForagingTaskParameters(AindDynamicForagingTaskParameters ot { _rngSeed = other._rngSeed; _aindBehaviorServicesPkgVersion = other._aindBehaviorServicesPkgVersion; - _blockParameters = other._blockParameters; - _rewardProbability = other._rewardProbability; - _uncoupledReward = other._uncoupledReward; - _randomness = other._randomness; - _delayPeriod = other._delayPeriod; - _rewardDelay = other._rewardDelay; - _autoWater = other._autoWater; - _interTrialInterval = other._interTrialInterval; - _responseTime = other._responseTime; - _autoBlock = other._autoBlock; _rewardSize = other._rewardSize; - _warmup = other._warmup; - _noResponseTrialAddition = other._noResponseTrialAddition; - _rewardN = other._rewardN; _lickSpoutRetraction = other._lickSpoutRetraction; _trialGenerator = other._trialGenerator; } @@ -685,172 +639,6 @@ public string AindBehaviorServicesPkgVersion } } - /// - /// Parameters describing block conditions. - /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("block_parameters")] - [System.ComponentModel.DescriptionAttribute("Parameters describing block conditions.")] - public BlockParameters BlockParameters - { - get - { - return _blockParameters; - } - set - { - _blockParameters = value; - } - } - - /// - /// Parameters describing reward_probability. 
- /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("reward_probability")] - [System.ComponentModel.DescriptionAttribute("Parameters describing reward_probability.")] - public RewardProbability RewardProbability - { - get - { - return _rewardProbability; - } - set - { - _rewardProbability = value; - } - } - - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("uncoupled_reward")] - public System.Collections.Generic.List UncoupledReward - { - get - { - return _uncoupledReward; - } - set - { - _uncoupledReward = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("randomness")] - public AindDynamicForagingTaskParametersRandomness Randomness - { - get - { - return _randomness; - } - set - { - _randomness = value; - } - } - - /// - /// Parameters describing delay period. - /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("delay_period")] - [System.ComponentModel.DescriptionAttribute("Parameters describing delay period.")] - public DelayPeriod DelayPeriod - { - get - { - return _delayPeriod; - } - set - { - _delayPeriod = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("reward_delay")] - public double RewardDelay - { - get - { - return _rewardDelay; - } - set - { - _rewardDelay = value; - } - } - - /// - /// Parameters describing auto water. - /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("auto_water")] - [System.ComponentModel.DescriptionAttribute("Parameters describing auto water.")] - public AutoWater AutoWater - { - get - { - return _autoWater; - } - set - { - _autoWater = value; - } - } - - /// - /// Parameters describing iti. 
- /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("inter_trial_interval")] - [System.ComponentModel.DescriptionAttribute("Parameters describing iti.")] - public InterTrialInterval InterTrialInterval - { - get - { - return _interTrialInterval; - } - set - { - _interTrialInterval = value; - } - } - - /// - /// Parameters describing response time. - /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("response_time")] - [System.ComponentModel.DescriptionAttribute("Parameters describing response time.")] - public Response ResponseTime - { - get - { - return _responseTime; - } - set - { - _responseTime = value; - } - } - - /// - /// Parameters describing auto advancement to next block. - /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("auto_block")] - [System.ComponentModel.DescriptionAttribute("Parameters describing auto advancement to next block.")] - public AutoBlock AutoBlock - { - get - { - return _autoBlock; - } - set - { - _autoBlock = value; - } - } - /// /// Parameters describing reward size. /// @@ -869,55 +657,6 @@ public RewardSize RewardSize } } - /// - /// Parameters describing warmup. - /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("warmup")] - [System.ComponentModel.DescriptionAttribute("Parameters describing warmup.")] - public Warmup Warmup - { - get - { - return _warmup; - } - set - { - _warmup = value; - } - } - - /// - /// Add one trial to the block length on both lickspouts. 
- /// - [Newtonsoft.Json.JsonPropertyAttribute("no_response_trial_addition")] - [System.ComponentModel.DescriptionAttribute("Add one trial to the block length on both lickspouts.")] - public bool NoResponseTrialAddition - { - get - { - return _noResponseTrialAddition; - } - set - { - _noResponseTrialAddition = value; - } - } - - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("reward_n")] - public RewardN RewardN - { - get - { - return _rewardN; - } - set - { - _rewardN = value; - } - } - /// /// Lick spout retraction enabled. /// @@ -967,20 +706,7 @@ protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) { stringBuilder.Append("RngSeed = " + _rngSeed + ", "); stringBuilder.Append("AindBehaviorServicesPkgVersion = " + _aindBehaviorServicesPkgVersion + ", "); - stringBuilder.Append("BlockParameters = " + _blockParameters + ", "); - stringBuilder.Append("RewardProbability = " + _rewardProbability + ", "); - stringBuilder.Append("UncoupledReward = " + _uncoupledReward + ", "); - stringBuilder.Append("Randomness = " + _randomness + ", "); - stringBuilder.Append("DelayPeriod = " + _delayPeriod + ", "); - stringBuilder.Append("RewardDelay = " + _rewardDelay + ", "); - stringBuilder.Append("AutoWater = " + _autoWater + ", "); - stringBuilder.Append("InterTrialInterval = " + _interTrialInterval + ", "); - stringBuilder.Append("ResponseTime = " + _responseTime + ", "); - stringBuilder.Append("AutoBlock = " + _autoBlock + ", "); stringBuilder.Append("RewardSize = " + _rewardSize + ", "); - stringBuilder.Append("Warmup = " + _warmup + ", "); - stringBuilder.Append("NoResponseTrialAddition = " + _noResponseTrialAddition + ", "); - stringBuilder.Append("RewardN = " + _rewardN + ", "); stringBuilder.Append("LickSpoutRetraction = " + _lickSpoutRetraction + ", "); stringBuilder.Append("TrialGenerator = " + _trialGenerator); return true; @@ -1376,377 +1102,141 @@ public override string ToString() } + /// + /// Motor 
axis available + /// + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + public enum Axis + { + + [System.Runtime.Serialization.EnumMemberAttribute(Value="0")] + None = 0, + + [System.Runtime.Serialization.EnumMemberAttribute(Value="1")] + X = 1, + + [System.Runtime.Serialization.EnumMemberAttribute(Value="2")] + Y1 = 2, + + [System.Runtime.Serialization.EnumMemberAttribute(Value="3")] + Y2 = 3, + + [System.Runtime.Serialization.EnumMemberAttribute(Value="4")] + Z = 4, + } + + + /// + /// Axis configuration + /// [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + [System.ComponentModel.DescriptionAttribute("Axis configuration")] [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class AutoBlock + public partial class AxisConfiguration { - private AutoBlockAdvancedBlockAuto _advancedBlockAuto; + private Axis _axis; + + private int _stepAccelerationInterval; + + private int _stepInterval; + + private MicrostepResolution _microstepResolution; + + private int _maximumStepInterval; + + private MotorOperationMode _motorOperationMode; - private double _switchThr; + private double _maxLimit; - private int _pointsInARow; + private double _minLimit; - public AutoBlock() + public AxisConfiguration() { - _advancedBlockAuto = AutoBlockAdvancedBlockAuto.Now; - _switchThr = 0.5D; - _pointsInARow = 5; + _stepAccelerationInterval = 100; + _stepInterval = 100; + _microstepResolution = MicrostepResolution.Microstep8; + _maximumStepInterval = 2000; + _motorOperationMode = MotorOperationMode.Quiet; + _maxLimit = 25D; + _minLimit = -0.01D; } - protected AutoBlock(AutoBlock other) + protected AxisConfiguration(AxisConfiguration other) { - _advancedBlockAuto = other._advancedBlockAuto; - _switchThr = other._switchThr; - _pointsInARow = other._pointsInARow; + _axis = other._axis; + 
_stepAccelerationInterval = other._stepAccelerationInterval; + _stepInterval = other._stepInterval; + _microstepResolution = other._microstepResolution; + _maximumStepInterval = other._maximumStepInterval; + _motorOperationMode = other._motorOperationMode; + _maxLimit = other._maxLimit; + _minLimit = other._minLimit; } - [Newtonsoft.Json.JsonPropertyAttribute("advanced_block_auto")] - public AutoBlockAdvancedBlockAuto AdvancedBlockAuto + [Newtonsoft.Json.JsonPropertyAttribute("axis", Required=Newtonsoft.Json.Required.Always)] + public Axis Axis { get { - return _advancedBlockAuto; + return _axis; } set { - _advancedBlockAuto = value; + _axis = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("switch_thr")] - public double SwitchThr + /// + /// Acceleration of the step interval in microseconds + /// + [Newtonsoft.Json.JsonPropertyAttribute("step_acceleration_interval")] + [System.ComponentModel.DescriptionAttribute("Acceleration of the step interval in microseconds")] + public int StepAccelerationInterval { get { - return _switchThr; + return _stepAccelerationInterval; } set { - _switchThr = value; + _stepAccelerationInterval = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("points_in_a_row")] - public int PointsInARow + /// + /// Step interval in microseconds. 
+ /// + [Newtonsoft.Json.JsonPropertyAttribute("step_interval")] + [System.ComponentModel.DescriptionAttribute("Step interval in microseconds.")] + public int StepInterval { get { - return _pointsInARow; + return _stepInterval; } set { - _pointsInARow = value; + _stepInterval = value; } } - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new AutoBlock(this))); - } - - public System.IObservable Generate(System.IObservable source) + [Newtonsoft.Json.JsonPropertyAttribute("microstep_resolution")] + public MicrostepResolution MicrostepResolution { - return System.Reactive.Linq.Observable.Select(source, _ => new AutoBlock(this)); + get + { + return _microstepResolution; + } + set + { + _microstepResolution = value; + } } - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("AdvancedBlockAuto = " + _advancedBlockAuto + ", "); - stringBuilder.Append("SwitchThr = " + _switchThr + ", "); - stringBuilder.Append("PointsInARow = " + _pointsInARow); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class AutoWater - { - - private AutoWaterType _autoWaterType; - - private double _multiplier; - - private int _unrewarded; - - private int _ignored; - - private bool _includeReward; - - public AutoWater() - { - _autoWaterType = AutoWaterType.Natural; - _multiplier = 0.8D; - _unrewarded = 200; - 
_ignored = 100; - _includeReward = false; - } - - protected AutoWater(AutoWater other) - { - _autoWaterType = other._autoWaterType; - _multiplier = other._multiplier; - _unrewarded = other._unrewarded; - _ignored = other._ignored; - _includeReward = other._includeReward; - } - - [Newtonsoft.Json.JsonPropertyAttribute("auto_water_type")] - public AutoWaterType AutoWaterType - { - get - { - return _autoWaterType; - } - set - { - _autoWaterType = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("multiplier")] - public double Multiplier - { - get - { - return _multiplier; - } - set - { - _multiplier = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("unrewarded")] - public int Unrewarded - { - get - { - return _unrewarded; - } - set - { - _unrewarded = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("ignored")] - public int Ignored - { - get - { - return _ignored; - } - set - { - _ignored = value; - } - } - - /// - /// Include auto water in total rewards. - /// - [Newtonsoft.Json.JsonPropertyAttribute("include_reward")] - [System.ComponentModel.DescriptionAttribute("Include auto water in total rewards.")] - public bool IncludeReward - { - get - { - return _includeReward; - } - set - { - _includeReward = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new AutoWater(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new AutoWater(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("AutoWaterType = " + _autoWaterType + ", "); - stringBuilder.Append("Multiplier = " + _multiplier + ", "); - stringBuilder.Append("Unrewarded = " + _unrewarded + ", "); - stringBuilder.Append("Ignored = " + _ignored + ", "); - stringBuilder.Append("IncludeReward = " + _includeReward); - return true; - } - - 
public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - - /// - /// Motor axis available - /// - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - public enum Axis - { - - [System.Runtime.Serialization.EnumMemberAttribute(Value="0")] - None = 0, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="1")] - X = 1, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="2")] - Y1 = 2, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="3")] - Y2 = 3, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="4")] - Z = 4, - } - - - /// - /// Axis configuration - /// - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [System.ComponentModel.DescriptionAttribute("Axis configuration")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class AxisConfiguration - { - - private Axis _axis; - - private int _stepAccelerationInterval; - - private int _stepInterval; - - private MicrostepResolution _microstepResolution; - - private int _maximumStepInterval; - - private MotorOperationMode _motorOperationMode; - - private double _maxLimit; - - private double _minLimit; - - public AxisConfiguration() - { - _stepAccelerationInterval = 100; - _stepInterval = 100; - _microstepResolution = MicrostepResolution.Microstep8; - _maximumStepInterval = 2000; - _motorOperationMode = MotorOperationMode.Quiet; - _maxLimit = 25D; - _minLimit = -0.01D; - } - - protected AxisConfiguration(AxisConfiguration other) - { - _axis = other._axis; - _stepAccelerationInterval = other._stepAccelerationInterval; 
- _stepInterval = other._stepInterval; - _microstepResolution = other._microstepResolution; - _maximumStepInterval = other._maximumStepInterval; - _motorOperationMode = other._motorOperationMode; - _maxLimit = other._maxLimit; - _minLimit = other._minLimit; - } - - [Newtonsoft.Json.JsonPropertyAttribute("axis", Required=Newtonsoft.Json.Required.Always)] - public Axis Axis - { - get - { - return _axis; - } - set - { - _axis = value; - } - } - - /// - /// Acceleration of the step interval in microseconds - /// - [Newtonsoft.Json.JsonPropertyAttribute("step_acceleration_interval")] - [System.ComponentModel.DescriptionAttribute("Acceleration of the step interval in microseconds")] - public int StepAccelerationInterval - { - get - { - return _stepAccelerationInterval; - } - set - { - _stepAccelerationInterval = value; - } - } - - /// - /// Step interval in microseconds. - /// - [Newtonsoft.Json.JsonPropertyAttribute("step_interval")] - [System.ComponentModel.DescriptionAttribute("Step interval in microseconds.")] - public int StepInterval - { - get - { - return _stepInterval; - } - set - { - _stepInterval = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("microstep_resolution")] - public MicrostepResolution MicrostepResolution - { - get - { - return _microstepResolution; - } - set - { - _microstepResolution = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("maximum_step_interval")] - public int MaximumStepInterval + [Newtonsoft.Json.JsonPropertyAttribute("maximum_step_interval")] + public int MaximumStepInterval { get { @@ -1910,180 +1400,64 @@ public BehaviorStabilityParametersBehaviorEvaluationMode BehaviorEvaluationMode { get { - return _behaviorEvaluationMode; - } - set - { - _behaviorEvaluationMode = value; - } - } - - /// - /// Fraction scaling reward-probability difference for behavior. 
- /// - [Newtonsoft.Json.JsonPropertyAttribute("behavior_stability_fraction")] - [System.ComponentModel.DescriptionAttribute("Fraction scaling reward-probability difference for behavior.")] - public double BehaviorStabilityFraction - { - get - { - return _behaviorStabilityFraction; - } - set - { - _behaviorStabilityFraction = value; - } - } - - /// - /// Minimum number of consecutive trials satisfying the behavioral stability fraction. - /// - [Newtonsoft.Json.JsonPropertyAttribute("min_consecutive_stable_trials")] - [System.ComponentModel.DescriptionAttribute("Minimum number of consecutive trials satisfying the behavioral stability fraction" + - ".")] - public int MinConsecutiveStableTrials - { - get - { - return _minConsecutiveStableTrials; - } - set - { - _minConsecutiveStableTrials = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new BehaviorStabilityParameters(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new BehaviorStabilityParameters(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("BehaviorEvaluationMode = " + _behaviorEvaluationMode + ", "); - stringBuilder.Append("BehaviorStabilityFraction = " + _behaviorStabilityFraction + ", "); - stringBuilder.Append("MinConsecutiveStableTrials = " + _minConsecutiveStableTrials); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - 
[Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class BlockParameters - { - - private int _min; - - private int _max; - - private int _beta; - - private int _minReward; - - public BlockParameters() - { - _min = 20; - _max = 60; - _beta = 20; - _minReward = 1; - } - - protected BlockParameters(BlockParameters other) - { - _min = other._min; - _max = other._max; - _beta = other._beta; - _minReward = other._minReward; - } - - [Newtonsoft.Json.JsonPropertyAttribute("min")] - public int Min - { - get - { - return _min; - } - set - { - _min = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("max")] - public int Max - { - get - { - return _max; + return _behaviorEvaluationMode; } set { - _max = value; + _behaviorEvaluationMode = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("beta")] - public int Beta + /// + /// Fraction scaling reward-probability difference for behavior. + /// + [Newtonsoft.Json.JsonPropertyAttribute("behavior_stability_fraction")] + [System.ComponentModel.DescriptionAttribute("Fraction scaling reward-probability difference for behavior.")] + public double BehaviorStabilityFraction { get { - return _beta; + return _behaviorStabilityFraction; } set { - _beta = value; + _behaviorStabilityFraction = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("min_reward")] - public int MinReward + /// + /// Minimum number of consecutive trials satisfying the behavioral stability fraction. 
+ /// + [Newtonsoft.Json.JsonPropertyAttribute("min_consecutive_stable_trials")] + [System.ComponentModel.DescriptionAttribute("Minimum number of consecutive trials satisfying the behavioral stability fraction" + + ".")] + public int MinConsecutiveStableTrials { get { - return _minReward; + return _minConsecutiveStableTrials; } set { - _minReward = value; + _minConsecutiveStableTrials = value; } } - public System.IObservable Generate() + public System.IObservable Generate() { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new BlockParameters(this))); + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new BehaviorStabilityParameters(this))); } - public System.IObservable Generate(System.IObservable source) + public System.IObservable Generate(System.IObservable source) { - return System.Reactive.Linq.Observable.Select(source, _ => new BlockParameters(this)); + return System.Reactive.Linq.Observable.Select(source, _ => new BehaviorStabilityParameters(this)); } protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) { - stringBuilder.Append("Min = " + _min + ", "); - stringBuilder.Append("Max = " + _max + ", "); - stringBuilder.Append("Beta = " + _beta + ", "); - stringBuilder.Append("MinReward = " + _minReward); + stringBuilder.Append("BehaviorEvaluationMode = " + _behaviorEvaluationMode + ", "); + stringBuilder.Append("BehaviorStabilityFraction = " + _behaviorStabilityFraction + ", "); + stringBuilder.Append("MinConsecutiveStableTrials = " + _minConsecutiveStableTrials); return true; } @@ -2738,104 +2112,6 @@ protected override bool PrintMembers(System.Text.StringBuilder stringBuilder) } - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class DelayPeriod - { - - 
private double _min; - - private double _max; - - private double _beta; - - public DelayPeriod() - { - _min = 0D; - _max = 1D; - _beta = 1D; - } - - protected DelayPeriod(DelayPeriod other) - { - _min = other._min; - _max = other._max; - _beta = other._beta; - } - - [Newtonsoft.Json.JsonPropertyAttribute("min")] - public double Min - { - get - { - return _min; - } - set - { - _min = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("max")] - public double Max - { - get - { - return _max; - } - set - { - _max = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("beta")] - public double Beta - { - get - { - return _beta; - } - set - { - _beta = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new DelayPeriod(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new DelayPeriod(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("Min = " + _min + ", "); - stringBuilder.Append("Max = " + _max + ", "); - stringBuilder.Append("Beta = " + _beta); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - /// /// An exponential probability distribution. 
/// @@ -3881,206 +3157,58 @@ public string SerialNumber } /// - /// Device port name - /// - [Newtonsoft.Json.JsonPropertyAttribute("port_name", Required=Newtonsoft.Json.Required.Always)] - [System.ComponentModel.DescriptionAttribute("Device port name")] - public string PortName - { - get - { - return _portName; - } - set - { - _portName = value; - } - } - - /// - /// Connected clock outputs - /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("connected_clock_outputs")] - [System.ComponentModel.DescriptionAttribute("Connected clock outputs")] - public System.Collections.Generic.List ConnectedClockOutputs - { - get - { - return _connectedClockOutputs; - } - set - { - _connectedClockOutputs = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new HarpWhiteRabbit(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new HarpWhiteRabbit(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("DeviceType = " + _deviceType + ", "); - stringBuilder.Append("Calibration = " + _calibration + ", "); - stringBuilder.Append("WhoAmI = " + _whoAmI + ", "); - stringBuilder.Append("SerialNumber = " + _serialNumber + ", "); - stringBuilder.Append("PortName = " + _portName + ", "); - stringBuilder.Append("ConnectedClockOutputs = " + _connectedClockOutputs); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 
(Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class IntegrationTestTrialGeneratorSpec : TrialGeneratorSpec - { - - public IntegrationTestTrialGeneratorSpec() - { - } - - protected IntegrationTestTrialGeneratorSpec(IntegrationTestTrialGeneratorSpec other) : - base(other) - { - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new IntegrationTestTrialGeneratorSpec(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new IntegrationTestTrialGeneratorSpec(this)); - } - - protected override bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - return base.PrintMembers(stringBuilder); - } - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class InterTrialInterval - { - - private double _min; - - private double _max; - - private double _beta; - - private double _increase; - - public InterTrialInterval() - { - _min = 1D; - _max = 8D; - _beta = 2D; - _increase = 0D; - } - - protected InterTrialInterval(InterTrialInterval other) - { - _min = other._min; - _max = other._max; - _beta = other._beta; - _increase = other._increase; - } - - [Newtonsoft.Json.JsonPropertyAttribute("min")] - public double Min - { - get - { - return _min; - } - set - { - _min = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("max")] - public double Max - { - get - { - return _max; - } - set - { - _max = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("beta")] - public double Beta + /// Device port name + /// + [Newtonsoft.Json.JsonPropertyAttribute("port_name", 
Required=Newtonsoft.Json.Required.Always)] + [System.ComponentModel.DescriptionAttribute("Device port name")] + public string PortName { get { - return _beta; + return _portName; } set { - _beta = value; + _portName = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("increase")] - public double Increase + /// + /// Connected clock outputs + /// + [System.Xml.Serialization.XmlIgnoreAttribute()] + [Newtonsoft.Json.JsonPropertyAttribute("connected_clock_outputs")] + [System.ComponentModel.DescriptionAttribute("Connected clock outputs")] + public System.Collections.Generic.List ConnectedClockOutputs { get { - return _increase; + return _connectedClockOutputs; } set { - _increase = value; + _connectedClockOutputs = value; } } - public System.IObservable Generate() + public System.IObservable Generate() { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new InterTrialInterval(this))); + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new HarpWhiteRabbit(this))); } - public System.IObservable Generate(System.IObservable source) + public System.IObservable Generate(System.IObservable source) { - return System.Reactive.Linq.Observable.Select(source, _ => new InterTrialInterval(this)); + return System.Reactive.Linq.Observable.Select(source, _ => new HarpWhiteRabbit(this)); } protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) { - stringBuilder.Append("Min = " + _min + ", "); - stringBuilder.Append("Max = " + _max + ", "); - stringBuilder.Append("Beta = " + _beta + ", "); - stringBuilder.Append("Increase = " + _increase); + stringBuilder.Append("DeviceType = " + _deviceType + ", "); + stringBuilder.Append("Calibration = " + _calibration + ", "); + stringBuilder.Append("WhoAmI = " + _whoAmI + ", "); + stringBuilder.Append("SerialNumber = " + _serialNumber + ", "); + stringBuilder.Append("PortName = " + _portName + ", "); + stringBuilder.Append("ConnectedClockOutputs 
= " + _connectedClockOutputs); return true; } @@ -4099,6 +3227,38 @@ public override string ToString() } + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] + [Bonsai.CombinatorAttribute(MethodName="Generate")] + public partial class IntegrationTestTrialGeneratorSpec : TrialGeneratorSpec + { + + public IntegrationTestTrialGeneratorSpec() + { + } + + protected IntegrationTestTrialGeneratorSpec(IntegrationTestTrialGeneratorSpec other) : + base(other) + { + } + + public System.IObservable Generate() + { + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new IntegrationTestTrialGeneratorSpec(this))); + } + + public System.IObservable Generate(System.IObservable source) + { + return System.Reactive.Linq.Observable.Select(source, _ => new IntegrationTestTrialGeneratorSpec(this)); + } + + protected override bool PrintMembers(System.Text.StringBuilder stringBuilder) + { + return base.PrintMembers(stringBuilder); + } + } + + /// /// Represents a position in the manipulator coordinate system /// @@ -4380,377 +3540,129 @@ public enum MotorOperationMode [System.Runtime.Serialization.EnumMemberAttribute(Value="0")] Quiet = 0, - [System.Runtime.Serialization.EnumMemberAttribute(Value="1")] - Dynamic = 1, - } - - - /// - /// Represents a rectangle defined by its top-left corner, width, and height. 
- /// - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [System.ComponentModel.DescriptionAttribute("Represents a rectangle defined by its top-left corner, width, and height.")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class Rect - { - - private int _x; - - private int _y; - - private int _width; - - private int _height; - - public Rect() - { - _x = 0; - _y = 0; - _width = 0; - _height = 0; - } - - protected Rect(Rect other) - { - _x = other._x; - _y = other._y; - _width = other._width; - _height = other._height; - } - - /// - /// X coordinate of the top-left corner - /// - [Newtonsoft.Json.JsonPropertyAttribute("x")] - [System.ComponentModel.DescriptionAttribute("X coordinate of the top-left corner")] - public int X - { - get - { - return _x; - } - set - { - _x = value; - } - } - - /// - /// Y coordinate of the top-left corner - /// - [Newtonsoft.Json.JsonPropertyAttribute("y")] - [System.ComponentModel.DescriptionAttribute("Y coordinate of the top-left corner")] - public int Y - { - get - { - return _y; - } - set - { - _y = value; - } - } - - /// - /// Width of the rectangle - /// - [Newtonsoft.Json.JsonPropertyAttribute("width")] - [System.ComponentModel.DescriptionAttribute("Width of the rectangle")] - public int Width - { - get - { - return _width; - } - set - { - _width = value; - } - } - - /// - /// Height of the rectangle - /// - [Newtonsoft.Json.JsonPropertyAttribute("height")] - [System.ComponentModel.DescriptionAttribute("Height of the rectangle")] - public int Height - { - get - { - return _height; - } - set - { - _height = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new Rect(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return 
System.Reactive.Linq.Observable.Select(source, _ => new Rect(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("X = " + _x + ", "); - stringBuilder.Append("Y = " + _y + ", "); - stringBuilder.Append("Width = " + _width + ", "); - stringBuilder.Append("Height = " + _height); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class Response - { - - private double _responseTime; - - private double _rewardConsumeTime; - - public Response() - { - _responseTime = 1D; - _rewardConsumeTime = 3D; - } - - protected Response(Response other) - { - _responseTime = other._responseTime; - _rewardConsumeTime = other._rewardConsumeTime; - } - - [Newtonsoft.Json.JsonPropertyAttribute("response_time")] - public double ResponseTime - { - get - { - return _responseTime; - } - set - { - _responseTime = value; - } - } - - /// - /// Time of the no-lick period before trial end - /// - [Newtonsoft.Json.JsonPropertyAttribute("reward_consume_time")] - [System.ComponentModel.DescriptionAttribute("Time of the no-lick period before trial end")] - public double RewardConsumeTime - { - get - { - return _rewardConsumeTime; - } - set - { - _rewardConsumeTime = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new Response(this))); - } - - public System.IObservable 
Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new Response(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("ResponseTime = " + _responseTime + ", "); - stringBuilder.Append("RewardConsumeTime = " + _rewardConsumeTime); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } + [System.Runtime.Serialization.EnumMemberAttribute(Value="1")] + Dynamic = 1, } + /// + /// Represents a rectangle defined by its top-left corner, width, and height. + /// [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + [System.ComponentModel.DescriptionAttribute("Represents a rectangle defined by its top-left corner, width, and height.")] [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class RewardN + public partial class Rect { - private int _initialInactiveTrials; + private int _x; + + private int _y; + + private int _width; + + private int _height; - public RewardN() + public Rect() { - _initialInactiveTrials = 2; + _x = 0; + _y = 0; + _width = 0; + _height = 0; } - protected RewardN(RewardN other) + protected Rect(Rect other) { - _initialInactiveTrials = other._initialInactiveTrials; + _x = other._x; + _y = other._y; + _width = other._width; + _height = other._height; } /// - /// Initial N trials of the active side where no bait will be be given. 
+ /// X coordinate of the top-left corner /// - [Newtonsoft.Json.JsonPropertyAttribute("initial_inactive_trials")] - [System.ComponentModel.DescriptionAttribute("Initial N trials of the active side where no bait will be be given.")] - public int InitialInactiveTrials + [Newtonsoft.Json.JsonPropertyAttribute("x")] + [System.ComponentModel.DescriptionAttribute("X coordinate of the top-left corner")] + public int X { get { - return _initialInactiveTrials; + return _x; } set { - _initialInactiveTrials = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new RewardN(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new RewardN(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("InitialInactiveTrials = " + _initialInactiveTrials); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); + _x = value; } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class RewardProbability - { - - private double _baseRewardSum; - - private int _family; - - private int _pairsN; - - public RewardProbability() - { - _baseRewardSum = 0.8D; - _family = 1; - _pairsN = 1; - } - - protected RewardProbability(RewardProbability other) - { - _baseRewardSum = other._baseRewardSum; - _family = other._family; - _pairsN = other._pairsN; } - 
[Newtonsoft.Json.JsonPropertyAttribute("base_reward_sum")] - public double BaseRewardSum + /// + /// Y coordinate of the top-left corner + /// + [Newtonsoft.Json.JsonPropertyAttribute("y")] + [System.ComponentModel.DescriptionAttribute("Y coordinate of the top-left corner")] + public int Y { get { - return _baseRewardSum; + return _y; } set { - _baseRewardSum = value; + _y = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("family")] - public int Family + /// + /// Width of the rectangle + /// + [Newtonsoft.Json.JsonPropertyAttribute("width")] + [System.ComponentModel.DescriptionAttribute("Width of the rectangle")] + public int Width { get { - return _family; + return _width; } set { - _family = value; + _width = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("pairs_n")] - public int PairsN + /// + /// Height of the rectangle + /// + [Newtonsoft.Json.JsonPropertyAttribute("height")] + [System.ComponentModel.DescriptionAttribute("Height of the rectangle")] + public int Height { get { - return _pairsN; + return _height; } set { - _pairsN = value; + _height = value; } } - public System.IObservable Generate() + public System.IObservable Generate() { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new RewardProbability(this))); + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new Rect(this))); } - public System.IObservable Generate(System.IObservable source) + public System.IObservable Generate(System.IObservable source) { - return System.Reactive.Linq.Observable.Select(source, _ => new RewardProbability(this)); + return System.Reactive.Linq.Observable.Select(source, _ => new Rect(this)); } protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) { - stringBuilder.Append("BaseRewardSum = " + _baseRewardSum + ", "); - stringBuilder.Append("Family = " + _family + ", "); - stringBuilder.Append("PairsN = " + _pairsN); + stringBuilder.Append("X = " + _x + ", "); 
+ stringBuilder.Append("Y = " + _y + ", "); + stringBuilder.Append("Width = " + _width + ", "); + stringBuilder.Append("Height = " + _height); return true; } @@ -5169,8 +4081,8 @@ public partial class Session public Session() { - _aindBehaviorServicesPkgVersion = "0.13.0"; - _version = "0.13.0"; + _aindBehaviorServicesPkgVersion = "0.13.1"; + _version = "0.13.1"; _experimenter = new System.Collections.Generic.List(); _allowDirtyRepo = false; _skipHardwareValidation = false; @@ -7350,8 +6262,8 @@ public partial class VideoWriterFfmpeg : VideoWriter public VideoWriterFfmpeg() { _frameRate = 30; - _containerExtension = "mp4"; - _outputArguments = "-vf \"scale=out_color_matrix=bt709:out_range=full,format=bgr24,scale=out_range=full\" -c:v h264_nvenc -pix_fmt yuv420p -color_range full -colorspace bt709 -color_trc linear -tune hq -preset p4 -rc vbr -cq 12 -b:v 0M -metadata author=\"Allen Institute for Neural Dynamics\" -maxrate 700M -bufsize 350M"; + _containerExtension = "mkv"; + _outputArguments = "-vf \"scale=out_range=full,setparams=range=full:colorspace=bt709:color_primaries=bt709:color_trc=linear\" -c:v h264_nvenc -pix_fmt yuv420p -color_range full -colorspace bt709 -color_trc linear -tune hq -preset p3 -rc vbr -cq 18 -b:v 0M -metadata author=\"Allen Institute for Neural Dynamics\" -maxrate 700M -bufsize 350M -f matroska -write_crc32 0"; _inputArguments = "-colorspace bt709 -color_primaries bt709 -color_range full -color_trc linear"; } @@ -7563,122 +6475,6 @@ protected override bool PrintMembers(System.Text.StringBuilder stringBuilder) } - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class Warmup - { - - private int _minTrial; - - private double _maxChoiceRatioBias; - - private double _minFinishRatio; - - private int _windowsize; - - public Warmup() - { - _minTrial 
= 50; - _maxChoiceRatioBias = 0.1D; - _minFinishRatio = 0.8D; - _windowsize = 20; - } - - protected Warmup(Warmup other) - { - _minTrial = other._minTrial; - _maxChoiceRatioBias = other._maxChoiceRatioBias; - _minFinishRatio = other._minFinishRatio; - _windowsize = other._windowsize; - } - - [Newtonsoft.Json.JsonPropertyAttribute("min_trial")] - public int MinTrial - { - get - { - return _minTrial; - } - set - { - _minTrial = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("max_choice_ratio_bias")] - public double MaxChoiceRatioBias - { - get - { - return _maxChoiceRatioBias; - } - set - { - _maxChoiceRatioBias = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("min_finish_ratio")] - public double MinFinishRatio - { - get - { - return _minFinishRatio; - } - set - { - _minFinishRatio = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("windowsize")] - public int Windowsize - { - get - { - return _windowsize; - } - set - { - _windowsize = value; - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new Warmup(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new Warmup(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("MinTrial = " + _minTrial + ", "); - stringBuilder.Append("MaxChoiceRatioBias = " + _maxChoiceRatioBias + ", "); - stringBuilder.Append("MinFinishRatio = " + _minFinishRatio + ", "); - stringBuilder.Append("Windowsize = " + _windowsize); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); 
- } - } - - /// /// Represents a water valve calibration. /// @@ -7723,7 +6519,7 @@ protected WaterValveCalibration(WaterValveCalibration other) /// Date of the calibration /// [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("date")] + [Newtonsoft.Json.JsonPropertyAttribute("date", Required=Newtonsoft.Json.Required.Always)] [System.ComponentModel.DescriptionAttribute("Date of the calibration")] public System.DateTimeOffset Date { @@ -8073,48 +6869,6 @@ public override string ToString() } - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public enum AindDynamicForagingTaskParametersRandomness - { - - [System.Runtime.Serialization.EnumMemberAttribute(Value="Exponential")] - Exponential = 0, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="Even")] - Even = 1, - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public enum AutoBlockAdvancedBlockAuto - { - - [System.Runtime.Serialization.EnumMemberAttribute(Value="now")] - Now = 0, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="once")] - Once = 1, - } - - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] - public enum AutoWaterType - { - - [System.Runtime.Serialization.EnumMemberAttribute(Value="Natural")] - Natural = 0, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="Both")] - Both = 1, - - [System.Runtime.Serialization.EnumMemberAttribute(Value="High pro")] - HighPro = 2, - } - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] 
[Newtonsoft.Json.JsonConverter(typeof(Newtonsoft.Json.Converters.StringEnumConverter))] public enum BehaviorStabilityParametersBehaviorEvaluationMode @@ -8461,16 +7215,6 @@ public System.IObservable Process(System.IObservable(source); } - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8486,11 +7230,6 @@ public System.IObservable Process(System.IObservable(source); } - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8511,11 +7250,6 @@ public System.IObservable Process(System.IObservable(source); } - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8561,11 +7295,6 @@ public System.IObservable Process(System.IObservable(source); } - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8581,21 +7310,6 @@ public System.IObservable Process(System.IObservable source) return Process(source); } - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8681,11 +7395,6 @@ public System.IObservable Process(System.IObservable return Process(source); } - public System.IObservable Process(System.IObservable source) 
- { - return Process(source); - } - public System.IObservable Process(System.IObservable source) { return Process(source); @@ -8716,17 +7425,13 @@ public System.IObservable Process(System.IObservable source) [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] @@ -8736,13 +7441,9 @@ public System.IObservable Process(System.IObservable source) [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] 
[System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] @@ -8760,7 +7461,6 @@ public System.IObservable Process(System.IObservable source) [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] diff --git a/src/aind_behavior_dynamic_foraging/task_logic/__init__.py b/src/aind_behavior_dynamic_foraging/task_logic/__init__.py index f3b2e8f..519bf3a 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/__init__.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/__init__.py @@ -10,78 +10,12 @@ from . 
import trial_models as trial_models from .trial_generators import IntegrationTestTrialGeneratorSpec, TrialGeneratorSpec -RANDOMNESSES = Literal["Exponential", "Even"] -AUTO_WATER_MODES = Literal["Natural", "Both", "High pro"] - - -class BlockParameters(BaseModel): - # Block length - min: int = Field(default=20, title="Block length (min)") - max: int = Field(default=60, title="Block length (max)") - beta: int = Field(default=20, title="Block length (beta)") - min_reward: int = Field(default=1, title="Minimal rewards in a block to switch") - - -class RewardProbability(BaseModel): - base_reward_sum: float = Field(default=0.8, title="Sum of p_reward") - family: int = Field(default=1, title="Reward family") # Should be explicit here - pairs_n: int = Field(default=1, title="Number of pairs") # Should be explicit here - - -class DelayPeriod(BaseModel): - min: float = Field(default=0.0, title="Delay period (min) ") - max: float = Field(default=1.0, title="Delay period (max) ") - beta: float = Field(default=1.0, title="Delay period (beta)") - - -class AutoWater(BaseModel): - auto_water_type: AUTO_WATER_MODES = Field(default="Natural", title="Auto water mode") - multiplier: float = Field(default=0.8, title="Multiplier for auto reward") - unrewarded: int = Field(default=200, title="Number of unrewarded trials before auto water") - ignored: int = Field(default=100, title="Number of ignored trials before auto water") - include_reward: bool = Field(default=False, description="Include auto water in total rewards.") - - -class InterTrialInterval(BaseModel): - min: float = Field(default=1.0, title="ITI (min)") - max: float = Field(default=8.0, title="ITI (max)") - beta: float = Field(default=2.0, title="ITI (beta)") - increase: float = Field(default=0.0, title="ITI increase") # TODO: not implemented in the GUI?? 
- - -class Response(BaseModel): - response_time: float = Field(default=1.0, title="Response time") - reward_consume_time: float = Field( - default=3.0, title="Reward consume time", description="Time of the no-lick period before trial end" - ) - - -class AutoBlock(BaseModel): - advanced_block_auto: Literal["now", "once"] = Field(default="now", title="Auto block mode") - switch_thr: float = Field(default=0.5, title="Switch threshold for auto block") - points_in_a_row: int = Field(default=5, title="Points in a row for auto block") - class RewardSize(BaseModel): right_value_volume: float = Field(default=3.00, title="Right reward size (uL)") left_value_volume: float = Field(default=3.00, title="Left reward size (uL)") -class Warmup(BaseModel): - min_trial: int = Field(default=50, title="Warmup finish criteria: minimal trials") - max_choice_ratio_bias: float = Field( - default=0.1, title="Warmup finish criteria: maximal choice ratio bias from 0.5" - ) - min_finish_ratio: float = Field(default=0.8, title="Warmup finish criteria: minimal finish ratio") - windowsize: int = Field(default=20, title="Warmup finish criteria: window size to compute the bias and ratio") - - -class RewardN(BaseModel): - initial_inactive_trials: int = Field( - default=2, description="Initial N trials of the active side where no bait will be be given." - ) - - # ==================== MAIN TASK LOGIC CLASSES ==================== @@ -94,32 +28,7 @@ class AindDynamicForagingTaskParameters(TaskParameters): and numerical updaters for dynamic parameter modification. """ - block_parameters: BlockParameters = Field( - default=BlockParameters(), description="Parameters describing block conditions." - ) - reward_probability: RewardProbability = Field( - default=RewardProbability(), description="Parameters describing reward_probability." 
- ) - uncoupled_reward: Optional[list[float]] = Field( - default=[0.1, 0.3, 0.7], title="Uncoupled reward", min_length=3, max_length=3 - ) # For uncoupled tasks only - randomness: RANDOMNESSES = Field(default="Exponential", title="Randomness mode") - delay_period: DelayPeriod = Field(default=DelayPeriod(), description="Parameters describing delay period.") - reward_delay: float = Field(default=0, title="Reward delay (sec)") - auto_water: Optional[AutoWater] = Field(default=None, description="Parameters describing auto water.") - inter_trial_interval: InterTrialInterval = Field( - default_factory=InterTrialInterval, validate_default=True, description="Parameters describing iti." - ) - response_time: Response = Field(default=Response(), description="Parameters describing response time.") - auto_block: Optional[AutoBlock] = Field( - default=None, description="Parameters describing auto advancement to next block." - ) reward_size: RewardSize = Field(default=RewardSize(), description="Parameters describing reward size.") - warmup: Optional[Warmup] = Field(default=None, description="Parameters describing warmup.") - no_response_trial_addition: bool = Field( - default=True, description="Add one trial to the block length on both lickspouts." 
- ) - reward_n: Optional[RewardN] = Field(default=None) lick_spout_retraction: Optional[bool] = Field(default=False, description="Lick spout retraction enabled.") trial_generator: TrialGeneratorSpec = Field( default=IntegrationTestTrialGeneratorSpec(), diff --git a/src/test_python.bonsai b/src/test_python.bonsai index c391c2d..94fa034 100644 --- a/src/test_python.bonsai +++ b/src/test_python.bonsai @@ -3,6 +3,7 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:py="clr-namespace:Bonsai.Scripting.Python;assembly=Bonsai.Scripting.Python" xmlns:rx="clr-namespace:Bonsai.Reactive;assembly=Bonsai.Core" + xmlns:io="clr-namespace:Bonsai.IO;assembly=Bonsai.System" xmlns:p1="clr-namespace:AindDynamicForagingDataSchema;assembly=Extensions" xmlns:p2="clr-namespace:;assembly=Extensions" xmlns="https://bonsai-rx.org/2018/workflow"> @@ -33,8 +34,27 @@ + + + + + + C:\Users\micah.woodard\Documents\GitHub\Aind.Behavior.DynamicForaging\local\DynamicForaging_AindDynamicForagingTaskLogic.json + + + + + - + + 1 + + + + TaskParameters + + + TrialGenerator @@ -88,18 +108,23 @@ - + - + - - - + + + + + + + + \ No newline at end of file From 50ea72281d3d9fcb4f2f07e0db4365fb2e08c332 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 25 Feb 2026 13:58:22 -0800 Subject: [PATCH 12/21] reverts test_python --- src/test_python.bonsai | 37 ++++++------------------------------- 1 file changed, 6 insertions(+), 31 deletions(-) diff --git a/src/test_python.bonsai b/src/test_python.bonsai index 94fa034..c391c2d 100644 --- a/src/test_python.bonsai +++ b/src/test_python.bonsai @@ -3,7 +3,6 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:py="clr-namespace:Bonsai.Scripting.Python;assembly=Bonsai.Scripting.Python" xmlns:rx="clr-namespace:Bonsai.Reactive;assembly=Bonsai.Core" - xmlns:io="clr-namespace:Bonsai.IO;assembly=Bonsai.System" xmlns:p1="clr-namespace:AindDynamicForagingDataSchema;assembly=Extensions" xmlns:p2="clr-namespace:;assembly=Extensions" 
xmlns="https://bonsai-rx.org/2018/workflow"> @@ -34,27 +33,8 @@ - - - - - - C:\Users\micah.woodard\Documents\GitHub\Aind.Behavior.DynamicForaging\local\DynamicForaging_AindDynamicForagingTaskLogic.json - - - - - - - 1 - - - - TaskParameters - - - TrialGenerator + @@ -108,23 +88,18 @@ - + - + + + - + - - - - - - - \ No newline at end of file From c90b53b9122fb138c3eae0fb1edc819927c87bd4 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 25 Feb 2026 16:19:46 -0800 Subject: [PATCH 13/21] adds baiting logic --- .../coupled_trial_generator.py | 33 +++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index 8300424..deb9dfe 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -121,6 +121,8 @@ class CoupledTrialGeneratorSpec(BaseTrialGeneratorSpecModel): [[6, 1], [3, 1], [1, 1]], ] + baiting: bool = Field(default=False, description="Whether uncollected rewards carry over to the next trial.") + def create_generator(self) -> "CoupledTrialGenerator": return CoupledTrialGenerator(self) @@ -143,6 +145,9 @@ def __init__(self, spec: CoupledTrialGeneratorSpec) -> None: self.trials_in_block = 0 self.start_time = datetime.now() + self.is_left_baited: bool = False + self.is_right_baited: bool = False + def next(self) -> Trial | None: """ Generate next trial @@ -160,10 +165,25 @@ def next(self) -> Trial | None: # determine iti and quiescent period duration iti = draw_sample(self.spec.inter_trial_interval_duration_distribution) quiescent = draw_sample(self.spec.quiescent_duration_distribution) + + p_reward_left=self.block.left_reward_prob, + p_reward_right=self.block.right_reward_prob + + if self.spec.baiting: + random_numbers 
= np.random.random(2) + + is_left_baited = self.block.left_reward_prob > random_numbers[0] or self.is_left_baited + self.logger.debug(f"Left baited: {is_left_baited}") + p_reward_left = 1 if is_left_baited else p_reward_left + + is_right_baited = self.block.right_reward_prob > random_numbers[1] or self.is_right_baited + self.logger.debug(f"Right baited: {is_left_baited}") + p_reward_right = 1 if is_right_baited else p_reward_right + return Trial( - p_reward_left=self.block.left_reward_prob, - p_reward_right=self.block.right_reward_prob, + p_reward_left=p_reward_left, + p_reward_right=p_reward_right, reward_consumption_duration=self.spec.reward_consumption_duration, response_deadline_duration=self.spec.response_duration, quiescence_period_duration=quiescent, @@ -211,6 +231,15 @@ def update(self, outcome: TrialOutcome) -> None: self.reward_history.append(outcome.is_rewarded) self.trials_in_block += 1 + + if self.spec.baiting: + if outcome.is_right_choice: + self.logger.debug(f"Resesting right bait.") + self.is_right_baited = False + elif not outcome.is_right_choice: + self.logger.debug(f"Resesting left bait.") + self.is_left_baited = False + if self.spec.extend_block_on_no_response and outcome.is_right_choice is None: self.logger.info("Extending minimum block length due to ignored trial.") self.block.min_length += 1 From b5eefa59a2e14d53fed2831c837ade57773c9207 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 25 Feb 2026 17:02:56 -0800 Subject: [PATCH 14/21] updates aind behavior service --- pyproject.toml | 2 +- .../curricula/coupled_baiting_2p3.py | 491 +++++++++++++++ .../curricula/metrics.py | 13 + .../curricula/uncoupled_baiting_2p3.py | 482 +++++++++++++++ .../uncoupled_no_baiting_2p3p1rwdDelay159.py | 558 ++++++++++++++++++ uv.lock | 10 +- 6 files changed, 1548 insertions(+), 8 deletions(-) create mode 100644 src/aind_behavior_dynamic_foraging/curricula/coupled_baiting_2p3.py create mode 100644 src/aind_behavior_dynamic_foraging/curricula/metrics.py 
create mode 100644 src/aind_behavior_dynamic_foraging/curricula/uncoupled_baiting_2p3.py create mode 100644 src/aind_behavior_dynamic_foraging/curricula/uncoupled_no_baiting_2p3p1rwdDelay159.py diff --git a/pyproject.toml b/pyproject.toml index c8d95d7..b4d17b2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ version = "0.0.2rc9" readme = {file = "README.md", content-type = "text/markdown"} dependencies = [ - "aind_behavior_services>=0.13.1", + "aind_behavior_services@ git+https://github.com/AllenNeuralDynamics/Aind.Behavior.Services.git@v0.13.2rc1", "pydantic-settings", ] diff --git a/src/aind_behavior_dynamic_foraging/curricula/coupled_baiting_2p3.py b/src/aind_behavior_dynamic_foraging/curricula/coupled_baiting_2p3.py new file mode 100644 index 0000000..80c1e58 --- /dev/null +++ b/src/aind_behavior_dynamic_foraging/curricula/coupled_baiting_2p3.py @@ -0,0 +1,491 @@ +from aind_behavior_curriculum import ( + Stage, + StageTransition, + create_curriculum, + Curriculum +) + +from aind_behavior_dynamic_foraging.task_logic.trial_generators import CoupledTrialGeneratorSpec +from aind_behavior_dynamic_foraging.task_logic.trial_generators.coupled_trial_generator import ( + RewardProbabilityParameters, +) + +from typing import List, Literal +import numpy as np + +__version__ = "0.2.3" + +# --- Stages --- +s_stage_1_warmup = Stage( + name="stage_1_warmup", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup ON + warmup='on', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_sum = 0.8, p_ratio = [1:0] + base_reward_sum=0.8, + reward_family=3, + reward_pairs_n=1, + + # block = [10, 20, 5] + block_min=10, + block_max=20, + block_beta=5, + block_min_reward=0, + + # Small ITI at the beginning to better engage the animal + iti_min=1, + iti_max=7, + iti_beta=3, + + # Add a (fixed) small delay period at the beginning # TODO: automate delay period + 
delay_min=0.1, + delay_max=0.1, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.0, + right_value_volume=4.0, + left_value_volume=4.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=3, + ignored=3, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop; set stop_ignores to a large number at the beginning + max_trial=1000, + max_time=75, + auto_stop_ignore_win=20000, + auto_stop_ignore_ratio_threshold=1, + + # -- Miscs -- + response_time=5, # Very long response time at the beginning + reward_consume_time=1, # Shorter RewardConsumeTime to increase the number of trials + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_stage_1 = Stage( + name="stage_1", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_sum = 0.8, p_ratio = [1:0] + base_reward_sum=0.8, + reward_family=3, + reward_pairs_n=1, + + # block = [10, 20, 5] + block_min=10, + block_max=20, + block_beta=5, + block_min_reward=0, + + # Small ITI at the beginning to better engage the animal + iti_min=1, + iti_max=7, + iti_beta=3, + + # Add a (fixed) small delay period at the beginning # TODO: automate delay period + delay_min=0.1, + delay_max=0.1, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.0, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=5, + ignored=5, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop; set stop_ignores to a large number at the beginning + max_trial=1000, + max_time=75, + 
auto_stop_ignore_win=20000, + auto_stop_ignore_ratio_threshold=1, + + # -- Miscs -- + response_time=5, + reward_consume_time=1, + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_stage_2 = Stage( + name="stage_2", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_sum = 0.8 --> 0.6, p_ratio = [1:0] -> [8:1] + base_reward_sum=0.6, + reward_family=1, + reward_pairs_n=1, + + # block length [10, 20, 5] --> [10, 40, 10] + block_min=10, + block_max=40, + block_beta=10, + block_min_reward=0, + + # ITI [1, 7, 3] --> [1, 10, 5] + iti_min=1, + iti_max=10, + iti_beta=3, + + # Add a (fixed) small delay period at the beginning # TODO: automate delay period + delay_min=0.3, + delay_max=0.3, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.0, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=7, + ignored=7, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.6, # Increase auto block switch threshold: 0.5 --> 0.6 + points_in_a_row=5, # Auto stop on ignores-in-a-row starts to take effect + + # Auto stop + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=3, + reward_consume_time=1, + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_stage_3 = Stage( + name="stage_3", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_sum = 0.6 --> 0.45, p_ratio still [8:1] + base_reward_sum=0.45, + reward_family=1, + 
reward_pairs_n=1, + + # block length [10, 40, 10] --> [20, 60, 20] + block_min=20, + block_max=60, + block_beta=20, + block_min_reward=0, + + # ITI [2, 10, 5] --> [3, 15, 5] + iti_min=1, + iti_max=15, + iti_beta=3, + + # Add a (fixed) small delay period at the beginning # TODO: automate delay period + delay_min=0.5, + delay_max=0.5, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.0, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=10, + ignored=10, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.6, + points_in_a_row=5, + + # Auto stop + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=2, + reward_consume_time=1, # Very long response time at the beginning + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_final = Stage( + name="final", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_sum = 0.45, p_ratio = [8:1] --> [8:1], [6:1], [3:1], [1:1] + base_reward_sum=0.45, + reward_family=1, + reward_pairs_n=4, + + # block = [10, 20, 5] (mean ~ 33 trials) + block_min=20, + block_max=60, + block_beta=20, + block_min_reward=0, + + # ITI [1, 15, 5] --> [1, 30, 5] (mean ~ 6.0 s, not included 1-s no lick window before ITI start) + iti_min=1, + iti_max=30, + iti_beta=3, + + delay_min=1, + delay_max=1, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.0, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=False, # Turn off auto water + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=10, + ignored=10, + multiplier=0.5, 
+ + # Auto block + advanced_block_auto=AdvancedBlockMode.OFF, # Turn off auto block + switch_thr=0.6, + points_in_a_row=5, + + # Auto stop + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=1, + reward_consume_time=3, + uncoupled_reward="", + ) + ) +) + +# graduated same is identical to final but an absorbing state +s_graduated = Stage( + name="graduated", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_sum = 0.45, p_ratio = [8:1] --> [8:1], [6:1], [3:1], [1:1] + base_reward_sum=0.45, + reward_family=1, + reward_pairs_n=4, + + # block = [10, 20, 5] (mean ~ 33 trials) + block_min=20, + block_max=60, + block_beta=20, + block_min_reward=0, + + # ITI [1, 15, 5] --> [1, 30, 5] (mean ~ 6.0 s, not included 1-s no lick window before ITI start) + iti_min=1, + iti_max=30, + iti_beta=3, + + delay_min=1, + delay_max=1, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.0, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=False, # Turn off auto water + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=10, + ignored=10, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.OFF, # Turn off auto block + switch_thr=0.6, + points_in_a_row=5, + + # Auto stop + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=1, + reward_consume_time=3, + uncoupled_reward="", + ) + ) +) + + +# --- STAGE TRANSITIONS --- + +# warmup +@StageTransition +def st_stage_1_warmup_to_stage_1(metrics: DynamicForagingMetrics) -> bool: + return metrics.session_at_current_stage >= 1 + + +@StageTransition +def st_stage_1_warmup_to_stage_2(metrics: 
DynamicForagingMetrics) -> bool: + return metrics.finished_trials[-1] >= 200 and metrics.foraging_efficiency[-1] >= 0.6 + + +# stage 1 +@StageTransition +def st_stage_1_to_stage_2(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] >= 0.6 and metrics.finished_trials[-1] >= 200 + + +# stage 2 +@StageTransition +def st_stage_2_to_stage_3(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] >= 0.65 and metrics.finished_trials[-1] >= 300 + + +@StageTransition +def st_stage_2_to_stage_1(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] < 0.55 or metrics.finished_trials[-1] < 200 + + +# stage 3 +@StageTransition +def st_stage_3_to_final(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] >= 0.7 and metrics.finished_trials[-1] >= 400 + + +@StageTransition +def st_stage_3_to_stage_2(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] < 0.65 or metrics.finished_trials[-1] < 300 + + +# stage final +@StageTransition +def st_final_to_graduated(metrics: DynamicForagingMetrics) -> bool: + return metrics.session_total >= 10 and \ + metrics.session_at_current_stage >= 5 and \ + np.mean(metrics.finished_trials[-5:]) >= 450 and \ + np.mean(metrics.foraging_efficiency[-5:]) >= 0.7 + + +@StageTransition +def st_final_to_stage_3(metrics: DynamicForagingMetrics) -> bool: + return np.mean(metrics.foraging_efficiency[-5:]) < 0.60 or np.mean(metrics.finished_trials[-5:]) < 300 + +# --- Curriculum --- +def construct_coupled_baiting_1p0_curriculum() -> Curriculum: + cb_curriculum = create_curriculum("CoupledBaiting2p3Curriculum", __version__, [AindDynamicForagingTaskLogic])() + # add stages + cb_curriculum.add_stage(s_stage_1_warmup) + cb_curriculum.add_stage(s_stage_1) + cb_curriculum.add_stage(s_stage_2) + cb_curriculum.add_stage(s_stage_3) + cb_curriculum.add_stage(s_final) + cb_curriculum.add_stage(s_graduated) + + # add stage 
transitions + # warmup + cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_2, + st_stage_1_warmup_to_stage_2) # add 2 first to take priority + cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_1, st_stage_1_warmup_to_stage_1) + # stage 1 + cb_curriculum.add_stage_transition(s_stage_1, s_stage_2, st_stage_1_to_stage_2) + # stage 2 + cb_curriculum.add_stage_transition(s_stage_2, s_stage_3, st_stage_2_to_stage_3) + cb_curriculum.add_stage_transition(s_stage_2, s_stage_1, st_stage_2_to_stage_1) + # stage 3 + cb_curriculum.add_stage_transition(s_stage_3, s_final, st_stage_3_to_final) + cb_curriculum.add_stage_transition(s_stage_3, s_stage_2, st_stage_3_to_stage_2) + # final + cb_curriculum.add_stage_transition(s_final, s_graduated, st_final_to_graduated) + cb_curriculum.add_stage_transition(s_final, s_stage_3, st_final_to_stage_3) + + return cb_curriculum diff --git a/src/aind_behavior_dynamic_foraging/curricula/metrics.py b/src/aind_behavior_dynamic_foraging/curricula/metrics.py new file mode 100644 index 0000000..d20929e --- /dev/null +++ b/src/aind_behavior_dynamic_foraging/curricula/metrics.py @@ -0,0 +1,13 @@ +from aind_behavior_curriculum import Metrics + + +from typing import List + + +class DynamicForagingMetrics(Metrics): + """ Metrics for dynamic foraging + """ + foraging_efficiency: List[float] # Full history of foraging efficiency + finished_trials: List[int] # Full history of finished trials + session_total: int + session_at_current_stage: int diff --git a/src/aind_behavior_dynamic_foraging/curricula/uncoupled_baiting_2p3.py b/src/aind_behavior_dynamic_foraging/curricula/uncoupled_baiting_2p3.py new file mode 100644 index 0000000..5ae52e4 --- /dev/null +++ b/src/aind_behavior_dynamic_foraging/curricula/uncoupled_baiting_2p3.py @@ -0,0 +1,482 @@ +from aind_behavior_curriculum import ( + Curriculum, + Stage, + StageTransition, + create_curriculum +) + +from aind_behavior_dynamic_foraging import ( + AindDynamicForagingTaskParameters, + 
AutoWaterMode, + AdvancedBlockMode, + AindDynamicForagingTaskLogic, + DynamicForagingMetrics +) + +from typing import List, Literal +import numpy as np + +__version__ = "0.2.3" + +# --- Stages --- +s_stage_1_warmup = Stage( + name="stage_1_warmup", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='on', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_sum = 0.8, p_ratio = [1:0] + base_reward_sum=0.8, + reward_family=3, + reward_pairs_n=1, + + # block = [10, 30, 10] + block_min=10, + block_max=30, + block_beta=10, + block_min_reward=0, + + # Small ITI at the beginning to better engage the animal + iti_min=1, + iti_max=7, + iti_beta=3, + + # Add a (fixed) small delay period at the beginning # TODO: automate delay period + delay_min=0.1, + delay_max=0.1, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.0, + right_value_volume=4.0, + left_value_volume=4.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=3, + ignored=3, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop; set stop_ignores to a large number at the beginning + max_trial=1000, + max_time=75, + auto_stop_ignore_win=20000, + auto_stop_ignore_ratio_threshold=1, + + # -- Miscs -- + response_time=5, # Very long response time at the beginning + reward_consume_time=1, # Shorter RewardConsumeTime to increase the number of trials + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_stage_1 = Stage( + name="stage_1", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_sum = 0.8, p_ratio = [1:0] + 
base_reward_sum=0.8, + reward_family=3, + reward_pairs_n=1, + + # block = [10, 20, 5] + block_min=10, + block_max=30, + block_beta=10, + block_min_reward=0, + + # Small ITI at the beginning to better engage the animal + iti_min=1, + iti_max=7, + iti_beta=3, + + # Add a (fixed) small delay period at the beginning # TODO: automate delay period + delay_min=0.1, + delay_max=0.1, + delay_beta=0, + + # Decrease water size to 2.0 from now on + reward_delay=0.0, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=5, + ignored=5, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop; set stop_ignores to a large number at the beginning + max_trial=1000, + max_time=75, + auto_stop_ignore_win=20000, + auto_stop_ignore_ratio_threshold=1, + + # -- Miscs -- + response_time=5, + reward_consume_time=1, # Very long response time at the beginning + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_stage_2 = Stage( + name="stage_2", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_ratio [1:0] -> [8:1] + base_reward_sum=0.8, + reward_family=1, + reward_pairs_n=1, + + # block length [10, 30, 10] --> [20, 35, 20] + block_min=20, + block_max=35, + block_beta=10, + block_min_reward=0, + + # ITI [1, 7, 3] --> [1, 10, 3] + iti_min=1, + iti_max=10, + iti_beta=3, + + delay_min=0.3, + delay_max=0.3, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.0, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=7, + ignored=7, + multiplier=0.5, + + # Auto 
block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=3, # Decrease response time: 5 --> 3 + reward_consume_time=1, + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_stage_3 = Stage( + name="stage_3", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + base_reward_sum=0.8, + reward_family=1, + reward_pairs_n=1, + + block_min=20, + block_max=35, + block_beta=10, + block_min_reward=0, + + # ITI [1, 10, 3] --> [1, 15, 3] + iti_min=1, + iti_max=15, + iti_beta=3, + + # Add a (fixed) small delay period at the beginning # TODO: automate delay period + delay_min=0.5, + delay_max=0.5, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.0, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Turn on auto water for the first day after switching to uncoupled task + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=10, + ignored=10, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.OFF, # Turn off auto block + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=2, # Decrease response time: 3 --> 2 + reward_consume_time=1, # Very long response time at the beginning + uncoupled_reward="0.1, 0.4, 0.7", + ) + ) +) + +s_final = Stage( + name="final", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + 
warm_windowsize=20, + + base_reward_sum=0.8, + reward_family=1, + reward_pairs_n=1, + + + block_min=20, + block_max=35, + block_beta=10, + block_min_reward=0, + + iti_min=1, + iti_max=30, + iti_beta=3, + + delay_min=1, + delay_max=1, + delay_beta=0, + + reward_delay=0.0, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=False, # Turn off auto water + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=10, + ignored=10, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.OFF, # Turn off auto block + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=1, + reward_consume_time=3, + uncoupled_reward="0.1, 0.4, 0.7", + ) + ) +) + +# graduated same is identical to final but an absorbing state +s_graduated = Stage( + name="graduated", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + base_reward_sum=0.8, + reward_family=1, + reward_pairs_n=1, + + block_min=20, + block_max=35, + block_beta=10, + block_min_reward=0, + + iti_min=1, + iti_max=30, + iti_beta=3, + + delay_min=1, + delay_max=1, + delay_beta=0, + + reward_delay=0.0, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=False, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=10, + ignored=10, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.OFF, + switch_thr=0.5, + points_in_a_row=5, + + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=1, + reward_consume_time=3, + uncoupled_reward="0.1, 0.4, 0.7", + ) + ) +) + + +# --- STAGE TRANSITIONS 
--- + +# warmup +@StageTransition +def st_stage_1_warmup_to_stage_1(metrics: DynamicForagingMetrics) -> bool: + return metrics.session_at_current_stage >= 1 + + +@StageTransition +def st_stage_1_warmup_to_stage_2(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] >= 0.6 and metrics.finished_trials[-1] >= 200 + + +# stage 1 +@StageTransition +def st_stage_1_to_stage_2(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] >= 0.6 and metrics.finished_trials[-1] >= 200 + + +# stage 2 +@StageTransition +def st_stage_2_to_stage_3(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] >= 0.65 and\ + metrics.finished_trials[-1] >= 300 and metrics.session_at_current_stage >= 2 + + +@StageTransition +def st_stage_2_to_stage_1(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] < 0.55 or metrics.finished_trials[-1] < 200 + + +# stage 3 +@StageTransition +def st_stage_3_to_final(metrics: DynamicForagingMetrics) -> bool: + return metrics.session_at_current_stage >= 1 + + +# stage final +@StageTransition +def st_final_to_graduated(metrics: DynamicForagingMetrics) -> bool: + return metrics.session_total >= 10 and \ + metrics.session_at_current_stage >= 5 and \ + np.mean(metrics.finished_trials[-5:]) >= 450 and \ + np.mean(metrics.foraging_efficiency[-5:]) >= 0.7 + + +@StageTransition +def st_final_to_stage_3(metrics: DynamicForagingMetrics) -> bool: + return np.mean(metrics.foraging_efficiency[-5:]) < 0.60 or np.mean(metrics.finished_trials[-5:]) < 300 + + +# --- Curriculum --- +def construct_uncoupled_baiting_2p3_curriculum() -> Curriculum: + + cb_curriculum = create_curriculum("UnCoupledBaiting2p3Curriculum", __version__, [AindDynamicForagingTaskLogic])() + + # add stages + cb_curriculum.add_stage(s_stage_1_warmup) + cb_curriculum.add_stage(s_stage_1) + cb_curriculum.add_stage(s_stage_2) + cb_curriculum.add_stage(s_stage_3) + cb_curriculum.add_stage(s_final) + 
cb_curriculum.add_stage(s_graduated) + + # add stage transitions + # warmup + cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_2, st_stage_1_warmup_to_stage_2) # first to set priority + cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_1, st_stage_1_warmup_to_stage_1) + # stage 1 + cb_curriculum.add_stage_transition(s_stage_1, s_stage_2, st_stage_1_to_stage_2) + # stage 2 + cb_curriculum.add_stage_transition(s_stage_2, s_stage_3, st_stage_2_to_stage_3) + cb_curriculum.add_stage_transition(s_stage_2, s_stage_1, st_stage_2_to_stage_1) + # stage 3 + cb_curriculum.add_stage_transition(s_stage_3, s_final, st_stage_3_to_final) + # final + cb_curriculum.add_stage_transition(s_final, s_graduated, st_final_to_graduated) + cb_curriculum.add_stage_transition(s_final, s_stage_3, st_final_to_stage_3) + + + return cb_curriculum + diff --git a/src/aind_behavior_dynamic_foraging/curricula/uncoupled_no_baiting_2p3p1rwdDelay159.py b/src/aind_behavior_dynamic_foraging/curricula/uncoupled_no_baiting_2p3p1rwdDelay159.py new file mode 100644 index 0000000..897e257 --- /dev/null +++ b/src/aind_behavior_dynamic_foraging/curricula/uncoupled_no_baiting_2p3p1rwdDelay159.py @@ -0,0 +1,558 @@ +from aind_behavior_curriculum import ( + Curriculum, + Stage, + StageTransition, + create_curriculum +) + +from aind_behavior_dynamic_foraging import ( + AindDynamicForagingTaskParameters, + AutoWaterMode, + AdvancedBlockMode, + AindDynamicForagingTaskLogic, + DynamicForagingMetrics +) + +from typing import List, Literal +import numpy as np + +__version__ = "0.2.3" + +# --- Stages --- +s_stage_1_warmup = Stage( + name="stage_1_warmup", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup ON + warmup='on', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_sum = 0.8, p_ratio = [1:0] + base_reward_sum=0.8, + reward_family=3, + reward_pairs_n=1, + + # block = [10, 30, 10] + 
block_min=10, + block_max=30, + block_beta=10, + block_min_reward=0, + + # Small ITI at the beginning to better engage the animal + iti_min=1, + iti_max=7, + iti_beta=3, + + # Add a (fixed) small delay period at the beginning # TODO: automate delay period + delay_min=0, + delay_max=0, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.1, + right_value_volume=4.0, + left_value_volume=4.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=3, + ignored=3, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop; set stop_ignores to a large number at the beginning + max_trial=1000, + max_time=75, + auto_stop_ignore_win=20000, + auto_stop_ignore_ratio_threshold=1, + + # -- Miscs -- + response_time=5, # Very long response time at the beginning + reward_consume_time=1, # Shorter RewardConsumeTime to increase the number of trials + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_stage_1 = Stage( + name="stage_1", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_sum = 0.8, p_ratio = [1:0] + base_reward_sum=0.8, + reward_family=3, + reward_pairs_n=1, + + # block = [10, 30, 10] + block_min=10, + block_max=30, + block_beta=10, + block_min_reward=0, + + # Small ITI at the beginning to better engage the animal + iti_min=1, + iti_max=7, + iti_beta=3, + + # Add a (fixed) small delay period at the beginning # TODO: automate delay period + delay_min=0, + delay_max=0, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.1, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + 
unrewarded=5, + ignored=5, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop; set stop_ignores to a large number at the beginning + max_trial=1000, + max_time=75, + auto_stop_ignore_win=20000, + auto_stop_ignore_ratio_threshold=1, + + # -- Miscs -- + response_time=5, + reward_consume_time=1, + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_stage_2 = Stage( + name="stage_2", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # p_ratio [1:0] -> [8:1] + base_reward_sum=0.8, + reward_family=1, + reward_pairs_n=1, + + # block length [10, 30, 10] --> [20, 35, 20] + block_min=20, + block_max=35, + block_beta=10, + block_min_reward=0, + + # ITI [1, 7, 3] --> [1, 10, 3] + iti_min=1, + iti_max=10, + iti_beta=3, + + # Delay 0 --> 0.25 + delay_min=0.25, + delay_max=0.25, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.1, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=7, + ignored=7, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=1.5, # Decrease response time: 5 --> 1.5 + reward_consume_time=1, + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_stage_3 = Stage( + name="stage_3", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + # 
p_ratio [1:0] -> [8:1] + base_reward_sum=0.8, + reward_family=1, + reward_pairs_n=1, + + # block length [10, 30, 10] --> [20, 35, 20] + block_min=20, + block_max=35, + block_beta=10, + block_min_reward=0, + + # ITI [1, 7, 3] --> [1, 10, 3] + iti_min=1, + iti_max=10, + iti_beta=3, + + # Delay 0.5 --> 1.0 + delay_min=1, + delay_max=1, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.1, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=10, + ignored=10, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.NOW, + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=1.5, # Decrease response time: 5 --> 1.5 + reward_consume_time=1, + uncoupled_reward="", # Only valid in uncoupled task + ) + ) +) + +s_stage_4 = Stage( + name="stage_4", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + base_reward_sum=0.8, + reward_family=1, + reward_pairs_n=1, + + # Final block length for uncoupled task + block_min=20, + block_max=35, + block_beta=10, + block_min_reward=0, + + # ITI [1, 10, 3] --> [1, 15, 3] + iti_min=1, + iti_max=15, + iti_beta=3, + + delay_min=1, + delay_max=1, + delay_beta=0, + + # Reward size and reward delay + reward_delay=0.15, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=True, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=10, + ignored=10, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.OFF, # Turn off auto block + switch_thr=0.5, + points_in_a_row=5, + + # Auto stop + 
max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=1.5, + reward_consume_time=1, + uncoupled_reward="0.1, 0.5, 0.9", # Only valid in uncoupled task + ) + ) +) + +s_final = Stage( + name="final", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + base_reward_sum=0.8, + reward_family=1, + reward_pairs_n=1, + + block_min=20, + block_max=35, + block_beta=10, + block_min_reward=0, + + iti_min=2, + iti_max=15, + iti_beta=3, + + delay_min=1, + delay_max=1, + delay_beta=0, + + reward_delay=0.2, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto water + auto_reward=False, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=10, + ignored=10, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.OFF, + switch_thr=0.5, + points_in_a_row=5, + + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=1.5, + reward_consume_time=1, + uncoupled_reward="0.1, 0.5, 0.9", # Only valid in uncoupled task + ) + ) +) + +# graduated same is identical to final but an absorbing state +s_graduated = Stage( + name="graduated", + task=AindDynamicForagingTaskLogic( + task_parameters=AindDynamicForagingTaskParameters( + + # Warmup OFF + warmup='off', + warm_min_trial=50, + warm_max_choice_ratio_bias=0.1, + warm_min_finish_ratio=0.8, + warm_windowsize=20, + + base_reward_sum=0.8, + reward_family=1, + reward_pairs_n=1, + + block_min=20, + block_max=35, + block_beta=10, + block_min_reward=0, + + iti_min=2, + iti_max=15, + iti_beta=3, + + delay_min=1, + delay_max=1, + delay_beta=0, + + reward_delay=0.2, + right_value_volume=2.0, + left_value_volume=2.0, + + # -- Within session automation -- + # Auto 
water + auto_reward=False, + auto_water_type=AutoWaterMode.NATURAL, + unrewarded=10, + ignored=10, + multiplier=0.5, + + # Auto block + advanced_block_auto=AdvancedBlockMode.OFF, + switch_thr=0.5, + points_in_a_row=5, + + max_trial=1000, + max_time=75, + auto_stop_ignore_win=30, + auto_stop_ignore_ratio_threshold=.83, + + # -- Miscs -- + response_time=1.5, + reward_consume_time=1, + uncoupled_reward="0.1, 0.5, 0.9", # Only valid in uncoupled task + ) + ) +) + + +# --- STAGE TRANSITIONS --- + +# warmup +@StageTransition +def st_stage_1_warmup_to_stage_1(metrics: DynamicForagingMetrics) -> bool: + return metrics.session_at_current_stage >= 1 + + +@StageTransition +def st_stage_1_warmup_to_stage_2(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] >= 0.6 and metrics.finished_trials[-1] >= 200 + + +# stage 1 +@StageTransition +def st_stage_1_to_stage_2(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] >= 0.6 and metrics.finished_trials[-1] >= 200 + + +# stage 2 +@StageTransition +def st_stage_2_to_stage_3(metrics: DynamicForagingMetrics) -> bool: + return metrics.session_at_current_stage >= 3 + + +@StageTransition +def st_stage_2_to_stage_1(metrics: DynamicForagingMetrics) -> bool: + return metrics.foraging_efficiency[-1] < 0.55 or metrics.finished_trials[-1] < 200 + + +# stage 3 +@StageTransition +def st_stage_3_to_stage_4(metrics: DynamicForagingMetrics) -> bool: + return metrics.finished_trials[-1] >= 300 and \ + metrics.foraging_efficiency[-1] >= 0.65 and \ + metrics.session_at_current_stage >= 3 + +@StageTransition +def st_stage_3_to_stage_2(metrics: DynamicForagingMetrics) -> bool: + return (metrics.finished_trials[-1] < 250 or metrics.foraging_efficiency[-1] < 0.50) and \ + metrics.session_at_current_stage >= 3 + +@StageTransition +def st_stage_4_to_final(metrics: DynamicForagingMetrics) -> bool: + return metrics.session_at_current_stage >= 2 + +# stage final +@StageTransition +def 
st_final_to_graduated(metrics: DynamicForagingMetrics) -> bool: + return metrics.session_total >= 10 and \ + metrics.session_at_current_stage >= 5 and \ + np.mean(metrics.finished_trials[-5:]) >= 400 and \ + np.mean(metrics.foraging_efficiency[-5:]) >= 0.65 + + +@StageTransition +def st_final_to_stage_4(metrics: DynamicForagingMetrics) -> bool: + return np.mean(metrics.finished_trials[-5:]) < 250 or np.mean(metrics.foraging_efficiency[-5:]) < 0.60 + +# --Curriculum-- +def construct_uncoupled_no_baiting_2p3p1_reward_delay_curriculum() -> Curriculum: + + cb_curriculum = create_curriculum("UncoupledNoBaiting2p3p1RewardDelayCurriculum", + __version__, + [AindDynamicForagingTaskLogic])() + + # add stages + cb_curriculum.add_stage(s_stage_1_warmup) + cb_curriculum.add_stage(s_stage_1) + cb_curriculum.add_stage(s_stage_2) + cb_curriculum.add_stage(s_stage_3) + cb_curriculum.add_stage(s_final) + cb_curriculum.add_stage(s_graduated) + + # add stage transitions + # warmup + cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_2, st_stage_1_warmup_to_stage_2) + cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_1, st_stage_1_warmup_to_stage_1) + # stage 1 + cb_curriculum.add_stage_transition(s_stage_1, s_stage_2, st_stage_1_to_stage_2) + # stage 2 + cb_curriculum.add_stage_transition(s_stage_2, s_stage_3, st_stage_2_to_stage_3) + cb_curriculum.add_stage_transition(s_stage_2, s_stage_1, st_stage_2_to_stage_1) + # stage 3 + cb_curriculum.add_stage_transition(s_stage_3, s_stage_4, st_stage_3_to_stage_4) + cb_curriculum.add_stage_transition(s_stage_3, s_stage_2, st_stage_3_to_stage_2) + # stage 4 + cb_curriculum.add_stage_transition(s_stage_4, s_final, st_stage_4_to_final) + # final + cb_curriculum.add_stage_transition(s_final, s_graduated, st_final_to_graduated) + cb_curriculum.add_stage_transition(s_final, s_stage_4, st_final_to_stage_4) + + return cb_curriculum + diff --git a/uv.lock b/uv.lock index ae7cdbd..e0ffeb8 100644 --- a/uv.lock +++ b/uv.lock @@ -71,7 
+71,7 @@ docs = [ [package.metadata] requires-dist = [ - { name = "aind-behavior-services", specifier = ">=0.13.1" }, + { name = "aind-behavior-services", git = "https://github.com/AllenNeuralDynamics/Aind.Behavior.Services.git?rev=v0.13.2rc1" }, { name = "contraqctor", marker = "extra == 'data'", specifier = ">=0.5.3" }, { name = "pydantic-settings" }, ] @@ -94,8 +94,8 @@ docs = [ [[package]] name = "aind-behavior-services" -version = "0.13.1" -source = { registry = "https://pypi.org/simple" } +version = "0.13.2rc1" +source = { git = "https://github.com/AllenNeuralDynamics/Aind.Behavior.Services.git?rev=v0.13.2rc1#ff8accd8d1b09b358d665f7bf0fefaeaf7af6db9" } dependencies = [ { name = "aind-behavior-curriculum" }, { name = "gitpython" }, @@ -103,10 +103,6 @@ dependencies = [ { name = "pydantic" }, { name = "semver" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/40/0c/e72979c641d8d32fd4e00591c16f20a852fde10014385cf9f338ac57cb7b/aind_behavior_services-0.13.1.tar.gz", hash = "sha256:8c5db2a1694b17ba2ff8552d1331ee77114753de540b7c42edfe24930183c04a", size = 27466, upload-time = "2026-02-23T02:54:51.953Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e6/93/d53dc7c9f603ec87b7d5aa521f3ff09ff2376385f8a82ae9a62db6719373/aind_behavior_services-0.13.1-py3-none-any.whl", hash = "sha256:01a77ab3cc16849d3eaeaf3c541f5126d19d2a8df7fdec614dc4c895e21379f5", size = 37453, upload-time = "2026-02-23T02:54:50.823Z" }, -] [[package]] name = "alabaster" From c9f726f75086092b2823784ffceb1d8111a81b5d Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 25 Feb 2026 17:06:00 -0800 Subject: [PATCH 15/21] removes curricula --- .../curricula/coupled_baiting_2p3.py | 491 --------------- .../curricula/metrics.py | 13 - .../curricula/uncoupled_baiting_2p3.py | 482 --------------- .../uncoupled_no_baiting_2p3p1rwdDelay159.py | 558 ------------------ .../coupled_trial_generator.py | 4 +- 5 files changed, 2 insertions(+), 1546 deletions(-) delete mode 100644 
src/aind_behavior_dynamic_foraging/curricula/coupled_baiting_2p3.py delete mode 100644 src/aind_behavior_dynamic_foraging/curricula/metrics.py delete mode 100644 src/aind_behavior_dynamic_foraging/curricula/uncoupled_baiting_2p3.py delete mode 100644 src/aind_behavior_dynamic_foraging/curricula/uncoupled_no_baiting_2p3p1rwdDelay159.py diff --git a/src/aind_behavior_dynamic_foraging/curricula/coupled_baiting_2p3.py b/src/aind_behavior_dynamic_foraging/curricula/coupled_baiting_2p3.py deleted file mode 100644 index 80c1e58..0000000 --- a/src/aind_behavior_dynamic_foraging/curricula/coupled_baiting_2p3.py +++ /dev/null @@ -1,491 +0,0 @@ -from aind_behavior_curriculum import ( - Stage, - StageTransition, - create_curriculum, - Curriculum -) - -from aind_behavior_dynamic_foraging.task_logic.trial_generators import CoupledTrialGeneratorSpec -from aind_behavior_dynamic_foraging.task_logic.trial_generators.coupled_trial_generator import ( - RewardProbabilityParameters, -) - -from typing import List, Literal -import numpy as np - -__version__ = "0.2.3" - -# --- Stages --- -s_stage_1_warmup = Stage( - name="stage_1_warmup", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup ON - warmup='on', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_sum = 0.8, p_ratio = [1:0] - base_reward_sum=0.8, - reward_family=3, - reward_pairs_n=1, - - # block = [10, 20, 5] - block_min=10, - block_max=20, - block_beta=5, - block_min_reward=0, - - # Small ITI at the beginning to better engage the animal - iti_min=1, - iti_max=7, - iti_beta=3, - - # Add a (fixed) small delay period at the beginning # TODO: automate delay period - delay_min=0.1, - delay_max=0.1, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.0, - right_value_volume=4.0, - left_value_volume=4.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - 
auto_water_type=AutoWaterMode.NATURAL, - unrewarded=3, - ignored=3, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop; set stop_ignores to a large number at the beginning - max_trial=1000, - max_time=75, - auto_stop_ignore_win=20000, - auto_stop_ignore_ratio_threshold=1, - - # -- Miscs -- - response_time=5, # Very long response time at the beginning - reward_consume_time=1, # Shorter RewardConsumeTime to increase the number of trials - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_stage_1 = Stage( - name="stage_1", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_sum = 0.8, p_ratio = [1:0] - base_reward_sum=0.8, - reward_family=3, - reward_pairs_n=1, - - # block = [10, 20, 5] - block_min=10, - block_max=20, - block_beta=5, - block_min_reward=0, - - # Small ITI at the beginning to better engage the animal - iti_min=1, - iti_max=7, - iti_beta=3, - - # Add a (fixed) small delay period at the beginning # TODO: automate delay period - delay_min=0.1, - delay_max=0.1, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.0, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=5, - ignored=5, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop; set stop_ignores to a large number at the beginning - max_trial=1000, - max_time=75, - auto_stop_ignore_win=20000, - auto_stop_ignore_ratio_threshold=1, - - # -- Miscs -- - response_time=5, - reward_consume_time=1, - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_stage_2 = Stage( - name="stage_2", - 
task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_sum = 0.8 --> 0.6, p_ratio = [1:0] -> [8:1] - base_reward_sum=0.6, - reward_family=1, - reward_pairs_n=1, - - # block length [10, 20, 5] --> [10, 40, 10] - block_min=10, - block_max=40, - block_beta=10, - block_min_reward=0, - - # ITI [1, 7, 3] --> [1, 10, 5] - iti_min=1, - iti_max=10, - iti_beta=3, - - # Add a (fixed) small delay period at the beginning # TODO: automate delay period - delay_min=0.3, - delay_max=0.3, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.0, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=7, - ignored=7, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.6, # Increase auto block switch threshold: 0.5 --> 0.6 - points_in_a_row=5, # Auto stop on ignores-in-a-row starts to take effect - - # Auto stop - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=3, - reward_consume_time=1, - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_stage_3 = Stage( - name="stage_3", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_sum = 0.6 --> 0.45, p_ratio still [8:1] - base_reward_sum=0.45, - reward_family=1, - reward_pairs_n=1, - - # block length [10, 40, 10] --> [20, 60, 20] - block_min=20, - block_max=60, - block_beta=20, - block_min_reward=0, - - # ITI [2, 10, 5] --> [3, 15, 5] - iti_min=1, - iti_max=15, - iti_beta=3, - - # Add a (fixed) small delay 
period at the beginning # TODO: automate delay period - delay_min=0.5, - delay_max=0.5, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.0, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=10, - ignored=10, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.6, - points_in_a_row=5, - - # Auto stop - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=2, - reward_consume_time=1, # Very long response time at the beginning - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_final = Stage( - name="final", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_sum = 0.45, p_ratio = [8:1] --> [8:1], [6:1], [3:1], [1:1] - base_reward_sum=0.45, - reward_family=1, - reward_pairs_n=4, - - # block = [10, 20, 5] (mean ~ 33 trials) - block_min=20, - block_max=60, - block_beta=20, - block_min_reward=0, - - # ITI [1, 15, 5] --> [1, 30, 5] (mean ~ 6.0 s, not included 1-s no lick window before ITI start) - iti_min=1, - iti_max=30, - iti_beta=3, - - delay_min=1, - delay_max=1, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.0, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=False, # Turn off auto water - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=10, - ignored=10, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.OFF, # Turn off auto block - switch_thr=0.6, - points_in_a_row=5, - - # Auto stop - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # 
-- Miscs -- - response_time=1, - reward_consume_time=3, - uncoupled_reward="", - ) - ) -) - -# graduated same is identical to final but an absorbing state -s_graduated = Stage( - name="graduated", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_sum = 0.45, p_ratio = [8:1] --> [8:1], [6:1], [3:1], [1:1] - base_reward_sum=0.45, - reward_family=1, - reward_pairs_n=4, - - # block = [10, 20, 5] (mean ~ 33 trials) - block_min=20, - block_max=60, - block_beta=20, - block_min_reward=0, - - # ITI [1, 15, 5] --> [1, 30, 5] (mean ~ 6.0 s, not included 1-s no lick window before ITI start) - iti_min=1, - iti_max=30, - iti_beta=3, - - delay_min=1, - delay_max=1, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.0, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=False, # Turn off auto water - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=10, - ignored=10, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.OFF, # Turn off auto block - switch_thr=0.6, - points_in_a_row=5, - - # Auto stop - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=1, - reward_consume_time=3, - uncoupled_reward="", - ) - ) -) - - -# --- STAGE TRANSITIONS --- - -# warmup -@StageTransition -def st_stage_1_warmup_to_stage_1(metrics: DynamicForagingMetrics) -> bool: - return metrics.session_at_current_stage >= 1 - - -@StageTransition -def st_stage_1_warmup_to_stage_2(metrics: DynamicForagingMetrics) -> bool: - return metrics.finished_trials[-1] >= 200 and metrics.foraging_efficiency[-1] >= 0.6 - - -# stage 1 -@StageTransition -def st_stage_1_to_stage_2(metrics: DynamicForagingMetrics) -> bool: - return 
metrics.foraging_efficiency[-1] >= 0.6 and metrics.finished_trials[-1] >= 200 - - -# stage 2 -@StageTransition -def st_stage_2_to_stage_3(metrics: DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] >= 0.65 and metrics.finished_trials[-1] >= 300 - - -@StageTransition -def st_stage_2_to_stage_1(metrics: DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] < 0.55 or metrics.finished_trials[-1] < 200 - - -# stage 3 -@StageTransition -def st_stage_3_to_final(metrics: DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] >= 0.7 and metrics.finished_trials[-1] >= 400 - - -@StageTransition -def st_stage_3_to_stage_2(metrics: DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] < 0.65 or metrics.finished_trials[-1] < 300 - - -# stage final -@StageTransition -def st_final_to_graduated(metrics: DynamicForagingMetrics) -> bool: - return metrics.session_total >= 10 and \ - metrics.session_at_current_stage >= 5 and \ - np.mean(metrics.finished_trials[-5:]) >= 450 and \ - np.mean(metrics.foraging_efficiency[-5:]) >= 0.7 - - -@StageTransition -def st_final_to_stage_3(metrics: DynamicForagingMetrics) -> bool: - return np.mean(metrics.foraging_efficiency[-5:]) < 0.60 or np.mean(metrics.finished_trials[-5:]) < 300 - -# --- Curriculum --- -def construct_coupled_baiting_1p0_curriculum() -> Curriculum: - cb_curriculum = create_curriculum("CoupledBaiting2p3Curriculum", __version__, [AindDynamicForagingTaskLogic])() - # add stages - cb_curriculum.add_stage(s_stage_1_warmup) - cb_curriculum.add_stage(s_stage_1) - cb_curriculum.add_stage(s_stage_2) - cb_curriculum.add_stage(s_stage_3) - cb_curriculum.add_stage(s_final) - cb_curriculum.add_stage(s_graduated) - - # add stage transitions - # warmup - cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_2, - st_stage_1_warmup_to_stage_2) # add 2 first to take priority - cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_1, 
st_stage_1_warmup_to_stage_1) - # stage 1 - cb_curriculum.add_stage_transition(s_stage_1, s_stage_2, st_stage_1_to_stage_2) - # stage 2 - cb_curriculum.add_stage_transition(s_stage_2, s_stage_3, st_stage_2_to_stage_3) - cb_curriculum.add_stage_transition(s_stage_2, s_stage_1, st_stage_2_to_stage_1) - # stage 3 - cb_curriculum.add_stage_transition(s_stage_3, s_final, st_stage_3_to_final) - cb_curriculum.add_stage_transition(s_stage_3, s_stage_2, st_stage_3_to_stage_2) - # final - cb_curriculum.add_stage_transition(s_final, s_graduated, st_final_to_graduated) - cb_curriculum.add_stage_transition(s_final, s_stage_3, st_final_to_stage_3) - - return cb_curriculum diff --git a/src/aind_behavior_dynamic_foraging/curricula/metrics.py b/src/aind_behavior_dynamic_foraging/curricula/metrics.py deleted file mode 100644 index d20929e..0000000 --- a/src/aind_behavior_dynamic_foraging/curricula/metrics.py +++ /dev/null @@ -1,13 +0,0 @@ -from aind_behavior_curriculum import Metrics - - -from typing import List - - -class DynamicForagingMetrics(Metrics): - """ Metrics for dynamic foraging - """ - foraging_efficiency: List[float] # Full history of foraging efficiency - finished_trials: List[int] # Full history of finished trials - session_total: int - session_at_current_stage: int diff --git a/src/aind_behavior_dynamic_foraging/curricula/uncoupled_baiting_2p3.py b/src/aind_behavior_dynamic_foraging/curricula/uncoupled_baiting_2p3.py deleted file mode 100644 index 5ae52e4..0000000 --- a/src/aind_behavior_dynamic_foraging/curricula/uncoupled_baiting_2p3.py +++ /dev/null @@ -1,482 +0,0 @@ -from aind_behavior_curriculum import ( - Curriculum, - Stage, - StageTransition, - create_curriculum -) - -from aind_behavior_dynamic_foraging import ( - AindDynamicForagingTaskParameters, - AutoWaterMode, - AdvancedBlockMode, - AindDynamicForagingTaskLogic, - DynamicForagingMetrics -) - -from typing import List, Literal -import numpy as np - -__version__ = "0.2.3" - -# --- Stages --- 
-s_stage_1_warmup = Stage( - name="stage_1_warmup", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='on', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_sum = 0.8, p_ratio = [1:0] - base_reward_sum=0.8, - reward_family=3, - reward_pairs_n=1, - - # block = [10, 30, 10] - block_min=10, - block_max=30, - block_beta=10, - block_min_reward=0, - - # Small ITI at the beginning to better engage the animal - iti_min=1, - iti_max=7, - iti_beta=3, - - # Add a (fixed) small delay period at the beginning # TODO: automate delay period - delay_min=0.1, - delay_max=0.1, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.0, - right_value_volume=4.0, - left_value_volume=4.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=3, - ignored=3, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop; set stop_ignores to a large number at the beginning - max_trial=1000, - max_time=75, - auto_stop_ignore_win=20000, - auto_stop_ignore_ratio_threshold=1, - - # -- Miscs -- - response_time=5, # Very long response time at the beginning - reward_consume_time=1, # Shorter RewardConsumeTime to increase the number of trials - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_stage_1 = Stage( - name="stage_1", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_sum = 0.8, p_ratio = [1:0] - base_reward_sum=0.8, - reward_family=3, - reward_pairs_n=1, - - # block = [10, 20, 5] - block_min=10, - block_max=30, - block_beta=10, - block_min_reward=0, - - # Small ITI at the beginning to better engage the 
animal - iti_min=1, - iti_max=7, - iti_beta=3, - - # Add a (fixed) small delay period at the beginning # TODO: automate delay period - delay_min=0.1, - delay_max=0.1, - delay_beta=0, - - # Decrease water size to 2.0 from now on - reward_delay=0.0, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=5, - ignored=5, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop; set stop_ignores to a large number at the beginning - max_trial=1000, - max_time=75, - auto_stop_ignore_win=20000, - auto_stop_ignore_ratio_threshold=1, - - # -- Miscs -- - response_time=5, - reward_consume_time=1, # Very long response time at the beginning - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_stage_2 = Stage( - name="stage_2", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_ratio [1:0] -> [8:1] - base_reward_sum=0.8, - reward_family=1, - reward_pairs_n=1, - - # block length [10, 30, 10] --> [20, 35, 20] - block_min=20, - block_max=35, - block_beta=10, - block_min_reward=0, - - # ITI [1, 7, 3] --> [1, 10, 3] - iti_min=1, - iti_max=10, - iti_beta=3, - - delay_min=0.3, - delay_max=0.3, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.0, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=7, - ignored=7, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # 
-- Miscs -- - response_time=3, # Decrease response time: 5 --> 3 - reward_consume_time=1, - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_stage_3 = Stage( - name="stage_3", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - base_reward_sum=0.8, - reward_family=1, - reward_pairs_n=1, - - block_min=20, - block_max=35, - block_beta=10, - block_min_reward=0, - - # ITI [1, 10, 3] --> [1, 15, 3] - iti_min=1, - iti_max=15, - iti_beta=3, - - # Add a (fixed) small delay period at the beginning # TODO: automate delay period - delay_min=0.5, - delay_max=0.5, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.0, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Turn on auto water for the first day after switching to uncoupled task - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=10, - ignored=10, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.OFF, # Turn off auto block - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=2, # Decrease response time: 3 --> 2 - reward_consume_time=1, # Very long response time at the beginning - uncoupled_reward="0.1, 0.4, 0.7", - ) - ) -) - -s_final = Stage( - name="final", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - base_reward_sum=0.8, - reward_family=1, - reward_pairs_n=1, - - - block_min=20, - block_max=35, - block_beta=10, - block_min_reward=0, - - iti_min=1, - iti_max=30, - iti_beta=3, - - delay_min=1, - 
delay_max=1, - delay_beta=0, - - reward_delay=0.0, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=False, # Turn off auto water - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=10, - ignored=10, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.OFF, # Turn off auto block - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=1, - reward_consume_time=3, - uncoupled_reward="0.1, 0.4, 0.7", - ) - ) -) - -# graduated same is identical to final but an absorbing state -s_graduated = Stage( - name="graduated", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - base_reward_sum=0.8, - reward_family=1, - reward_pairs_n=1, - - block_min=20, - block_max=35, - block_beta=10, - block_min_reward=0, - - iti_min=1, - iti_max=30, - iti_beta=3, - - delay_min=1, - delay_max=1, - delay_beta=0, - - reward_delay=0.0, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=False, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=10, - ignored=10, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.OFF, - switch_thr=0.5, - points_in_a_row=5, - - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=1, - reward_consume_time=3, - uncoupled_reward="0.1, 0.4, 0.7", - ) - ) -) - - -# --- STAGE TRANSITIONS --- - -# warmup -@StageTransition -def st_stage_1_warmup_to_stage_1(metrics: DynamicForagingMetrics) -> bool: - return metrics.session_at_current_stage >= 1 - - -@StageTransition -def st_stage_1_warmup_to_stage_2(metrics: 
DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] >= 0.6 and metrics.finished_trials[-1] >= 200 - - -# stage 1 -@StageTransition -def st_stage_1_to_stage_2(metrics: DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] >= 0.6 and metrics.finished_trials[-1] >= 200 - - -# stage 2 -@StageTransition -def st_stage_2_to_stage_3(metrics: DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] >= 0.65 and\ - metrics.finished_trials[-1] >= 300 and metrics.session_at_current_stage >= 2 - - -@StageTransition -def st_stage_2_to_stage_1(metrics: DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] < 0.55 or metrics.finished_trials[-1] < 200 - - -# stage 3 -@StageTransition -def st_stage_3_to_final(metrics: DynamicForagingMetrics) -> bool: - return metrics.session_at_current_stage >= 1 - - -# stage final -@StageTransition -def st_final_to_graduated(metrics: DynamicForagingMetrics) -> bool: - return metrics.session_total >= 10 and \ - metrics.session_at_current_stage >= 5 and \ - np.mean(metrics.finished_trials[-5:]) >= 450 and \ - np.mean(metrics.foraging_efficiency[-5:]) >= 0.7 - - -@StageTransition -def st_final_to_stage_3(metrics: DynamicForagingMetrics) -> bool: - return np.mean(metrics.foraging_efficiency[-5:]) < 0.60 or np.mean(metrics.finished_trials[-5:]) < 300 - - -# --- Curriculum --- -def construct_uncoupled_baiting_2p3_curriculum() -> Curriculum: - - cb_curriculum = create_curriculum("UnCoupledBaiting2p3Curriculum", __version__, [AindDynamicForagingTaskLogic])() - - # add stages - cb_curriculum.add_stage(s_stage_1_warmup) - cb_curriculum.add_stage(s_stage_1) - cb_curriculum.add_stage(s_stage_2) - cb_curriculum.add_stage(s_stage_3) - cb_curriculum.add_stage(s_final) - cb_curriculum.add_stage(s_graduated) - - # add stage transitions - # warmup - cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_2, st_stage_1_warmup_to_stage_2) # first to set priority - 
cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_1, st_stage_1_warmup_to_stage_1) - # stage 1 - cb_curriculum.add_stage_transition(s_stage_1, s_stage_2, st_stage_1_to_stage_2) - # stage 2 - cb_curriculum.add_stage_transition(s_stage_2, s_stage_3, st_stage_2_to_stage_3) - cb_curriculum.add_stage_transition(s_stage_2, s_stage_1, st_stage_2_to_stage_1) - # stage 3 - cb_curriculum.add_stage_transition(s_stage_3, s_final, st_stage_3_to_final) - # final - cb_curriculum.add_stage_transition(s_final, s_graduated, st_final_to_graduated) - cb_curriculum.add_stage_transition(s_final, s_stage_3, st_final_to_stage_3) - - - return cb_curriculum - diff --git a/src/aind_behavior_dynamic_foraging/curricula/uncoupled_no_baiting_2p3p1rwdDelay159.py b/src/aind_behavior_dynamic_foraging/curricula/uncoupled_no_baiting_2p3p1rwdDelay159.py deleted file mode 100644 index 897e257..0000000 --- a/src/aind_behavior_dynamic_foraging/curricula/uncoupled_no_baiting_2p3p1rwdDelay159.py +++ /dev/null @@ -1,558 +0,0 @@ -from aind_behavior_curriculum import ( - Curriculum, - Stage, - StageTransition, - create_curriculum -) - -from aind_behavior_dynamic_foraging import ( - AindDynamicForagingTaskParameters, - AutoWaterMode, - AdvancedBlockMode, - AindDynamicForagingTaskLogic, - DynamicForagingMetrics -) - -from typing import List, Literal -import numpy as np - -__version__ = "0.2.3" - -# --- Stages --- -s_stage_1_warmup = Stage( - name="stage_1_warmup", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup ON - warmup='on', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_sum = 0.8, p_ratio = [1:0] - base_reward_sum=0.8, - reward_family=3, - reward_pairs_n=1, - - # block = [10, 30, 10] - block_min=10, - block_max=30, - block_beta=10, - block_min_reward=0, - - # Small ITI at the beginning to better engage the animal - iti_min=1, - iti_max=7, - iti_beta=3, - - # Add a (fixed) 
small delay period at the beginning # TODO: automate delay period - delay_min=0, - delay_max=0, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.1, - right_value_volume=4.0, - left_value_volume=4.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=3, - ignored=3, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop; set stop_ignores to a large number at the beginning - max_trial=1000, - max_time=75, - auto_stop_ignore_win=20000, - auto_stop_ignore_ratio_threshold=1, - - # -- Miscs -- - response_time=5, # Very long response time at the beginning - reward_consume_time=1, # Shorter RewardConsumeTime to increase the number of trials - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_stage_1 = Stage( - name="stage_1", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_sum = 0.8, p_ratio = [1:0] - base_reward_sum=0.8, - reward_family=3, - reward_pairs_n=1, - - # block = [10, 30, 10] - block_min=10, - block_max=30, - block_beta=10, - block_min_reward=0, - - # Small ITI at the beginning to better engage the animal - iti_min=1, - iti_max=7, - iti_beta=3, - - # Add a (fixed) small delay period at the beginning # TODO: automate delay period - delay_min=0, - delay_max=0, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.1, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=5, - ignored=5, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop; set stop_ignores to a large number 
at the beginning - max_trial=1000, - max_time=75, - auto_stop_ignore_win=20000, - auto_stop_ignore_ratio_threshold=1, - - # -- Miscs -- - response_time=5, - reward_consume_time=1, - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_stage_2 = Stage( - name="stage_2", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_ratio [1:0] -> [8:1] - base_reward_sum=0.8, - reward_family=1, - reward_pairs_n=1, - - # block length [10, 30, 10] --> [20, 35, 20] - block_min=20, - block_max=35, - block_beta=10, - block_min_reward=0, - - # ITI [1, 7, 3] --> [1, 10, 3] - iti_min=1, - iti_max=10, - iti_beta=3, - - # Delay 0 --> 0.25 - delay_min=0.25, - delay_max=0.25, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.1, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=7, - ignored=7, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=1.5, # Decrease response time: 5 --> 1.5 - reward_consume_time=1, - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_stage_3 = Stage( - name="stage_3", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - # p_ratio [1:0] -> [8:1] - base_reward_sum=0.8, - reward_family=1, - reward_pairs_n=1, - - # block length [10, 30, 10] --> [20, 35, 20] - block_min=20, - block_max=35, - block_beta=10, - 
block_min_reward=0, - - # ITI [1, 7, 3] --> [1, 10, 3] - iti_min=1, - iti_max=10, - iti_beta=3, - - # Delay 0.5 --> 1.0 - delay_min=1, - delay_max=1, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.1, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=10, - ignored=10, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.NOW, - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=1.5, # Decrease response time: 5 --> 1.5 - reward_consume_time=1, - uncoupled_reward="", # Only valid in uncoupled task - ) - ) -) - -s_stage_4 = Stage( - name="stage_4", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - base_reward_sum=0.8, - reward_family=1, - reward_pairs_n=1, - - # Final block length for uncoupled task - block_min=20, - block_max=35, - block_beta=10, - block_min_reward=0, - - # ITI [1, 10, 3] --> [1, 15, 3] - iti_min=1, - iti_max=15, - iti_beta=3, - - delay_min=1, - delay_max=1, - delay_beta=0, - - # Reward size and reward delay - reward_delay=0.15, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=True, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=10, - ignored=10, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.OFF, # Turn off auto block - switch_thr=0.5, - points_in_a_row=5, - - # Auto stop - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=1.5, - reward_consume_time=1, - uncoupled_reward="0.1, 0.5, 0.9", 
# Only valid in uncoupled task - ) - ) -) - -s_final = Stage( - name="final", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - base_reward_sum=0.8, - reward_family=1, - reward_pairs_n=1, - - block_min=20, - block_max=35, - block_beta=10, - block_min_reward=0, - - iti_min=2, - iti_max=15, - iti_beta=3, - - delay_min=1, - delay_max=1, - delay_beta=0, - - reward_delay=0.2, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=False, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=10, - ignored=10, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.OFF, - switch_thr=0.5, - points_in_a_row=5, - - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=1.5, - reward_consume_time=1, - uncoupled_reward="0.1, 0.5, 0.9", # Only valid in uncoupled task - ) - ) -) - -# graduated same is identical to final but an absorbing state -s_graduated = Stage( - name="graduated", - task=AindDynamicForagingTaskLogic( - task_parameters=AindDynamicForagingTaskParameters( - - # Warmup OFF - warmup='off', - warm_min_trial=50, - warm_max_choice_ratio_bias=0.1, - warm_min_finish_ratio=0.8, - warm_windowsize=20, - - base_reward_sum=0.8, - reward_family=1, - reward_pairs_n=1, - - block_min=20, - block_max=35, - block_beta=10, - block_min_reward=0, - - iti_min=2, - iti_max=15, - iti_beta=3, - - delay_min=1, - delay_max=1, - delay_beta=0, - - reward_delay=0.2, - right_value_volume=2.0, - left_value_volume=2.0, - - # -- Within session automation -- - # Auto water - auto_reward=False, - auto_water_type=AutoWaterMode.NATURAL, - unrewarded=10, - ignored=10, - multiplier=0.5, - - # Auto block - advanced_block_auto=AdvancedBlockMode.OFF, - switch_thr=0.5, - 
points_in_a_row=5, - - max_trial=1000, - max_time=75, - auto_stop_ignore_win=30, - auto_stop_ignore_ratio_threshold=.83, - - # -- Miscs -- - response_time=1.5, - reward_consume_time=1, - uncoupled_reward="0.1, 0.5, 0.9", # Only valid in uncoupled task - ) - ) -) - - -# --- STAGE TRANSITIONS --- - -# warmup -@StageTransition -def st_stage_1_warmup_to_stage_1(metrics: DynamicForagingMetrics) -> bool: - return metrics.session_at_current_stage >= 1 - - -@StageTransition -def st_stage_1_warmup_to_stage_2(metrics: DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] >= 0.6 and metrics.finished_trials[-1] >= 200 - - -# stage 1 -@StageTransition -def st_stage_1_to_stage_2(metrics: DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] >= 0.6 and metrics.finished_trials[-1] >= 200 - - -# stage 2 -@StageTransition -def st_stage_2_to_stage_3(metrics: DynamicForagingMetrics) -> bool: - return metrics.session_at_current_stage >= 3 - - -@StageTransition -def st_stage_2_to_stage_1(metrics: DynamicForagingMetrics) -> bool: - return metrics.foraging_efficiency[-1] < 0.55 or metrics.finished_trials[-1] < 200 - - -# stage 3 -@StageTransition -def st_stage_3_to_stage_4(metrics: DynamicForagingMetrics) -> bool: - return metrics.finished_trials[-1] >= 300 and \ - metrics.foraging_efficiency[-1] >= 0.65 and \ - metrics.session_at_current_stage >= 3 - -@StageTransition -def st_stage_3_to_stage_2(metrics: DynamicForagingMetrics) -> bool: - return (metrics.finished_trials[-1] < 250 or metrics.foraging_efficiency[-1] < 0.50) and \ - metrics.session_at_current_stage >= 3 - -@StageTransition -def st_stage_4_to_final(metrics: DynamicForagingMetrics) -> bool: - return metrics.session_at_current_stage >= 2 - -# stage final -@StageTransition -def st_final_to_graduated(metrics: DynamicForagingMetrics) -> bool: - return metrics.session_total >= 10 and \ - metrics.session_at_current_stage >= 5 and \ - np.mean(metrics.finished_trials[-5:]) >= 400 and \ - 
np.mean(metrics.foraging_efficiency[-5:]) >= 0.65 - - -@StageTransition -def st_final_to_stage_4(metrics: DynamicForagingMetrics) -> bool: - return np.mean(metrics.finished_trials[-5:]) < 250 or np.mean(metrics.foraging_efficiency[-5:]) < 0.60 - -# --Curriculum-- -def construct_uncoupled_no_baiting_2p3p1_reward_delay_curriculum() -> Curriculum: - - cb_curriculum = create_curriculum("UncoupledNoBaiting2p3p1RewardDelayCurriculum", - __version__, - [AindDynamicForagingTaskLogic])() - - # add stages - cb_curriculum.add_stage(s_stage_1_warmup) - cb_curriculum.add_stage(s_stage_1) - cb_curriculum.add_stage(s_stage_2) - cb_curriculum.add_stage(s_stage_3) - cb_curriculum.add_stage(s_final) - cb_curriculum.add_stage(s_graduated) - - # add stage transitions - # warmup - cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_2, st_stage_1_warmup_to_stage_2) - cb_curriculum.add_stage_transition(s_stage_1_warmup, s_stage_1, st_stage_1_warmup_to_stage_1) - # stage 1 - cb_curriculum.add_stage_transition(s_stage_1, s_stage_2, st_stage_1_to_stage_2) - # stage 2 - cb_curriculum.add_stage_transition(s_stage_2, s_stage_3, st_stage_2_to_stage_3) - cb_curriculum.add_stage_transition(s_stage_2, s_stage_1, st_stage_2_to_stage_1) - # stage 3 - cb_curriculum.add_stage_transition(s_stage_3, s_stage_4, st_stage_3_to_stage_4) - cb_curriculum.add_stage_transition(s_stage_3, s_stage_2, st_stage_3_to_stage_2) - # stage 4 - cb_curriculum.add_stage_transition(s_stage_4, s_final, st_stage_4_to_final) - # final - cb_curriculum.add_stage_transition(s_final, s_graduated, st_final_to_graduated) - cb_curriculum.add_stage_transition(s_final, s_stage_4, st_final_to_stage_4) - - return cb_curriculum - diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index deb9dfe..2e3495a 100644 --- 
a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -234,10 +234,10 @@ def update(self, outcome: TrialOutcome) -> None: if self.spec.baiting: if outcome.is_right_choice: - self.logger.debug(f"Resesting right bait.") + self.logger.debug("Resesting right bait.") self.is_right_baited = False elif not outcome.is_right_choice: - self.logger.debug(f"Resesting left bait.") + self.logger.debug("Resesting left bait.") self.is_left_baited = False if self.spec.extend_block_on_no_response and outcome.is_right_choice is None: From b39098b79bbfe5c3f773b14aec5231907a79f60c Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Wed, 25 Feb 2026 17:09:16 -0800 Subject: [PATCH 16/21] regenerates --- schema/aind_behavior_dynamic_foraging.json | 31 +++++++---- .../AindBehaviorDynamicForaging.Generated.cs | 54 ++++++++++--------- 2 files changed, 52 insertions(+), 33 deletions(-) diff --git a/schema/aind_behavior_dynamic_foraging.json b/schema/aind_behavior_dynamic_foraging.json index 3f4f373..26bb4fb 100644 --- a/schema/aind_behavior_dynamic_foraging.json +++ b/schema/aind_behavior_dynamic_foraging.json @@ -3,7 +3,7 @@ "AindDynamicForagingRig": { "properties": { "aind_behavior_services_pkg_version": { - "default": "0.13.1", + "default": "0.13.2-rc1", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$", "title": "aind_behavior_services package version", "type": "string" @@ -195,7 +195,7 @@ "title": "Rng Seed" }, "aind_behavior_services_pkg_version": { - "default": "0.13.1", + "default": "0.13.2-rc1", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$", 
"title": "aind_behavior_services package version", "type": "string" @@ -902,6 +902,12 @@ "items": {}, "title": "Reward Family", "type": "array" + }, + "baiting": { + "default": false, + "description": "Whether uncollected rewards carry over to the next trial.", + "title": "Baiting", + "type": "boolean" } }, "title": "CoupledTrialGeneratorSpec", @@ -1539,14 +1545,14 @@ "Session": { "properties": { "aind_behavior_services_pkg_version": { - "default": "0.13.1", + "default": "0.13.2-rc1", "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$", "title": "aind_behavior_services package version", "type": "string" }, "version": { - "const": "0.13.1", - "default": "0.13.1", + "const": "0.13.2-rc1", + "default": "0.13.2-rc1", "title": "Version", "type": "string" }, @@ -2525,10 +2531,18 @@ "description": "Represents a water valve calibration.", "properties": { "date": { + "default": null, "description": "Date of the calibration", - "format": "date-time", - "title": "Date", - "type": "string" + "oneOf": [ + { + "format": "date-time", + "type": "string" + }, + { + "type": "null" + } + ], + "title": "Date" }, "measurements": { "default": [], @@ -2601,7 +2615,6 @@ } }, "required": [ - "date", "slope", "offset" ], diff --git a/src/Extensions/AindBehaviorDynamicForaging.Generated.cs b/src/Extensions/AindBehaviorDynamicForaging.Generated.cs index 4b69fb1..b10b6f9 100644 --- a/src/Extensions/AindBehaviorDynamicForaging.Generated.cs +++ b/src/Extensions/AindBehaviorDynamicForaging.Generated.cs @@ -49,7 +49,7 @@ public partial class AindDynamicForagingRig public AindDynamicForagingRig() { - _aindBehaviorServicesPkgVersion = "0.13.1"; + _aindBehaviorServicesPkgVersion = "0.13.2-rc1"; _version = "0.0.2-rc9"; _triggeredCameraController = new CameraControllerSpinnakerCamera(); _harpBehavior = new HarpBehavior(); @@ -592,7 +592,7 @@ public 
partial class AindDynamicForagingTaskParameters public AindDynamicForagingTaskParameters() { - _aindBehaviorServicesPkgVersion = "0.13.1"; + _aindBehaviorServicesPkgVersion = "0.13.2-rc1"; _rewardSize = new RewardSize(); _lickSpoutRetraction = false; _trialGenerator = new TrialGeneratorSpec(); @@ -1830,6 +1830,8 @@ public partial class CoupledTrialGeneratorSpec : TrialGeneratorSpec private System.Collections.Generic.List _rewardFamily; + private bool _baiting; + public CoupledTrialGeneratorSpec() { _quiescentDurationDistribution = new object(); @@ -1844,6 +1846,7 @@ public CoupledTrialGeneratorSpec() _kernelSize = 2; _rewardProbabilityParameters = new RewardProbabilityParameters(); _rewardFamily = new System.Collections.Generic.List(); + _baiting = false; } protected CoupledTrialGeneratorSpec(CoupledTrialGeneratorSpec other) : @@ -1861,6 +1864,7 @@ protected CoupledTrialGeneratorSpec(CoupledTrialGeneratorSpec other) : _kernelSize = other._kernelSize; _rewardProbabilityParameters = other._rewardProbabilityParameters; _rewardFamily = other._rewardFamily; + _baiting = other._baiting; } /// @@ -2059,6 +2063,23 @@ public System.Collections.Generic.List RewardFamily } } + /// + /// Whether uncollected rewards carry over to the next trial. 
+ /// + [Newtonsoft.Json.JsonPropertyAttribute("baiting")] + [System.ComponentModel.DescriptionAttribute("Whether uncollected rewards carry over to the next trial.")] + public bool Baiting + { + get + { + return _baiting; + } + set + { + _baiting = value; + } + } + public System.IObservable Generate() { return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new CoupledTrialGeneratorSpec(this))); @@ -2086,7 +2107,8 @@ protected override bool PrintMembers(System.Text.StringBuilder stringBuilder) stringBuilder.Append("ExtendBlockOnNoResponse = " + _extendBlockOnNoResponse + ", "); stringBuilder.Append("KernelSize = " + _kernelSize + ", "); stringBuilder.Append("RewardProbabilityParameters = " + _rewardProbabilityParameters + ", "); - stringBuilder.Append("RewardFamily = " + _rewardFamily); + stringBuilder.Append("RewardFamily = " + _rewardFamily + ", "); + stringBuilder.Append("Baiting = " + _baiting); return true; } } @@ -4067,8 +4089,8 @@ public partial class Session public Session() { - _aindBehaviorServicesPkgVersion = "0.13.1"; - _version = "0.13.1"; + _aindBehaviorServicesPkgVersion = "0.13.2-rc1"; + _version = "0.13.2-rc1"; _experimenter = new System.Collections.Generic.List(); _allowDirtyRepo = false; _skipHardwareValidation = false; @@ -6589,7 +6611,7 @@ protected override bool PrintMembers(System.Text.StringBuilder stringBuilder) public partial class WaterValveCalibration { - private System.DateTimeOffset _date; + private System.DateTimeOffset? 
_date; private System.Collections.Generic.List _measurements; @@ -6623,9 +6645,9 @@ protected WaterValveCalibration(WaterValveCalibration other) /// Date of the calibration /// [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("date", Required=Newtonsoft.Json.Required.Always)] + [Newtonsoft.Json.JsonPropertyAttribute("date")] [System.ComponentModel.DescriptionAttribute("Date of the calibration")] - public System.DateTimeOffset Date + public System.DateTimeOffset? Date { get { @@ -6637,22 +6659,6 @@ public System.DateTimeOffset Date } } - [Newtonsoft.Json.JsonIgnoreAttribute()] - [System.ComponentModel.BrowsableAttribute(false)] - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - [System.Xml.Serialization.XmlElementAttribute("Date")] - public string DateXml - { - get - { - return _date.ToString("o"); - } - set - { - _date = System.DateTimeOffset.Parse(value); - } - } - /// /// List of measurements /// From aaa1be235b082b1df1f88842c394db9031f0e457 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Thu, 26 Feb 2026 07:58:28 -0800 Subject: [PATCH 17/21] removes comma --- .../task_logic/trial_generators/coupled_trial_generator.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index 2e3495a..57b7e4d 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -166,9 +166,9 @@ def next(self) -> Trial | None: iti = draw_sample(self.spec.inter_trial_interval_duration_distribution) quiescent = draw_sample(self.spec.quiescent_duration_distribution) - p_reward_left=self.block.left_reward_prob, + p_reward_left=self.block.left_reward_prob 
p_reward_right=self.block.right_reward_prob - + if self.spec.baiting: random_numbers = np.random.random(2) @@ -180,7 +180,6 @@ def next(self) -> Trial | None: self.logger.debug(f"Right baited: {is_left_baited}") p_reward_right = 1 if is_right_baited else p_reward_right - return Trial( p_reward_left=p_reward_left, p_reward_right=p_reward_right, From e5c995e63cdbd95c35a3da61bcf5a36d10221260 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 27 Feb 2026 10:37:38 -0800 Subject: [PATCH 18/21] adds block based trial generator base class --- examples/task_logic.py | 2 +- schema/aind_behavior_dynamic_foraging.json | 136 ++--- .../AindBehaviorDynamicForaging.Generated.cs | 474 +++++++++--------- .../block_based_trial_generator.py | 216 ++++++++ .../coupled_trial_generator.py | 289 ++--------- .../test_couple_trial_generator.py | 2 +- 6 files changed, 575 insertions(+), 544 deletions(-) create mode 100644 src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py diff --git a/examples/task_logic.py b/examples/task_logic.py index 2990aad..dc16f9d 100644 --- a/examples/task_logic.py +++ b/examples/task_logic.py @@ -9,7 +9,7 @@ task_parameters=AindDynamicForagingTaskParameters( rng_seed=42, reward_size=df_task_logic.RewardSize(right_value_volume=4.0, left_value_volume=4.0), - ) + ) ) diff --git a/schema/aind_behavior_dynamic_foraging.json b/schema/aind_behavior_dynamic_foraging.json index 26bb4fb..e333b47 100644 --- a/schema/aind_behavior_dynamic_foraging.json +++ b/schema/aind_behavior_dynamic_foraging.json @@ -682,6 +682,41 @@ "title": "ConnectedClockOutput", "type": "object" }, + "CoupledTrialGenerationEndConditions": { + "properties": { + "ignore_win": { + "default": 30, + "title": "Window of trials to check ignored responses", + "type": "integer" + }, + "ignore_ratio_threshold": { + "default": 0.8, + "maximum": 1, + "minimum": 0, + "title": "Threshold for acceptable ignored trials within window.", + "type": "number" + }, + 
"max_trial": { + "default": 1000, + "title": "Maximal number of trials", + "type": "integer" + }, + "max_time": { + "default": "PT1H15M", + "format": "duration", + "title": "Maximal session time (min)", + "type": "string" + }, + "min_time": { + "default": "PT30M", + "format": "duration", + "title": "Minimum session time (min)", + "type": "string" + } + }, + "title": "CoupledTrialGenerationEndConditions", + "type": "object" + }, "CoupledTrialGeneratorSpec": { "properties": { "type": { @@ -776,44 +811,11 @@ ], "title": "Block Len Distribution" }, - "trial_generation_end_parameters": { - "$ref": "#/$defs/TrialGenerationEndConditions", - "default": { - "ignore_win": 30, - "ignore_ratio_threshold": 0.8, - "max_trial": 1000, - "max_time": "PT1H15M", - "min_time": "PT30M" - }, - "description": "Conditions to end trial generation." - }, "min_block_reward": { "default": 1, "title": "Minimal rewards in a block to switch", "type": "integer" }, - "behavior_stability_parameters": { - "default": { - "behavior_evaluation_mode": "end", - "behavior_stability_fraction": 0.5, - "min_consecutive_stable_trials": 5 - }, - "description": "Parameters describing behavior stability required to switch blocks.", - "oneOf": [ - { - "$ref": "#/$defs/BehaviorStabilityParameters" - }, - { - "type": "null" - } - ] - }, - "extend_block_on_no_response": { - "default": true, - "description": "Add one trial to the min block length.", - "title": "Extend Block On No Response", - "type": "boolean" - }, "kernel_size": { "default": 2, "description": "Kernel to evaluate choice fraction.", @@ -908,6 +910,39 @@ "description": "Whether uncollected rewards carry over to the next trial.", "title": "Baiting", "type": "boolean" + }, + "trial_generation_end_parameters": { + "$ref": "#/$defs/CoupledTrialGenerationEndConditions", + "default": { + "ignore_win": 30, + "ignore_ratio_threshold": 0.8, + "max_trial": 1000, + "max_time": "PT1H15M", + "min_time": "PT30M" + }, + "description": "Conditions to end trial 
generation." + }, + "behavior_stability_parameters": { + "default": { + "behavior_evaluation_mode": "end", + "behavior_stability_fraction": 0.5, + "min_consecutive_stable_trials": 5 + }, + "description": "Parameters describing behavior stability required to switch blocks.", + "oneOf": [ + { + "$ref": "#/$defs/BehaviorStabilityParameters" + }, + { + "type": "null" + } + ] + }, + "extend_block_on_no_response": { + "default": true, + "description": "Add one trial to the min block length.", + "title": "Extend Block On No Response", + "type": "boolean" } }, "title": "CoupledTrialGeneratorSpec", @@ -2226,41 +2261,6 @@ "title": "Trial", "type": "object" }, - "TrialGenerationEndConditions": { - "properties": { - "ignore_win": { - "default": 30, - "title": "Window of trials to check ignored responses", - "type": "integer" - }, - "ignore_ratio_threshold": { - "default": 0.8, - "maximum": 1, - "minimum": 0, - "title": "Threshold for acceptable ignored trials within window.", - "type": "number" - }, - "max_trial": { - "default": 1000, - "title": "Maximal number of trials", - "type": "integer" - }, - "max_time": { - "default": "PT1H15M", - "format": "duration", - "title": "Maximal session time (min)", - "type": "string" - }, - "min_time": { - "default": "PT30M", - "format": "duration", - "title": "Minimum session time (min)", - "type": "string" - } - }, - "title": "TrialGenerationEndConditions", - "type": "object" - }, "TrialGeneratorCompositeSpec_TrialGeneratorSpec_": { "properties": { "type": { diff --git a/src/Extensions/AindBehaviorDynamicForaging.Generated.cs b/src/Extensions/AindBehaviorDynamicForaging.Generated.cs index b10b6f9..5ca0ff4 100644 --- a/src/Extensions/AindBehaviorDynamicForaging.Generated.cs +++ b/src/Extensions/AindBehaviorDynamicForaging.Generated.cs @@ -1800,6 +1800,172 @@ public override string ToString() } + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] + 
[Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] + [Bonsai.CombinatorAttribute(MethodName="Generate")] + public partial class CoupledTrialGenerationEndConditions + { + + private int _ignoreWin; + + private double _ignoreRatioThreshold; + + private int _maxTrial; + + private System.TimeSpan _maxTime; + + private System.TimeSpan _minTime; + + public CoupledTrialGenerationEndConditions() + { + _ignoreWin = 30; + _ignoreRatioThreshold = 0.8D; + _maxTrial = 1000; + } + + protected CoupledTrialGenerationEndConditions(CoupledTrialGenerationEndConditions other) + { + _ignoreWin = other._ignoreWin; + _ignoreRatioThreshold = other._ignoreRatioThreshold; + _maxTrial = other._maxTrial; + _maxTime = other._maxTime; + _minTime = other._minTime; + } + + [Newtonsoft.Json.JsonPropertyAttribute("ignore_win")] + public int IgnoreWin + { + get + { + return _ignoreWin; + } + set + { + _ignoreWin = value; + } + } + + [Newtonsoft.Json.JsonPropertyAttribute("ignore_ratio_threshold")] + public double IgnoreRatioThreshold + { + get + { + return _ignoreRatioThreshold; + } + set + { + _ignoreRatioThreshold = value; + } + } + + [Newtonsoft.Json.JsonPropertyAttribute("max_trial")] + public int MaxTrial + { + get + { + return _maxTrial; + } + set + { + _maxTrial = value; + } + } + + [System.Xml.Serialization.XmlIgnoreAttribute()] + [Newtonsoft.Json.JsonPropertyAttribute("max_time")] + public System.TimeSpan MaxTime + { + get + { + return _maxTime; + } + set + { + _maxTime = value; + } + } + + [Newtonsoft.Json.JsonIgnoreAttribute()] + [System.ComponentModel.BrowsableAttribute(false)] + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + [System.Xml.Serialization.XmlElementAttribute("MaxTime")] + public string MaxTimeXml + { + get + { + return System.Xml.XmlConvert.ToString(_maxTime); + } + set + { + _maxTime = System.Xml.XmlConvert.ToTimeSpan(value); + } + } + + [System.Xml.Serialization.XmlIgnoreAttribute()] + 
[Newtonsoft.Json.JsonPropertyAttribute("min_time")] + public System.TimeSpan MinTime + { + get + { + return _minTime; + } + set + { + _minTime = value; + } + } + + [Newtonsoft.Json.JsonIgnoreAttribute()] + [System.ComponentModel.BrowsableAttribute(false)] + [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] + [System.Xml.Serialization.XmlElementAttribute("MinTime")] + public string MinTimeXml + { + get + { + return System.Xml.XmlConvert.ToString(_minTime); + } + set + { + _minTime = System.Xml.XmlConvert.ToTimeSpan(value); + } + } + + public System.IObservable Generate() + { + return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new CoupledTrialGenerationEndConditions(this))); + } + + public System.IObservable Generate(System.IObservable source) + { + return System.Reactive.Linq.Observable.Select(source, _ => new CoupledTrialGenerationEndConditions(this)); + } + + protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) + { + stringBuilder.Append("IgnoreWin = " + _ignoreWin + ", "); + stringBuilder.Append("IgnoreRatioThreshold = " + _ignoreRatioThreshold + ", "); + stringBuilder.Append("MaxTrial = " + _maxTrial + ", "); + stringBuilder.Append("MaxTime = " + _maxTime + ", "); + stringBuilder.Append("MinTime = " + _minTime); + return true; + } + + public override string ToString() + { + System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); + stringBuilder.Append(GetType().Name); + stringBuilder.Append(" { "); + if (PrintMembers(stringBuilder)) + { + stringBuilder.Append(" "); + } + stringBuilder.Append("}"); + return stringBuilder.ToString(); + } + } + + [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] [Bonsai.CombinatorAttribute(MethodName="Generate")] @@ -1816,14 +1982,8 @@ public partial class 
CoupledTrialGeneratorSpec : TrialGeneratorSpec private object _blockLenDistribution; - private TrialGenerationEndConditions _trialGenerationEndParameters; - private int _minBlockReward; - private BehaviorStabilityParameters _behaviorStabilityParameters; - - private bool _extendBlockOnNoResponse; - private int _kernelSize; private RewardProbabilityParameters _rewardProbabilityParameters; @@ -1832,6 +1992,12 @@ public partial class CoupledTrialGeneratorSpec : TrialGeneratorSpec private bool _baiting; + private CoupledTrialGenerationEndConditions _trialGenerationEndParameters; + + private BehaviorStabilityParameters _behaviorStabilityParameters; + + private bool _extendBlockOnNoResponse; + public CoupledTrialGeneratorSpec() { _quiescentDurationDistribution = new object(); @@ -1839,14 +2005,14 @@ public CoupledTrialGeneratorSpec() _rewardConsumptionDuration = 3D; _interTrialIntervalDurationDistribution = new object(); _blockLenDistribution = new object(); - _trialGenerationEndParameters = new TrialGenerationEndConditions(); _minBlockReward = 1; - _behaviorStabilityParameters = new BehaviorStabilityParameters(); - _extendBlockOnNoResponse = true; _kernelSize = 2; _rewardProbabilityParameters = new RewardProbabilityParameters(); _rewardFamily = new System.Collections.Generic.List(); _baiting = false; + _trialGenerationEndParameters = new CoupledTrialGenerationEndConditions(); + _behaviorStabilityParameters = new BehaviorStabilityParameters(); + _extendBlockOnNoResponse = true; } protected CoupledTrialGeneratorSpec(CoupledTrialGeneratorSpec other) : @@ -1857,14 +2023,14 @@ protected CoupledTrialGeneratorSpec(CoupledTrialGeneratorSpec other) : _rewardConsumptionDuration = other._rewardConsumptionDuration; _interTrialIntervalDurationDistribution = other._interTrialIntervalDurationDistribution; _blockLenDistribution = other._blockLenDistribution; - _trialGenerationEndParameters = other._trialGenerationEndParameters; _minBlockReward = other._minBlockReward; - 
_behaviorStabilityParameters = other._behaviorStabilityParameters; - _extendBlockOnNoResponse = other._extendBlockOnNoResponse; _kernelSize = other._kernelSize; _rewardProbabilityParameters = other._rewardProbabilityParameters; _rewardFamily = other._rewardFamily; _baiting = other._baiting; + _trialGenerationEndParameters = other._trialGenerationEndParameters; + _behaviorStabilityParameters = other._behaviorStabilityParameters; + _extendBlockOnNoResponse = other._extendBlockOnNoResponse; } /// @@ -1952,131 +2118,131 @@ public object BlockLenDistribution } } - /// - /// Conditions to end trial generation. - /// - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("trial_generation_end_parameters")] - [System.ComponentModel.DescriptionAttribute("Conditions to end trial generation.")] - public TrialGenerationEndConditions TrialGenerationEndParameters + [Newtonsoft.Json.JsonPropertyAttribute("min_block_reward")] + public int MinBlockReward { get { - return _trialGenerationEndParameters; + return _minBlockReward; } set { - _trialGenerationEndParameters = value; + _minBlockReward = value; } } - [Newtonsoft.Json.JsonPropertyAttribute("min_block_reward")] - public int MinBlockReward + /// + /// Kernel to evaluate choice fraction. + /// + [Newtonsoft.Json.JsonPropertyAttribute("kernel_size")] + [System.ComponentModel.DescriptionAttribute("Kernel to evaluate choice fraction.")] + public int KernelSize { get { - return _minBlockReward; + return _kernelSize; } set { - _minBlockReward = value; + _kernelSize = value; } } - /// - /// Parameters describing behavior stability required to switch blocks. 
- /// [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("behavior_stability_parameters")] - [System.ComponentModel.DescriptionAttribute("Parameters describing behavior stability required to switch blocks.")] - public BehaviorStabilityParameters BehaviorStabilityParameters + [Newtonsoft.Json.JsonPropertyAttribute("reward_probability_parameters")] + public RewardProbabilityParameters RewardProbabilityParameters { get { - return _behaviorStabilityParameters; + return _rewardProbabilityParameters; } set { - _behaviorStabilityParameters = value; + _rewardProbabilityParameters = value; } } - /// - /// Add one trial to the min block length. - /// - [Newtonsoft.Json.JsonPropertyAttribute("extend_block_on_no_response")] - [System.ComponentModel.DescriptionAttribute("Add one trial to the min block length.")] - public bool ExtendBlockOnNoResponse + [System.Xml.Serialization.XmlIgnoreAttribute()] + [Newtonsoft.Json.JsonPropertyAttribute("reward_family")] + public System.Collections.Generic.List RewardFamily { get { - return _extendBlockOnNoResponse; + return _rewardFamily; } set { - _extendBlockOnNoResponse = value; + _rewardFamily = value; } } /// - /// Kernel to evaluate choice fraction. + /// Whether uncollected rewards carry over to the next trial. /// - [Newtonsoft.Json.JsonPropertyAttribute("kernel_size")] - [System.ComponentModel.DescriptionAttribute("Kernel to evaluate choice fraction.")] - public int KernelSize + [Newtonsoft.Json.JsonPropertyAttribute("baiting")] + [System.ComponentModel.DescriptionAttribute("Whether uncollected rewards carry over to the next trial.")] + public bool Baiting { get { - return _kernelSize; + return _baiting; } set { - _kernelSize = value; + _baiting = value; } } + /// + /// Conditions to end trial generation. 
+ /// [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("reward_probability_parameters")] - public RewardProbabilityParameters RewardProbabilityParameters + [Newtonsoft.Json.JsonPropertyAttribute("trial_generation_end_parameters")] + [System.ComponentModel.DescriptionAttribute("Conditions to end trial generation.")] + public CoupledTrialGenerationEndConditions TrialGenerationEndParameters { get { - return _rewardProbabilityParameters; + return _trialGenerationEndParameters; } set { - _rewardProbabilityParameters = value; + _trialGenerationEndParameters = value; } } + /// + /// Parameters describing behavior stability required to switch blocks. + /// [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("reward_family")] - public System.Collections.Generic.List RewardFamily + [Newtonsoft.Json.JsonPropertyAttribute("behavior_stability_parameters")] + [System.ComponentModel.DescriptionAttribute("Parameters describing behavior stability required to switch blocks.")] + public BehaviorStabilityParameters BehaviorStabilityParameters { get { - return _rewardFamily; + return _behaviorStabilityParameters; } set { - _rewardFamily = value; + _behaviorStabilityParameters = value; } } /// - /// Whether uncollected rewards carry over to the next trial. + /// Add one trial to the min block length. 
/// - [Newtonsoft.Json.JsonPropertyAttribute("baiting")] - [System.ComponentModel.DescriptionAttribute("Whether uncollected rewards carry over to the next trial.")] - public bool Baiting + [Newtonsoft.Json.JsonPropertyAttribute("extend_block_on_no_response")] + [System.ComponentModel.DescriptionAttribute("Add one trial to the min block length.")] + public bool ExtendBlockOnNoResponse { get { - return _baiting; + return _extendBlockOnNoResponse; } set { - _baiting = value; + _extendBlockOnNoResponse = value; } } @@ -2101,14 +2267,14 @@ protected override bool PrintMembers(System.Text.StringBuilder stringBuilder) stringBuilder.Append("RewardConsumptionDuration = " + _rewardConsumptionDuration + ", "); stringBuilder.Append("InterTrialIntervalDurationDistribution = " + _interTrialIntervalDurationDistribution + ", "); stringBuilder.Append("BlockLenDistribution = " + _blockLenDistribution + ", "); - stringBuilder.Append("TrialGenerationEndParameters = " + _trialGenerationEndParameters + ", "); stringBuilder.Append("MinBlockReward = " + _minBlockReward + ", "); - stringBuilder.Append("BehaviorStabilityParameters = " + _behaviorStabilityParameters + ", "); - stringBuilder.Append("ExtendBlockOnNoResponse = " + _extendBlockOnNoResponse + ", "); stringBuilder.Append("KernelSize = " + _kernelSize + ", "); stringBuilder.Append("RewardProbabilityParameters = " + _rewardProbabilityParameters + ", "); stringBuilder.Append("RewardFamily = " + _rewardFamily + ", "); - stringBuilder.Append("Baiting = " + _baiting); + stringBuilder.Append("Baiting = " + _baiting + ", "); + stringBuilder.Append("TrialGenerationEndParameters = " + _trialGenerationEndParameters + ", "); + stringBuilder.Append("BehaviorStabilityParameters = " + _behaviorStabilityParameters + ", "); + stringBuilder.Append("ExtendBlockOnNoResponse = " + _extendBlockOnNoResponse); return true; } } @@ -5549,172 +5715,6 @@ public override string ToString() } - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", 
"0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] - [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] - [Bonsai.CombinatorAttribute(MethodName="Generate")] - public partial class TrialGenerationEndConditions - { - - private int _ignoreWin; - - private double _ignoreRatioThreshold; - - private int _maxTrial; - - private System.TimeSpan _maxTime; - - private System.TimeSpan _minTime; - - public TrialGenerationEndConditions() - { - _ignoreWin = 30; - _ignoreRatioThreshold = 0.8D; - _maxTrial = 1000; - } - - protected TrialGenerationEndConditions(TrialGenerationEndConditions other) - { - _ignoreWin = other._ignoreWin; - _ignoreRatioThreshold = other._ignoreRatioThreshold; - _maxTrial = other._maxTrial; - _maxTime = other._maxTime; - _minTime = other._minTime; - } - - [Newtonsoft.Json.JsonPropertyAttribute("ignore_win")] - public int IgnoreWin - { - get - { - return _ignoreWin; - } - set - { - _ignoreWin = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("ignore_ratio_threshold")] - public double IgnoreRatioThreshold - { - get - { - return _ignoreRatioThreshold; - } - set - { - _ignoreRatioThreshold = value; - } - } - - [Newtonsoft.Json.JsonPropertyAttribute("max_trial")] - public int MaxTrial - { - get - { - return _maxTrial; - } - set - { - _maxTrial = value; - } - } - - [System.Xml.Serialization.XmlIgnoreAttribute()] - [Newtonsoft.Json.JsonPropertyAttribute("max_time")] - public System.TimeSpan MaxTime - { - get - { - return _maxTime; - } - set - { - _maxTime = value; - } - } - - [Newtonsoft.Json.JsonIgnoreAttribute()] - [System.ComponentModel.BrowsableAttribute(false)] - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - [System.Xml.Serialization.XmlElementAttribute("MaxTime")] - public string MaxTimeXml - { - get - { - return System.Xml.XmlConvert.ToString(_maxTime); - } - set - { - _maxTime = System.Xml.XmlConvert.ToTimeSpan(value); - } - } - - [System.Xml.Serialization.XmlIgnoreAttribute()] - 
[Newtonsoft.Json.JsonPropertyAttribute("min_time")] - public System.TimeSpan MinTime - { - get - { - return _minTime; - } - set - { - _minTime = value; - } - } - - [Newtonsoft.Json.JsonIgnoreAttribute()] - [System.ComponentModel.BrowsableAttribute(false)] - [System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)] - [System.Xml.Serialization.XmlElementAttribute("MinTime")] - public string MinTimeXml - { - get - { - return System.Xml.XmlConvert.ToString(_minTime); - } - set - { - _minTime = System.Xml.XmlConvert.ToTimeSpan(value); - } - } - - public System.IObservable Generate() - { - return System.Reactive.Linq.Observable.Defer(() => System.Reactive.Linq.Observable.Return(new TrialGenerationEndConditions(this))); - } - - public System.IObservable Generate(System.IObservable source) - { - return System.Reactive.Linq.Observable.Select(source, _ => new TrialGenerationEndConditions(this)); - } - - protected virtual bool PrintMembers(System.Text.StringBuilder stringBuilder) - { - stringBuilder.Append("IgnoreWin = " + _ignoreWin + ", "); - stringBuilder.Append("IgnoreRatioThreshold = " + _ignoreRatioThreshold + ", "); - stringBuilder.Append("MaxTrial = " + _maxTrial + ", "); - stringBuilder.Append("MaxTime = " + _maxTime + ", "); - stringBuilder.Append("MinTime = " + _minTime); - return true; - } - - public override string ToString() - { - System.Text.StringBuilder stringBuilder = new System.Text.StringBuilder(); - stringBuilder.Append(GetType().Name); - stringBuilder.Append(" { "); - if (PrintMembers(stringBuilder)) - { - stringBuilder.Append(" "); - } - stringBuilder.Append("}"); - return stringBuilder.ToString(); - } - } - - [System.CodeDom.Compiler.GeneratedCodeAttribute("Bonsai.Sgen", "0.7.2.0 (Newtonsoft.Json v13.0.0.0)")] [Bonsai.WorkflowElementCategoryAttribute(Bonsai.ElementCategory.Source)] [Bonsai.CombinatorAttribute(MethodName="Generate")] @@ -7501,6 +7501,11 @@ public System.IObservable 
Process(System.IObservable(source); } + public System.IObservable Process(System.IObservable source) + { + return Process(source); + } + public System.IObservable Process(System.IObservable source) { return Process(source); @@ -7606,11 +7611,6 @@ public System.IObservable Process(System.IObservable source) return Process(source); } - public System.IObservable Process(System.IObservable source) - { - return Process(source); - } - public System.IObservable Process(System.IObservable source) { return Process(source); @@ -7697,6 +7697,7 @@ public System.IObservable Process(System.IObservable source) [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] + [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] @@ -7718,7 +7719,6 @@ public System.IObservable Process(System.IObservable source) [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] - [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] [System.Xml.Serialization.XmlIncludeAttribute(typeof(Bonsai.Expressions.TypeMapping))] diff --git 
a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py new file mode 100644 index 0000000..b11ae91 --- /dev/null +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py @@ -0,0 +1,216 @@ +import logging +import random +from typing import Literal, Optional, Union + +import numpy as np +from aind_behavior_services.task.distributions import ( + ExponentialDistribution, + ExponentialDistributionParameters, + TruncationParameters, + UniformDistribution, +) +from aind_behavior_services.task.distributions_utils import draw_sample +from pydantic import BaseModel, Field + +from ..trial_models import Trial +from ._base import BaseTrialGeneratorSpecModel, ITrialGenerator + +logger = logging.getLogger(__name__) + + +class RewardProbabilityParameters(BaseModel): + base_reward_sum: float = Field(default=0.8, title="Sum of p_reward") + family: int = Field(default=1, title="Reward family") + pairs_n: int = Field(default=1, title="Number of pairs") + + +class Block(BaseModel): + right_reward_prob: float + left_reward_prob: float + min_length: int + + +class BlockBasedTrialGeneratorSpec(BaseTrialGeneratorSpecModel): + type: Literal["BlockBasedTrialGenerator"] = "BlockBasedTrialGenerator" + + quiescent_duration_distribution: Union[UniformDistribution, ExponentialDistribution] = Field( + default=ExponentialDistribution( + distribution_parameters=ExponentialDistributionParameters(rate=1), + truncation_parameters=TruncationParameters(min=0, max=1), + ), + description="Duration of the quiescence period before trial starts (in seconds). 
Each lick resets the timer.", + ) + + response_duration: float = Field(default=1.0, description="Duration after go cue for animal response.") + + reward_consumption_duration: float = Field( + default=3.0, + description="Duration of reward consumption before transition to ITI (in seconds).", + ) + + inter_trial_interval_duration_distribution: Union[UniformDistribution, ExponentialDistribution] = Field( + default=ExponentialDistribution( + distribution_parameters=ExponentialDistributionParameters(rate=1 / 2), + truncation_parameters=TruncationParameters(min=1, max=8), + ), + description="Duration of the inter-trial interval (in seconds).", + ) + + block_len_distribution: Union[UniformDistribution, ExponentialDistribution] = Field( + default=ExponentialDistribution( + distribution_parameters=ExponentialDistributionParameters(rate=1 / 20), + truncation_parameters=TruncationParameters(min=20, max=60), + ) + ) + + min_block_reward: int = Field(default=1, title="Minimal rewards in a block to switch") + + kernel_size: int = Field(default=2, description="Kernel to evaluate choice fraction.") + reward_probability_parameters: RewardProbabilityParameters = Field(default=RewardProbabilityParameters()) + reward_family: list = [ + [[8, 1], [6, 1], [3, 1], [1, 1]], + [[8, 1], [1, 1]], + [ + [1, 0], + [0.9, 0.1], + [0.8, 0.2], + [0.7, 0.3], + [0.6, 0.4], + [0.5, 0.5], + ], + [[6, 1], [3, 1], [1, 1]], + ] + + baiting: bool = Field(default=False, description="Whether uncollected rewards carry over to the next trial.") + + def create_generator(self) -> "BlockBasedTrialGenerator": + return BlockBasedTrialGenerator(self) + + +class BlockBasedTrialGenerator(ITrialGenerator): + def __init__(self, spec: BlockBasedTrialGeneratorSpec) -> None: + """""" + self.spec = spec + self.is_right_choice_history: list[bool | None] = [] + self.reward_history: list[bool] = [] + self.block_history: list[Block] = [] + self.block: Block = self.generate_next_block( + reward_families=self.spec.reward_family, 
+ reward_family_index=self.spec.reward_probability_parameters.family, + reward_pairs_n=self.spec.reward_probability_parameters.pairs_n, + base_reward_sum=self.spec.reward_probability_parameters.base_reward_sum, + block_len_distribution=self.spec.block_len_distribution, + ) + self.trials_in_block = 0 + self.is_left_baited: bool = False + self.is_right_baited: bool = False + + def next(self) -> Trial | None: + """ + Generate next trial + + """ + logger.info("Generating next trial.") + + # check end conditions + if self.are_end_conditions_met(): + logger.info("Trial generator end conditions met.") + return + + # determine iti and quiescent period duration + iti = draw_sample(self.spec.inter_trial_interval_duration_distribution) + quiescent = draw_sample(self.spec.quiescent_duration_distribution) + + p_reward_left = self.block.left_reward_prob + p_reward_right = self.block.right_reward_prob + + if self.spec.baiting: + random_numbers = np.random.random(2) + + is_left_baited = self.block.left_reward_prob > random_numbers[0] or self.is_left_baited + logger.debug(f"Left baited: {is_left_baited}") + p_reward_left = 1 if is_left_baited else p_reward_left + + is_right_baited = self.block.right_reward_prob > random_numbers[1] or self.is_right_baited + logger.debug(f"Right baited: {is_right_baited}") + p_reward_right = 1 if is_right_baited else p_reward_right + + return Trial( + p_reward_left=p_reward_left, + p_reward_right=p_reward_right, + reward_consumption_duration=self.spec.reward_consumption_duration, + response_deadline_duration=self.spec.response_duration, + quiescence_period_duration=quiescent, + inter_trial_interval_duration=iti, + ) + + def are_end_conditions_met(self) -> bool: + """ + Check if end conditions are met to stop session + """ + raise NotImplementedError + + def generate_next_block( + self, + reward_families: list, + reward_family_index: int, + reward_pairs_n: int, + base_reward_sum: float, + block_len_distribution: Union[UniformDistribution, 
ExponentialDistribution], + current_block: Optional[Block] = None, + ) -> Block: + """ + Generate the next block for a block based task. + + :param reward_families: nested list of reward families, each a list of [left, right] reward ratio pairs + :param reward_family_index: index into reward_families selecting the family to draw candidate pairs from + :param reward_pairs_n: number of leading pairs of the selected family used as candidates + :param base_reward_sum: total probability each normalized candidate pair is scaled to sum to + :param current_block: previous block whose probabilities and high-reward side are excluded from the pool, if given + :param block_len_distribution: distribution the next block length is drawn from + """ + + logger.info("Generating next block.") + + # determine candidate reward pairs + reward_pairs = reward_families[reward_family_index][:reward_pairs_n] + reward_prob = np.array(reward_pairs, dtype=float) + reward_prob /= reward_prob.sum(axis=1, keepdims=True) + reward_prob *= float(base_reward_sum) + logger.info(f"Candidate reward pairs normalized and scaled: {reward_prob.tolist()}") + + # create pool including all reward probabilities and mirrored pairs + reward_prob_pool = np.vstack([reward_prob, np.fliplr(reward_prob)]) + + if current_block: # exclude previous block if history exists + logger.info("Excluding previous block reward probability.") + last_block_reward_prob = [current_block.right_reward_prob, current_block.left_reward_prob] + + # remove blocks identical to last block + reward_prob_pool = reward_prob_pool[np.any(reward_prob_pool != last_block_reward_prob, axis=1)] + logger.debug(f"Pool after removing identical to last block: {reward_prob_pool.tolist()}") + + # remove blocks with same high-reward side (if last block had a clear high side) + if last_block_reward_prob[0] != last_block_reward_prob[1]: + high_side_last = last_block_reward_prob[0] > last_block_reward_prob[1] + high_side_pool = reward_prob_pool[:, 0] > reward_prob_pool[:, 1] + reward_prob_pool = reward_prob_pool[high_side_pool != high_side_last] + logger.debug(f"Pool after removing same high-reward side: {reward_prob_pool.tolist()}") + + # remove duplicates + reward_prob_pool = np.unique(reward_prob_pool, axis=0) + logger.debug(f"Final reward probability pool after removing duplicates: {reward_prob_pool.tolist()}") + + 
# randomly pick next block reward probability + right_reward_prob, left_reward_prob = reward_prob_pool[random.choice(range(reward_prob_pool.shape[0]))] + logger.info(f"Selected next block reward probabilities: right={right_reward_prob}, left={left_reward_prob}") + + # randomly pick block length + next_block_len = round(draw_sample(block_len_distribution)) + logger.info(f"Selected next block length: {next_block_len}") + + return Block( + right_reward_prob=right_reward_prob, + left_reward_prob=left_reward_prob, + min_length=next_block_len, + ) diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index 57b7e4d..afc5895 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -1,20 +1,17 @@ import logging -import random from datetime import datetime, timedelta -from typing import Literal, Optional, Union +from typing import Literal, Optional import numpy as np -from aind_behavior_services.task.distributions import ( - ExponentialDistribution, - ExponentialDistributionParameters, - TruncationParameters, - UniformDistribution, -) -from aind_behavior_services.task.distributions_utils import draw_sample from pydantic import BaseModel, Field -from ..trial_models import Trial, TrialOutcome -from ._base import BaseTrialGeneratorSpecModel, ITrialGenerator +from ..trial_models import TrialOutcome +from .block_based_trial_generator import ( + BlockBasedTrialGenerator, + BlockBasedTrialGeneratorSpec, +) + +logger = logging.getLogger(__name__) BlockBehaviorEvaluationMode = Literal[ "end", # behavior stable at end of block to allow switching @@ -22,7 +19,7 @@ ] -class TrialGenerationEndConditions(BaseModel): +class CoupledTrialGenerationEndConditions(BaseModel): ignore_win: int = Field(default=30, 
title="Window of trials to check ignored responses") ignore_ratio_threshold: float = Field( default=0.8, title="Threshold for acceptable ignored trials within window.", ge=0, le=1 @@ -48,55 +45,13 @@ class BehaviorStabilityParameters(BaseModel): ) -class RewardProbabilityParameters(BaseModel): - base_reward_sum: float = Field(default=0.8, title="Sum of p_reward") - family: int = Field(default=1, title="Reward family") - pairs_n: int = Field(default=1, title="Number of pairs") - - -class Block(BaseModel): - right_reward_prob: float - left_reward_prob: float - min_length: int - - -class CoupledTrialGeneratorSpec(BaseTrialGeneratorSpecModel): +class CoupledTrialGeneratorSpec(BlockBasedTrialGeneratorSpec): type: Literal["CoupledTrialGenerator"] = "CoupledTrialGenerator" - quiescent_duration_distribution: Union[UniformDistribution, ExponentialDistribution] = Field( - default=ExponentialDistribution( - distribution_parameters=ExponentialDistributionParameters(rate=1), - truncation_parameters=TruncationParameters(min=0, max=1), - ), - description="Duration of the quiescence period before trial starts (in seconds). Each lick resets the timer.", + trial_generation_end_parameters: CoupledTrialGenerationEndConditions = Field( + default=CoupledTrialGenerationEndConditions(), description="Conditions to end trial generation." 
) - response_duration: float = Field(default=1.0, description="Duration after go cue for animal response.") - - reward_consumption_duration: float = Field( - default=3.0, - description="Duration of reward consumption before transition to ITI (in seconds).", - ) - - inter_trial_interval_duration_distribution: Union[UniformDistribution, ExponentialDistribution] = Field( - default=ExponentialDistribution( - distribution_parameters=ExponentialDistributionParameters(rate=1 / 2), - truncation_parameters=TruncationParameters(min=1, max=8), - ), - description="Duration of the inter-trial interval (in seconds).", - ) - - block_len_distribution: Union[UniformDistribution, ExponentialDistribution] = Field( - default=ExponentialDistribution( - distribution_parameters=ExponentialDistributionParameters(rate=1 / 20), - truncation_parameters=TruncationParameters(min=20, max=60), - ) - ) - - trial_generation_end_parameters: TrialGenerationEndConditions = Field( - default=TrialGenerationEndConditions(), description="Conditions to end trial generation." 
- ) - min_block_reward: int = Field(default=1, title="Minimal rewards in a block to switch") behavior_stability_parameters: Optional[BehaviorStabilityParameters] = Field( default=BehaviorStabilityParameters(), description="Parameters describing behavior stability required to switch blocks.", @@ -105,117 +60,45 @@ class CoupledTrialGeneratorSpec(BaseTrialGeneratorSpecModel): default=True, description="Add one trial to the min block length.", ) - kernel_size: int = Field(default=2, description="Kernel to evaluate choice fraction.") - reward_probability_parameters: RewardProbabilityParameters = Field(default=RewardProbabilityParameters()) - reward_family: list = [ - [[8, 1], [6, 1], [3, 1], [1, 1]], - [[8, 1], [1, 1]], - [ - [1, 0], - [0.9, 0.1], - [0.8, 0.2], - [0.7, 0.3], - [0.6, 0.4], - [0.5, 0.5], - ], - [[6, 1], [3, 1], [1, 1]], - ] - - baiting: bool = Field(default=False, description="Whether uncollected rewards carry over to the next trial.") def create_generator(self) -> "CoupledTrialGenerator": return CoupledTrialGenerator(self) -class CoupledTrialGenerator(ITrialGenerator): +class CoupledTrialGenerator(BlockBasedTrialGenerator): + spec: CoupledTrialGeneratorSpec + def __init__(self, spec: CoupledTrialGeneratorSpec) -> None: """""" - self.logger = logging.getLogger(__name__ + "." 
+ self.__class__.__name__) - self.spec = spec - self.is_right_choice_history: list[bool | None] = [] - self.reward_history: list[bool] = [] - self.block_history: list[Block] = [] - self.block: Block = self.generate_next_block( - reward_families=self.spec.reward_family, - reward_family_index=self.spec.reward_probability_parameters.family, - reward_pairs_n=self.spec.reward_probability_parameters.pairs_n, - base_reward_sum=self.spec.reward_probability_parameters.base_reward_sum, - block_len_distribution=self.spec.block_len_distribution, - ) - self.trials_in_block = 0 - self.start_time = datetime.now() - - self.is_left_baited: bool = False - self.is_right_baited: bool = False - def next(self) -> Trial | None: - """ - Generate next trial + super().__init__(spec) + self.start_time = datetime.now() + def are_end_conditions_met(self) -> bool: """ - self.logger.info("Generating next trial.") - - # check end conditions - if not self.are_end_conditions_met( - self.spec.trial_generation_end_parameters, self.is_right_choice_history, self.start_time - ): - self.logger.info("Trial generator end conditions met.") - return - - # determine iti and quiescent period duration - iti = draw_sample(self.spec.inter_trial_interval_duration_distribution) - quiescent = draw_sample(self.spec.quiescent_duration_distribution) - - p_reward_left=self.block.left_reward_prob - p_reward_right=self.block.right_reward_prob - - if self.spec.baiting: - random_numbers = np.random.random(2) - - is_left_baited = self.block.left_reward_prob > random_numbers[0] or self.is_left_baited - self.logger.debug(f"Left baited: {is_left_baited}") - p_reward_left = 1 if is_left_baited else p_reward_left - - is_right_baited = self.block.right_reward_prob > random_numbers[1] or self.is_right_baited - self.logger.debug(f"Right baited: {is_left_baited}") - p_reward_right = 1 if is_right_baited else p_reward_right - - return Trial( - p_reward_left=p_reward_left, - p_reward_right=p_reward_right, - 
reward_consumption_duration=self.spec.reward_consumption_duration, - response_deadline_duration=self.spec.response_duration, - quiescence_period_duration=quiescent, - inter_trial_interval_duration=iti, - ) - - @staticmethod - def are_end_conditions_met( - end_conditions: TrialGenerationEndConditions, choice_history: list[bool | None], start_time: datetime - ) -> bool: + Check if end conditions are met to stop session """ - Check if end conditions are met to stop session + end_conditions = self.spec.trial_generation_end_parameters + choice_history = self.is_right_choice_history - :param end_conditons: conditions to be met for trial generation to stop + time_elapsed = datetime.now() - self.start_time + frac = end_conditions.ignore_ratio_threshold + win = end_conditions.ignore_win - """ - time_elapsed = datetime.now() - start_time - if time_elapsed < end_conditions.min_time: + if time_elapsed > end_conditions.min_time and choice_history[-win:].count(None) >= frac * win: + logger.debug("Minimum time and ignored trial count exceeded.") return True - if end_conditions.max_trial < len(choice_history): - return False - if end_conditions.max_time < time_elapsed: - return False + logger.debug("Maximum session time exceeded.") + return True - frac = end_conditions.ignore_ratio_threshold - win = end_conditions.ignore_win - if choice_history[-win:].count(None) > frac * win: - return False + if end_conditions.max_trial < len(choice_history): + logger.debug("Maximum trial count exceeded.") + return True - return True + return False def update(self, outcome: TrialOutcome) -> None: """ @@ -224,23 +107,22 @@ def update(self, outcome: TrialOutcome) -> None: :param outcome: trial outcome of previous trial """ - self.logger.info(f"Updating coupled trial generator with trial outcome of {outcome}") + logger.info(f"Updating coupled trial generator with trial outcome of {outcome}") self.is_right_choice_history.append(outcome.is_right_choice) self.reward_history.append(outcome.is_rewarded) 
self.trials_in_block += 1 - - if self.spec.baiting: + if self.spec.baiting: if outcome.is_right_choice: - self.logger.debug("Resesting right bait.") + logger.debug("Resetting right bait.") self.is_right_baited = False elif not outcome.is_right_choice: - self.logger.debug("Resesting left bait.") + logger.debug("Resetting left bait.") self.is_left_baited = False if self.spec.extend_block_on_no_response and outcome.is_right_choice is None: - self.logger.info("Extending minimum block length due to ignored trial.") + logger.info("Extending minimum block length due to ignored trial.") self.block.min_length += 1 switch_block = self.is_block_switch_allowed( @@ -257,7 +139,7 @@ def update(self, outcome: TrialOutcome) -> None: ) if switch_block: - self.logger.info("Switching block.") + logger.info("Switching block.") self.trials_in_block = 0 self.block = self.generate_next_block( reward_families=self.spec.reward_family, @@ -291,11 +173,11 @@ def is_behavior_stable( """ - self.logger.info("Evaluating block behavior.") + logger.info("Evaluating block behavior.") # do not prohibit block transition if does not rely on behavior or not enough trials to evaluate or reward probs are the same. if not beh_stability_params or left_reward_prob == right_reward_prob or len(choice_history) < kernel_size: - self.logger.debug( + logger.debug( "Behavior stability evaluation skipped: " f"parameters_missing={not bool(beh_stability_params)}, " f"rewards_equal={left_reward_prob == right_reward_prob}, " @@ -306,14 +188,14 @@ def is_behavior_stable( # compute fraction of right choices with running average using a sliding window block_history = choice_history[-(trials_in_block + kernel_size - 1) :] block_choice_frac = self.compute_choice_fraction(kernel_size, block_history) - self.logger.debug(f"Choice fraction of block is {block_choice_frac}.") + logger.debug(f"Choice fraction of block is {block_choice_frac}.") # margin based on right and left probabilities and scaled by switch threshold. 
Window for evaluating behavior delta = abs((left_reward_prob - right_reward_prob) * float(beh_stability_params.behavior_stability_fraction)) threshold = ( [0, left_reward_prob - delta] if left_reward_prob > right_reward_prob else [left_reward_prob + delta, 1] ) - self.logger.debug(f"Behavior stability threshold applied: {threshold}") + logger.debug(f"Behavior stability threshold applied: {threshold}") # block_choice_fractions above threshold points_above_threshold = np.logical_and( @@ -326,17 +208,17 @@ def is_behavior_stable( mode = beh_stability_params.behavior_evaluation_mode if mode == "end": # requires consecutive trials at end of trial - self.logger.info(f"Evaluating last {min_stable} trials for end-of-block stability.") + logger.info(f"Evaluating last {min_stable} trials for end-of-block stability.") if len(points_above_threshold) < min_stable: - self.logger.info("Not enough trials to evaluate stability at block end.") + logger.info("Not enough trials to evaluate stability at block end.") return False stable = np.all(points_above_threshold[-min_stable:]) - self.logger.info(f"Behavior stable at block end: {stable}") + logger.info(f"Behavior stable at block end: {stable}") return stable elif mode == "anytime": # allows consecutive trials any time in the behavior - self.logger.info(f"Evaluating block for stability anytime over {min_stable} consecutive trials.") + logger.info(f"Evaluating block for stability anytime over {min_stable} consecutive trials.") run_len = 0 for i, v in enumerate(points_above_threshold): if v: @@ -344,9 +226,9 @@ def is_behavior_stable( else: run_len = 0 if run_len >= min_stable: - self.logger.info(f"Behavior stable at trial index {i}.") + logger.info(f"Behavior stable at trial index {i}.") return True - self.logger.info("Behavior not stable in block anytime evaluation.") + logger.info("Behavior not stable in block anytime evaluation.") return False else: @@ -394,11 +276,11 @@ def is_block_switch_allowed( kernel_size: kernel to evaluate 
choice fraction """ - self.logger.info("Evaluating block switch.") + logger.info("Evaluating block switch.") # has planned block length been reached? block_length_ok = trials_in_block >= block_length - self.logger.debug(f"Planned block length reached: {block_length_ok}") + logger.debug(f"Planned block length reached: {block_length_ok}") # is behavior qualified to switch? behavior_ok = self.is_behavior_stable( @@ -409,11 +291,11 @@ def is_block_switch_allowed( trials_in_block, kernel_size, ) - self.logger.debug(f"Behavior meets stability criteria: {behavior_ok}") + logger.debug(f"Behavior meets stability criteria: {behavior_ok}") # has reward criteria been met? reward_ok = block_left_rewards + block_right_rewards >= min_block_reward - self.logger.debug(f"Reward criterion satisfied: {reward_ok}") + logger.debug(f"Reward criterion satisfied: {reward_ok}") # conditions to switch: # - planned block length reached @@ -421,70 +303,3 @@ def is_block_switch_allowed( # - behavior is stable return block_length_ok and reward_ok and behavior_ok - - def generate_next_block( - self, - reward_families: list, - reward_family_index: int, - reward_pairs_n: int, - base_reward_sum: float, - block_len_distribution: Union[UniformDistribution, ExponentialDistribution], - current_block: Optional[None] = None, - ) -> Block: - """ - Generate the next block for a coupled task. 
- - :param reward_families: Description - :param reward_family_index: Description - :param reward_pairs_n: Description - :param base_reward_sum: Description - :param current_block: Description - :param block_len_distribution: Description - """ - - self.logger.info("Generating next block.") - - # determine candidate reward pairs - reward_pairs = reward_families[reward_family_index][:reward_pairs_n] - reward_prob = np.array(reward_pairs, dtype=float) - reward_prob /= reward_prob.sum(axis=1, keepdims=True) - reward_prob *= float(base_reward_sum) - self.logger.info(f"Candidate reward pairs normalized and scaled: {reward_prob.tolist()}") - - # create pool including all reward probabiliteis and mirrored pairs - reward_prob_pool = np.vstack([reward_prob, np.fliplr(reward_prob)]) - - if current_block: # exclude previous block if history exists - self.logger.info("Excluding previous block reward probability.") - last_block_reward_prob = [current_block.right_reward_prob, current_block.left_reward_prob] - - # remove blocks identical to last block - reward_prob_pool = reward_prob_pool[np.any(reward_prob_pool != last_block_reward_prob, axis=1)] - self.logger.debug(f"Pool after removing identical to last block: {reward_prob_pool.tolist()}") - - # remove blocks with same high-reward side (if last block had a clear high side) - if last_block_reward_prob[0] != last_block_reward_prob[1]: - high_side_last = last_block_reward_prob[0] > last_block_reward_prob[1] - high_side_pool = reward_prob_pool[:, 0] > reward_prob_pool[:, 1] - reward_prob_pool = reward_prob_pool[high_side_pool != high_side_last] - self.logger.debug(f"Pool after removing same high-reward side: {reward_prob_pool.tolist()}") - - # remove duplicates - reward_prob_pool = np.unique(reward_prob_pool, axis=0) - self.logger.debug(f"Final reward probability pool after removing duplicates: {reward_prob_pool.tolist()}") - - # randomly pick next block reward probability - right_reward_prob, left_reward_prob = 
reward_prob_pool[random.choice(range(reward_prob_pool.shape[0]))] - self.logger.info( - f"Selected next block reward probabilities: right={right_reward_prob}, left={left_reward_prob}" - ) - - # randomly pick block length - next_block_len = round(draw_sample(block_len_distribution)) - self.logger.info(f"Selected next block length: {next_block_len}") - - return Block( - right_reward_prob=right_reward_prob, - left_reward_prob=left_reward_prob, - min_length=next_block_len, - ) diff --git a/tests/trial_generators/test_couple_trial_generator.py b/tests/trial_generators/test_couple_trial_generator.py index 714b8d2..a863932 100644 --- a/tests/trial_generators/test_couple_trial_generator.py +++ b/tests/trial_generators/test_couple_trial_generator.py @@ -2,7 +2,7 @@ import unittest from aind_behavior_dynamic_foraging.task_logic.trial_generators import CoupledTrialGeneratorSpec -from aind_behavior_dynamic_foraging.task_logic.trial_generators.coupled_trial_generator import ( +from aind_behavior_dynamic_foraging.task_logic.trial_generators.block_based_trial_generator import ( RewardProbabilityParameters, ) from aind_behavior_dynamic_foraging.task_logic.trial_models import Trial, TrialOutcome From 6115525504905d80c44482fc4c3b5f5c683a98fe Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 27 Feb 2026 11:19:07 -0800 Subject: [PATCH 19/21] breaks up unittests --- .../block_based_trial_generator.py | 2 +- .../test_block_based_trial_generator.py | 115 +++++ .../test_couple_trial_generator.py | 454 ------------------ .../test_coupled_trial_generator.py | 304 ++++++++++++ 4 files changed, 420 insertions(+), 455 deletions(-) create mode 100644 tests/trial_generators/test_block_based_trial_generator.py delete mode 100644 tests/trial_generators/test_couple_trial_generator.py create mode 100644 tests/trial_generators/test_coupled_trial_generator.py diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py 
b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py index b11ae91..7f9683f 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py @@ -148,7 +148,7 @@ def are_end_conditions_met(self) -> bool: """ Check if end conditions are met to stop session """ - raise NotImplementedError + def generate_next_block( self, diff --git a/tests/trial_generators/test_block_based_trial_generator.py b/tests/trial_generators/test_block_based_trial_generator.py new file mode 100644 index 0000000..4ef4210 --- /dev/null +++ b/tests/trial_generators/test_block_based_trial_generator.py @@ -0,0 +1,115 @@ +import logging +import unittest + +from aind_behavior_dynamic_foraging.task_logic.trial_generators.block_based_trial_generator import BlockBasedTrialGeneratorSpec +from aind_behavior_dynamic_foraging.task_logic.trial_generators.block_based_trial_generator import ( + RewardProbabilityParameters, +) +from aind_behavior_dynamic_foraging.task_logic.trial_models import Trial, TrialOutcome + +logging.basicConfig(level=logging.DEBUG) + + +class TestBlockBasedTrialGenerator(unittest.TestCase): + + def setUp(self): + self.spec = BlockBasedTrialGeneratorSpec() + self.generator = self.spec.create_generator() + + #### Test generate_next_block #### + + def test_next_block_differs_from_current(self): + current = self.generator.block + next_block = self.generator.generate_next_block( + reward_families=self.spec.reward_family, + reward_family_index=self.spec.reward_probability_parameters.family, + reward_pairs_n=self.spec.reward_probability_parameters.pairs_n, + base_reward_sum=self.spec.reward_probability_parameters.base_reward_sum, + block_len_distribution=self.spec.block_len_distribution, + current_block=current, + ) + self.assertNotEqual( + (next_block.right_reward_prob, next_block.left_reward_prob), + 
(current.right_reward_prob, current.left_reward_prob), + ) + + def test_next_block_switches_high_reward_side(self): + current = self.generator.block + next_block = self.generator.generate_next_block( + reward_families=self.spec.reward_family, + reward_family_index=self.spec.reward_probability_parameters.family, + reward_pairs_n=self.spec.reward_probability_parameters.pairs_n, + base_reward_sum=self.spec.reward_probability_parameters.base_reward_sum, + block_len_distribution=self.spec.block_len_distribution, + current_block=current, + ) + current_high_is_right = current.right_reward_prob > current.left_reward_prob + next_high_is_right = next_block.right_reward_prob > next_block.left_reward_prob + self.assertNotEqual(current_high_is_right, next_high_is_right) + + def test_next_block_switches_high_reward_side_multiple_pairs(self): + spec = BlockBasedTrialGeneratorSpec( + reward_probability_parameters=RewardProbabilityParameters( + family=0, # [[8,1],[6,1],[3,1],[1,1]] - 4 pairs + pairs_n=3, + ) + ) + generator = spec.create_generator() + + current = generator.block + next_block = generator.generate_next_block( + reward_families=spec.reward_family, + reward_family_index=spec.reward_probability_parameters.family, + reward_pairs_n=spec.reward_probability_parameters.pairs_n, + base_reward_sum=spec.reward_probability_parameters.base_reward_sum, + block_len_distribution=spec.block_len_distribution, + current_block=current, + ) + + current_high_is_right = current.right_reward_prob > current.left_reward_prob + next_high_is_right = next_block.right_reward_prob > next_block.left_reward_prob + self.assertNotEqual(current_high_is_right, next_high_is_right) + + def test_next_block_never_repeats_current_multiple_pairs(self): + spec = BlockBasedTrialGeneratorSpec( + reward_probability_parameters=RewardProbabilityParameters( + family=0, + pairs_n=3, + ) + ) + generator = spec.create_generator() + + current = generator.block + for _ in range(50): + next_block = 
generator.generate_next_block( + reward_families=spec.reward_family, + reward_family_index=spec.reward_probability_parameters.family, + reward_pairs_n=spec.reward_probability_parameters.pairs_n, + base_reward_sum=spec.reward_probability_parameters.base_reward_sum, + block_len_distribution=spec.block_len_distribution, + current_block=current, + ) + self.assertNotEqual( + (next_block.right_reward_prob, next_block.left_reward_prob), + (current.right_reward_prob, current.left_reward_prob), + ) + self.assertNotEqual( + next_block.right_reward_prob > next_block.left_reward_prob, + current.right_reward_prob > current.left_reward_prob, + ) + current = next_block + + #### Test next #### + + def test_next_returns_trial(self): + trial = self.generator.next() + self.assertIsInstance(trial, Trial) + + def test_next_returns_correct_reward_probs(self): + trial = self.generator.next() + self.assertEqual(trial.p_reward_left, self.generator.block.left_reward_prob) + self.assertEqual(trial.p_reward_right, self.generator.block.right_reward_prob) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/trial_generators/test_couple_trial_generator.py b/tests/trial_generators/test_couple_trial_generator.py deleted file mode 100644 index a863932..0000000 --- a/tests/trial_generators/test_couple_trial_generator.py +++ /dev/null @@ -1,454 +0,0 @@ -import logging -import unittest - -from aind_behavior_dynamic_foraging.task_logic.trial_generators import CoupledTrialGeneratorSpec -from aind_behavior_dynamic_foraging.task_logic.trial_generators.block_based_trial_generator import ( - RewardProbabilityParameters, -) -from aind_behavior_dynamic_foraging.task_logic.trial_models import Trial, TrialOutcome - -logging.basicConfig(level=logging.DEBUG) - - -class TestCoupledTrialGenerator(unittest.TestCase): - ##### Tests is_behavior_stable ##### - - def test_behavior_stable_end(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - beh_params = 
generator.spec.behavior_stability_parameters - right_prob = generator.block.right_reward_prob - left_prob = generator.block.left_reward_prob - kernel_size = generator.spec.kernel_size - min_stable = beh_params.min_consecutive_stable_trials - - high_reward_is_right = right_prob > left_prob - - beh_params.behavior_evaluation_mode = "end" - - # stable at end: wrong side early, correct side at end - choices = [not high_reward_is_right] * 10 + [high_reward_is_right] * (min_stable + kernel_size - 1) - self.assertTrue( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - def test_behavior_not_stable_end(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - beh_params = generator.spec.behavior_stability_parameters - right_prob = generator.block.right_reward_prob - left_prob = generator.block.left_reward_prob - kernel_size = generator.spec.kernel_size - min_stable = beh_params.min_consecutive_stable_trials - - high_reward_is_right = right_prob > left_prob - - beh_params.behavior_evaluation_mode = "end" - - # unstable at end: correct side early, wrong side at end - choices = [high_reward_is_right] * 10 + [not high_reward_is_right] * (min_stable + kernel_size - 1) - self.assertFalse( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - def test_behavior_stable_anytime(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - beh_params = generator.spec.behavior_stability_parameters - right_prob = generator.block.right_reward_prob - left_prob = generator.block.left_reward_prob - kernel_size = generator.spec.kernel_size - min_stable = beh_params.min_consecutive_stable_trials - - high_reward_is_right = right_prob > left_prob - - beh_params.behavior_evaluation_mode = "anytime" - - # stable run early, then drifts off — should still pass - choices = [high_reward_is_right] * (min_stable + kernel_size - 1) + [not 
high_reward_is_right] * 10 - self.assertTrue( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - # stable at end: wrong side early, correct side at end - choices = [not high_reward_is_right] * 10 + [high_reward_is_right] * (min_stable + kernel_size - 1) - self.assertTrue( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - def test_alteranating_choices_behavior_not_stable(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - # IMPORTANT: Need to force right probability lower than left since - # the stability threshold lower bound is anchored to left_prob + delta. - # With very asymmetric probabilities (e.g. left=0.08) the threshold can be - # permissive enough that an alternating animal (choice fraction ~0.5) is considered - # stable. This is inherited behavior from the original implementation https://github.com/AllenNeuralDynamics/dynamic-foraging-task/blob/653293091179fa284c22c6dccff4f0bd49848b1e/src/foraging_gui/MyFunctions.py#L639 - # is this right? - beh_params = generator.spec.behavior_stability_parameters - left_prob = 0.7111111111111111 - right_prob = 0.08888888888888889 - kernel_size = generator.spec.kernel_size - - # never stable: alternating throughout - choices = [True, False] * 15 - - beh_params.behavior_evaluation_mode = "anytime" - self.assertFalse( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - beh_params.behavior_evaluation_mode = "end" - self.assertFalse( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - def test_alteranating_choices_behavior_stable(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - # IMPORTANT: Force right probability higher than left since - # the stability threshold lower bound is anchored to left_prob + delta. 
- # With very asymmetric probabilities (e.g. left=0.08) the threshold can be - # permissive enough that an alternating animal (choice fraction ~0.5) is considered - # stable. This is inherited behavior from the original implementation https://github.com/AllenNeuralDynamics/dynamic-foraging-task/blob/653293091179fa284c22c6dccff4f0bd49848b1e/src/foraging_gui/MyFunctions.py#L639 - # is this right? - beh_params = generator.spec.behavior_stability_parameters - right_prob = 0.7111111111111111 - left_prob = 0.08888888888888889 - kernel_size = generator.spec.kernel_size - - # never stable: alternating throughout - choices = [True, False] * 15 - - beh_params.behavior_evaluation_mode = "anytime" - self.assertTrue( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - beh_params.behavior_evaluation_mode = "end" - self.assertTrue( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - def test_behavior_stable_equal_reward_prob(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - beh_params = generator.spec.behavior_stability_parameters - right_prob = 0.5 - left_prob = 0.5 - kernel_size = generator.spec.kernel_size - - choices = [True] * 15 - self.assertTrue( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - def test_behavior_stable_choice_len_less_than_kernel(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - beh_params = generator.spec.behavior_stability_parameters - right_prob = generator.block.right_reward_prob - left_prob = generator.block.left_reward_prob - kernel_size = generator.spec.kernel_size - - choices = [True] - self.assertTrue( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - def test_behavior_stable_no_beh_stability_params(self): - spec = 
CoupledTrialGeneratorSpec(behavior_stability_parameters=None) - generator = spec.create_generator() - - beh_params = generator.spec.behavior_stability_parameters - right_prob = generator.block.right_reward_prob - left_prob = generator.block.left_reward_prob - kernel_size = generator.spec.kernel_size - - choices = [True] - self.assertTrue( - generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) - ) - - #### Test is_block_switch_allowed #### - - def test_block_switch_all_conditions_met_switches(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - generator.block.right_reward_prob = 0.8 - generator.block.left_reward_prob = 0.2 - generator.block.min_length = 20 - generator.trials_in_block = 20 - - result = generator.is_block_switch_allowed( - trials_in_block=generator.trials_in_block, - min_block_reward=1, - block_left_rewards=0, - block_right_rewards=5, - choice_history=[True] * 20, - right_reward_prob=generator.block.right_reward_prob, - left_reward_prob=generator.block.left_reward_prob, - beh_stability_params=generator.spec.behavior_stability_parameters, - block_length=generator.block.min_length, - kernel_size=generator.spec.kernel_size, - ) - self.assertTrue(result) - - def test_block_switch_block_length_not_reached(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - generator.block.right_reward_prob = 0.8 - generator.block.left_reward_prob = 0.2 - generator.block.min_length = 20 - - result = generator.is_block_switch_allowed( - trials_in_block=10, # below min_length - min_block_reward=1, - block_left_rewards=0, - block_right_rewards=5, - choice_history=[True] * 10, - right_reward_prob=generator.block.right_reward_prob, - left_reward_prob=generator.block.left_reward_prob, - beh_stability_params=generator.spec.behavior_stability_parameters, - block_length=generator.block.min_length, - kernel_size=generator.spec.kernel_size, - ) - self.assertFalse(result) 
- - def test_block_switch_reward_not_met(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - generator.block.right_reward_prob = 0.8 - generator.block.left_reward_prob = 0.2 - generator.block.min_length = 20 - - result = generator.is_block_switch_allowed( - trials_in_block=20, - min_block_reward=5, - block_left_rewards=0, - block_right_rewards=0, # no rewards - choice_history=[True] * 20, - right_reward_prob=generator.block.right_reward_prob, - left_reward_prob=generator.block.left_reward_prob, - beh_stability_params=generator.spec.behavior_stability_parameters, - block_length=generator.block.min_length, - kernel_size=generator.spec.kernel_size, - ) - self.assertFalse(result) - - def test_block_switch_behavior_not_stable(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - generator.block.right_reward_prob = 0.8 - generator.block.left_reward_prob = 0.2 - generator.block.min_length = 20 - - result = generator.is_block_switch_allowed( - trials_in_block=20, - min_block_reward=1, - block_left_rewards=5, - block_right_rewards=0, - choice_history=[False] * 20, # always choosing low-reward side - right_reward_prob=generator.block.right_reward_prob, - left_reward_prob=generator.block.left_reward_prob, - beh_stability_params=generator.spec.behavior_stability_parameters, - block_length=generator.block.min_length, - kernel_size=generator.spec.kernel_size, - ) - self.assertFalse(result) - - #### Test generate_next_block #### - - def test_next_block_differs_from_current(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - current = generator.block - next_block = generator.generate_next_block( - reward_families=spec.reward_family, - reward_family_index=spec.reward_probability_parameters.family, - reward_pairs_n=spec.reward_probability_parameters.pairs_n, - base_reward_sum=spec.reward_probability_parameters.base_reward_sum, - block_len_distribution=spec.block_len_distribution, - 
current_block=current, - ) - self.assertNotEqual( - (next_block.right_reward_prob, next_block.left_reward_prob), - (current.right_reward_prob, current.left_reward_prob), - ) - - def test_next_block_switches_high_reward_side(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - current = generator.block - next_block = generator.generate_next_block( - reward_families=spec.reward_family, - reward_family_index=spec.reward_probability_parameters.family, - reward_pairs_n=spec.reward_probability_parameters.pairs_n, - base_reward_sum=spec.reward_probability_parameters.base_reward_sum, - block_len_distribution=spec.block_len_distribution, - current_block=current, - ) - current_high_is_right = current.right_reward_prob > current.left_reward_prob - next_high_is_right = next_block.right_reward_prob > next_block.left_reward_prob - self.assertNotEqual(current_high_is_right, next_high_is_right) - - def test_next_block_switches_high_reward_side_multiple_pairs(self): - spec = CoupledTrialGeneratorSpec( - reward_probability_parameters=RewardProbabilityParameters( - family=0, # [[8,1],[6,1],[3,1],[1,1]] - 4 pairs - pairs_n=3, - ) - ) - generator = spec.create_generator() - - current = generator.block - next_block = generator.generate_next_block( - reward_families=spec.reward_family, - reward_family_index=spec.reward_probability_parameters.family, - reward_pairs_n=spec.reward_probability_parameters.pairs_n, - base_reward_sum=spec.reward_probability_parameters.base_reward_sum, - block_len_distribution=spec.block_len_distribution, - current_block=current, - ) - - current_high_is_right = current.right_reward_prob > current.left_reward_prob - next_high_is_right = next_block.right_reward_prob > next_block.left_reward_prob - self.assertNotEqual(current_high_is_right, next_high_is_right) - - def test_next_block_never_repeats_current_multiple_pairs(self): - spec = CoupledTrialGeneratorSpec( - reward_probability_parameters=RewardProbabilityParameters( - 
family=0, - pairs_n=3, - ) - ) - generator = spec.create_generator() - - current = generator.block - for _ in range(50): - next_block = generator.generate_next_block( - reward_families=spec.reward_family, - reward_family_index=spec.reward_probability_parameters.family, - reward_pairs_n=spec.reward_probability_parameters.pairs_n, - base_reward_sum=spec.reward_probability_parameters.base_reward_sum, - block_len_distribution=spec.block_len_distribution, - current_block=current, - ) - self.assertNotEqual( - (next_block.right_reward_prob, next_block.left_reward_prob), - (current.right_reward_prob, current.left_reward_prob), - ) - self.assertNotEqual( - next_block.right_reward_prob > next_block.left_reward_prob, - current.right_reward_prob > current.left_reward_prob, - ) - current = next_block - - #### Test update #### - - def _make_outcome(self, is_right_choice, is_rewarded): - return TrialOutcome(trial=Trial(), is_right_choice=is_right_choice, is_rewarded=is_rewarded) - - def test_update_appends_to_history(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - generator.update(self._make_outcome(True, True)) - self.assertEqual(len(generator.is_right_choice_history), 1) - self.assertEqual(len(generator.reward_history), 1) - - def test_update_ignored_trial_extends_block_length(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - original_length = generator.block.min_length - generator.update(self._make_outcome(None, False)) - self.assertEqual(generator.block.min_length, original_length + 1) - - def test_update_non_ignored_trial_does_not_extend_block(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - original_length = generator.block.min_length - generator.update(self._make_outcome(True, True)) - self.assertEqual(generator.block.min_length, original_length) - - def test_update_block_switches_after_conditions_met(self): - spec = CoupledTrialGeneratorSpec() - generator = 
spec.create_generator() - - generator.block.right_reward_prob = 0.8 - generator.block.left_reward_prob = 0.2 - generator.block.min_length = 5 - generator.trials_in_block = 0 - - initial_block = generator.block - - min_stable = generator.spec.behavior_stability_parameters.min_consecutive_stable_trials - kernel_size = generator.spec.kernel_size - for _ in range(min_stable + kernel_size - 1): - generator.update(self._make_outcome(True, True)) - - self.assertIsNot(generator.block, initial_block) - - def test_update_block_does_not_switch_before_min_length(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - generator.block.right_reward_prob = 0.8 - generator.block.left_reward_prob = 0.2 - generator.block.min_length = 100 - generator.trials_in_block = 0 - - initial_block = generator.block - - for _ in range(5): - generator.update(self._make_outcome(True, True)) - - self.assertIs(generator.block, initial_block) - - #### Test next #### - - def test_next_returns_trial(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - trial = generator.next() - self.assertIsInstance(trial, Trial) - - def test_next_returns_correct_reward_probs(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - trial = generator.next() - self.assertEqual(trial.p_reward_left, generator.block.left_reward_prob) - self.assertEqual(trial.p_reward_right, generator.block.right_reward_prob) - - def test_next_returns_none_after_max_trials(self): - spec = CoupledTrialGeneratorSpec() - generator = spec.create_generator() - - # exhaust the trial limit - generator.is_right_choice_history = [True] * (spec.trial_generation_end_parameters.max_trial + 1) - # bypass min_time - generator.start_time = generator.start_time - spec.trial_generation_end_parameters.min_time - - trial = generator.next() - self.assertIsNone(trial) - - -if __name__ == "__main__": - unittest.main() diff --git 
a/tests/trial_generators/test_coupled_trial_generator.py b/tests/trial_generators/test_coupled_trial_generator.py new file mode 100644 index 0000000..b98e260 --- /dev/null +++ b/tests/trial_generators/test_coupled_trial_generator.py @@ -0,0 +1,304 @@ +import logging +import unittest + +from aind_behavior_dynamic_foraging.task_logic.trial_generators import CoupledTrialGeneratorSpec +from aind_behavior_dynamic_foraging.task_logic.trial_generators.block_based_trial_generator import ( + RewardProbabilityParameters, +) +from aind_behavior_dynamic_foraging.task_logic.trial_models import Trial, TrialOutcome + +logging.basicConfig(level=logging.DEBUG) + + +class TestCoupledTrialGenerator(unittest.TestCase): + + def setUp(self): + self.spec = CoupledTrialGeneratorSpec() + self.generator = self.spec.create_generator() + + ##### Tests is_behavior_stable ##### + + def test_behavior_stable_end(self): + beh_params = self.generator.spec.behavior_stability_parameters + right_prob = self.generator.block.right_reward_prob + left_prob = self.generator.block.left_reward_prob + kernel_size = self.generator.spec.kernel_size + min_stable = beh_params.min_consecutive_stable_trials + + high_reward_is_right = right_prob > left_prob + + beh_params.behavior_evaluation_mode = "end" + + # stable at end: wrong side early, correct side at end + choices = [not high_reward_is_right] * 10 + [high_reward_is_right] * (min_stable + kernel_size - 1) + self.assertTrue( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + def test_behavior_not_stable_end(self): + beh_params = self.generator.spec.behavior_stability_parameters + right_prob = self.generator.block.right_reward_prob + left_prob = self.generator.block.left_reward_prob + kernel_size = self.generator.spec.kernel_size + min_stable = beh_params.min_consecutive_stable_trials + + high_reward_is_right = right_prob > left_prob + + beh_params.behavior_evaluation_mode = "end" + + # unstable at 
end: correct side early, wrong side at end + choices = [high_reward_is_right] * 10 + [not high_reward_is_right] * (min_stable + kernel_size - 1) + self.assertFalse( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + def test_behavior_stable_anytime(self): + beh_params = self.generator.spec.behavior_stability_parameters + right_prob = self.generator.block.right_reward_prob + left_prob = self.generator.block.left_reward_prob + kernel_size = self.generator.spec.kernel_size + min_stable = beh_params.min_consecutive_stable_trials + + high_reward_is_right = right_prob > left_prob + + beh_params.behavior_evaluation_mode = "anytime" + + # stable run early, then drifts off — should still pass + choices = [high_reward_is_right] * (min_stable + kernel_size - 1) + [not high_reward_is_right] * 10 + self.assertTrue( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + # stable at end: wrong side early, correct side at end + choices = [not high_reward_is_right] * 10 + [high_reward_is_right] * (min_stable + kernel_size - 1) + self.assertTrue( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + def test_alteranating_choices_behavior_not_stable(self): + # IMPORTANT: Need to force right probability lower than left since + # the stability threshold lower bound is anchored to left_prob + delta. + # With very asymmetric probabilities (e.g. left=0.08) the threshold can be + # permissive enough that an alternating animal (choice fraction ~0.5) is considered + # stable. This is inherited behavior from the original implementation https://github.com/AllenNeuralDynamics/dynamic-foraging-task/blob/653293091179fa284c22c6dccff4f0bd49848b1e/src/foraging_gui/MyFunctions.py#L639 + # is this right? 
+ beh_params = self.generator.spec.behavior_stability_parameters + left_prob = 0.7111111111111111 + right_prob = 0.08888888888888889 + kernel_size = self.generator.spec.kernel_size + + # never stable: alternating throughout + choices = [True, False] * 15 + + beh_params.behavior_evaluation_mode = "anytime" + self.assertFalse( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + beh_params.behavior_evaluation_mode = "end" + self.assertFalse( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + def test_alteranating_choices_behavior_stable(self): + # IMPORTANT: Force right probability higher than left since + # the stability threshold lower bound is anchored to left_prob + delta. + # With very asymmetric probabilities (e.g. left=0.08) the threshold can be + # permissive enough that an alternating animal (choice fraction ~0.5) is considered + # stable. This is inherited behavior from the original implementation https://github.com/AllenNeuralDynamics/dynamic-foraging-task/blob/653293091179fa284c22c6dccff4f0bd49848b1e/src/foraging_gui/MyFunctions.py#L639 + # is this right? 
+ beh_params = self.generator.spec.behavior_stability_parameters + right_prob = 0.7111111111111111 + left_prob = 0.08888888888888889 + kernel_size = self.generator.spec.kernel_size + + # alternating throughout — still counted as stable here because of the asymmetric threshold (see note above) + choices = [True, False] * 15 + + beh_params.behavior_evaluation_mode = "anytime" + self.assertTrue( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + beh_params.behavior_evaluation_mode = "end" + self.assertTrue( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + def test_behavior_stable_equal_reward_prob(self): + beh_params = self.generator.spec.behavior_stability_parameters + right_prob = 0.5 + left_prob = 0.5 + kernel_size = self.generator.spec.kernel_size + + choices = [True] * 15 + self.assertTrue( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + def test_behavior_stable_choice_len_less_than_kernel(self): + beh_params = self.generator.spec.behavior_stability_parameters + right_prob = self.generator.block.right_reward_prob + left_prob = self.generator.block.left_reward_prob + kernel_size = self.generator.spec.kernel_size + + choices = [True] + self.assertTrue( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + def test_behavior_stable_no_beh_stability_params(self): + spec = CoupledTrialGeneratorSpec(behavior_stability_parameters=None) + generator = spec.create_generator() + + beh_params = self.generator.spec.behavior_stability_parameters + right_prob = self.generator.block.right_reward_prob + left_prob = self.generator.block.left_reward_prob + kernel_size = self.generator.spec.kernel_size + + choices = [True] + self.assertTrue( + self.generator.is_behavior_stable(choices, right_prob, left_prob, beh_params, len(choices), kernel_size) + ) + + #### Test is_block_switch_allowed #### + + 
def test_block_switch_all_conditions_met_switches(self): + self.generator.block.right_reward_prob = 0.8 + self.generator.block.left_reward_prob = 0.2 + self.generator.block.min_length = 20 + self.generator.trials_in_block = 20 + + result = self.generator.is_block_switch_allowed( + trials_in_block=self.generator.trials_in_block, + min_block_reward=1, + block_left_rewards=0, + block_right_rewards=5, + choice_history=[True] * 20, + right_reward_prob=self.generator.block.right_reward_prob, + left_reward_prob=self.generator.block.left_reward_prob, + beh_stability_params=self.generator.spec.behavior_stability_parameters, + block_length=self.generator.block.min_length, + kernel_size=self.generator.spec.kernel_size, + ) + self.assertTrue(result) + + def test_block_switch_block_length_not_reached(self): + self.generator.block.right_reward_prob = 0.8 + self.generator.block.left_reward_prob = 0.2 + self.generator.block.min_length = 20 + + result = self.generator.is_block_switch_allowed( + trials_in_block=10, # below min_length + min_block_reward=1, + block_left_rewards=0, + block_right_rewards=5, + choice_history=[True] * 10, + right_reward_prob=self.generator.block.right_reward_prob, + left_reward_prob=self.generator.block.left_reward_prob, + beh_stability_params=self.generator.spec.behavior_stability_parameters, + block_length=self.generator.block.min_length, + kernel_size=self.generator.spec.kernel_size, + ) + self.assertFalse(result) + + def test_block_switch_reward_not_met(self): + self.generator.block.right_reward_prob = 0.8 + self.generator.block.left_reward_prob = 0.2 + self.generator.block.min_length = 20 + + result = self.generator.is_block_switch_allowed( + trials_in_block=20, + min_block_reward=5, + block_left_rewards=0, + block_right_rewards=0, # no rewards + choice_history=[True] * 20, + right_reward_prob=self.generator.block.right_reward_prob, + left_reward_prob=self.generator.block.left_reward_prob, + 
beh_stability_params=self.generator.spec.behavior_stability_parameters, + block_length=self.generator.block.min_length, + kernel_size=self.generator.spec.kernel_size, + ) + self.assertFalse(result) + + def test_block_switch_behavior_not_stable(self): + self.generator.block.right_reward_prob = 0.8 + self.generator.block.left_reward_prob = 0.2 + self.generator.block.min_length = 20 + + result = self.generator.is_block_switch_allowed( + trials_in_block=20, + min_block_reward=1, + block_left_rewards=5, + block_right_rewards=0, + choice_history=[False] * 20, # always choosing low-reward side + right_reward_prob=self.generator.block.right_reward_prob, + left_reward_prob=self.generator.block.left_reward_prob, + beh_stability_params=self.generator.spec.behavior_stability_parameters, + block_length=self.generator.block.min_length, + kernel_size=self.generator.spec.kernel_size, + ) + self.assertFalse(result) + + #### Test update #### + + def _make_outcome(self, is_right_choice, is_rewarded): + return TrialOutcome(trial=Trial(), is_right_choice=is_right_choice, is_rewarded=is_rewarded) + + def test_update_appends_to_history(self): + self.generator.update(self._make_outcome(True, True)) + self.assertEqual(len(self.generator.is_right_choice_history), 1) + self.assertEqual(len(self.generator.reward_history), 1) + + def test_update_ignored_trial_extends_block_length(self): + original_length = self.generator.block.min_length + self.generator.update(self._make_outcome(None, False)) + self.assertEqual(self.generator.block.min_length, original_length + 1) + + def test_update_non_ignored_trial_does_not_extend_block(self): + original_length = self.generator.block.min_length + self.generator.update(self._make_outcome(True, True)) + self.assertEqual(self.generator.block.min_length, original_length) + + def test_update_block_switches_after_conditions_met(self): + self.generator.block.right_reward_prob = 0.8 + self.generator.block.left_reward_prob = 0.2 + self.generator.block.min_length = 
5 + self.generator.trials_in_block = 0 + + initial_block = self.generator.block + + min_stable = self.generator.spec.behavior_stability_parameters.min_consecutive_stable_trials + kernel_size = self.generator.spec.kernel_size + for _ in range(min_stable + kernel_size - 1): + self.generator.update(self._make_outcome(True, True)) + + self.assertIsNot(self.generator.block, initial_block) + + def test_update_block_does_not_switch_before_min_length(self): + self.generator.block.right_reward_prob = 0.8 + self.generator.block.left_reward_prob = 0.2 + self.generator.block.min_length = 100 + self.generator.trials_in_block = 0 + + initial_block = self.generator.block + + for _ in range(5): + self.generator.update(self._make_outcome(True, True)) + + self.assertIs(self.generator.block, initial_block) + + #### Test next #### + + def test_next_returns_none_after_max_trials(self): + # exhaust the trial limit + self.generator.is_right_choice_history = [True] * (self.spec.trial_generation_end_parameters.max_trial + 1) + # bypass min_time + self.generator.start_time = self.generator.start_time - self.spec.trial_generation_end_parameters.min_time + + trial = self.generator.next() + self.assertIsNone(trial) + + +if __name__ == "__main__": + unittest.main() From cc70b5e68f17fff84991af9f3c4da5f755e0e444 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 27 Feb 2026 11:20:37 -0800 Subject: [PATCH 20/21] regenerates --- .../block_based_trial_generator.py | 1 - .../test_block_based_trial_generator.py | 5 ++--- .../test_coupled_trial_generator.py | 14 +++++--------- 3 files changed, 7 insertions(+), 13 deletions(-) diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py index 7f9683f..bc67cbf 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py +++ 
b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/block_based_trial_generator.py @@ -148,7 +148,6 @@ def are_end_conditions_met(self) -> bool: """ Check if end conditions are met to stop session """ - def generate_next_block( self, diff --git a/tests/trial_generators/test_block_based_trial_generator.py b/tests/trial_generators/test_block_based_trial_generator.py index 4ef4210..f803922 100644 --- a/tests/trial_generators/test_block_based_trial_generator.py +++ b/tests/trial_generators/test_block_based_trial_generator.py @@ -1,17 +1,16 @@ import logging import unittest -from aind_behavior_dynamic_foraging.task_logic.trial_generators.block_based_trial_generator import BlockBasedTrialGeneratorSpec from aind_behavior_dynamic_foraging.task_logic.trial_generators.block_based_trial_generator import ( + BlockBasedTrialGeneratorSpec, RewardProbabilityParameters, ) -from aind_behavior_dynamic_foraging.task_logic.trial_models import Trial, TrialOutcome +from aind_behavior_dynamic_foraging.task_logic.trial_models import Trial logging.basicConfig(level=logging.DEBUG) class TestCoupledTrialGenerator(unittest.TestCase): - def setUp(self): self.spec = BlockBasedTrialGeneratorSpec() self.generator = self.spec.create_generator() diff --git a/tests/trial_generators/test_coupled_trial_generator.py b/tests/trial_generators/test_coupled_trial_generator.py index b98e260..bfc1018 100644 --- a/tests/trial_generators/test_coupled_trial_generator.py +++ b/tests/trial_generators/test_coupled_trial_generator.py @@ -2,20 +2,16 @@ import unittest from aind_behavior_dynamic_foraging.task_logic.trial_generators import CoupledTrialGeneratorSpec -from aind_behavior_dynamic_foraging.task_logic.trial_generators.block_based_trial_generator import ( - RewardProbabilityParameters, -) from aind_behavior_dynamic_foraging.task_logic.trial_models import Trial, TrialOutcome logging.basicConfig(level=logging.DEBUG) class TestCoupledTrialGenerator(unittest.TestCase): - def setUp(self): self.spec = 
CoupledTrialGeneratorSpec() self.generator = self.spec.create_generator() - + ##### Tests is_behavior_stable ##### def test_behavior_stable_end(self): @@ -151,10 +147,10 @@ def test_behavior_stable_no_beh_stability_params(self): spec = CoupledTrialGeneratorSpec(behavior_stability_parameters=None) generator = spec.create_generator() - beh_params = self.generator.spec.behavior_stability_parameters - right_prob = self.generator.block.right_reward_prob - left_prob = self.generator.block.left_reward_prob - kernel_size = self.generator.spec.kernel_size + beh_params = generator.spec.behavior_stability_parameters + right_prob = generator.block.right_reward_prob + left_prob = generator.block.left_reward_prob + kernel_size = generator.spec.kernel_size choices = [True] self.assertTrue( From 1828bcf181776076ab71984044b646e1779dc0f3 Mon Sep 17 00:00:00 2001 From: Micah Woodard Date: Fri, 27 Feb 2026 12:09:32 -0800 Subject: [PATCH 21/21] adds tests for baiting --- .../coupled_trial_generator.py | 4 +- .../test_block_based_trial_generator.py | 64 ++++++++++++++++++- .../test_coupled_trial_generator.py | 46 +++++++++++++ 3 files changed, 111 insertions(+), 3 deletions(-) diff --git a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py index afc5895..83e8245 100644 --- a/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py +++ b/src/aind_behavior_dynamic_foraging/task_logic/trial_generators/coupled_trial_generator.py @@ -114,10 +114,10 @@ def update(self, outcome: TrialOutcome) -> None: self.trials_in_block += 1 if self.spec.baiting: - if outcome.is_right_choice: + if outcome.is_right_choice is True: logger.debug("Resesting right bait.") self.is_right_baited = False - elif not outcome.is_right_choice: + elif outcome.is_right_choice is False: logger.debug("Resesting left bait.") self.is_left_baited = False diff --git 
a/tests/trial_generators/test_block_based_trial_generator.py b/tests/trial_generators/test_block_based_trial_generator.py index f803922..e4a4f20 100644 --- a/tests/trial_generators/test_block_based_trial_generator.py +++ b/tests/trial_generators/test_block_based_trial_generator.py @@ -1,7 +1,11 @@ import logging import unittest +from unittest.mock import patch + +import numpy as np from aind_behavior_dynamic_foraging.task_logic.trial_generators.block_based_trial_generator import ( + Block, BlockBasedTrialGeneratorSpec, RewardProbabilityParameters, ) @@ -10,7 +14,7 @@ logging.basicConfig(level=logging.DEBUG) -class TestCoupledTrialGenerator(unittest.TestCase): +class TestBlockBasedTrialGenerator(unittest.TestCase): def setUp(self): self.spec = BlockBasedTrialGeneratorSpec() self.generator = self.spec.create_generator() @@ -109,6 +113,64 @@ def test_next_returns_correct_reward_probs(self): self.assertEqual(trial.p_reward_left, self.generator.block.left_reward_prob) self.assertEqual(trial.p_reward_right, self.generator.block.right_reward_prob) + ### test unbaited ### + + def test_baiting_disabled_reward_prob_unchanged(self): + """Without baiting, reward probs should equal block probs exactly.""" + self.generator.block = Block(right_reward_prob=0.8, left_reward_prob=0.2, min_length=10) + self.generator.is_left_baited = True + self.generator.is_right_baited = True + trial = self.generator.next() + + self.assertEqual(trial.p_reward_right, 0.8) + self.assertEqual(trial.p_reward_left, 0.2) + + +class TestBlockBaseBaitingTrialGenerator(unittest.TestCase): + ### test baiting ### + + def setUp(self): + self.spec = BlockBasedTrialGeneratorSpec(baiting=True) + self.generator = self.spec.create_generator() + + def test_baiting_sets_prob_to_1_when_baited(self): + """If bait is held, reward prob should be 1.0 on that side.""" + self.generator.block = Block(right_reward_prob=0.5, left_reward_prob=0.5, min_length=10) + self.generator.is_right_baited = True + 
self.generator.is_left_baited = True + + trial = self.generator.next() + + self.assertEqual(trial.p_reward_right, 1.0) + self.assertEqual(trial.p_reward_left, 1.0) + + def test_baiting_accumulates_when_random_exceeds_prob(self): + """Bait should not be set when the random number exceeds reward prob.""" + self.generator.block = Block(right_reward_prob=0.5, left_reward_prob=0.5, min_length=10) + self.generator.is_right_baited = False + self.generator.is_left_baited = False + + # force random numbers above reward prob so bait does not trigger from RNG + with patch("numpy.random.random", return_value=np.array([0.9, 0.9])): + trial = self.generator.next() + + # reward prob should remain unchanged since bait was not set and RNG didn't trigger + self.assertEqual(trial.p_reward_right, 0.5) + self.assertEqual(trial.p_reward_left, 0.5) + + def test_baiting_triggers_when_random_below_prob(self): + """Bait should trigger reward prob of 1.0 when random number is below reward prob.""" + self.generator.block = Block(right_reward_prob=0.5, left_reward_prob=0.5, min_length=10) + self.generator.is_right_baited = False + self.generator.is_left_baited = False + + # force random numbers below reward prob so bait triggers from RNG + with patch("numpy.random.random", return_value=np.array([0.1, 0.1])): + trial = self.generator.next() + + self.assertEqual(trial.p_reward_right, 1.0) + self.assertEqual(trial.p_reward_left, 1.0) + if __name__ == "__main__": unittest.main() diff --git a/tests/trial_generators/test_coupled_trial_generator.py b/tests/trial_generators/test_coupled_trial_generator.py index bfc1018..d70efa5 100644 --- a/tests/trial_generators/test_coupled_trial_generator.py +++ b/tests/trial_generators/test_coupled_trial_generator.py @@ -295,6 +295,52 @@ def test_next_returns_none_after_max_trials(self): trial = self.generator.next() self.assertIsNone(trial) + ### test unbaited ### + def test_baiting_disabled_bait_state_never_changes(self): + self.generator.is_right_baited = True + 
self.generator.is_left_baited = True + self.generator.update(self._make_outcome(is_right_choice=True, is_rewarded=True)) + self.assertTrue(self.generator.is_right_baited) + self.assertTrue(self.generator.is_left_baited) + + +class TestCoupledBaitingTrialGenerator(unittest.TestCase): + def setUp(self): + self.spec = CoupledTrialGeneratorSpec(baiting=True) + self.generator = self.spec.create_generator() + + def _make_outcome(self, is_right_choice, is_rewarded): + return TrialOutcome(trial=Trial(), is_right_choice=is_right_choice, is_rewarded=is_rewarded) + + ### test baiting ### + + def test_right_bait_resets_on_right_choice(self): + self.generator.is_right_baited = True + self.generator.update(self._make_outcome(is_right_choice=True, is_rewarded=True)) + self.assertFalse(self.generator.is_right_baited) + + def test_left_bait_resets_on_left_choice(self): + self.generator.is_left_baited = True + self.generator.update(self._make_outcome(is_right_choice=False, is_rewarded=True)) + self.assertFalse(self.generator.is_left_baited) + + def test_right_bait_preserved_on_left_choice(self): + self.generator.is_right_baited = True + self.generator.update(self._make_outcome(is_right_choice=False, is_rewarded=False)) + self.assertTrue(self.generator.is_right_baited) + + def test_left_bait_preserved_on_right_choice(self): + self.generator.is_left_baited = True + self.generator.update(self._make_outcome(is_right_choice=True, is_rewarded=True)) + self.assertTrue(self.generator.is_left_baited) + + def test_bait_not_reset_on_ignored_trial(self): + self.generator.is_right_baited = True + self.generator.is_left_baited = True + self.generator.update(self._make_outcome(is_right_choice=None, is_rewarded=False)) + self.assertTrue(self.generator.is_right_baited) + self.assertTrue(self.generator.is_left_baited) + if __name__ == "__main__": unittest.main()