diff --git a/.github/workflows/publish-book.yml b/.github/workflows/publish-book.yml
new file mode 100644
index 0000000..61eda89
--- /dev/null
+++ b/.github/workflows/publish-book.yml
@@ -0,0 +1,33 @@
+name: publish-book
+
+on:
+ push:
+ branches:
+ - main
+
+jobs:
+ deploy-book:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.10"
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip setuptools
+ python -m pip install ".[docs]"
+ pip install jupyter-book sphinxcontrib-mermaid
+
+ - name: Build the book
+ run: |
+ jupyter-book build .
+
+ - name: GitHub Pages action
+ uses: peaceiris/actions-gh-pages@v3.9.3
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ publish_dir: ./_build/html
diff --git a/.gitignore b/.gitignore
index b169aed..6a10e64 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,7 @@ __pycache__
__pycache__/
*.py[cod]
*$py.class
+notebooks/.ipynb_checkpoints/
# Binary files
*.jpg
@@ -23,6 +24,7 @@ __pycache__/
# Distribution / packaging
.Python
build/
+_build/
develop-eggs/
dist/
downloads/
diff --git a/_config.yml b/_config.yml
new file mode 100644
index 0000000..d844863
--- /dev/null
+++ b/_config.yml
@@ -0,0 +1,28 @@
+title: AmadeusGPT
+author: MLAI
+logo: docs/logo.png
+only_build_toc_files: true
+
+sphinx:
+ config:
+ autodoc_mock_imports: []  # e.g. ["wx"]
+ extra_extensions:
+ - numpydoc
+
+execute:
+ execute_notebooks: "off"
+
+html:
+ extra_navbar: ""
+ use_issues_button: true
+ use_repository_button: true
+ extra_footer: |
+
+
+repository:
+ url: https://github.com/AdaptiveMotorControlLab/AmadeusGPT
+ path_to_book: main
+ branch: main
+
+launch_buttons:
+ colab_url: "https://colab.research.google.com"
diff --git a/_toc.yml b/_toc.yml
new file mode 100644
index 0000000..2386bd7
--- /dev/null
+++ b/_toc.yml
@@ -0,0 +1,11 @@
+format: jb-book
+root: README
+parts:
+- caption: Using AmadeusGPT
+ chapters:
+ - file: notebooks/EPM_demo
+ - file: notebooks/Horse_demo
+ - file: notebooks/MABe_demo
+ - file: notebooks/MausHaus_demo
+ - file: notebooks/Use_Task_Program
+ - file: notebooks/YourData
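
The workflow above builds the book with "jupyter-book build ." and publishes _build/html. A rough sketch for previewing the same build locally with this config and TOC (assumes jupyter-book is installed and the script runs from the repository root):

    # build the Jupyter Book and open the generated HTML (same output dir the CI workflow publishes)
    import subprocess
    import webbrowser
    from pathlib import Path

    subprocess.run(["jupyter-book", "build", "."], check=True)
    webbrowser.open(Path("_build/html/index.html").resolve().as_uri())
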
diff --git a/amadeusgpt/analysis_objects/model.py b/amadeusgpt/analysis_objects/model.py
index 23322ee..643d426 100644
--- a/amadeusgpt/analysis_objects/model.py
+++ b/amadeusgpt/analysis_objects/model.py
@@ -20,6 +20,10 @@ def _superanimal_inference(
):
import deeplabcut
+ # Patch for PyTorch 2.6 weights_only issue
+ from amadeusgpt.utils import patch_pytorch_weights_only
+ patch_pytorch_weights_only()
+
progress_obj = st.progress(0)
deeplabcut.video_inference_superanimal(
[video_file_path],
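
For context, a minimal sketch of what patch_pytorch_weights_only() works around: PyTorch 2.6 changed torch.load to default to weights_only=True, which rejects non-allowlisted pickled globals such as ruamel's ScalarFloat found in some SuperAnimal checkpoints. The checkpoint path below is hypothetical; the sketch assumes a PyTorch build that provides add_safe_globals and that ruamel.yaml is installed:

    import torch
    from ruamel.yaml.scalarfloat import ScalarFloat

    # allow-list the ScalarFloat type so weights_only loading of the checkpoint can succeed
    if hasattr(torch.serialization, "add_safe_globals"):
        torch.serialization.add_safe_globals([ScalarFloat])
    # checkpoint = torch.load("snapshot.pt", map_location="cpu")  # hypothetical checkpoint path
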
diff --git a/amadeusgpt/managers/animal_manager.py b/amadeusgpt/managers/animal_manager.py
index 189cd50..3f0bb48 100644
--- a/amadeusgpt/managers/animal_manager.py
+++ b/amadeusgpt/managers/animal_manager.py
@@ -96,6 +96,8 @@ def __init__(self, identifier: Identifier):
self.full_keypoint_names = []
self.superanimal_predicted_video = None
self.superanimal_name = None
+ self.model_name = None
+ self.detector_name = None
self.init_pose()
def configure_animal_from_meta(self, meta_info):
@@ -106,11 +108,17 @@ def configure_animal_from_meta(self, meta_info):
self.max_individuals = int(meta_info["individuals"])
species = meta_info["species"]
if species == "topview_mouse":
- self.superanimal_name = "superanimal_topviewmouse_hrnetw32"
+ self.superanimal_name = "superanimal_topviewmouse"
+ self.model_name = "hrnet_w32"
+ self.detector_name = "fasterrcnn_resnet50_fpn_v2"
elif species == "sideview_quadruped":
- self.superanimal_name = "superanimal_quadruped_hrnetw32"
+ self.superanimal_name = "superanimal_quadruped"
+ self.model_name = "hrnet_w32"
+ self.detector_name = "fasterrcnn_resnet50_fpn_v2"
else:
self.superanimal_name = None
+ self.model_name = None
+ self.detector_name = None
def init_pose(self):
@@ -304,10 +312,14 @@ def get_keypoints(self) -> ndarray:
from deeplabcut.modelzoo.video_inference import \
video_inference_superanimal
+ # Patch for PyTorch 2.6+ weights_only issue
+ from amadeusgpt.utils import patch_pytorch_weights_only
+ patch_pytorch_weights_only()
+
video_suffix = Path(self.video_file_path).suffix
self.keypoint_file_path = self.video_file_path.replace(
- video_suffix, "_" + self.superanimal_name + ".h5"
+ video_suffix, f"_superanimal_{self.superanimal_name.split('_', 1)[1]}_{self.detector_name}_{self.model_name}.h5"
)
self.superanimal_predicted_video = self.keypoint_file_path.replace(
".h5", "_labeled.mp4"
@@ -315,9 +327,15 @@ def get_keypoints(self) -> ndarray:
if not os.path.exists(self.keypoint_file_path):
print(f"going to inference video with {self.superanimal_name}")
+ if self.model_name is None:
+ raise ValueError("Model name not set. Please call configure_animal_from_meta first.")
+ if self.detector_name is None:
+ raise ValueError("Detector name not set. Please call configure_animal_from_meta first.")
video_inference_superanimal(
videos=[self.video_file_path],
superanimal_name=self.superanimal_name,
+ model_name=self.model_name,
+ detector_name=self.detector_name,
max_individuals=self.max_individuals,
video_adapt=False,
)
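
The new keypoint filename scheme can be sanity-checked in isolation. A short sketch (the video name is hypothetical; the superanimal/model/detector values are the ones set in configure_animal_from_meta above):

    from pathlib import Path

    video_file_path = "topview_session.mp4"  # hypothetical video
    superanimal_name = "superanimal_topviewmouse"
    model_name = "hrnet_w32"
    detector_name = "fasterrcnn_resnet50_fpn_v2"

    video_suffix = Path(video_file_path).suffix
    keypoint_file_path = video_file_path.replace(
        video_suffix,
        f"_superanimal_{superanimal_name.split('_', 1)[1]}_{detector_name}_{model_name}.h5",
    )
    print(keypoint_file_path)
    # topview_session_superanimal_topviewmouse_fasterrcnn_resnet50_fpn_v2_hrnet_w32.h5
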
diff --git a/amadeusgpt/utils/__init__.py b/amadeusgpt/utils/__init__.py
new file mode 100644
index 0000000..d7c654a
--- /dev/null
+++ b/amadeusgpt/utils/__init__.py
@@ -0,0 +1,201 @@
+import ast
+import inspect
+import sys
+import time
+import traceback
+from collections import defaultdict
+import textwrap
+import numpy as np
+from amadeusgpt.analysis_objects.event import Event
+from amadeusgpt.logger import AmadeusLogger
+from IPython.display import Markdown, Video, display, HTML
+
+def filter_kwargs_for_function(func, kwargs):
+ sig = inspect.signature(func)
+ return {k: v for k, v in kwargs.items() if k in sig.parameters}
+
+def timer_decorator(func):
+ def wrapper(*args, **kwargs):
+ start_time = time.time() # before calling the function
+ result = func(*args, **kwargs) # call the function
+ end_time = time.time() # after calling the function
+ AmadeusLogger.debug(
+ f"The function {func.__name__} took {end_time - start_time} seconds to execute."
+ )
+ print(
+ f"The function {func.__name__} took {end_time - start_time} seconds to execute."
+ )
+ return result
+ return wrapper
+
+def parse_error_message_from_python():
+ exc_type, exc_value, exc_traceback = sys.exc_info()
+ traceback_str = "".join(
+ traceback.format_exception(exc_type, exc_value, exc_traceback)
+ )
+ return traceback_str
+
+def validate_openai_api_key(key):
+ import openai
+ openai.api_key = key
+ try:
+ openai.models.list()
+ return True
+ except openai.AuthenticationError:
+ return False
+
+def flatten_tuple(t):
+ """
+ Used to handle function returns
+ """
+ flattened = []
+ for item in t:
+ if isinstance(item, tuple):
+ flattened.extend(flatten_tuple(item))
+ else:
+ flattened.append(item)
+ return tuple(flattened)
+
+def func2json(func):
+ if isinstance(func, str):
+ func_str = textwrap.dedent(func)
+ parsed = ast.parse(func_str)
+ func_def = parsed.body[0]
+ func_name = func_def.name
+ docstring = ast.get_docstring(func_def)
+ if (
+ func_def.body
+ and isinstance(func_def.body[0], ast.Expr)
+ and isinstance(func_def.body[0].value, (ast.Str, ast.Constant))
+ ):
+ func_def.body.pop(0)
+ func_def.decorator_list = []
+ if hasattr(ast, "unparse"):
+ source_without_docstring_or_decorators = ast.unparse(func_def)
+ else:
+ source_without_docstring_or_decorators = None
+ return_annotation = "No return annotation"
+ if func_def.returns:
+ return_annotation = ast.unparse(func_def.returns)
+ json_obj = {
+ "name": func_name,
+ "inputs": "",
+ "source_code": source_without_docstring_or_decorators,
+ "docstring": docstring,
+ "return": return_annotation,
+ }
+ return json_obj
+ else:
+ sig = inspect.signature(func)
+ inputs = {name: str(param.annotation) for name, param in sig.parameters.items()}
+ docstring = inspect.getdoc(func)
+ if docstring:
+ docstring = textwrap.dedent(docstring)
+ full_source = inspect.getsource(func)
+ parsed = ast.parse(textwrap.dedent(full_source))
+ func_def = parsed.body[0]
+ if (
+ func_def.body
+ and isinstance(func_def.body[0], ast.Expr)
+ and isinstance(func_def.body[0].value, (ast.Str, ast.Constant))
+ ):
+ func_def.body.pop(0)
+ func_def.decorator_list = []
+ if hasattr(ast, "unparse"):
+ source_without_docstring_or_decorators = ast.unparse(func_def)
+ else:
+ source_without_docstring_or_decorators = None
+ json_obj = {
+ "name": func.__name__,
+ "inputs": inputs,
+ "source_code": textwrap.dedent(source_without_docstring_or_decorators),
+ "docstring": docstring,
+ "return": str(sig.return_annotation),
+ }
+ return json_obj
+
+class QA_Message:
+ def __init__(self, query: str, video_file_paths: list[str]):
+ self.query = query
+ self.video_file_paths = video_file_paths
+ self.code = None
+ self.chain_of_thought = None
+ self.error_message = defaultdict(list)
+ self.plots = defaultdict(list)
+ self.out_videos = defaultdict(list)
+ self.pose_video = defaultdict(list)
+ self.function_rets = defaultdict(list)
+ self.meta_info = {}
+ def get_masks(self) -> dict[str, np.ndarray]:
+ ret = {}
+ function_rets = self.function_rets
+ for video_path, rets in function_rets.items():
+ if isinstance(rets, list) and len(rets) > 0 and isinstance(rets[0], Event):
+ events = rets
+ masks = []
+ for event in events:
+ masks.append(event.generate_mask())
+ ret[video_path] = np.array(masks)
+ else:
+ ret[video_path] = None
+ return ret
+ def serialize_qa_message(self):
+ return {
+ "query": self.query,
+ "video_file_paths": self.video_file_paths,
+ "code": self.code,
+ "chain_of_thought": self.chain_of_thought,
+ "error_message": self.error_message,
+ "plots": None,
+ "out_videos": self.out_videos,
+ "pose_video": self.pose_video,
+ "function_rets": self.function_rets,
+ "meta_info": self.meta_info,
+ }
+def create_qa_message(query: str, video_file_paths: list[str]) -> QA_Message:
+ return QA_Message(query, video_file_paths)
+def parse_result(amadeus, qa_message, use_ipython=True, skip_code_execution=False):
+ if use_ipython:
+ display(Markdown(qa_message.chain_of_thought))
+ else:
+ print(qa_message.chain_of_thought)
+ sandbox = amadeus.sandbox
+ if not skip_code_execution:
+ qa_message = sandbox.code_execution(qa_message)
+ qa_message = sandbox.render_qa_message(qa_message)
+ if len(qa_message.out_videos) > 0:
+ print(f"videos generated to {qa_message.out_videos}")
+ print(
+ "Open it with media player if it does not properly display in the notebook"
+ )
+ if use_ipython:
+ if len(qa_message.out_videos) > 0:
+ for identifier, event_videos in qa_message.out_videos.items():
+ for event_video in event_videos:
+ display(Video(event_video, embed=True))
+ if use_ipython:
+ from matplotlib.animation import FuncAnimation
+ if len(qa_message.function_rets) > 0:
+ for identifier, rets in qa_message.function_rets.items():
+ if not isinstance(rets, (tuple, list)):
+ rets = [rets]
+ for ret in rets:
+ if isinstance(ret, FuncAnimation):
+ display(HTML(ret.to_jshtml()))
+ else:
+ display(Markdown(str(qa_message.function_rets[identifier])))
+ return qa_message
+
+def patch_pytorch_weights_only():
+ """
+ Patch for PyTorch 2.6 weights_only issue with DeepLabCut SuperAnimal models.
+ This adds safe globals to allow loading of ruamel.yaml.scalarfloat.ScalarFloat objects.
+ Only applies the patch if torch.serialization.add_safe_globals exists (PyTorch >=2.6).
+ """
+ try:
+ import torch
+ from ruamel.yaml.scalarfloat import ScalarFloat
+ if hasattr(torch.serialization, "add_safe_globals"):
+ torch.serialization.add_safe_globals([ScalarFloat])
+ except ImportError:
+ pass # If ruamel.yaml is not available, continue without the patch
\ No newline at end of file
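
A minimal usage sketch for the helpers added above (head_dips is a hypothetical task program used only for illustration; assumes the amadeusgpt package and its dependencies are importable):

    from amadeusgpt.utils import create_qa_message, func2json

    def head_dips(analysis) -> list:
        """Return events where the animal dips its head over the maze arm."""
        return []

    spec = func2json(head_dips)
    print(spec["name"], spec["return"])   # head_dips <class 'list'>

    msg = create_qa_message("When is the mouse in ROI0", ["EPM_11.mp4"])
    print(msg.get_masks())                # {} until function_rets is populated
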
diff --git a/docs/logo.png b/docs/logo.png
new file mode 100644
index 0000000..b3d795a
Binary files /dev/null and b/docs/logo.png differ
diff --git a/notebooks/EPM_demo.ipynb b/notebooks/EPM_demo.ipynb
index d88482e..e925915 100644
--- a/notebooks/EPM_demo.ipynb
+++ b/notebooks/EPM_demo.ipynb
@@ -1,196 +1,333 @@
{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "bceb3204-2a87-4671-8135-2533a7a51771",
- "metadata": {},
- "outputs": [],
- "source": [
- "#If th openai api key is not set already, please set it here.\n",
- "import os\n",
- "if 'OPENAI_API_KEY' not in os.environ: \n",
- " os.environ['OPENAI_API_KEY'] = 'your key'\n",
- "import amadeusgpt\n",
- "amadeusgpt.__file__"
- ]
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "ucAO18VGki49",
+ "metadata": {
+ "id": "ucAO18VGki49"
+ },
+ "source": [
+ "# AmadeusGPT Demo: Elevated Plus Maze\n",
+ "\n",
+ "- please get an openAI user key: https://platform.openai.com/api-keys.\n",
+ "- We suggest to run the demos locally, but it can be viewed on Google Colab. Some interactive features might not be available."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "G1T_fMhMk-eL",
+ "metadata": {
+ "id": "G1T_fMhMk-eL"
+ },
+ "outputs": [],
+ "source": [
+ "!pip install --pre amadeusgpt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "OfCVzMfZpoLm",
+ "metadata": {
+ "id": "OfCVzMfZpoLm"
+ },
+ "source": [
+ "- Let's test that your open AI API Key works:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "y4EfAARMqMUU",
+ "metadata": {
+ "id": "y4EfAARMqMUU"
+ },
+ "outputs": [],
+ "source": [
+ "mykey = \"paste-your-key-here\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "IbperdFZpnLn",
+ "metadata": {
+ "id": "IbperdFZpnLn"
+ },
+ "outputs": [],
+ "source": [
+ "from openai import OpenAI\n",
+ "\n",
+ "client = OpenAI(api_key=mykey)\n",
+ "\n",
+ "response = client.chat.completions.create(\n",
+ " model=\"gpt-4\",\n",
+ " messages=[{\"role\": \"user\", \"content\": \"Hello\"}]\n",
+ ")\n",
+ "print(response.choices[0].message.content)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8058bc49",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import amadeusgpt"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bceb3204-2a87-4671-8135-2533a7a51771",
+ "metadata": {
+ "id": "bceb3204-2a87-4671-8135-2533a7a51771"
+ },
+ "outputs": [],
+ "source": [
+ "#If th openai api key is not set already, please set it here.\n",
+ "import os\n",
+ "if 'OPENAI_API_KEY' not in os.environ:\n",
+ " os.environ['OPENAI_API_KEY'] = mykey\n",
+ "amadeusgpt.__file__"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1b8da600",
+ "metadata": {
+ "id": "1b8da600"
+ },
+ "outputs": [],
+ "source": [
+ "%matplotlib widget\n",
+ "from amadeusgpt import AMADEUS\n",
+ "import amadeusgpt\n",
+ "from pathlib import Path\n",
+ "import amadeusgpt\n",
+ "from amadeusgpt.utils import parse_result\n",
+ "from amadeusgpt import create_project"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ouLH5-MpmPsu",
+ "metadata": {
+ "id": "ouLH5-MpmPsu"
+ },
+ "source": [
+ "## Please upload the demo video and associated files:\n",
+ "- you can grab it from here: https://github.com/AdaptiveMotorControlLab/AmadeusGPT/tree/mwm/docs/examples/EPM\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "uxmS4XwSmQFP",
+ "metadata": {
+ "id": "uxmS4XwSmQFP"
+ },
+ "outputs": [],
+ "source": [
+ "from google.colab import files\n",
+ "\n",
+ "uploaded = files.upload()\n",
+ "for filepath, content in uploaded.items():\n",
+ " print(f'User uploaded file \"{filepath}\" with length {len(content)} bytes')\n",
+ "\n",
+ "video_path = Path(filepath).resolve()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2c861a70",
+ "metadata": {
+ "id": "2c861a70"
+ },
+ "source": [
+ "- Set the scene number to visualize your video in a specific frame\n",
+ "\n",
+ "- 🔥 Make sure your animal(s) are visible on that frame so gpt-4o can configure AmadeusGPT correctly"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "94210808-364c-44a9-a548-b600e75c5c25",
+ "metadata": {
+ "id": "94210808-364c-44a9-a548-b600e75c5c25"
+ },
+ "outputs": [],
+ "source": [
+ "scene_frame_number = 400\n",
+ "amadeus_root = Path(amadeusgpt.__file__).parent.parent\n",
+ "\n",
+ "kwargs = {\n",
+ " \"keypoint_info.body_orientation_keypoints.neck\" : \"nose\",\n",
+ " \"keypoint_info.body_orientation_keypoints.tail_base\" : \"tail_base\",\n",
+ " \"keypoint_info.body_orientation_keypoints.animal_center\" : \"neck\",\n",
+ " \"keypoint_info.head_orientation_keypoints.nose\" : \"nose\",\n",
+ " \"keypoint_info.head_orientation_keypoints.neck\" : \"neck\",\n",
+ " \"video_info.scene_frame_number\" : scene_frame_number,\n",
+ "}\n",
+ "\n",
+ "config = create_project(data_folder = \"../content\", # if you use locally: \"../examples/EPM\"\n",
+ " result_folder = \"EPM_results\",\n",
+ " **kwargs\n",
+ " )\n",
+ "amadeus = AMADEUS(config, use_vlm=True)\n",
+ "video_file_paths = amadeus.get_video_file_paths()\n",
+ "print (video_file_paths)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "NG15yauFo6rN",
+ "metadata": {
+ "id": "NG15yauFo6rN"
+ },
+ "source": [
+ "#### 🚨 warning, if you see an error `AttributeError: 'NoneType' object has no attribute 'choices'`, look above for openAI errors"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "e0c27287",
+ "metadata": {
+ "id": "e0c27287"
+ },
+ "source": [
+ "## Draw ROIs. Press Esc when you are done drawing each ROI.\n",
+ "- After you are done just run the next cell!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4770c747-c426-4f99-847e-f853c1d32d20",
+ "metadata": {
+ "id": "4770c747-c426-4f99-847e-f853c1d32d20"
+ },
+ "outputs": [],
+ "source": [
+ "behavior_analysis = amadeus.get_behavior_analysis('../content/EPM_11.mp4') #check the path!\n",
+ "behavior_analysis.gui_manager.add_roi_from_video_selection()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d2257d16",
+ "metadata": {
+ "id": "d2257d16"
+ },
+ "source": [
+ "### Get video clips, ethogram and trajectory plots for mouse in the ROI 0"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "b58bf7af-b75b-4fe5-a422-8fe55aa226ad",
+ "metadata": {
+ "id": "b58bf7af-b75b-4fe5-a422-8fe55aa226ad"
+ },
+ "outputs": [],
+ "source": [
+ "query = \"When is the mouse in ROI0\"\n",
+ "qa_message = amadeus.step(query)\n",
+ "qa_message = parse_result(amadeus, qa_message)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9773a5b2",
+ "metadata": {
+ "id": "9773a5b2"
+ },
+ "source": [
+ "### You can get a list of binary masks (equivalent to ethogram) for the underlying behavior, if your query is about retriving a described behavior"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "496beae7",
+ "metadata": {
+ "id": "496beae7"
+ },
+ "outputs": [],
+ "source": [
+ "# the return masks is of shape (num_of_events, video_length)\n",
+ "# where each boolean array of (video_length,) is binary where True indicates whether the behavior is happening at that frame\n",
+ "masks = qa_message.get_masks()\n",
+ "print (masks)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d28b3f10-ecba-4ecf-a283-142d2d43ea8f",
+ "metadata": {
+ "id": "d28b3f10-ecba-4ecf-a283-142d2d43ea8f"
+ },
+ "outputs": [],
+ "source": [
+ "query = \"Plot the trajectory of the animal using the animal center and color it by time\"\n",
+ "qa_message = amadeus.step(query)\n",
+ "qa_message = parse_result(amadeus, qa_message)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3a83f2ea",
+ "metadata": {
+ "id": "3a83f2ea"
+ },
+ "source": [
+ "### How to retrieve results using the query"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6c2f1dfd",
+ "metadata": {
+ "id": "6c2f1dfd"
+ },
+ "outputs": [],
+ "source": [
+ "messages = amadeus.get_messages()\n",
+ "\n",
+ "for query, qa_message in messages.items():\n",
+ " print (query)\n",
+ " print (qa_message.get_masks())"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "amadeusgpt-cpu",
+ "language": "python",
+ "name": "amadeusgpt-cpu"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.0"
+ }
},
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "1b8da600",
- "metadata": {},
- "outputs": [],
- "source": [
- "%matplotlib widget\n",
- "from amadeusgpt import AMADEUS\n",
- "import amadeusgpt\n",
- "from pathlib import Path\n",
- "import amadeusgpt\n",
- "from amadeusgpt.utils import parse_result\n",
- "from amadeusgpt import create_project"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "2c861a70",
- "metadata": {},
- "source": [
- "### Set the scene number to visualize your video in a specific frame\n",
- "### Make sure your animal(s) are visible on that frame so gpt-4o can configure AmadeusGPT correctly"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "94210808-364c-44a9-a548-b600e75c5c25",
- "metadata": {},
- "outputs": [],
- "source": [
- "scene_frame_number = 400\n",
- "amadeus_root = Path(amadeusgpt.__file__).parent.parent\n",
- "\n",
- "kwargs = {\n",
- " \"keypoint_info.body_orientation_keypoints.neck\" : \"nose\",\n",
- " \"keypoint_info.body_orientation_keypoints.tail_base\" : \"tail_base\",\n",
- " \"keypoint_info.body_orientation_keypoints.animal_center\" : \"neck\",\n",
- " \"keypoint_info.head_orientation_keypoints.nose\" : \"nose\",\n",
- " \"keypoint_info.head_orientation_keypoints.neck\" : \"neck\",\n",
- " \"video_info.scene_frame_number\" : scene_frame_number,\n",
- "}\n",
- "\n",
- "config = create_project(data_folder = \"../examples/EPM\",\n",
- " result_folder = \"results\",\n",
- " **kwargs\n",
- " )\n",
- "amadeus = AMADEUS(config, use_vlm=True)\n",
- "video_file_paths = amadeus.get_video_file_paths()\n",
- "print (video_file_paths) "
- ]
- },
- {
- "cell_type": "markdown",
- "id": "e0c27287",
- "metadata": {},
- "source": [
- "### Draw ROIs. Press Esc when you are done drawing each ROI.\n",
- "### After done just run the next cell."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "4770c747-c426-4f99-847e-f853c1d32d20",
- "metadata": {},
- "outputs": [],
- "source": [
- "behavior_analysis = amadeus.get_behavior_analysis('../examples/EPM/EPM_11.mp4')\n",
- "behavior_analysis.gui_manager.add_roi_from_video_selection()"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "d2257d16",
- "metadata": {},
- "source": [
- "### Get video clips, ethogram and trajectory plots for mouse in the ROI0"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "b58bf7af-b75b-4fe5-a422-8fe55aa226ad",
- "metadata": {},
- "outputs": [],
- "source": [
- "query = \"When is the mouse in ROI0\"\n",
- "qa_message = amadeus.step(query)\n",
- "qa_message = parse_result(amadeus, qa_message)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "9773a5b2",
- "metadata": {},
- "source": [
- "### You can get a list of binary masks (equivalent to ethogram) for the underlying behavior, if your query is about retriving a described behavior"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "496beae7",
- "metadata": {},
- "outputs": [],
- "source": [
- "# the return masks is of shape (num_of_events, video_length)\n",
- "# where each boolean array of (video_length,) is binary where True indicates whether the behavior is happening at that frame\n",
- "masks = qa_message.get_masks()\n",
- "print (masks)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "d28b3f10-ecba-4ecf-a283-142d2d43ea8f",
- "metadata": {},
- "outputs": [],
- "source": [
- "query = \"Plot the trajectory of the animal using the animal center and color it by time\"\n",
- "qa_message = amadeus.step(query)\n",
- "qa_message = parse_result(amadeus, qa_message)"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "3a83f2ea",
- "metadata": {},
- "source": [
- "### How to retrieve results using the query"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "6c2f1dfd",
- "metadata": {},
- "outputs": [],
- "source": [
- "messages = amadeus.get_messages()\n",
- "\n",
- "for query, qa_message in messages.items():\n",
- " print (query)\n",
- " print (qa_message.get_masks())"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "id": "374bb54b",
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "amadeusgpt-cpu",
- "language": "python",
- "name": "amadeusgpt-cpu"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.10.0"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 5
+ "nbformat": 4,
+ "nbformat_minor": 5
}
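
As a rough follow-up to the get_masks() cell above, a sketch of how the per-event masks could be summarized into time spent in the ROI (the array values and frame rate here are made up):

    import numpy as np

    # each value has shape (num_of_events, video_length), True where the behavior occurs
    masks = {"EPM_11.mp4": np.array([[True, True, False, False],
                                     [False, False, True, True]])}
    fps = 30  # hypothetical frame rate
    for video, event_masks in masks.items():
        per_frame = event_masks.any(axis=0)  # collapse events into a frame-level ethogram
        print(video, per_frame.sum() / fps, "seconds in ROI0")
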
diff --git a/notebooks/Horse_demo.ipynb b/notebooks/Horse_demo.ipynb
index 69a24a1..7df2195 100644
--- a/notebooks/Horse_demo.ipynb
+++ b/notebooks/Horse_demo.ipynb
@@ -1,16 +1,60 @@
{
"cells": [
+ {
+ "cell_type": "markdown",
+ "id": "649a7dd7",
+ "metadata": {},
+ "source": [
+ "# AmadeusGPT Demo: Horse Gait Analysis\n",
+ "\n",
+ "- please get an openAI user key: https://platform.openai.com/api-keys.\n",
+ "- We suggest to run the demos locally, but it can be viewed on Google Colab. Some interactive features might not be available."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "80877a1a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!pip install --pre amadeusgpt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2cbd7674",
+ "metadata": {},
+ "source": [
+ "- Let's test that your open AI API Key works:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "1fdfca27",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mykey = \"paste-your-key-here\""
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
- "id": "6245b791",
+ "id": "bda5a44e",
"metadata": {},
"outputs": [],
"source": [
- "#If th openai api key is not set already, please set it here.\n",
- "import os\n",
- "if 'OPENAI_API_KEY' not in os.environ: \n",
- " os.environ['OPENAI_API_KEY'] = 'your key'"
+ "from openai import OpenAI\n",
+ "\n",
+ "client = OpenAI(api_key=mykey)\n",
+ "\n",
+ "response = client.chat.completions.create(\n",
+ " model=\"gpt-4\",\n",
+ " messages=[{\"role\": \"user\", \"content\": \"Hello\"}]\n",
+ ")\n",
+ "print(response.choices[0].message.content)"
]
},
{
@@ -29,6 +73,41 @@
"from amadeusgpt import create_project"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "b646d478",
+ "metadata": {},
+ "source": [
+ "## Please upload the demo video and associated files:\n",
+ "- you can grab it from here: https://github.com/AdaptiveMotorControlLab/AmadeusGPT/tree/mwm/docs/examples/Horse\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "907fdda2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from google.colab import files\n",
+ "\n",
+ "uploaded = files.upload()\n",
+ "for filepath, content in uploaded.items():\n",
+ " print(f'User uploaded file \"{filepath}\" with length {len(content)} bytes')\n",
+ "\n",
+ "video_path = Path(filepath).resolve()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b418ecd9",
+ "metadata": {},
+ "source": [
+ "- Set the scene number to visualize your video in a specific frame\n",
+ "\n",
+ "- 🔥 Make sure your animal(s) are visible on that frame so gpt-4o can configure AmadeusGPT correctly"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -38,7 +117,7 @@
"source": [
"scene_frame_number = 100\n",
"amadeus_root = Path(amadeusgpt.__file__).parent.parent\n",
- "config = Config(amadeus_root / \"amadeusgpt/configs/Horse_template.yaml\")\n",
+ "config = Config(amadeus_root / \"amadeusgpt/configs/Horse_template.yaml\") #check the path to the config file\n",
"\n",
"kwargs = { \n",
" \"video_info.scene_frame_number\" : scene_frame_number,\n",
@@ -48,7 +127,7 @@
"\n",
"}\n",
"\n",
- "config = create_project(data_folder = \"../examples/Horse\",\n",
+ "config = create_project(data_folder = \"../examples/Horse\", #check the path to the data folder\n",
" result_folder = \"results\",\n",
" **kwargs\n",
" )\n",
@@ -65,7 +144,7 @@
"metadata": {},
"outputs": [],
"source": [
- "behavior_analysis = amadeus.get_behavior_analysis(video_file_path = '../examples/Horse/BrownHorseinShadow.mp4')\n",
+ "behavior_analysis = amadeus.get_behavior_analysis(video_file_path = '../examples/Horse/BrownHorseinShadow.mp4') #check the path to the video file\n",
"scene_image = behavior_analysis.visual_manager.get_scene_image()\n",
"plt.imshow(scene_image)"
]
diff --git a/notebooks/MABe_demo.ipynb b/notebooks/MABe_demo.ipynb
index 57a526f..34b7348 100644
--- a/notebooks/MABe_demo.ipynb
+++ b/notebooks/MABe_demo.ipynb
@@ -1,16 +1,60 @@
{
"cells": [
+ {
+ "cell_type": "markdown",
+ "id": "e08a36db",
+ "metadata": {},
+ "source": [
+ "# AmadeusGPT Demo: MABe dataset\n",
+ "\n",
+ "- please get an openAI user key: https://platform.openai.com/api-keys.\n",
+ "- We suggest to run the demos locally, but it can be viewed on Google Colab. Some interactive features might not be available."
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
- "id": "7829458f",
+ "id": "b190e238",
"metadata": {},
"outputs": [],
"source": [
- "#If th openai api key is not set already, please set it here.\n",
- "import os\n",
- "if 'OPENAI_API_KEY' not in os.environ: \n",
- " os.environ['OPENAI_API_KEY'] = 'your key'"
+ "!pip install --pre amadeusgpt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "ee2fdd05",
+ "metadata": {},
+ "source": [
+ "- Let's test that your open AI API Key works:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "fbc5a041",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mykey = \"paste-your-key-here\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4512f9f4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from openai import OpenAI\n",
+ "\n",
+ "client = OpenAI(api_key=mykey)\n",
+ "\n",
+ "response = client.chat.completions.create(\n",
+ " model=\"gpt-4\",\n",
+ " messages=[{\"role\": \"user\", \"content\": \"Hello\"}]\n",
+ ")\n",
+ "print(response.choices[0].message.content)"
]
},
{
@@ -29,6 +73,15 @@
"from amadeusgpt import create_project"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "eebe3919",
+ "metadata": {},
+ "source": [
+ "## Please upload the demo video and associated files:\n",
+ "- you can grab it from here: https://github.com/AdaptiveMotorControlLab/AmadeusGPT/tree/mwm/docs/examples/MABe"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -47,7 +100,7 @@
" \"video_info.scene_frame_number\" : 1400,\n",
"}\n",
"\n",
- "config = create_project(data_folder = \"../examples/MABe\",\n",
+ "config = create_project(data_folder = \"../examples/MABe\", # check the path to the data folder\n",
" result_folder = \"results\",\n",
" **kwargs\n",
" )\n",
@@ -64,8 +117,8 @@
"metadata": {},
"outputs": [],
"source": [
- "behavior_analysis = amadeus.get_behavior_analysis(video_file_path='../examples/MABe/EGS8X2MN4SSUGFWAV976.mp4',\n",
- " keypoint_file_path='../examples/MABe/EGS8X2MN4SSUGFWAV976.h5')\n",
+ "behavior_analysis = amadeus.get_behavior_analysis(video_file_path='../examples/MABe/EGS8X2MN4SSUGFWAV976.mp4', # check the path to the video file\n",
+ " keypoint_file_path='../examples/MABe/EGS8X2MN4SSUGFWAV976.h5') # check the path to the keypoint file\n",
"scene_image = behavior_analysis.visual_manager.get_scene_image()\n",
"plt.imshow(scene_image)"
]
diff --git a/notebooks/MausHaus_demo.ipynb b/notebooks/MausHaus_demo.ipynb
index 7a25fca..ae8e1db 100644
--- a/notebooks/MausHaus_demo.ipynb
+++ b/notebooks/MausHaus_demo.ipynb
@@ -1,16 +1,60 @@
{
"cells": [
+ {
+ "cell_type": "markdown",
+ "id": "21d149e4",
+ "metadata": {},
+ "source": [
+ "# AmadeusGPT Demo: MausHaus\n",
+ "\n",
+ "- please get an openAI user key: https://platform.openai.com/api-keys.\n",
+ "- We suggest to run the demos locally, but it can be viewed on Google Colab. Some interactive features might not be available."
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
- "id": "5fbc36a0",
+ "id": "7c5c60a0",
"metadata": {},
"outputs": [],
"source": [
- "#If th openai api key is not set already, please set it here.\n",
- "import os\n",
- "if 'OPENAI_API_KEY' not in os.environ: \n",
- " os.environ['OPENAI_API_KEY'] = 'your key'"
+ "!pip install --pre amadeusgpt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "17cac544",
+ "metadata": {},
+ "source": [
+ "- Let's test that your open AI API Key works:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "700c29e4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mykey = \"paste-your-key-here\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e6c4ae02",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from openai import OpenAI\n",
+ "\n",
+ "client = OpenAI(api_key=mykey)\n",
+ "\n",
+ "response = client.chat.completions.create(\n",
+ " model=\"gpt-4\",\n",
+ " messages=[{\"role\": \"user\", \"content\": \"Hello\"}]\n",
+ ")\n",
+ "print(response.choices[0].message.content)"
]
},
{
@@ -29,6 +73,15 @@
"from amadeusgpt import create_project"
]
},
+ {
+ "cell_type": "markdown",
+ "id": "19befaf4",
+ "metadata": {},
+ "source": [
+ "## Please upload the demo video and associated files:\n",
+ "- you can grab it from here: https://github.com/AdaptiveMotorControlLab/AmadeusGPT/tree/mwm/docs/examples/MausHaus"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -48,7 +101,7 @@
" \"video_info.scene_frame_number\" : 100,\n",
"}\n",
"\n",
- "config = create_project(data_folder = \"../examples/MausHaus\",\n",
+ "config = create_project(data_folder = \"../examples/MausHaus\", #check the path to the data folder\n",
" result_folder = \"results\",\n",
" **kwargs\n",
" )\n",
@@ -65,8 +118,8 @@
"metadata": {},
"outputs": [],
"source": [
- "behavior_analysis = amadeus.get_behavior_analysis(video_file_path='../examples/MausHaus/maushaus_trimmed.mp4',\n",
- " keypoint_file_path='../examples/MausHaus/maushaus_trimmed.h5')\n",
+ "behavior_analysis = amadeus.get_behavior_analysis(video_file_path='../examples/MausHaus/maushaus_trimmed.mp4', #check the path to the video file\n",
+ " keypoint_file_path='../examples/MausHaus/maushaus_trimmed.h5') #check the path to the keypoint file\n",
"\n",
"behavior_analysis.gui_manager.add_roi_from_video_selection()"
]
diff --git a/notebooks/Use_Task_Program.ipynb b/notebooks/Use_Task_Program.ipynb
index b452bfe..086b4c7 100644
--- a/notebooks/Use_Task_Program.ipynb
+++ b/notebooks/Use_Task_Program.ipynb
@@ -4,7 +4,50 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "### initialize a config"
+ "# Demo: set up your own task! \n",
+ "- initialize a config"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!pip install --pre amadeusgpt"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "- Let's test that your open AI API Key works:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mykey = \"paste-your-key-here\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from openai import OpenAI\n",
+ "\n",
+ "client = OpenAI(api_key=mykey)\n",
+ "\n",
+ "response = client.chat.completions.create(\n",
+ " model=\"gpt-4\",\n",
+ " messages=[{\"role\": \"user\", \"content\": \"Hello\"}]\n",
+ ")\n",
+ "print(response.choices[0].message.content)"
]
},
{
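
The key check in these notebooks calls the chat completions endpoint directly; the validate_openai_api_key helper added in amadeusgpt/utils could serve the same purpose. A sketch, assuming that module and the openai package are importable:

    from amadeusgpt.utils import validate_openai_api_key

    mykey = "paste-your-key-here"
    print("API key valid:", validate_openai_api_key(mykey))
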
diff --git a/notebooks/YourData.ipynb b/notebooks/YourData.ipynb
index 848575f..a6bceb5 100644
--- a/notebooks/YourData.ipynb
+++ b/notebooks/YourData.ipynb
@@ -1,16 +1,83 @@
{
"cells": [
+ {
+ "cell_type": "markdown",
+ "id": "7436cb96",
+ "metadata": {},
+ "source": [
+ "# Demo: use AmadeusGPT on your own data 🚀"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5e16b7b4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "!pip install --pre amadeusgpt\n",
+ "!pip install --pre deeplabcut"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "076dcb4a",
+ "metadata": {},
+ "source": [
+ "- Let's test that your open AI API Key works:"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
- "id": "f01f49c5",
+ "id": "98ed0019",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "mykey = \"paste-your-key-here\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9d7a2165",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from openai import OpenAI\n",
+ "\n",
+ "client = OpenAI(api_key=mykey)\n",
+ "\n",
+ "response = client.chat.completions.create(\n",
+ " model=\"gpt-4\",\n",
+ " messages=[{\"role\": \"user\", \"content\": \"Hello\"}]\n",
+ ")\n",
+ "print(response.choices[0].message.content)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "55acfefe",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import amadeusgpt\n",
+ "import deeplabcut"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "963f03b1",
"metadata": {},
"outputs": [],
"source": [
"#If th openai api key is not set already, please set it here.\n",
"import os\n",
- "if 'OPENAI_API_KEY' not in os.environ: \n",
- " os.environ['OPENAI_API_KEY'] = 'your key'"
+ "if 'OPENAI_API_KEY' not in os.environ:\n",
+ " os.environ['OPENAI_API_KEY'] = mykey\n",
+ "amadeusgpt.__file__"
]
},
{
@@ -30,11 +97,12 @@
"id": "603f97b9",
"metadata": {},
"source": [
- "### Note that unlike other notebooks, we don't have keypoint_file_path here (as it's not provided)\n",
- "### By default, we use gpt-4o to determine which SuperAnimal models to run and it will run SuperAnimal in the first time the keypoints related queries are asked. Note to use superanimal, you will need to install the newest DeepLabCut.\n",
- "### Make sure you use a short video clips if you are not using GPUs in Linux (Mac silicon support to be added)"
+ "## Now, unlike other notebooks, we don't have keypoint_file_path here (as it's not provided)\n",
+ "\n",
+ "- By default, we use gpt-4o to determine which SuperAnimal models to run and it will run SuperAnimal in the first time the keypoints related queries are asked. Note to use superanimal, you will need to install the latest DeepLabCut.\n",
+ "- Make sure you use a short video clips if you are not using GPUs in Linux (Mac silicon support to be added)!"
]
},
{
"cell_type": "code",
"execution_count": null,