From c62713b9f1d8822ecc200f238514ffe8b9739a72 Mon Sep 17 00:00:00 2001 From: Lorenzo Delgado Date: Tue, 25 Mar 2025 13:35:49 +0100 Subject: [PATCH 1/8] lnsd/dips-debug Signed-off-by: Lorenzo Delgado --- .env | 1 + .gitmodules | 3 ++ README.md | 86 ++++++++++++++++++++++++++++++++++++++ dipper/Dockerfile | 2 +- dipper/source | 2 +- docker-compose.yaml | 24 ++++++++--- indexer-agent/Dockerfile | 2 +- indexer-agent/run.sh | 4 ++ indexer-service/Dockerfile | 45 +++++++++++++++++++- indexer-service/run.sh | 18 +++++++- indexer-service/source | 1 + scripts/add-subgraph.sh | 21 ++++++---- subgraph-deploy/run.sh | 14 ++++--- 13 files changed, 198 insertions(+), 25 deletions(-) create mode 160000 indexer-service/source mode change 100644 => 100755 scripts/add-subgraph.sh diff --git a/.env b/.env index 54343da..ba46c20 100644 --- a/.env +++ b/.env @@ -8,6 +8,7 @@ GRAPH_NODE_STATUS=8030 GRAPH_NODE_METRICS=8040 INDEXER_MANAGEMENT=7600 INDEXER_SERVICE=7601 +INDEXER_SERVICE_DIPS_RPC_PORT=7602 GATEWAY=7700 REDPANDA_KAFKA=9092 REDPANDA_ADMIN=9644 diff --git a/.gitmodules b/.gitmodules index ea9dde2..7ad58e8 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +1,6 @@ [submodule "dipper/source"] path = dipper/source url = git@github.com:edgeandnode/dipper.git +[submodule "indexer-service/source"] + path = indexer-service/source + url = git@github.com:graphprotocol/indexer-rs.git diff --git a/README.md b/README.md index 2404305..511d2cd 100644 --- a/README.md +++ b/README.md @@ -103,6 +103,30 @@ Epochs are set up to be 554 blocks long, use `scripts/mine-block.sh` to advance - `graph indexer connect http://localhost:7600` - `graph indexer --network=hardhat status` +## indexer-service + +- `docker compose up --build indexer-service` +- `docker compose stop indexer-service` +- `docker compose logs -f indexer-service` + +### Building from source + +Building from source requires the Git submodules to be initialized. 
+ +- `git submodule update --init --recursive --force indexer-service/source` + +And then select the `wrapper-dev` target when building the Docker image in the `docker-compose.yaml` file. + +```diff + indexer-service: + container_name: indexer-service + build: { +- target: "wrapper", # Set to "wrapper-dev" for building from source ++ target: "wrapper-dev", # Set to "wrapper-dev" for building from source + context: indexer-service, + } +``` + ## gateway ```bash @@ -140,3 +164,65 @@ docker exec -it redpanda rpk topic consume gateway_client_query_results --broker } } ``` + +## dipper + +> [!IMPORTANT] +> The Dipper service repository is private. You will need to initialize the submodules manually. +> +> ```bash +> git submodule update --init --recursive --force dipper/source +> ``` + +- `docker compose up --build dipper` +- `docker compose stop dipper` +- `docker compose logs -f dipper` + +### Dipper admin CLI + +Download the dipper Admin CLI from the GitHUb Actions CI build job artifacts section, e.g, https://github.com/edgeandnode/dipper/actions/runs/14228474216 + +```bash +# Source the environment variables (required for the next step) +source .env + +# Export the DIPs CLI auth config +export DIPS_SIGNING_KEY="${RECEIVER_SECRET}" && export DIPS_SERVER_URL="http://localhost:${DIPPER_ADMIN_RPC_PORT}/" + +## Commands +# Request to index the "block-oracle" subgraph on the "hardhat" network +./dipper-cli indexings register "QmNngXzFajkQHRj3ZjAJAF7jc2AibTQKB4dwftjiKXC9RP" 1337 + +# List all indexings +./dipper-cli indexings list + +# Deregister an indexing +./dipper-cli indexings cancel " + echo " The name of the subgraph in the graph-node" exit 1 fi + +# Get the deployment hash from the graph-node deployment="$(curl -s "http://localhost:${GRAPH_NODE_GRAPHQL}/subgraphs/name/$name" \ -H 'content-type: application/json' \ -d '{"query": "{ _meta { deployment } }" }' \ | jq -r '.data._meta.deployment')" echo "deployment=${deployment}" -deployment_hex="$(curl -s -X POST 
"http://localhost:${IPFS_RPC}/api/v0/cid/format?arg=${deployment}&b=base16" \ - | jq -r '.Formatted')" - -# Remove the first 8 bytes of the hex string matching the IPFS prefix -deployment_hex="${deployment_hex#f01701220}" +# Extract the deployment hash from the IPFS CID and strip the IPFS CID prefix +deployment_hex="$(curl -s -X POST "http://localhost:${IPFS_RPC}/api/v0/cid/format?arg=${deployment}&b=base16" \ + | jq -r '.Formatted' | sed 's/^f01701220//')" echo "deployment_hex=${deployment_hex}" -gns="$(jq -r '."1337".L1GNS.address' contracts.json)" +# Get the GNS address for the chain +gns="$(jq -r ".\"${CHAIN_ID}\".L1GNS.address" contracts.json)" # https://github.com/graphprotocol/contracts/blob/3eb16c80d4652c238d3e6b2c396da712af5072b4/packages/sdk/src/deployments/network/actions/gns.ts#L38 cast send --rpc-url="http://localhost:${CHAIN_RPC}" --confirmations=0 --mnemonic="${MNEMONIC}" \ "${gns}" 'publishNewSubgraph(bytes32,bytes32,bytes32)' \ @@ -33,6 +38,6 @@ echo "Now run graph indexer to allocate to this subgraph ${deployment} :" echo "------------------------------------------------------------------" echo echo "./bin/graph-indexer indexer connect \"http://localhost:${INDEXER_MANAGEMENT}\"" -echo "./bin/graph-indexer indexer actions queue allocate ${deployment} 0.001 --network=hardhat" +echo "./bin/graph-indexer indexer actions queue allocate ${deployment} 0.001 --network=${CHAIN_NAME}" echo "./bin/graph-indexer indexer actions get" echo "./bin/graph-indexer indexer actions update --id [id] status approved" diff --git a/subgraph-deploy/run.sh b/subgraph-deploy/run.sh index 9ef2a9c..50b5fc5 100755 --- a/subgraph-deploy/run.sh +++ b/subgraph-deploy/run.sh @@ -2,7 +2,8 @@ set -eu . 
/opt/.env -# don't rerun when retriggered via a service_completed_successfully condition +# Skip if the subgraph is already deployed, i.e., is present in the graph-network +# Don't rerun when retriggered via a 'service_completed_successfully' condition if curl -s "http://graph-node:${GRAPH_NODE_GRAPHQL}/subgraphs/name/graph-network" \ -H 'content-type: application/json' \ -d '{"query": "{ subgraphs { id } }" }' \ @@ -28,16 +29,17 @@ echo "network_subgraph_deployment=${network_subgraph_deployment}" echo "block_oracle_deployment=${block_oracle_deployment}" echo "tap_deployment=${tap_deployment}" -# force index block oracle subgraph & network subgraph +# Force index subgraphs: network, block oracle, tap graph-indexer indexer connect "http://indexer-agent:${INDEXER_MANAGEMENT}" graph-indexer indexer --network=hardhat rules prepare "${network_subgraph_deployment}" -o json graph-indexer indexer --network=hardhat rules prepare "${block_oracle_deployment}" -o json graph-indexer indexer --network=hardhat rules prepare "${tap_deployment}" -o json -deployment_hex="$(curl -s -X POST "http://ipfs:${IPFS_RPC}/api/v0/cid/format?arg=${block_oracle_deployment}&b=base16" \ - | jq -r '.Formatted')" -deployment_hex="${deployment_hex#f01701220}" +# Extract the deployment hash from the IPFS CID and strip the IPFS CID prefix +deployment_hex="$(curl -s -X POST "http://ipfs:${IPFS_RPC}/api/v0/cid/format?arg=${network_subgraph_deployment}&b=base16" | jq -r '.Formatted' | sed 's/^f01701220//')" echo "deployment_hex=${deployment_hex}" + +# Publish the subgraph gns="$(jq -r '."1337".L1GNS.address' /opt/contracts.json)" cast send --rpc-url="http://chain:${CHAIN_RPC}" --confirmations=0 --mnemonic="${MNEMONIC}" \ "${gns}" 'publishNewSubgraph(bytes32,bytes32,bytes32)' \ @@ -45,7 +47,7 @@ cast send --rpc-url="http://chain:${CHAIN_RPC}" --confirmations=0 --mnemonic="${ '0x0000000000000000000000000000000000000000000000000000000000000000' \ 
'0x0000000000000000000000000000000000000000000000000000000000000000' -graph-indexer indexer --network=hardhat rules set "${block_oracle_deployment}" decisionBasis always -o json +graph-indexer indexer --network=hardhat rules set "${network_subgraph_deployment}" decisionBasis always -o json while true; do # Fetch output from the command and handle errors From bc9fcaacaeddffde6568c7e8aaca8267bc122823 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 8 May 2025 15:40:41 +0000 Subject: [PATCH 2/8] chore: use newer indexer-agent --- indexer-agent/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/indexer-agent/Dockerfile b/indexer-agent/Dockerfile index a6f1933..864a1bc 100644 --- a/indexer-agent/Dockerfile +++ b/indexer-agent/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/graphprotocol/indexer-agent:pr-1094@sha256:f5e20f4d63380ff68c3dfca93ae9acfba6fe9e9ae7aec61049bfadf93963df71 +FROM ghcr.io/graphprotocol/indexer-agent:pr-1113@sha256:3fa8a1d49c9e9069a08d5d5eacc15526c4392ba35a7db36c43cb42444ab9bde3 RUN apt-get update \ && apt-get install -y jq \ && rm -rf /var/lib/apt/lists/* From a8c25cf1c8817582e4d6f0823cde66a1b02d2211 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 27 May 2025 14:27:24 +0000 Subject: [PATCH 3/8] fix: newer agent, agent config, localhost only --- docker-compose.yaml | 36 ++++++++++++++++++------------------ indexer-agent/Dockerfile | 2 +- indexer-agent/run.sh | 5 +++++ 3 files changed, 24 insertions(+), 19 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index 347e802..fdb7274 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -2,7 +2,7 @@ services: chain: container_name: chain image: ghcr.io/foundry-rs/foundry:v1.0.0 - ports: ["${CHAIN_RPC}:8545"] + ports: ["127.0.0.1:${CHAIN_RPC}:8545"] command: ["anvil --host=0.0.0.0 --chain-id=1337 --base-fee=0"] healthcheck: { interval: 1s, retries: 10, test: cast block } @@ -10,7 +10,7 @@ services: ipfs: container_name: ipfs image: 
ipfs/kubo:v0.34.1 - ports: ["${IPFS_RPC}:5001"] + ports: ["127.0.0.1:${IPFS_RPC}:5001"] environment: IPFS_PROFILE: server healthcheck: @@ -19,7 +19,7 @@ services: postgres: container_name: postgres image: postgres:17-alpine - ports: ["${POSTGRES}:5432"] + ports: ["127.0.0.1:${POSTGRES}:5432"] command: postgres -c 'max_connections=1000' -c 'shared_preload_libraries=pg_stat_statements' volumes: - ./postgres/setup.sql:/docker-entrypoint-initdb.d/setup.sql:ro @@ -39,10 +39,10 @@ services: postgres: { condition: service_healthy } stop_signal: SIGKILL ports: - - ${GRAPH_NODE_GRAPHQL}:8000 - - ${GRAPH_NODE_ADMIN}:8020 - - ${GRAPH_NODE_STATUS}:8030 - - ${GRAPH_NODE_METRICS}:8040 + - 127.0.0.1:${GRAPH_NODE_GRAPHQL}:8000 + - 127.0.0.1:${GRAPH_NODE_ADMIN}:8020 + - 127.0.0.1:${GRAPH_NODE_STATUS}:8030 + - 127.0.0.1:${GRAPH_NODE_METRICS}:8040 volumes: - ./.env:/opt/.env:ro healthcheck: @@ -85,7 +85,7 @@ services: build: { context: indexer-agent } depends_on: block-oracle: { condition: service_healthy } - ports: ["${INDEXER_MANAGEMENT}:7600"] + ports: ["127.0.0.1:${INDEXER_MANAGEMENT}:7600"] stop_signal: SIGKILL volumes: - ./.env:/opt/.env:ro @@ -116,8 +116,8 @@ services: ipfs: { condition: service_healthy } tap-escrow-manager: { condition: service_started } ports: [ - "${INDEXER_SERVICE}:7601", - "${INDEXER_SERVICE_DIPS_RPC_PORT}:7602" + "127.0.0.1:${INDEXER_SERVICE}:7601", + "127.0.0.1:${INDEXER_SERVICE_DIPS_RPC_PORT}:7602" ] stop_signal: SIGKILL volumes: @@ -134,7 +134,7 @@ services: build: { context: tap-aggregator } depends_on: tap-contracts: { condition: service_completed_successfully } - ports: ["${TAP_AGGREGATOR}:7610"] + ports: ["127.0.0.1:${TAP_AGGREGATOR}:7610"] stop_signal: SIGKILL volumes: - ./.env:/opt/.env:ro @@ -154,10 +154,10 @@ services: container_name: redpanda image: docker.redpanda.com/redpandadata/redpanda:v23.3.5 ports: - - ${REDPANDA_KAFKA}:9092 - - ${REDPANDA_ADMIN}:9644 - - ${REDPANDA_PANDAPROXY}:8082 - - ${REDPANDA_SCHEMA_REGISTRY}:8081 + - 
127.0.0.1:${REDPANDA_KAFKA}:9092 + - 127.0.0.1:${REDPANDA_ADMIN}:9644 + - 127.0.0.1:${REDPANDA_PANDAPROXY}:8082 + - 127.0.0.1:${REDPANDA_SCHEMA_REGISTRY}:8081 command: - redpanda start - --smp 1 @@ -190,7 +190,7 @@ services: indexer-service: { condition: service_healthy } redpanda: { condition: service_healthy } tap-escrow-manager: { condition: service_started } - ports: ["${GATEWAY}:7700"] + ports: ["127.0.0.1:${GATEWAY}:7700"] stop_signal: SIGKILL volumes: - ./.env:/opt/.env:ro @@ -211,8 +211,8 @@ services: gateway: { condition: service_healthy } postgres: { condition: service_healthy } ports: - - "${DIPPER_ADMIN_RPC_PORT}:9000" - - "${DIPPER_INDEXER_RPC_PORT}:9001" + - "127.0.0.1:${DIPPER_ADMIN_RPC_PORT}:9000" + - "127.0.0.1:${DIPPER_INDEXER_RPC_PORT}:9001" stop_signal: SIGKILL environment: RUST_LOG: info,dipper_service=trace,dipper_service::network=info diff --git a/indexer-agent/Dockerfile b/indexer-agent/Dockerfile index 864a1bc..9f76631 100644 --- a/indexer-agent/Dockerfile +++ b/indexer-agent/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/graphprotocol/indexer-agent:pr-1113@sha256:3fa8a1d49c9e9069a08d5d5eacc15526c4392ba35a7db36c43cb42444ab9bde3 +FROM ghcr.io/graphprotocol/indexer-agent:pr-1113@sha256:0f8254487511c08b95e2230cd93052fa24e8f48e214af06a31b0988b6b3dadcf RUN apt-get update \ && apt-get install -y jq \ && rm -rf /var/lib/apt/lists/* diff --git a/indexer-agent/run.sh b/indexer-agent/run.sh index 8b86577..9a5a4c5 100755 --- a/indexer-agent/run.sh +++ b/indexer-agent/run.sh @@ -44,6 +44,11 @@ export INDEXER_AGENT_POSTGRES_USERNAME=postgres export INDEXER_AGENT_POSTGRES_PASSWORD= export INDEXER_AGENT_PUBLIC_INDEXER_URL="http://indexer-service:${INDEXER_SERVICE}" export INDEXER_AGENT_TAP_SUBGRAPH_ENDPOINT="http://graph-node:${GRAPH_NODE_GRAPHQL}/subgraphs/semiotic/tap" +export INDEXER_AGENT_ENABLE_DIPS=true +export INDEXER_AGENT_DIPS_EPOCHS_MARGIN=1 +export INDEXER_AGENT_DIPPER_ENDPOINT="http://dipper:${DIPPER_INDEXER_RPC_PORT}" +export 
INDEXER_AGENT_DIPS_ALLOCATION_AMOUNT=1 + mkdir -p ./config/ cat >./config/config.yaml <<-EOF networkIdentifier: "hardhat" From 4932df9fc4015e03ed515f55e52501d91be0474b Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 24 Jul 2025 17:01:05 +0000 Subject: [PATCH 4/8] feat: add initial Claude setup --- CLAUDE.md | 238 +++++++++++++++++++++++++++++++++++++ dipper/source | 2 +- flows/README.md | 42 +++++++ flows/dips-testing.md | 185 ++++++++++++++++++++++++++++ indexer-agent/Dockerfile | 2 +- indexer-agent/run.sh | 1 + indexer-service/Dockerfile | 5 +- indexer-service/source | 2 +- scripts/dipper-cli.sh | 18 +++ scripts/reload-agent.sh | 8 ++ 10 files changed, 498 insertions(+), 5 deletions(-) create mode 100644 CLAUDE.md create mode 100644 flows/README.md create mode 100644 flows/dips-testing.md create mode 100755 scripts/dipper-cli.sh create mode 100644 scripts/reload-agent.sh diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..9ce56ec --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,238 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Project Overview + +local-network is a complete local Graph Protocol ecosystem designed for debugging Graph Protocol components (indexer stack, gateway, contracts changes, etc) and running integration tests. It orchestrates 15+ services using Docker Compose to simulate the entire Graph Protocol stack locally. 
+ +## Architecture + +### Core Services +- **chain**: Foundry-based Ethereum node (hardhat network, chain ID 1337) with pre-deployed Graph Protocol contracts +- **ipfs**: IPFS node for storing subgraph manifests and metadata +- **postgres**: Shared PostgreSQL database for indexer components and graph-node + +### Indexer Stack +- **graph-node**: The Graph Protocol's query processing node (indexes subgraphs and serves queries) +- **indexer-agent**: Manages allocations and interacts with the network (uses CLI) +- **indexer-service**: Rust-based service handling cost models and pricing (built from source) +- **tap-agent**: Timeline Aggregation Protocol agent for micro-payments + +### Gateway & DIPs (Distributed Indexing Payments) +- **gateway**: Routes queries to appropriate indexers with payment handling +- **dipper**: Rust service managing distributed indexing payments + +### Supporting Infrastructure +- **redpanda**: Kafka-compatible message broker +- **block-oracle**: Tracks blockchain blocks +- **graph-contracts**: Deploys Graph Protocol contracts on startup +- **tap-contracts**: Deploys TAP (Timeline Aggregation Protocol) contracts +- **tap-escrow-manager**: Manages TAP escrow functionality +- **tap-aggregator**: Aggregates TAP receipts +- **subgraph-deploy**: Deploys necessary subgraphs + +## Key Commands + +### Starting the Network +```bash +docker compose down && docker compose up --build # Full restart with rebuild +docker compose up -d # Start all services +docker compose ps # Check service status +docker compose logs -f [service] # View logs +``` + +**Important**: The initial startup can take 5-10 minutes due to the complex dependency chain: +1. **Base services** start first: `chain`, `ipfs`, `postgres`, `redpanda` +2. **graph-node** waits for base services to be healthy +3. **graph-contracts** deploys Graph Protocol contracts (then exits) +4. **tap-contracts** deploys TAP contracts (then exits) +5. 
**block-oracle** starts after TAP contracts are deployed +6. **indexer-agent** waits for block-oracle to be healthy +7. **subgraph-deploy** waits for indexer-agent (deploys subgraphs, then exits) +8. **tap-escrow-manager** waits for subgraph deployment +9. **indexer-service** waits for indexer-agent and tap-escrow-manager +10. **gateway** waits for indexer-service to be healthy +11. **dipper** waits for gateway to be healthy + +Services like `graph-contracts`, `tap-contracts`, and `subgraph-deploy` run once and exit successfully. + +### Building from Source +Some services can be built from source using Git submodules. To enable source builds: + +1. Initialize the submodule: + ```bash + git submodule update --init --recursive --force [service]/source + ``` + +2. Set the build target to `wrapper-dev` in docker-compose.yaml: + ```yaml + build: { + target: "wrapper-dev", # Set to "wrapper" to use pre-built images + context: [service], + } + ``` + +Services that support source builds: +- **indexer-service**: Requires `indexer-service/source` submodule +- **dipper**: Requires `dipper/source` submodule (private repo - manual init required) + +### Utility Scripts +```bash +./scripts/advance-blocks.sh # Mine new blocks on the chain +./scripts/mine-block.sh # Alternative block mining script (requires foundry on host) +./scripts/reload-agent.sh # Reload indexer-agent with new allocations +``` + +### Database Access +```bash +docker compose exec postgres psql -U postgres # Access PostgreSQL +# Databases: indexer, tap_agent, gateway, graph_node_1 +``` + +## Service Details + +### Indexer Service (Rust) +- Built from source at `indexer-service/source` +- Uses indexer-service-rs v1.1.1 +- Configured via environment variables +- Serves on port 7600 + +### Dipper (DIPs) +- Built from source at `dipper/source` (private repo - requires manual submodule init) +- Manages distributed indexing payments +- Works with redpanda for message passing +- Configured for testnet-01 DIPs channel 
+- Admin CLI available for managing indexings (download from GitHub Actions or run from source) + +### TAP (Timeline Aggregation Protocol) +- indexer-tap-agent handles micro-payments +- Receipts stored in PostgreSQL tap_escrow_subgraph database +- Configured for 1-second receipt aggregation + +## Development Workflow + +1. Services include health checks - wait for healthy status before use +2. Chain starts with pre-deployed contracts at block 100 +3. Indexer components share a PostgreSQL database with migrations +4. Gateway requires indexers to be synced before routing queries +5. Monitor services using exposed metrics endpoints (Prometheus format) + +## Debugging Best Practices + +When troubleshooting issues in local-network: + +1. **Always check logs first** - Never take actions without understanding the problem: + ```bash + docker logs [service-name] --tail 50 + ``` + +2. **Look for specific error patterns** in logs: + - Connection errors (database, other services) + - Authentication/authorization failures + - Missing dependencies or configuration + - Panic messages or stack traces + +3. **Ask before acting** - If you find errors or unexpected behavior: + - Show the relevant logs to the user + - Explain what you found + - Ask for confirmation before taking debugging actions + +4. **Document common issues** - When you solve a problem: + - Note the symptoms + - Document the root cause + - Provide clear solution steps + - Add verification steps + +This approach prevents unnecessary service restarts and helps build a knowledge base of solutions. + +## Common Issues & Solutions + +### Services Stuck in "Created" State +When services remain in "Created" state despite dependencies being met: + +1. **Check logs first** - Never blindly restart services: + ```bash + docker logs [service-name] --tail 50 + ``` + +2. 
**Verify dependency health**: + ```bash + docker compose ps # Check only running services + docker compose ps -a # Check all services including exited ones + ``` + +3. **Manual service cascade** - If auto-start fails, trigger services manually: + ```bash + docker compose up -d block-oracle # Often the first to get stuck + # Wait for it to be healthy, then: + docker compose up -d indexer-agent + # Continue with other dependent services + ``` + +4. **Use single docker compose up -d** after manual fixes to start remaining services + +### TAP Escrow Account Setup Issue +When services fail with "No sender found for signer" or return 402 (Payment Required): + +**Symptoms**: +- indexer-service logs: `There was an error while accessing escrow account: No sender found for signer 0xf39...` +- dipper crashes with: `GraphQL request failed: bad indexers: {0xf4e...2266: BadResponse(402)}` +- Gateway can query indexers but DIPs payment flow fails + +**Root Cause**: TAP escrow accounts aren't automatically created on first run + +**Solution**: Restart tap-escrow-manager to trigger escrow account creation: +```bash +docker compose restart tap-escrow-manager +# Wait for escrow setup (check logs for "sender=... 
authorized=true") +docker compose restart indexer-service +docker compose restart dipper # If it was crashing +``` + +**Verification**: tap-escrow-manager logs should show: +``` +sender=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 +signer=0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 authorized=true +allowance=100.00000000000001 +``` + +### Other Common Issues +- **Database migrations**: indexer-agent runs migrations on startup +- **Block advancement**: Use advance-blocks.sh if chain appears stuck (epochs are 554 blocks long) +- **Gateway errors**: Ensure indexers are properly allocated and synced +- **DIPs issues**: Check redpanda connectivity and dipper logs +- **IPFS hex digests**: Valid CID is hex digits prefixed by `f1220` (e.g., `0xd6b...` → `f1220d6b...`) + +## Testing Flows + +Detailed step-by-step guides for specific workflows are available in the `flows/` directory: +- [DIPs Testing](flows/dips-testing.md) - Test distributed indexing payments +- Additional flows coming soon (indexer setup, subgraph deployment, gateway testing) + +## Working with Git Submodules + +**Important**: This repository contains Git submodules (indexer-service/source, dipper/source). Before committing: + +1. **Check your current directory**: + ```bash + pwd # Verify you're in the intended repository + git status # Check which repository you're about to commit to + ``` + +2. **Common scenarios**: + - Main repo changes (CLAUDE.md, docker-compose.yaml, scripts/): Commit from repo root + - Indexer-service changes: Commit from `indexer-service/source/` + - Dipper changes: Commit from `dipper/source/` + +3. **Verify before pushing**: + - Always check `git status` to ensure you're committing to the correct repository + - Submodule commits need to be pushed separately from main repo commits + +## Environment Configuration + +Key environment variables are set in docker-compose.yml. 
Notable patterns: +- ETHEREUM_NETWORK="hardhat" for local development +- Chain RPC: http://chain:8545 +- IPFS: http://ipfs:5001 +- Most services expose metrics on port 7300 \ No newline at end of file diff --git a/dipper/source b/dipper/source index e8b8845..d1a7cf9 160000 --- a/dipper/source +++ b/dipper/source @@ -1 +1 @@ -Subproject commit e8b8845408a18662e12116bf24a93979a964113c +Subproject commit d1a7cf995a807fe8a48e1ef9897f74b21979129e diff --git a/flows/README.md b/flows/README.md new file mode 100644 index 0000000..4082777 --- /dev/null +++ b/flows/README.md @@ -0,0 +1,42 @@ +# Testing Flows + +This directory contains step-by-step guides for testing specific features and workflows in the local-network environment. + +## Available Flows + +### [DIPs Testing](./dips-testing.md) +Test the Distributed Indexing Payments (DIPs) system including: +- Setting up dipper credentials +- Registering indexing requests +- Monitoring payment flows +- Verifying receipt aggregation + +### [Indexer Setup](./indexer-setup.md) *(coming soon)* +Complete workflow for setting up a new indexer including: +- Indexer registration +- Allocation management +- Cost model configuration +- Health monitoring + +### [Subgraph Deployment](./subgraph-deploy.md) *(coming soon)* +Deploy and test subgraphs including: +- IPFS upload verification +- Allocation creation +- Query testing +- Indexing status monitoring + +### [Gateway Testing](./gateway-testing.md) *(coming soon)* +Test gateway query routing including: +- Query submission with payment +- Receipt verification +- Load balancing behavior +- Error handling + +## Creating New Flow Documentation + +When documenting a new flow, include: +1. Prerequisites (services that must be running, initial state) +2. Step-by-step commands with expected outputs +3. Verification steps +4. Common issues and troubleshooting +5. 
Cleanup procedures \ No newline at end of file diff --git a/flows/dips-testing.md b/flows/dips-testing.md new file mode 100644 index 0000000..601a07a --- /dev/null +++ b/flows/dips-testing.md @@ -0,0 +1,185 @@ +# DIPs Testing Flow + +This guide walks through testing the Distributed Indexing Payments (DIPs) system in the local-network environment. + +## Prerequisites + +1. All services running and healthy: + ```bash + docker compose ps + ``` + +2. Dipper service must be built from source (private repo): + ```bash + git submodule update --init --recursive --force dipper/source + # Ensure docker-compose.yaml has target: "wrapper-dev" for dipper + docker compose up -d --build dipper + ``` + +3. Source environment variables: + ```bash + source .env + ``` + +## Setup Dipper CLI + +You have two options for running the dipper CLI: + +### Option 1: Use the Wrapper Script (Recommended) +```bash +# From repo root - automatically handles environment variables +./scripts/dipper-cli.sh [command] +``` + +### Option 2: Run from Source +```bash +cd dipper/source +# All commands will be run from this directory using cargo +# Note: You'll need to set environment variables manually (see below) +``` + +## Configure Authentication + +**Important**: The dipper-cli requires environment variables to be set for EVERY command. 
You have two options: + +### Option 1: Export Once per Session +```bash +# Set up DIPs CLI authentication (valid for current shell session) +source ../../.env # Load environment from repo root +export DIPS_SIGNING_KEY="${RECEIVER_SECRET}" +export DIPS_SERVER_URL="http://localhost:${DIPPER_ADMIN_RPC_PORT}/" +``` + +### Option 2: Include with Each Command +```bash +# Source .env and set variables inline with each command +source ../../.env && export DIPS_SIGNING_KEY="${RECEIVER_SECRET}" && export DIPS_SERVER_URL="http://localhost:${DIPPER_ADMIN_RPC_PORT}/" && cargo run --bin dipper-cli -- [command] +``` + +**Note**: The CLI will fail with `missing field 'server_url'` if these environment variables are not set. + +## Testing Flow + +### 1. Register an Indexing Request + +```bash +# Using wrapper script (from repo root): +./scripts/dipper-cli.sh requests register "QmNngXzFajkQHRj3ZjAJAF7jc2AibTQKB4dwftjiKXC9RP" 1337 + +# OR using cargo directly (from dipper/source): +cargo run --bin dipper-cli -- requests register "QmNngXzFajkQHRj3ZjAJAF7jc2AibTQKB4dwftjiKXC9RP" 1337 + +# Expected output: +# Creating indexing request for deployment ID: DeploymentId(QmNngXzFajkQHRj3ZjAJAF7jc2AibTQKB4dwftjiKXC9RP) +# Created indexing request with ID: 01983d54-a2a0-7933-a4f5-bb96d7f4dd52 +``` + +### 2. Verify Registration + +```bash +# Using wrapper script (from repo root): +./scripts/dipper-cli.sh requests list + +# OR using cargo directly (from dipper/source): +cargo run --bin dipper-cli -- requests list + +# Expected output: JSON array showing your indexing request with status "OPEN" +# Example: +# [{ +# "id": "01983d54-a2a0-7933-a4f5-bb96d7f4dd52", +# "status": "OPEN", +# "requested_by": "0xf4ef6650e48d099a4972ea5b414dab86e1998bd3", +# "deployment_id": "QmNngXzFajkQHRj3ZjAJAF7jc2AibTQKB4dwftjiKXC9RP" +# }] +``` + +### 3. 
Check Dipper Logs + +Monitor dipper service logs for payment processing: +```bash +# Watch for indexing registration and payment activity +docker compose logs -f dipper + +# Or filter for specific events: +docker compose logs -f dipper | grep -E "(payment|receipt|indexing|registered)" + +# Expected log patterns: +# - "Indexing request registered" +# - "Processing payment" +# - "Receipt validated" +``` + +### 4. Verify Database State + +Check PostgreSQL for DIPs-related data: +```bash +docker compose exec postgres psql -U postgres -d dipper -c "SELECT * FROM indexing_requests;" +``` + +### 5. Test Query with Payment + +Send a query through the gateway (which should trigger DIPs): +```bash +curl "http://localhost:7700/api/subgraphs/id/QmNngXzFajkQHRj3ZjAJAF7jc2AibTQKB4dwftjiKXC9RP" \ + -H 'content-type: application/json' \ + -H "Authorization: Bearer deadbeefdeadbeefdeadbeefdeadbeef" \ + -d '{"query": "{ _meta { block { number } } }"}' +``` + +### 6. Cancel an Indexing Request + +```bash +# Get the UUID from the list command +cargo run --bin dipper-cli -- requests cancel + +# Example: +cargo run --bin dipper-cli -- requests cancel 01983d54-a2a0-7933-a4f5-bb96d7f4dd52 +``` + +## Verification Steps + +1. **Dipper Health**: Check endpoint at http://localhost:9000/ +2. **Indexer RPC**: Verify indexer endpoint at http://localhost:9001/ +3. **Receipt Aggregation**: TAP receipts should aggregate every second +4. 
**Payment Flow**: Gateway → Dipper → Indexer Service + +## Common Issues + +### Dipper Not Starting +- Check if submodule is initialized: `ls dipper/source/` +- Verify docker-compose.yaml has `target: "wrapper-dev"` +- Check logs: `docker compose logs dipper` +- Ensure Postgres is healthy and migrations completed + +### Authentication Errors +- Verify `DIPS_SIGNING_KEY` is set correctly +- Ensure `RECEIVER_SECRET` is available in .env +- Check `DIPS_SERVER_URL` includes the port +- Try: `echo $DIPS_SIGNING_KEY` to verify it's set + +### CLI Connection Issues +- Ensure dipper service is healthy: `docker compose ps dipper` +- Check admin RPC is accessible: `curl http://localhost:9000/` +- Verify port mapping in docker-compose.yaml + +### Environment Variable Issues +- **"missing field 'server_url'"**: Environment variables not set +- Remember: Variables must be set for EVERY dipper-cli command +- If switching terminals/sessions, re-export the variables +- Alternative: Create a shell script that sets variables and runs commands + +### No Payment Activity +- Ensure gateway is healthy and can route queries +- Verify indexer-service has the DIPs RPC port exposed (7602) +- Check that an allocation exists for the subgraph +- Look for errors in indexer-service logs: `docker compose logs indexer-service` + +## Cleanup + +```bash +# Stop watching logs/consumers +# Ctrl+C to exit + +# Optionally restart services +docker compose restart dipper gateway +``` \ No newline at end of file diff --git a/indexer-agent/Dockerfile b/indexer-agent/Dockerfile index 9f76631..fc7c407 100644 --- a/indexer-agent/Dockerfile +++ b/indexer-agent/Dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/graphprotocol/indexer-agent:pr-1113@sha256:0f8254487511c08b95e2230cd93052fa24e8f48e214af06a31b0988b6b3dadcf +FROM ghcr.io/graphprotocol/indexer-agent:pr-1113@sha256:3c59d1bb151bb25261adb6a93dc22433b9ccec6039802532151277728c892da8 RUN apt-get update \ && apt-get install -y jq \ && rm -rf /var/lib/apt/lists/* diff 
--git a/indexer-agent/run.sh b/indexer-agent/run.sh index 9a5a4c5..ecb6947 100755 --- a/indexer-agent/run.sh +++ b/indexer-agent/run.sh @@ -48,6 +48,7 @@ export INDEXER_AGENT_ENABLE_DIPS=true export INDEXER_AGENT_DIPS_EPOCHS_MARGIN=1 export INDEXER_AGENT_DIPPER_ENDPOINT="http://dipper:${DIPPER_INDEXER_RPC_PORT}" export INDEXER_AGENT_DIPS_ALLOCATION_AMOUNT=1 +export INDEXER_AGENT_LOG_LEVEL=trace mkdir -p ./config/ cat >./config/config.yaml <<-EOF diff --git a/indexer-service/Dockerfile b/indexer-service/Dockerfile index e895890..5bc75a2 100644 --- a/indexer-service/Dockerfile +++ b/indexer-service/Dockerfile @@ -8,6 +8,7 @@ RUN apt-get update \ pkg-config \ protobuf-compiler \ libssl-dev \ + libsasl2-dev \ && rm -rf /var/lib/apt/lists/* WORKDIR /opt @@ -28,7 +29,7 @@ RUN --mount=type=cache,sharing=locked,id=cargo-registry,target=/usr/local/cargo/ FROM debian:bookworm-slim AS wrapper-dev RUN apt-get update \ - && apt-get install -y curl jq \ + && apt-get install -y curl jq openssl ca-certificates protobuf-compiler libsasl2-2 \ && rm -rf /var/lib/apt/lists/* # Copy the built binary from the rust builder image @@ -47,4 +48,4 @@ RUN apt-get update \ COPY ./run.sh /opt/run.sh -ENTRYPOINT bash -cl /opt/run.sh \ No newline at end of file +ENTRYPOINT bash -cl /opt/run.sh diff --git a/indexer-service/source b/indexer-service/source index 322a709..ec8f5bb 160000 --- a/indexer-service/source +++ b/indexer-service/source @@ -1 +1 @@ -Subproject commit 322a70965d7b6843073c0adf24b476f039538fc2 +Subproject commit ec8f5bb954d175aea906998be4e83f0d57e6bb48 diff --git a/scripts/dipper-cli.sh b/scripts/dipper-cli.sh new file mode 100755 index 0000000..310fe71 --- /dev/null +++ b/scripts/dipper-cli.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Wrapper script for dipper-cli that automatically sets required environment variables + +# Get the directory where this script is located +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" + +# Source the .env file from repo root +source 
"$SCRIPT_DIR/../.env" + +# Set required environment variables +export DIPS_SIGNING_KEY="${RECEIVER_SECRET}" +export DIPS_SERVER_URL="http://localhost:${DIPPER_ADMIN_RPC_PORT}/" + +# Change to dipper source directory +cd "$SCRIPT_DIR/../dipper/source" + +# Run dipper-cli with all passed arguments +cargo run --bin dipper-cli -- "$@" \ No newline at end of file diff --git a/scripts/reload-agent.sh b/scripts/reload-agent.sh new file mode 100644 index 0000000..8a4a794 --- /dev/null +++ b/scripts/reload-agent.sh @@ -0,0 +1,8 @@ +#!/bin/bash + + +docker compose down indexer-agent +docker compose build indexer-agent +docker compose create indexer-agent +docker compose start indexer-agent + From 134fc030825cba7d3d0856a7a41cb3bbebee322b Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 24 Jul 2025 17:08:45 +0000 Subject: [PATCH 5/8] fix: details in dips flow --- flows/dips-testing.md | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/flows/dips-testing.md b/flows/dips-testing.md index 601a07a..f5308e2 100644 --- a/flows/dips-testing.md +++ b/flows/dips-testing.md @@ -2,6 +2,8 @@ This guide walks through testing the Distributed Indexing Payments (DIPs) system in the local-network environment. +**What is DIPs?** DIPs (Distributed Indexing Payments) is a system for paying indexers to index specific subgraphs. Unlike query fees, DIPs payments incentivize indexers to allocate resources to index subgraphs that may not yet have query traffic. + ## Prerequisites 1. All services running and healthy: @@ -116,16 +118,29 @@ Check PostgreSQL for DIPs-related data: docker compose exec postgres psql -U postgres -d dipper -c "SELECT * FROM indexing_requests;" ``` -### 5. Test Query with Payment +### 5. Verify Indexer Allocation + +DIPs is about paying for indexing, not queries. 
To verify the agreement is working, check if the indexer has allocated to the subgraph: -Send a query through the gateway (which should trigger DIPs): ```bash -curl "http://localhost:7700/api/subgraphs/id/QmNngXzFajkQHRj3ZjAJAF7jc2AibTQKB4dwftjiKXC9RP" \ +# Query the network subgraph to check allocations +curl -s http://localhost:8000/subgraphs/name/graph-network -X POST \ -H 'content-type: application/json' \ - -H "Authorization: Bearer deadbeefdeadbeefdeadbeefdeadbeef" \ - -d '{"query": "{ _meta { block { number } } }"}' + -d '{ + "query": "{ indexer(id: \"0xf4ef6650e48d099a4972ea5b414dab86e1998bd3\") { allocations { id subgraphDeployment { ipfsHash } status } } }" + }' | jq . + +# Look for an allocation with your deployment ID +# Note: This can take several minutes as the indexer-agent processes the agreement ``` +**Important**: Check indexer-agent logs while waiting: +```bash +docker logs indexer-agent --tail 50 -f | grep -E "(allocation|QmNng|agreement)" +``` + +**Timing**: The indexer-agent runs on a cycle and may take 5-10 minutes to create the allocation after the DIPs agreement is established. + ### 6. Cancel an Indexing Request ```bash @@ -138,10 +153,11 @@ cargo run --bin dipper-cli -- requests cancel 01983d54-a2a0-7933-a4f5-bb96d7f4dd ## Verification Steps -1. **Dipper Health**: Check endpoint at http://localhost:9000/ -2. **Indexer RPC**: Verify indexer endpoint at http://localhost:9001/ -3. **Receipt Aggregation**: TAP receipts should aggregate every second -4. **Payment Flow**: Gateway → Dipper → Indexer Service +1. **Dipper Health**: Check endpoint returns 405 (expected for root path): `curl http://localhost:9000/` +2. **Agreement Created**: Look for "Agreement proposal accepted" in dipper logs +3. **Indexer Allocation**: Query network subgraph for active allocations +4. **Indexer Agent Activity**: Monitor logs for allocation creation +5. 
**DIPs Flow**: Admin → Dipper → Indexer Service (port 7602) → Indexer Agent ## Common Issues From 67a88ffe9c7818308c4df5566fcd3f7fcc4328c5 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 28 Jul 2025 20:04:10 +0000 Subject: [PATCH 6/8] feat: enable indexer-agent source builds and update documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Enable wrapper-dev mode in docker-compose.yaml for building indexer-agent from source - Add multi-stage Dockerfile supporting both pre-built and source builds - Add indexer-agent source as git submodule - Update CLAUDE.md with debugging best practices and common issues - Update README.md with clearer project documentation 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- .gitmodules | 3 ++ CLAUDE.md | 10 ++++++ README.md | 18 +++++++++++ docker-compose.yaml | 5 ++- indexer-agent/Dockerfile | 68 +++++++++++++++++++++++++++++++++++++++- indexer-agent/source | 1 + 6 files changed, 103 insertions(+), 2 deletions(-) create mode 160000 indexer-agent/source diff --git a/.gitmodules b/.gitmodules index 7ad58e8..5501a4b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,3 +4,6 @@ [submodule "indexer-service/source"] path = indexer-service/source url = git@github.com:graphprotocol/indexer-rs.git +[submodule "indexer-agent/source"] + path = indexer-agent/source + url = git@github.com:graphprotocol/indexer.git diff --git a/CLAUDE.md b/CLAUDE.md index 9ce56ec..198da8e 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -42,6 +42,8 @@ docker compose ps # Check service status docker compose logs -f [service] # View logs ``` +**Important for Claude Code**: When building Docker images or running `docker compose up`, always use longer timeouts (5-10 minutes) as these operations can take considerable time, especially when building from source or starting the entire network. 
+ **Important**: The initial startup can take 5-10 minutes due to the complex dependency chain: 1. **Base services** start first: `chain`, `ipfs`, `postgres`, `redpanda` 2. **graph-node** waits for base services to be healthy @@ -74,6 +76,7 @@ Some services can be built from source using Git submodules. To enable source bu ``` Services that support source builds: +- **indexer-agent**: Requires `indexer-agent/source` submodule (Node.js/TypeScript monorepo) - **indexer-service**: Requires `indexer-service/source` submodule - **dipper**: Requires `dipper/source` submodule (private repo - manual init required) @@ -92,6 +95,13 @@ docker compose exec postgres psql -U postgres # Access PostgreSQL ## Service Details +### Indexer Agent (Node.js/TypeScript) +- Built from source at `indexer-agent/source` (graphprotocol/indexer monorepo) +- Manages allocations and interactions with the network +- Runs database migrations on startup +- Serves management API on port 7600 +- Health check endpoint: http://localhost:7600/health + ### Indexer Service (Rust) - Built from source at `indexer-service/source` - Uses indexer-service-rs v1.1.1 diff --git a/README.md b/README.md index 511d2cd..249cec6 100644 --- a/README.md +++ b/README.md @@ -103,6 +103,24 @@ Epochs are set up to be 554 blocks long, use `scripts/mine-block.sh` to advance - `graph indexer connect http://localhost:7600` - `graph indexer --network=hardhat status` +### Building from source + +Building from source requires the Git submodules to be initialized. + +- `git submodule update --init --recursive --force indexer-agent/source` + +And then select the `wrapper-dev` target when building the Docker image in the `docker-compose.yaml` file. 
+ +```diff + indexer-agent: + container_name: indexer-agent + build: { +- target: "wrapper", # Set to "wrapper-dev" for building from source ++ target: "wrapper-dev", # Set to "wrapper-dev" for building from source + context: indexer-agent, + } +``` + ## indexer-service - `docker compose up --build indexer-service` diff --git a/docker-compose.yaml b/docker-compose.yaml index fdb7274..5a3e301 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -82,7 +82,10 @@ services: indexer-agent: container_name: indexer-agent - build: { context: indexer-agent } + build: { + target: "wrapper-dev", # Set to "wrapper-dev" for building from source + context: indexer-agent, + } depends_on: block-oracle: { condition: service_healthy } ports: ["127.0.0.1:${INDEXER_MANAGEMENT}:7600"] diff --git a/indexer-agent/Dockerfile b/indexer-agent/Dockerfile index fc7c407..207e0f4 100644 --- a/indexer-agent/Dockerfile +++ b/indexer-agent/Dockerfile @@ -1,4 +1,70 @@ -FROM ghcr.io/graphprotocol/indexer-agent:pr-1113@sha256:3c59d1bb151bb25261adb6a93dc22433b9ccec6039802532151277728c892da8 +######################################################################## +# Build stage - builds from source +FROM node:20.11-bookworm-slim AS builder + +ENV NODE_ENV production + +RUN apt-get update && apt-get install -y python3 build-essential git curl + +WORKDIR /opt/indexer + +# Copy root files from source submodule +COPY source/package.json . +COPY source/yarn.lock . +COPY source/tsconfig.json . +COPY source/lerna.json . 
+ +# Copy shared and package files +COPY source/packages/indexer-common/ ./packages/indexer-common +COPY source/packages/indexer-agent/ ./packages/indexer-agent + +# Install dependencies; include dev dependencies for building +RUN yarn --frozen-lockfile --non-interactive --production=false + +######################################################################## +# Wrapper development image - builds from source +FROM node:20.11-bookworm-slim AS wrapper-dev + +ENV NODE_ENV production +# Increase memory for large transactions +ENV NODE_OPTIONS="--max-old-space-size=4096" + +RUN apt-get update && apt-get install -y python3 build-essential git curl jq \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /opt/indexer + +# Copy root files +COPY source/package.json . +COPY source/yarn.lock . +COPY source/tsconfig.json . +COPY source/lerna.json . + +# Copy build output from builder +COPY --from=builder /opt/indexer/packages/indexer-common/package.json /opt/indexer/packages/indexer-common/package.json +COPY --from=builder /opt/indexer/packages/indexer-common/dist /opt/indexer/packages/indexer-common/dist +COPY --from=builder /opt/indexer/packages/indexer-agent/package.json /opt/indexer/packages/indexer-agent/package.json +COPY --from=builder /opt/indexer/packages/indexer-agent/dist /opt/indexer/packages/indexer-agent/dist + +# Install production dependencies only +RUN yarn --frozen-lockfile --non-interactive + +# Install extra development tools +RUN npm install -g tsx nodemon prettier eslint + +# Install Foundry +COPY --from=ghcr.io/foundry-rs/foundry:v1.0.0 \ + /usr/local/bin/forge /usr/local/bin/cast /usr/local/bin/anvil /usr/local/bin/chisel /usr/local/bin/ + +# Copy the custom run script +COPY ./run.sh /opt/run.sh + +WORKDIR /opt/indexer/packages/indexer-agent +ENTRYPOINT bash -cl /opt/run.sh + +######################################################################## +# Wrapper image - uses pre-built image +FROM 
ghcr.io/graphprotocol/indexer-agent:pr-1113@sha256:3c59d1bb151bb25261adb6a93dc22433b9ccec6039802532151277728c892da8 AS wrapper RUN apt-get update \ && apt-get install -y jq \ && rm -rf /var/lib/apt/lists/* diff --git a/indexer-agent/source b/indexer-agent/source new file mode 160000 index 0000000..df76679 --- /dev/null +++ b/indexer-agent/source @@ -0,0 +1 @@ +Subproject commit df766797f2f7915e77a508dd45391ea0137ab90f From 5d844a3cb82312c568c7006d4a15f77d9192fe9a Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 28 Jul 2025 20:07:16 +0000 Subject: [PATCH 7/8] feat: add indexer-agent testing tools and documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add test-indexer-agent.sh script for running indexer tests - Add indexer-agent-testing.md flow documentation with troubleshooting guide - These tools help debug and test indexer-agent functionality locally 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude --- flows/indexer-agent-testing.md | 180 +++++++++++++++++++++++++++++++++ scripts/test-indexer-agent.sh | 143 ++++++++++++++++++++++++++ 2 files changed, 323 insertions(+) create mode 100644 flows/indexer-agent-testing.md create mode 100755 scripts/test-indexer-agent.sh diff --git a/flows/indexer-agent-testing.md b/flows/indexer-agent-testing.md new file mode 100644 index 0000000..f8faf05 --- /dev/null +++ b/flows/indexer-agent-testing.md @@ -0,0 +1,180 @@ +# Indexer Agent Testing Flow + +This guide explains how to run tests for the indexer-agent when developing from source. 
+ +## Prerequisites + +- Docker installed and running +- Node.js 20 or 22 installed +- Indexer-agent source initialized: `git submodule update --init --recursive indexer-agent/source` + +## Quick Start + +From the local-network root directory: + +```bash +# Run all tests +./scripts/test-indexer-agent.sh + +# Run specific test command +./scripts/test-indexer-agent.sh test:ci +./scripts/test-indexer-agent.sh test # More verbose output +``` + +**⚠️ Important**: +- Tests can take 10-15 minutes or more to complete, especially on first run when dependencies are being installed. The test suite runs tests for multiple packages (indexer-common, indexer-agent, indexer-cli) sequentially. +- **The test script may exit with a non-zero status code even when it runs successfully** - this just means some tests failed. Always check the output or log file to see the actual test results and failure details. + +## How It Works + +The test script automatically: + +1. **Starts a fresh PostgreSQL container** on port 5433 (to avoid conflicts with local-network's PostgreSQL on 5432) +2. **Sets up test environment variables** required by the test suite +3. **Installs dependencies** if not already present +4. **Runs the tests** using the test command specified +5. 
**Cleans up** the PostgreSQL container on exit + +## Test Environment + +The script sets up the following test database: +- Host: `localhost` +- Port: `5433` +- Database: `indexer_tests` +- User: `testuser` +- Password: `testpass` + +## Running Specific Tests + +You can pass any yarn test command to the script: + +```bash +# Run tests for a specific file +./scripts/test-indexer-agent.sh test src/__tests__/agent.ts + +# Run tests with coverage +./scripts/test-indexer-agent.sh test --coverage + +# Run tests in watch mode +./scripts/test-indexer-agent.sh test --watch +``` + +### Running Tests for Specific Packages + +Since the monorepo contains multiple packages, you can run tests for specific packages to save time: + +```bash +# Run tests only for indexer-agent package +cd indexer-agent/source/packages/indexer-agent +export POSTGRES_TEST_HOST=localhost +export POSTGRES_TEST_PORT=5433 +export POSTGRES_TEST_DATABASE=indexer_tests +export POSTGRES_TEST_USERNAME=testuser +export POSTGRES_TEST_PASSWORD=testpass +yarn test + +# Note: You'll need to set up the PostgreSQL container manually for this approach +``` + +## Important Learnings + +### Directory Navigation +- **Always check your current directory** before running commands with `pwd` +- The test script changes directories to `indexer-agent/source` during execution +- Test output files are created in the directory where you run the script +- After debugging, you might be in `indexer-agent/source` instead of the local-network root +- Use absolute paths when in doubt: `<local-network-root>/scripts/test-indexer-agent.sh` (substitute the path to your checkout) + +### Understanding Test Output +- The test script exits with non-zero status if any tests fail - this is normal +- Always check the actual test output to understand what happened +- Tests run for multiple packages sequentially: + 1. `@graphprotocol/indexer-common` (runs first) + 2. `@graphprotocol/indexer-agent` (only runs if indexer-common passes) + 3. 
`@graphprotocol/indexer-cli` (only runs if previous packages pass) +- If indexer-common fails, the other packages won't run at all + +### Making Code Changes +- After modifying TypeScript files, you must compile before running tests: + ```bash + cd indexer-agent/source + yarn compile + ``` +- Test error line numbers may not match exactly due to transpilation +- Debug console.log statements work and will appear in test output + +### Environment Variables +- `INDEXER_TEST_JRPC_PROVIDER_URL` - Ethereum RPC endpoint (defaults to public Arbitrum Sepolia) +- `INDEXER_TEST_API_KEY` - API key for The Graph's subgraph endpoints (may be required) + +## Troubleshooting + +### Tests fail with connection errors +- Ensure Docker is running +- Check if port 5433 is available: `lsof -i :5433` +- Try running with more verbose output: `./scripts/test-indexer-agent.sh test` + +### Dependencies not found +- The script should auto-install dependencies, but you can manually run: + ```bash + cd indexer-agent/source + yarn install --frozen-lockfile + ``` + +### PostgreSQL container issues +- The script automatically cleans up containers, but you can manually remove: + ```bash + docker stop indexer-tests-postgres + docker rm indexer-tests-postgres + ``` + +### Cleaning Test Output +- Remove ANSI escape codes from test output for easier reading: + ```bash + cat test-output.log | sed 's/\x1b\[[0-9;]*m//g' > test-output-clean.log + ``` + +## CI Integration + +The tests run in CI using GitHub Actions with: +- PostgreSQL service container +- Matrix testing for Node.js 20 and 22 +- Environment secrets for integration tests (optional) + +### Timeout Considerations + +When running tests in automation or CI: +- Set appropriate timeouts (15-20 minutes minimum) +- First runs take longer due to dependency installation +- The test suite runs multiple packages sequentially (indexer-common → indexer-agent → indexer-cli) + +## Manual Testing (Advanced) + +If you need more control, you can run the 
PostgreSQL container manually: + +```bash +# Start PostgreSQL +docker run -d \ + --name indexer-tests-postgres \ + -e POSTGRES_DB=indexer_tests \ + -e POSTGRES_USER=testuser \ + -e POSTGRES_PASSWORD=testpass \ + -p 5433:5432 \ + postgres:13 + +# Set environment +export POSTGRES_TEST_HOST=localhost +export POSTGRES_TEST_PORT=5433 +export POSTGRES_TEST_DATABASE=indexer_tests +export POSTGRES_TEST_USERNAME=testuser +export POSTGRES_TEST_PASSWORD=testpass +export NODE_OPTIONS="--dns-result-order=ipv4first" + +# Run tests +cd indexer-agent/source +yarn test + +# Clean up +docker stop indexer-tests-postgres +docker rm indexer-tests-postgres +``` \ No newline at end of file diff --git a/scripts/test-indexer-agent.sh b/scripts/test-indexer-agent.sh new file mode 100755 index 0000000..97cea26 --- /dev/null +++ b/scripts/test-indexer-agent.sh @@ -0,0 +1,143 @@ +#!/bin/bash +set -e + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Configuration +CONTAINER_NAME="indexer-tests-postgres" +POSTGRES_PORT=5433 +POSTGRES_DB="indexer_tests" +POSTGRES_USER="testuser" +POSTGRES_PASSWORD="testpass" + +echo -e "${GREEN}Starting indexer-agent test runner...${NC}" + +# Function to cleanup on exit +cleanup() { + echo -e "\n${YELLOW}Cleaning up...${NC}" + docker stop $CONTAINER_NAME 2>/dev/null || true + docker rm $CONTAINER_NAME 2>/dev/null || true + echo -e "${GREEN}Cleanup complete${NC}" +} + +# Set trap to cleanup on exit +trap cleanup EXIT + +# Check if we're in the correct directory +if [ ! -f "docker-compose.yaml" ]; then + echo -e "${RED}Error: Must run this script from the local-network root directory${NC}" + exit 1 +fi + +# Check if indexer-agent source is initialized +if [ ! -d "indexer-agent/source/packages" ]; then + echo -e "${RED}Error: indexer-agent source not found. 
Run: git submodule update --init --recursive indexer-agent/source${NC}" + exit 1 +fi + +# Remove any existing test container +echo -e "${YELLOW}Removing any existing test containers...${NC}" +docker stop $CONTAINER_NAME 2>/dev/null || true +docker rm $CONTAINER_NAME 2>/dev/null || true + +# Start PostgreSQL container +echo -e "${YELLOW}Starting PostgreSQL container for tests...${NC}" +docker run -d \ + --name $CONTAINER_NAME \ + -e POSTGRES_DB=$POSTGRES_DB \ + -e POSTGRES_USER=$POSTGRES_USER \ + -e POSTGRES_PASSWORD=$POSTGRES_PASSWORD \ + -p $POSTGRES_PORT:5432 \ + postgres:13 + +# Wait for PostgreSQL to be ready +echo -e "${YELLOW}Waiting for PostgreSQL to be ready...${NC}" +for i in {1..30}; do + if docker exec $CONTAINER_NAME pg_isready -U $POSTGRES_USER >/dev/null 2>&1; then + echo -e "${GREEN}PostgreSQL is ready!${NC}" + break + fi + if [ $i -eq 30 ]; then + echo -e "${RED}PostgreSQL failed to start in time${NC}" + exit 1 + fi + echo -n "." + sleep 1 +done + +# Set environment variables +export POSTGRES_TEST_HOST=localhost +export POSTGRES_TEST_PORT=$POSTGRES_PORT +export POSTGRES_TEST_DATABASE=$POSTGRES_DB +export POSTGRES_TEST_USERNAME=$POSTGRES_USER +export POSTGRES_TEST_PASSWORD=$POSTGRES_PASSWORD +export NODE_OPTIONS="--dns-result-order=ipv4first" + +# RPC Provider configuration +# Option 1: Use environment variable if already set +# Option 2: Use local chain if docker-compose is running +# Option 3: Use public Arbitrum Sepolia RPC as fallback +if [ -z "$INDEXER_TEST_JRPC_PROVIDER_URL" ]; then + # Check if local chain is running + if docker compose ps chain 2>/dev/null | grep -q "running\|Up"; then + echo -e "${YELLOW}Using local chain for tests${NC}" + export INDEXER_TEST_JRPC_PROVIDER_URL="http://localhost:8545" + else + echo -e "${YELLOW}Using public Arbitrum Sepolia RPC for tests${NC}" + export INDEXER_TEST_JRPC_PROVIDER_URL="https://sepolia-rollup.arbitrum.io/rpc" + fi +else + echo -e "${YELLOW}Using custom RPC provider: 
$INDEXER_TEST_JRPC_PROVIDER_URL${NC}" +fi + +# API Key for The Graph subgraph endpoints (optional - tests will use public endpoints if not set) +export INDEXER_TEST_API_KEY="${INDEXER_TEST_API_KEY:-}" + +# Navigate to indexer source +cd indexer-agent/source + +# Install dependencies if needed +if [ ! -d "node_modules" ]; then + echo -e "${YELLOW}Installing dependencies...${NC}" + yarn install --frozen-lockfile +fi + +# Run tests +echo -e "${GREEN}Running indexer-agent tests...${NC}" +echo -e "${YELLOW}Test environment:${NC}" +echo " PostgreSQL: $POSTGRES_TEST_HOST:$POSTGRES_TEST_PORT" +echo " Database: $POSTGRES_TEST_DATABASE" +echo " User: $POSTGRES_TEST_USERNAME" +echo " RPC Provider: $INDEXER_TEST_JRPC_PROVIDER_URL" +if [ -n "$INDEXER_TEST_API_KEY" ]; then + echo " Graph API Key: [configured]" +else + echo " Graph API Key: [not set - using public endpoints]" +fi +echo "" + +# Allow passing custom test commands +if [ $# -eq 0 ]; then + # Default: run all tests + yarn test:ci +else + # Run custom test command + yarn "$@" +fi + +TEST_EXIT_CODE=$? + +# Return to original directory +cd ../.. 
+ +if [ $TEST_EXIT_CODE -eq 0 ]; then + echo -e "\n${GREEN}✓ Tests completed successfully!${NC}" +else + echo -e "\n${RED}✗ Tests failed with exit code $TEST_EXIT_CODE${NC}" +fi + +exit $TEST_EXIT_CODE \ No newline at end of file From b71c5d1ec10e043f1e96bcf31c6d6bd121d641ab Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 8 Aug 2025 15:56:40 +0000 Subject: [PATCH 8/8] feat: progress on dips new payments approach --- dipper/source | 2 +- docs/dips-safe-payments/README.md | 44 +++ docs/dips-safe-payments/architecture.md | 272 ++++++++++++++++ docs/dips-safe-payments/dipper-plan.md | 308 ++++++++++++++++++ docs/dips-safe-payments/indexer-agent-plan.md | 305 +++++++++++++++++ .../indexer-service-plan.md | 250 ++++++++++++++ indexer-agent/source | 2 +- indexer-service/source | 2 +- 8 files changed, 1182 insertions(+), 3 deletions(-) create mode 100644 docs/dips-safe-payments/README.md create mode 100644 docs/dips-safe-payments/architecture.md create mode 100644 docs/dips-safe-payments/dipper-plan.md create mode 100644 docs/dips-safe-payments/indexer-agent-plan.md create mode 100644 docs/dips-safe-payments/indexer-service-plan.md diff --git a/dipper/source b/dipper/source index d1a7cf9..fe3eb23 160000 --- a/dipper/source +++ b/dipper/source @@ -1 +1 @@ -Subproject commit d1a7cf995a807fe8a48e1ef9897f74b21979129e +Subproject commit fe3eb2360daaf3b7e5ff204d86df726ec3c916a0 diff --git a/docs/dips-safe-payments/README.md b/docs/dips-safe-payments/README.md new file mode 100644 index 0000000..c7c022c --- /dev/null +++ b/docs/dips-safe-payments/README.md @@ -0,0 +1,44 @@ +# DIPs Safe Payments Documentation + +This folder contains all documentation for the DIPs (Distributed Indexing Payments) Safe-based payment system implementation. + +## Overview + +The DIPs Safe payment system replaces TAP (Timeline Aggregation Protocol) for indexing fee payments, using on-chain GRT transfers via Safe Module pattern with asynchronous processing. 
+ +## Documentation Structure + +### Architecture +- [`architecture.md`](./architecture.md) - System architecture, design principles, and component interactions + +### Implementation Plans +- [`indexer-agent-plan.md`](./indexer-agent-plan.md) - Changes needed for the Indexer Agent to handle Receipt IDs and polling +- [`dipper-plan.md`](./dipper-plan.md) - Core payment processing implementation in the Dipper service +- [`indexer-service-plan.md`](./indexer-service-plan.md) - Minimal protocol buffer updates for the Indexer Service + +## Quick Links + +### For Indexer Agent Development +Start with the [Indexer Agent Plan](./indexer-agent-plan.md) which covers: +- Receipt ID tracking +- Status polling mechanism +- Database schema updates + +### For Dipper Development +See the [Dipper Plan](./dipper-plan.md) for: +- Safe Module client implementation +- Worker-based payment processing +- Receipt status management + +### For Understanding the System +Read the [Architecture Document](./architecture.md) to understand: +- Payment flow and state machine +- Security considerations +- API specifications + +## Key Concepts + +- **Receipt ID**: Replaces TAP receipts, enables async processing +- **State Machine**: PENDING → SUBMITTED/FAILED +- **Safe Module**: Direct execution pattern for GRT transfers +- **1% Protocol Burn**: Automatic burn on all payments \ No newline at end of file diff --git a/docs/dips-safe-payments/architecture.md b/docs/dips-safe-payments/architecture.md new file mode 100644 index 0000000..26b0ce8 --- /dev/null +++ b/docs/dips-safe-payments/architecture.md @@ -0,0 +1,272 @@ +# DIPs Safe Payments Architecture + +## Overview + +This document describes the architecture for implementing Safe-based on-chain payments for DIPs (Distributed Indexing Payments) in The Graph Protocol, as specified in RFC-001. This implementation replaces TAP (Timeline Aggregation Protocol) for indexing fees due to impractical allocation requirements. 
+ +## Background + +RFC-001 identifies critical issues with TAP for DIPs: +- **High Capital Requirements**: $50-$1000 allocations needed for $5-$100 monthly payments +- **Complex Allocation Management**: Variable allocation amounts create operational complexity +- **Capital Inefficiency**: Large amounts of stake must be kept free for DIPs +- **Missing Infrastructure**: TAP escrow management functionality not yet implemented + +## Architecture Overview + +### Core Design Principles + +1. **Asynchronous Processing**: Non-blocking receipt ID system for immediate responses +2. **Safe Module Pattern**: Direct execution via `execTransactionFromModule` without multi-sig complexity +3. **State Machine**: Clear PENDING → SUBMITTED/FAILED status tracking +4. **Protocol Compliance**: 1% burn on all payments +5. **Clear Separation**: DIPs use Safe payments, query fees continue using TAP + +### System Components + +``` +┌─────────────┐ RPC ┌──────────────┐ Worker ┌──────────────┐ +│ Indexer │ ──────────> │ Dipper │ ──────────> │ Payment │ +│ Agent │ <────────── │ Service │ │ Handler │ +└─────────────┘ Receipt ID └──────────────┘ └──────────────┘ + │ │ │ + │ Poll Status │ │ + ▼ ▼ ▼ +┌─────────────┐ ┌──────────────┐ ┌──────────────┐ +│ Receipt │ │ Receipt │ │ Safe Module │ +│ Storage │ │ Registry │ │ Client │ +└─────────────┘ └──────────────┘ └──────────────┘ + │ │ + │ ▼ + │ ┌──────────────┐ + └──────────────────────>│ Blockchain │ + Status Update │ (GRT + Burn) │ + └──────────────┘ +``` + +## Payment Flow + +### 1. Payment Collection (Indexer → Dipper) + +The indexer initiates payment collection by reporting completed work: + +``` +Indexer sends collect_payment request: +- Agreement ID +- Work metrics (entity count, etc.) +- Indexer address +``` + +### 2. Receipt Creation (Dipper Service) + +Dipper validates the work and creates a receipt: + +1. Validate work report against agreement +2. Calculate payment amount including 1% burn +3. Create receipt record with PENDING status +4. 
Queue PayOnChain job for async processing +5. Return Receipt ID immediately + +### 3. Asynchronous Payment Processing (Worker) + +Worker processes payments in the background: + +1. Retrieve receipt from registry +2. Verify PENDING status (skip if already processed) +3. Calculate exact burn amount (1% of total) +4. Submit payment via Safe Module: + - Transfer GRT to indexer + - Burn 1% to protocol address +5. Update receipt status: + - SUBMITTED with transaction hash on success + - FAILED with error message on failure + +### 4. Status Polling (Indexer) + +Indexers poll for payment status: + +1. Request status using Receipt ID +2. Receive current status and details +3. For SUBMITTED status, get transaction hash +4. Verify payment on-chain if desired + +## State Machine + +``` + ┌─────────┐ + │ PENDING │──────┐ + └────┬────┘ │ + │ │ + Submit│ │Fatal + Success│ │Error + │ │ + ▼ ▼ + ┌──────────┐ ┌────────┐ + │SUBMITTED │ │ FAILED │ + └──────────┘ └────────┘ +``` + +**State Definitions:** +- **PENDING**: Receipt created, payment queued for processing +- **SUBMITTED**: Payment successfully submitted to blockchain with transaction hash +- **FAILED**: Payment failed due to error (gas, balance, revocation, etc.) + +## Safe Module Implementation + +### Configuration + +The Safe Module pattern requires: +- Dipper EOA authorized as Safe Module +- Safe contract holding GRT tokens +- Module can execute transactions directly +- No multi-sig coordination required + +### Transaction Execution + +Payments are executed as batch operations using the Safe MultiSend contract: + +1. **Batch Construction**: + - Create array of operations (GRT transfer + 1% burn) + - Encode using MultiSend contract interface + +2. **Execution Flow**: + - Module calls `execTransactionFromModule` on the Safe + - Safe delegatecalls to MultiSend contract + - MultiSend executes both operations atomically: + - Transfer GRT to indexer + - Burn 1% to protocol address + +3. 
**Benefits**: + - Single transaction for multiple operations + - Atomic execution (all or nothing) + - Gas efficient batching + - Standard Safe pattern + +## Security Considerations + +### Access Control +- Safe Module authorization can be revoked by Safe owners +- Module limited to specific operations (GRT transfers) +- No ability to modify Safe configuration + +### Key Management +- Module signer key stored securely (environment variables) +- Regular key rotation capability +- No hardcoded keys in code + +### Transaction Security +- Nonce management prevents replay attacks +- Gas price limits prevent excessive fees +- Amount validation ensures correct payments + +## Database Schema + +### Receipt Status Tracking + +```sql +-- Enhanced receipts table +ALTER TABLE indexing_receipts +ADD COLUMN payment_status TEXT DEFAULT 'PENDING', +ADD COLUMN transaction_hash TEXT, +ADD COLUMN payment_submitted_at TIMESTAMP, +ADD COLUMN payment_error TEXT, +ADD COLUMN retry_count INTEGER DEFAULT 0; + +CREATE INDEX idx_receipts_payment_status ON indexing_receipts(payment_status); +``` + +## API Specifications + +### gRPC Interface (Indexer Service → Dipper) + +#### CollectPaymentRequest (Unchanged) +```protobuf +message CollectPaymentRequest { + uint64 version = 1; + bytes signed_collection = 2; // ERC-712 signed work report +} +``` + +#### CollectPaymentResponse (Modified) +```protobuf +message CollectPaymentResponse { + uint64 version = 1; + string receipt_id = 2; // Receipt ID for polling (replaces tap_receipt) + string amount = 3; // Total amount including burn + string status = 4; // Initial status: "PENDING" +} +``` + +### Dipper gRPC Interface Extension + +#### GetReceiptById (New RPC method) +```protobuf +message GetReceiptByIdRequest { + uint64 version = 1; + string receipt_id = 2; +} + +message GetReceiptByIdResponse { + uint64 version = 1; + string receipt_id = 2; + string status = 3; // "PENDING" | "SUBMITTED" | "FAILED" + string transaction_hash = 4; // Present when 
SUBMITTED + string error_message = 5; // Present when FAILED + string amount = 6; + string payment_submitted_at = 7; // ISO timestamp when SUBMITTED +} +``` + +## Configuration + +### Dipper Service +```yaml +safe_payment: + safe_address: "0x..." # Safe with module + grt_token_address: "0x..." # GRT token contract + module_signer_key: "${KEY}" # Module EOA key + rpc_url: "https://..." # Ethereum RPC + burn_percentage: 1 # Protocol tax + min_payment_amount_grt: "50" # Minimum payment +``` + +### Indexer Agent +```yaml +payment_collection: + dipper_endpoint: "http://dipper:8000" +``` + +## Components to Modify + +### 1. Dipper Service (Gateway Side) +The dipper service requires the most significant changes as it manages the payment flow: + +- **Receipt Registry**: Add payment status tracking (PENDING/SUBMITTED/FAILED) +- **Worker System**: Add PayOnChain message handler for async payment processing +- **Safe Module Client**: Implement GRT transfers with 1% burn via execTransactionFromModule +- **RPC Interface**: Return Receipt IDs instead of TAP receipts, add polling endpoint + +### 2. Indexer Agent +The indexer agent needs updates to handle the new async payment pattern: + +- **DipsCollector**: Replace TAP receipt storage with Receipt ID tracking +- **Polling Mechanism**: Add background task to poll for payment status +- **Database Models**: Update to store Receipt IDs and payment status +- **RPC Client**: Update to handle new response format and polling endpoint + +### 3. 
Indexer Service +The indexer service requires minimal changes, primarily to the gRPC protocol definitions: + +- **Protocol Update**: Modify `CollectPaymentResponse` in `gateway.proto` to return Receipt ID +- **Response Handling**: Update response structure to include status field +- **No Core Logic Changes**: The indexer service acts as a pass-through for DIPs + +## Success Criteria + +The implementation succeeds when: +- ✅ Non-blocking payment requests with Receipt IDs +- ✅ Asynchronous GRT transfers via Safe Module +- ✅ 1% protocol burn on all payments +- ✅ Clear state machine transitions +- ✅ Indexers can verify payments on-chain +- ✅ Complete separation from TAP for indexing fees \ No newline at end of file diff --git a/docs/dips-safe-payments/dipper-plan.md b/docs/dips-safe-payments/dipper-plan.md new file mode 100644 index 0000000..bab8997 --- /dev/null +++ b/docs/dips-safe-payments/dipper-plan.md @@ -0,0 +1,308 @@ +# Dipper DIPs Implementation Plan + +## Overview + +This plan details the changes required to the Dipper service to implement Safe-based GRT payments for DIPs, replacing the current TAP receipt system. + +## Current State + +The Dipper service currently: +- Generates TAP receipts for payment collection +- Returns receipts synchronously +- Has worker infrastructure for async tasks +- Stores receipts in PostgreSQL database + +## Required Changes + +### 1. 
Database Schema Updates + +**Migration**: `dipper/source/migrations/20250130_payment_status.sql` + +```sql +-- Add payment tracking to receipts +ALTER TABLE indexing_receipts +ADD COLUMN payment_status TEXT DEFAULT 'PENDING' + CHECK (payment_status IN ('PENDING', 'SUBMITTED', 'FAILED')), +ADD COLUMN transaction_hash TEXT, +ADD COLUMN payment_submitted_at TIMESTAMP, +ADD COLUMN payment_error TEXT, +ADD COLUMN retry_count INTEGER DEFAULT 0; + +-- Indexes for efficient queries +CREATE INDEX idx_receipts_payment_status ON indexing_receipts(payment_status); +CREATE INDEX idx_receipts_transaction_hash ON indexing_receipts(transaction_hash); +``` + +### 2. Registry Updates + +**Location**: `dipper/source/bin/dipper-service/src/store/registry.rs` + +Add payment status management: + +``` +New methods needed: +- update_receipt_payment_status(): Atomic status updates +- get_receipt_with_status(): Retrieve receipt with payment info +- get_receipts_by_status(): Query receipts by status +- increment_retry_count(): Track retry attempts +``` + +### 3. Worker System Integration + +**Location**: `dipper/source/bin/dipper-service/src/worker/` + +#### Add PayOnChain Message Type + +``` +WorkerMessage enum addition: + PayOnChain { + receipt_id: ReceiptId, + amount: U256, + recipient: Address, + agreement_id: AgreementId, + } +``` + +#### Implement Payment Handler + +``` +Payment handler logic: +1. Verify receipt is still PENDING +2. Calculate 1% burn amount +3. Submit payment via Safe Module +4. Update receipt status based on result +5. Handle retries for transient errors +``` + +### 4. Safe Module Client + +**Location**: `dipper/source/bin/dipper-service/src/safe_client/` + +Replace stub with working implementation: + +``` +Safe Module client needs: +- Initialize with Safe, GRT token, and MultiSend contracts +- Build batch transaction using MultiSend: + 1. Encode GRT transfer operation + 2. Encode 1% burn operation + 3. 
Pack operations into MultiSend call data
+- Execute via execTransactionFromModule (called on the Safe contract):
+  - to: the MultiSend contract address
+  - operation: 1 (delegatecall, so MultiSend runs in the Safe's context)
+  - data: the ABI-encoded multiSend() call with the packed batch operations
+- Handle nonce management for module transactions
+- Return transaction hash after confirmation
+```
+
+### 5. gRPC Interface Updates
+
+**Location**: `dipper/source` (proto files and service implementation)
+
+#### Update CollectPayment
+
+```
+Changes:
+1. Create receipt with PENDING status
+2. Queue PayOnChain worker job
+3. Return Receipt ID instead of TAP receipt in CollectPaymentResponse
+4. Include initial status in response
+```
+
+#### Add GetReceiptById
+
+```
+New gRPC method:
+- Add to proto definition
+- Input: GetReceiptByIdRequest with receipt_id
+- Query receipt with current status from database
+- Return GetReceiptByIdResponse with status, transaction hash, error info
+- Handle not found gracefully
+```
+
+### 6. Configuration
+
+**Location**: `dipper/source/bin/dipper-service/src/config.rs`
+
+```
+Safe payment configuration:
+- safe_address: Address of Safe contract
+- grt_token_address: GRT token contract
+- module_signer_key: Private key for module EOA
+- rpc_url: Ethereum RPC endpoint
+- burn_percentage: 1% protocol tax
+- gas_settings: Limits and pricing
+```
+
+## Implementation Phases
+
+### Phase 1: Database & Registry
+1. Create and run migration
+2. Update registry trait and implementation
+3. Add status update methods
+4. Test atomic operations
+
+### Phase 2: Worker Integration
+1. Add PayOnChain message type
+2. Implement payment handler
+3. Add retry logic
+4. Integrate with registry
+
+### Phase 3: Safe Client
+1. Set up contract interfaces (Safe, GRT, MultiSend)
+2. Implement batch transaction building:
+   - Encode ERC20 transfer call for GRT to indexer
+   - Encode ERC20 transfer call for 1% burn
+   - Pack both into MultiSend.multiSend() call data
+3. 
Add execTransactionFromModule calls: + - Target: Safe contract + - Value: 0 (no ETH) + - Data: Delegatecall to MultiSend with packed operations + - Operation: 1 (delegatecall) +4. Handle nonce and gas management + +### Phase 4: RPC Updates +1. Modify collect_payment flow +2. Add polling endpoint +3. Remove TAP generation +4. Update response types + +### Phase 5: Testing +1. Unit tests for all components +2. Integration tests +3. Local network testing +4. Testnet deployment + +## Testing Strategy + +### Unit Tests +``` +Test coverage needed: +- Registry status updates +- Worker message handling +- Safe transaction building +- 1% burn calculations +- RPC response formatting +``` + +### Integration Tests +``` +End-to-end scenarios: +- Receipt creation and status updates +- Payment execution flow +- Error handling and retries +- Concurrent payment processing +- Database consistency +``` + +### Local Testing +``` +Validation steps: +1. Deploy with test Safe +2. Configure module authorization +3. Fund with test GRT +4. Process test payments +5. Verify on local chain +``` + +## Security Considerations + +### Key Management +- Module key in environment variable +- No hardcoded keys +- Rotation capability + +### Transaction Security +- Validate payment amounts +- Check Safe authorization +- Monitor gas prices +- Handle reverted transactions + +### Access Control +- Verify indexer signatures +- Rate limit requests +- Validate work reports + +## Monitoring + +### Metrics +``` +Key metrics: +- receipt_creation_rate +- payment_processing_time +- payment_success_rate +- retry_count_by_error +- gas_cost_per_payment +``` + +### Logging +``` +Important events: +- Receipt creation +- Payment submission +- Status transitions +- Transaction hashes +- Error details +``` + +### Alerts +``` +Alert conditions: +- High failure rate +- Stuck PENDING receipts +- Low Safe balance +- Module authorization issues +``` + +## Rollback Plan + +1. Keep TAP code available +2. 
Feature flag for payment method +3. Database backups before migration +4. Manual payment capability +5. Clear rollback procedures + +## Dependencies + +### External +- Ethereum RPC provider +- Safe Module authorization +- GRT token in Safe +- Gas for transactions + +### Internal +- Worker system operational +- Database available +- Registry functioning +- RPC server running + +## Configuration Example + +```toml +[safe_payment] +safe_address = "0x1234..." # Safe contract with dipper EOA as module +grt_token_address = "0x5678..." # GRT token contract +multisend_address = "0xA238..." # Safe MultiSend contract +module_signer_key = "${SAFE_MODULE_KEY}" # Private key for module EOA +rpc_url = "https://sepolia.infura.io/v3/${KEY}" +burn_percentage = 1 +burn_address = "0x0000000000000000000000000000000000000000" # Or protocol burn address +min_payment_amount_grt = "50000000000000000000" # 50 GRT + +[safe_payment.gas] +max_price_gwei = 100 +limit = 300000 # Higher limit for batch operations + +[worker] +payment_retry_attempts = 3 +payment_retry_delay_seconds = 60 +``` + +## Success Criteria + +- Receipt IDs returned immediately +- Payments process within 60 seconds +- State machine works correctly +- 1% burn executed properly +- No TAP receipts for DIPs +- Transactions verifiable on chain \ No newline at end of file diff --git a/docs/dips-safe-payments/indexer-agent-plan.md b/docs/dips-safe-payments/indexer-agent-plan.md new file mode 100644 index 0000000..8d3aac3 --- /dev/null +++ b/docs/dips-safe-payments/indexer-agent-plan.md @@ -0,0 +1,305 @@ +# Indexer Agent DIPs Implementation Plan + +## Overview + +This plan details the changes required to the Indexer Agent to support the new Safe-based DIPs payment system. The implementation replaces TAP receipt collection with a Receipt ID polling mechanism. 
+ +## Development Branch + +**Branch**: `dips-horizon-rebase` + +This work will continue on top of the existing `dips-horizon-rebase` branch, which already contains DIPs-related improvements. + +## Current State + +The Indexer Agent currently: +- Uses `DipsCollector` class to collect TAP receipts from the gateway +- Stores TAP receipts locally for later redemption +- Makes synchronous calls expecting immediate receipt data +- Has existing database models for tracking receipts + +## Required Changes + +### 1. Database Schema Updates + +**Location**: `indexer-agent/source/packages/indexer-common/src/indexer-management/models/` + +Create a new model for DIPs receipts: + +``` +DipsReceipt model: +- receiptId: string (primary key - unique identifier from dipper) +- agreementId: foreign key to indexing_agreements +- amount: bigint (payment amount in GRT wei) +- status: enum ['PENDING', 'SUBMITTED', 'FAILED'] +- transactionHash: string (optional, populated when SUBMITTED) +- errorMessage: string (optional, populated when FAILED) +- createdAt: timestamp +- updatedAt: timestamp +``` + +**Note**: These schema changes will also need to be mirrored in the indexer-service migrations for Rust testing compatibility. + +### 2. Update DipsCollector Class + +**Location**: `indexer-agent/source/packages/indexer-common/src/indexing-fees/dips.ts` + +#### Remove TAP Dependencies + +- Remove all TAP receipt handling code +- Remove receipt signature verification +- Remove TAP-specific imports and types + +#### Implement Receipt ID Collection + +Update the payment collection flow: + +``` +async collectPayment(agreement): + 1. Calculate work metrics (entity count, etc.) + 2. Call dipper.collect_payment with work report + 3. Receive Receipt ID and initial status + 4. Store Receipt ID in database with PENDING status + 5. Return immediately (no polling here) +``` + +**Critical**: The collectPayment method does NOT start any polling. 
All polling is handled by a single background task that processes all pending receipts together. + +### 3. Update gRPC Client + +**Location**: `indexer-agent/source/packages/indexer-common/src/indexing-fees/gateway-dips-service-client.ts` + +#### Update gRPC Calls + +Extend the Dipper gRPC client: + +``` +CollectPayment: + Input: Same as before (work report) + Output: { + receiptId: string + amount: string + status: 'PENDING' + } + +GetReceiptById (new): + Input: { receiptId: string } + Output: { + receiptId: string + status: 'PENDING' | 'SUBMITTED' | 'FAILED' + transactionHash?: string + errorMessage?: string + amount: string + } +``` + +### 4. Update collectAllPayments Method + +**Location**: `indexer-agent/source/packages/indexer-common/src/indexing-fees/dips.ts` + +Extend the existing `collectAllPayments` method to also poll for pending receipts: + +``` +async collectAllPayments(): + // Existing logic - collect new payments + 1. Find outstanding agreements + 2. For each agreement: tryCollectPayment() + + // New logic - poll pending receipts + 3. Query database for ALL PENDING receipts + 4. For each receipt: + - Call dipper.get_receipt_by_id + - Update database with new status if changed + - Log state transitions +``` + +**Benefits of this approach**: +- Reuses existing periodic task (runs every 60 seconds) +- Keeps all DIPs payment logic in one place +- No need for separate background task +- Natural fit since both operations deal with payment lifecycle +- Simplifies the implementation + +### 5. 
Monitoring and Logging
+
+**Location**: Throughout the codebase
+
+Add comprehensive logging:
+
+```
+Log Events:
+- Payment request initiated
+- Receipt ID received
+- Each polling attempt
+- Status transitions
+- Transaction hash when SUBMITTED
+- Error details when FAILED
+```
+
+Add metrics:
+
+```
+Metrics to track:
+- payment_requests_total
+- receipt_status_transitions
+- polling_duration_seconds
+- payment_success_rate
+- payment_failure_reasons
+```
+
+## Implementation Steps
+
+### Phase 1: Database and Models
+
+1. Create migration for new receipt fields
+2. Update DipsReceipt model
+3. Add status enum type
+4. Test database changes
+
+### Phase 2: RPC Client Updates
+
+1. Add GetReceiptById method to RPC client
+2. Update CollectPayment response handling
+3. Remove TAP-specific response parsing
+4. Add proper error handling
+
+### Phase 3: Core Collection Logic
+
+1. Refactor DipsCollector class
+2. Remove all TAP receipt logic
+3. Implement Receipt ID storage
+4. Add polling mechanism
+5. Handle all status transitions
+
+### Phase 4: Extend collectAllPayments
+
+1. Add receipt polling logic to existing method
+2. Implement timeout handling for stale receipts
+3. Add batch status checking
+4. Ensure fault tolerance
+
+### Phase 5: Testing and Validation
+
+1. Unit tests for new collection flow
+2. Integration tests with mock dipper
+3. Test status polling mechanism
+4. Test error scenarios
+5. Performance testing
+
+## Testing Plan
+
+### Unit Tests
+
+```
+Test Cases:
+- Receipt ID storage and retrieval
+- Status update logic
+- Polling state tracking
+- Error propagation
+```
+
+### Integration Tests
+
+```
+Test Scenarios:
+- Full payment flow with mock dipper
+- Status transitions (PENDING → SUBMITTED)
+- Failure scenarios (PENDING → FAILED)
+- Network interruption handling
+- Concurrent receipt polling
+```
+
+### Manual Testing
+
+```
+Validation Steps:
+1. Deploy to local-network
+2. Create test indexing agreement
+3. 
Trigger payment collection +4. Monitor Receipt ID creation +5. Verify polling behavior +6. Check final transaction on chain +``` + +## Configuration Changes + +### Environment Variables + +```bash +# Dipper endpoint +DIPPER_ENDPOINT=http://dipper:8000 +``` + +### Command Line Arguments + +```bash +indexer-agent start \ + --dipper-endpoint http://dipper:8000 +``` + +## Error Handling + +### Transient Errors +- Network timeouts: Retry polling +- Dipper unavailable: Exponential backoff +- Database errors: Log and retry + +### Fatal Errors +- Invalid Receipt ID: Log error +- Authentication errors: Alert operator + +## Migration Considerations + +### Backward Compatibility +- Keep TAP code for query fees +- Add feature flag for new payment system +- Support gradual rollout + +### Data Migration +- No migration needed for existing TAP receipts +- New receipts use Receipt ID system +- Clear separation in database + +## Success Metrics + +### Functional Metrics +- All payments create Receipt IDs +- Status polling works reliably +- Transactions appear on chain +- No TAP receipts for indexing fees + +### Performance Metrics +- Receipt creation < 1 second +- Status updates within 30 seconds +- Polling doesn't impact performance +- Database queries remain efficient + +## Rollback Plan + +If issues arise: +1. Disable new payment flow via feature flag +2. Revert to TAP receipt collection +3. Investigate and fix issues +4. Re-deploy with fixes + +## Dependencies + +### External Dependencies +- Dipper service with new RPC methods +- Safe Module configuration on chain +- GRT tokens in Safe + +### Internal Dependencies +- Database schema changes +- RPC client updates +- Background task system + +## Implementation Order + +1. Database updates +2. RPC client changes +3. Core logic implementation +4. Background tasks +5. Testing and validation +6. 
Integration and deployment \ No newline at end of file diff --git a/docs/dips-safe-payments/indexer-service-plan.md b/docs/dips-safe-payments/indexer-service-plan.md new file mode 100644 index 0000000..244ae63 --- /dev/null +++ b/docs/dips-safe-payments/indexer-service-plan.md @@ -0,0 +1,250 @@ +# Indexer Service DIPs Implementation Plan + +## Overview + +This plan details the minimal changes required to the Indexer Service to support the new Safe-based DIPs payment system. The indexer service acts primarily as a pass-through for DIPs, so changes are limited to protocol definitions and response handling. + +## Development Branch + +**Branch**: `pcv/ipfs-dips-timeout` + +This work will continue on the existing `pcv/ipfs-dips-timeout` branch, which has been used for debugging and improving DIPs functionality. + +## Current State + +The Indexer Service currently: +- Implements gRPC server for DIPs using protocol buffers +- Forwards collect payment requests to the gateway +- Returns TAP receipts in the response +- Has no business logic for payment processing (pass-through) + +## Required Changes + +### 1. 
Database Migration for DIPs Receipts + +**Location**: `indexer-service/source/migrations/` + +Create a new migration for DIPs receipts table to support testing: + +```sql +-- 20250XXX_dips_receipts.up.sql +CREATE TABLE IF NOT EXISTS dips_receipts ( + id UUID PRIMARY KEY, + agreement_id UUID NOT NULL REFERENCES indexing_agreements(id), + receipt_id VARCHAR(255) NOT NULL UNIQUE, + amount NUMERIC(39) NOT NULL, + status VARCHAR(20) NOT NULL DEFAULT 'PENDING', + transaction_hash CHAR(66), + error_message TEXT, + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL, + last_polled_at TIMESTAMP WITH TIME ZONE, + CONSTRAINT valid_status CHECK (status IN ('PENDING', 'SUBMITTED', 'FAILED')) +); + +CREATE INDEX idx_dips_receipts_agreement_id ON dips_receipts(agreement_id); +CREATE INDEX idx_dips_receipts_status ON dips_receipts(status); +CREATE INDEX idx_dips_receipts_receipt_id ON dips_receipts(receipt_id); +``` + +### 2. Protocol Buffer Updates + +**Location**: `indexer-service/source/crates/dips/proto/gateway.proto` + +#### Update CollectPaymentResponse + +The main change is to modify the response to return a Receipt ID instead of TAP receipt: + +```protobuf +// Current definition +message CollectPaymentResponse { + uint64 version = 1; + CollectPaymentStatus status = 2; + bytes tap_receipt = 3; +} + +// New definition +message CollectPaymentResponse { + uint64 version = 1; + CollectPaymentStatus status = 2; + string receipt_id = 3; // Receipt ID for polling + string amount = 4; // Payment amount in GRT + string payment_status = 5; // Initial status: "PENDING" +} +``` + +#### Add GetReceiptById RPC Method + +Add a new RPC method to the service definition: + +```protobuf +service GatewayDipsService { + // ... existing methods ... + + /** + * Get the status of a payment receipt by ID. + * + * This method allows the indexer to poll for the status of a previously + * initiated payment collection. 
+ */ + rpc GetReceiptById(GetReceiptByIdRequest) returns (GetReceiptByIdResponse); +} + +message GetReceiptByIdRequest { + uint64 version = 1; + string receipt_id = 2; +} + +message GetReceiptByIdResponse { + uint64 version = 1; + string receipt_id = 2; + string status = 3; // "PENDING" | "SUBMITTED" | "FAILED" + string transaction_hash = 4; // Present when SUBMITTED + string error_message = 5; // Present when FAILED + string amount = 6; + string payment_submitted_at = 7; // ISO timestamp when SUBMITTED +} +``` + +#### Remove TAP-specific Status Codes + +Review and potentially update the `CollectPaymentStatus` enum if any codes are TAP-specific: + +```protobuf +enum CollectPaymentStatus { + ACCEPT = 0; // Keep - payment request accepted + ERR_TOO_EARLY = 1; // Keep - still relevant + ERR_TOO_LATE = 2; // Keep - still relevant + ERR_AMOUNT_OUT_OF_BOUNDS = 3; // Keep - still relevant + ERR_UNKNOWN = 99; // Keep - generic error +} +``` + +### 3. Generated Code Updates + +**Location**: `indexer-service/source/crates/dips/src/proto/` + +After updating the proto files: + +1. Regenerate the Rust bindings using the build script +2. The generated files will automatically include the new fields +3. No manual edits needed to generated code + +### 4. Client Code Updates (if any) + +**Location**: Check for any client code that constructs or handles `CollectPaymentResponse` + +Search for usage of `CollectPaymentResponse` to ensure compatibility: +- Update any code that accesses the old `tap_receipt` field +- Add handling for new fields (`receipt_id`, `amount`, `payment_status`) + +## Implementation Steps + +### Step 1: Create Database Migration + +1. Create new migration file for dips_receipts table +2. Add down migration to drop the table +3. Test migration up and down + +### Step 2: Update Protocol Definitions + +1. Modify `gateway.proto` with new response structure +2. Remove `tap_receipt` field +3. Add `receipt_id`, `amount`, and `payment_status` fields +4. 
Ensure backward compatibility considerations + +### Step 3: Regenerate Protocol Bindings + +```bash +cd indexer-service/source +cargo build -p dips +``` + +This will trigger the build script to regenerate the proto bindings. + +### Step 4: Update Response Handling + +Search for any code that constructs `CollectPaymentResponse`: + +```bash +# Find usage of CollectPaymentResponse +grep -r "CollectPaymentResponse" crates/ +``` + +Update any found usage to use the new fields. + +### Step 5: Version Compatibility + +Consider protocol version handling: +- Current version = 1 +- Decide if version bump is needed +- Document any breaking changes + +## Testing Requirements + +### Unit Tests + +1. Test proto serialization/deserialization with new fields +2. Verify generated code compiles correctly +3. Test any client code that uses the response + +### Integration Tests + +1. Test with mock gateway returning new response format +2. Verify indexer-agent can handle new response +3. Test error scenarios with new fields + +### Manual Testing + +1. Deploy updated indexer-service +2. Trigger payment collection from indexer-agent +3. Verify Receipt ID is returned +4. Confirm no TAP receipts are generated + +## Configuration + +No configuration changes required for indexer-service. The service continues to forward requests to the gateway. + +## Rollback Plan + +If issues arise: +1. Revert to previous proto definitions +2. Regenerate bindings +3. Deploy previous version +4. Ensure indexer-agent compatibility + +## Dependencies + +### External Dependencies +- Gateway must implement new response format +- Indexer-agent must handle Receipt IDs + +### Build Dependencies +- Protocol buffer compiler (protoc) +- Prost build dependencies + +## Implementation Sequence + +This requires both schema and protocol changes: + +1. Create database migration for dips_receipts table +2. Update protocol definitions in gateway.proto +3. Regenerate bindings and test compilation +4. 
Search and update any response handling code +5. Write and run tests +6. Integration testing with other components + +## Success Criteria + +- Protocol definitions updated with new fields +- Generated code compiles without errors +- No TAP receipt field in responses +- Receipt ID returned successfully +- Integration tests pass with indexer-agent + +## Notes + +- The indexer-service remains a thin pass-through layer +- No business logic changes required +- Main change is protocol definition only +- Ensure coordination with gateway and indexer-agent teams \ No newline at end of file diff --git a/indexer-agent/source b/indexer-agent/source index df76679..a83e0b9 160000 --- a/indexer-agent/source +++ b/indexer-agent/source @@ -1 +1 @@ -Subproject commit df766797f2f7915e77a508dd45391ea0137ab90f +Subproject commit a83e0b9a9bb195f9ac234e3b4a3ac8e70188a137 diff --git a/indexer-service/source b/indexer-service/source index ec8f5bb..e754fbb 160000 --- a/indexer-service/source +++ b/indexer-service/source @@ -1 +1 @@ -Subproject commit ec8f5bb954d175aea906998be4e83f0d57e6bb48 +Subproject commit e754fbb8c86cfbca6c15d69a4ce1218d205f4683