From 38d6b7e911147d7e58a45264d5a36ff8102e2b63 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Fri, 13 Mar 2026 14:59:24 +0200 Subject: [PATCH 1/9] gnd: implement transaction receipt support for gnd test Build mock receipts from block events and attach them to log triggers. Events sharing the same txHash share a receipt whose logs contains all their logs in declaration order; events without an explicit txHash each get a unique auto-generated hash and their own single-log receipt. Receipts are only attached to logs whose event selector (topic0) matches a handler that declares receipt: true in the manifest, mirroring production behaviour where graph-node only fetches receipts from the RPC for those handlers. The selector is computed using the same normalisation as graph-node's MappingEventHandler::topic0() to handle the manifest's indexed-before-type convention (e.g. Transfer(indexed address,...)). Most receipt fields (gas, from, to, status) are hardcoded stubs; only receipt.logs reflects the actual test data. --- gnd/docs/gnd-test.md | 52 +++++++- gnd/src/commands/test/runner.rs | 40 +++++- gnd/src/commands/test/trigger.rs | 123 +++++++++++++++--- .../fixtures/gnd_test/subgraph/schema.graphql | 14 ++ .../gnd_test/subgraph/src/receipts.ts | 40 ++++++ .../fixtures/gnd_test/subgraph/subgraph.yaml | 21 +++ .../gnd_test/subgraph/tests/receipts.json | 39 ++++++ 7 files changed, 312 insertions(+), 17 deletions(-) create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/src/receipts.ts create mode 100644 gnd/tests/fixtures/gnd_test/subgraph/tests/receipts.json diff --git a/gnd/docs/gnd-test.md b/gnd/docs/gnd-test.md index 561c881a940..4a6fcd18425 100644 --- a/gnd/docs/gnd-test.md +++ b/gnd/docs/gnd-test.md @@ -144,6 +144,56 @@ Event parameters are automatically ABI-encoded based on the signature. 
Supported } ``` +## Transaction Receipts + +Mock receipts are constructed for every log trigger and attached only to handlers that declare `receipt: true` in the manifest, mirroring production behaviour. Handlers without `receipt: true` receive a null receipt — the same as on a real node. + +**Limitation:** Only `receipt.logs` reflects your test data. All other receipt fields (`from`, `to`, `gas_used`, `status`, etc.) are hardcoded stubs and do not correspond to real transaction data. If your handler reads those fields, the values will be fixed defaults regardless of what you put in the test JSON. + +### How receipts are built + +Every event gets a mock receipt attached automatically. The key rule is **`txHash` grouping**: + +- Events sharing the same `txHash` share **one receipt** — `event.receipt!.logs` contains all of their logs in declaration order. +- Events without an explicit `txHash` each get a unique auto-generated hash (`keccak256(block_number || log_index)`), so each gets its own single-log receipt. + +### Example: Two events sharing a receipt + +```json +{ + "events": [ + { + "address": "0x1234...", + "event": "Transfer(address indexed from, address indexed to, uint256 value)", + "params": { "from": "0xaaaa...", "to": "0xbbbb...", "value": "100" }, + "txHash": "0xdeadbeef00000000000000000000000000000000000000000000000000000000" + }, + { + "address": "0x1234...", + "event": "Transfer(address indexed from, address indexed to, uint256 value)", + "params": { "from": "0xbbbb...", "to": "0xcccc...", "value": "50" }, + "txHash": "0xdeadbeef00000000000000000000000000000000000000000000000000000000" + } + ] +} +``` + +Both handlers receive a receipt where `receipt.logs` has two entries, in declaration order. 
+ +### Mock receipt defaults + +| Field | Value | +|-------|-------| +| `status` | success | +| `cumulative_gas_used` | `21000` | +| `gas_used` | `21000` | +| transaction type | `2` (EIP-1559) | +| `from` | `0x000...000` | +| `to` | `null` | +| `effective_gas_price` | `0` | + +Handlers without `receipt: true` in the manifest are unaffected — they never access `event.receipt`. + ## Block Handlers Block handlers are **automatically triggered** for every block. You don't need to specify block triggers in the JSON. @@ -599,7 +649,7 @@ my-subgraph/ | Block handlers (all filters) | ✅ Supported | | eth_call mocking | ✅ Supported | | Dynamic/template data sources | ✅ Supported | -| Transaction receipts (`receipt: true`) | ❌ Not implemented — handlers get `null` | +| Transaction receipts (`receipt: true`) | ⚠️ Partial — `receipt.logs` is populated and grouped by `txHash`; other fields (gas, from, to, etc.) are hardcoded stubs (see [Transaction Receipts](#transaction-receipts)) | | File data sources / IPFS mocking | ❌ Not implemented | | Call triggers (traces) | ❌ Not implemented | | `--json` CI output | ❌ Not implemented | diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index a4694198844..a6be52f3ae3 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -110,6 +110,10 @@ pub(super) struct ManifestInfo { pub start_block_override: Option, pub hash: DeploymentHash, pub subgraph_name: SubgraphName, + /// Event selectors (topic0) for handlers that declare `receipt: true`. + /// Only logs whose topic0 is in this set receive a non-null receipt. + pub receipt_required_selectors: + std::collections::HashSet, } /// Compute a `DeploymentHash` from a path and seed. 
@@ -185,6 +189,8 @@ pub(super) fn load_manifest_info(opt: &TestOpt) -> Result { let subgraph_name = SubgraphName::new(format!("test/{}-{}", root_dir_name, seed)) .map_err(|e| anyhow!("{}", e))?; + let receipt_required_selectors = extract_receipt_required_selectors(&manifest); + Ok(ManifestInfo { build_dir, manifest_path: built_manifest_path, @@ -193,9 +199,37 @@ pub(super) fn load_manifest_info(opt: &TestOpt) -> Result { start_block_override, hash, subgraph_name, + receipt_required_selectors, }) } +/// Collect the event selectors (topic0) for all handlers that declare `receipt: true`. +/// +/// Covers both data sources and templates. Only logs whose topic0 appears in +/// this set will have a non-null receipt attached to their trigger. +/// +/// The selector is computed using the same method as graph-node's +/// `MappingEventHandler::topic0()`: strip `"indexed "` then all spaces, then +/// keccak256. This handles the manifest format `Transfer(indexed address,...)` +/// where `indexed` precedes the type rather than following it. +fn extract_receipt_required_selectors( + manifest: &Manifest, +) -> std::collections::HashSet { + use graph::prelude::alloy::primitives::keccak256; + + manifest + .data_sources + .iter() + .flat_map(|ds| &ds.event_handlers) + .chain(manifest.templates.iter().flat_map(|t| &t.event_handlers)) + .filter(|h| h.receipt) + .map(|h| { + let normalized = h.event.replace("indexed ", "").replace(' ', ""); + keccak256(normalized.as_bytes()) + }) + .collect() +} + fn extract_network_from_manifest(manifest: &Manifest) -> Result { let network = manifest .data_sources @@ -246,7 +280,11 @@ pub async fn run_single_test( // Default block numbering starts at the manifest's startBlock so that // test blocks without explicit numbers fall in the subgraph's indexed range. 
- let blocks = build_blocks_with_triggers(test_file, manifest_info.min_start_block)?; + let blocks = build_blocks_with_triggers( + test_file, + manifest_info.min_start_block, + &manifest_info.receipt_required_selectors, + )?; // Create the database for this test. For pgtemp, the `db` value must // stay alive for the duration of the test — dropping it destroys the database. diff --git a/gnd/src/commands/test/trigger.rs b/gnd/src/commands/test/trigger.rs index c6fef3494e8..b375d8bd0d3 100644 --- a/gnd/src/commands/test/trigger.rs +++ b/gnd/src/commands/test/trigger.rs @@ -9,21 +9,30 @@ use super::schema::{LogEvent, TestFile}; use anyhow::{anyhow, ensure, Context, Result}; use graph::blockchain::block_stream::BlockWithTriggers; +use graph::components::ethereum::AnyTransactionReceiptBare; +use graph::prelude::alloy::consensus::{Eip658Value, Receipt, ReceiptWithBloom}; use graph::prelude::alloy::dyn_abi::{DynSolType, DynSolValue}; use graph::prelude::alloy::json_abi::Event; -use graph::prelude::alloy::primitives::{keccak256, Address, Bytes, B256, I256, U256}; -use graph::prelude::alloy::rpc::types::Log; +use graph::prelude::alloy::network::AnyReceiptEnvelope; +use graph::prelude::alloy::primitives::{keccak256, Address, Bloom, Bytes, B256, I256, U256}; +use graph::prelude::alloy::rpc::types::{Log, TransactionReceipt}; use graph::prelude::{BlockPtr, LightEthereumBlock}; use graph_chain_ethereum::chain::BlockFinality; use graph_chain_ethereum::trigger::{EthereumBlockTriggerType, EthereumTrigger, LogRef}; use graph_chain_ethereum::Chain; +use std::collections::HashMap; use std::sync::Arc; /// Convert test blocks into `BlockWithTriggers`, chained by parent hash. /// Block numbers auto-increment from `start_block` when not explicit. +/// +/// `receipt_required_selectors` is the set of event selectors (topic0) for +/// handlers that declare `receipt: true`. Only matching logs get a non-null +/// receipt; all others receive `None`, mirroring production behaviour. 
pub fn build_blocks_with_triggers( test_file: &TestFile, start_block: u64, + receipt_required_selectors: &std::collections::HashSet, ) -> Result>> { let mut blocks = Vec::new(); let mut current_number = start_block; @@ -47,9 +56,31 @@ pub fn build_blocks_with_triggers( let mut triggers = Vec::new(); - for (log_index, log_event) in test_block.events.iter().enumerate() { - let eth_trigger = build_log_trigger(number, hash, log_index as u64, log_event)?; - triggers.push(eth_trigger); + // Pass 1: parse each event into (tx_hash, Arc). + let event_logs: Vec<(B256, Arc)> = test_block + .events + .iter() + .enumerate() + .map(|(i, e)| prepare_event_log(number, hash, i as u64, e)) + .collect::>()?; + + // Pass 2: build one mock receipt per tx-hash group. + let receipts = build_receipts_by_tx(number, hash, &event_logs); + + // Pass 3: create one log trigger per event. + // Only attach a receipt when the log's event selector (topic0) belongs + // to a handler that declared `receipt: true` in the manifest — matching + // production behaviour where handlers without that flag receive null. + for (tx_hash, full_log) in &event_logs { + let needs_receipt = full_log + .topics() + .first() + .is_some_and(|t| receipt_required_selectors.contains(t)); + let receipt = needs_receipt.then(|| receipts[tx_hash].clone()); + triggers.push(EthereumTrigger::Log(LogRef::FullLog( + full_log.clone(), + receipt, + ))); } // Auto-inject block triggers for every block so that block handlers @@ -81,17 +112,17 @@ pub fn build_blocks_with_triggers( Ok(blocks) } -/// Build an `EthereumTrigger::Log` from a test JSON event. -fn build_log_trigger( +/// Parse a test event into `(tx_hash, Arc)` without building a trigger. +/// +/// The tx_hash is either taken from `trigger.tx_hash` or auto-generated as +/// `keccak256(block_number || log_index)`, which is unique per event. 
+fn prepare_event_log( block_number: u64, block_hash: B256, log_index: u64, trigger: &LogEvent, -) -> Result { - let address: Address = trigger - .address - .parse() - .context("Invalid contract address")?; +) -> Result<(B256, Arc)> { + let address: Address = trigger.address.parse()?; let (topics, data) = encode_event_log(&trigger.event, &trigger.params)?; @@ -100,8 +131,7 @@ fn build_log_trigger( .tx_hash .as_ref() .map(|h| h.parse::()) - .transpose() - .context("Invalid tx hash")? + .transpose()? .unwrap_or_else(|| { keccak256([block_number.to_be_bytes(), log_index.to_be_bytes()].concat()) }); @@ -122,7 +152,70 @@ fn build_log_trigger( removed: false, }); - Ok(EthereumTrigger::Log(LogRef::FullLog(full_log, None))) + Ok((tx_hash, full_log)) +} + +/// Build one mock receipt per unique tx_hash from a block's event logs. +/// +/// Events sharing the same `tx_hash` share a receipt whose `logs` contains +/// all of their logs in declaration order. Events without an explicit +/// `tx_hash` each have a unique auto-generated hash, so they each get their +/// own single-log receipt. +fn build_receipts_by_tx( + block_number: u64, + block_hash: B256, + event_logs: &[(B256, Arc)], +) -> HashMap> { + // Collect logs per tx_hash, preserving insertion order for tx_index. 
+ let mut tx_order: Vec = Vec::new(); + let mut logs_by_tx: HashMap> = HashMap::new(); + + for (tx_hash, log) in event_logs { + let entry = logs_by_tx.entry(*tx_hash).or_insert_with(|| { + tx_order.push(*tx_hash); + Vec::new() + }); + entry.push((**log).clone()); + } + + let mut receipts: HashMap> = + HashMap::with_capacity(tx_order.len()); + + for (tx_index, tx_hash) in tx_order.into_iter().enumerate() { + let logs = logs_by_tx.remove(&tx_hash).unwrap_or_default(); + + let core_receipt = Receipt { + status: Eip658Value::Eip658(true), + cumulative_gas_used: 21_000, + logs, + }; + + let receipt_with_bloom = ReceiptWithBloom::new(core_receipt, Bloom::default()); + + let any_envelope = AnyReceiptEnvelope { + inner: receipt_with_bloom, + r#type: 2, + }; + + let receipt = TransactionReceipt { + transaction_hash: tx_hash, + transaction_index: Some(tx_index as u64), + block_hash: Some(block_hash), + block_number: Some(block_number), + gas_used: 21_000, + from: Address::ZERO, + to: None, + effective_gas_price: 0, + blob_gas_used: None, + blob_gas_price: None, + contract_address: None, + inner: any_envelope, + }; + + receipts.insert(tx_hash, Arc::new(receipt)); + } + + receipts } /// Encode event parameters into EVM log topics and data using `alloy::json_abi::Event::parse()`. diff --git a/gnd/tests/fixtures/gnd_test/subgraph/schema.graphql b/gnd/tests/fixtures/gnd_test/subgraph/schema.graphql index f665f3118f9..37ec979870d 100644 --- a/gnd/tests/fixtures/gnd_test/subgraph/schema.graphql +++ b/gnd/tests/fixtures/gnd_test/subgraph/schema.graphql @@ -31,3 +31,17 @@ type PollingBlock @entity(immutable: true) { id: Bytes! number: BigInt! } + +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} + +type Approval @entity(immutable: true) { + id: Bytes! + owner: Bytes! + spender: Bytes! + value: BigInt! 
+} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/src/receipts.ts b/gnd/tests/fixtures/gnd_test/subgraph/src/receipts.ts new file mode 100644 index 00000000000..7ee1291b576 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/src/receipts.ts @@ -0,0 +1,40 @@ +import { Transfer as TransferEvent } from '../generated/StandardToken/ERC20' +import { Transfer, Approval } from '../generated/schema' +import { Bytes, ethereum, log } from "@graphprotocol/graph-ts"; + +const APPROVAL_TOPIC0 = "0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925" + +export function handleEventsWithReceipts(event: TransferEvent): void { + let receipt = event.receipt; + + if (!receipt) { + log.error("No receipt found for transaction {}", [event.transaction.hash.toHexString()]); + return; + } + + // iterate over the logs in the receipt and find the Approval event + for (let i = 0; i < receipt.logs.length; i++) { + let logEntry = receipt.logs[i]; + + if (logEntry.topics[0].toHexString() == APPROVAL_TOPIC0) { + let approval = new Approval(event.transaction.hash); + // trim the padding from the topics to get the actual address + approval.owner = changetype(logEntry.topics[1].subarray(12, 32)); + approval.spender = changetype(logEntry.topics[2].subarray(12, 32)); + let data = ethereum.decode("uint256", logEntry.data); + if (data) { + approval.value = data.toBigInt(); + } else { + log.error("Failed to decode approval value for transaction {}", [event.transaction.hash.toHexString()]); + return; + } + approval.save(); + } + } + + let transfer = new Transfer(event.transaction.hash); + transfer.from = event.params.from; + transfer.to = event.params.to; + transfer.value = event.params.value; + transfer.save(); +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml b/gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml index 5b2e7b3ab6b..46fe971c642 100644 --- a/gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml +++ b/gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml @@ 
-21,6 +21,27 @@ dataSources: eventHandlers: - event: Transfer(indexed address,indexed address,uint256) handler: handleTransfer + - name: Receipts + kind: ethereum/contract + network: arbitrum + source: + abi: ERC20 + address: "0x000000000000000000000000000000000000000a" + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/receipts.ts + entities: + - Transfer + - Approval + abis: + - name: ERC20 + file: ./abis/ERC20.json + eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleEventsWithReceipts + receipt: true - name: EveryBlock kind: ethereum network: arbitrum diff --git a/gnd/tests/fixtures/gnd_test/subgraph/tests/receipts.json b/gnd/tests/fixtures/gnd_test/subgraph/tests/receipts.json new file mode 100644 index 00000000000..67388f6faa8 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/subgraph/tests/receipts.json @@ -0,0 +1,39 @@ +{ + "name": "Test transaction receipts", + "blocks": [ + { + "events": [ + { + "address": "0x000000000000000000000000000000000000000a", + "event": "Approval(address indexed owner, address indexed spender, uint256 value)", + "params": { + "owner": "0xaaaa000000000000000000000000000000000000", + "spender": "0xbbbb000000000000000000000000000000000000", + "value": "1000000000000000000" + }, + "txHash": "0xdeadbeef00000000000000000000000000000000000000000000000000000000" + }, + { + "address": "0x000000000000000000000000000000000000000a", + "event": "Transfer(address indexed from, address indexed to, uint256 value)", + "params": { + "from": "0xbbbb000000000000000000000000000000000000", + "to": "0xcccc000000000000000000000000000000000000", + "value": "1000000000000000000" + }, + "txHash": "0xdeadbeef00000000000000000000000000000000000000000000000000000000" + } + ] + } + ], + "assertions": [ + { + "query": "{ transfers { id from to value } }", + "expected": { "transfers": [ { "id": "0xdeadbeef00000000000000000000000000000000000000000000000000000000", 
"from": "0xbbbb000000000000000000000000000000000000", "to": "0xcccc000000000000000000000000000000000000", "value": "1000000000000000000"} ] } + }, + { + "query": "{ approvals {id owner spender value } }", + "expected": { "approvals": [ { "id": "0xdeadbeef00000000000000000000000000000000000000000000000000000000", "owner": "0xaaaa000000000000000000000000000000000000", "spender": "0xbbbb000000000000000000000000000000000000", "value": "1000000000000000000" } ] } + } + ] +} From b2e2db97c494e3271971c98d19672a0757d90b57 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Fri, 13 Mar 2026 15:00:09 +0200 Subject: [PATCH 2/9] gnd: fix txHash deserialization in test schema --- gnd/src/commands/test/schema.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs index fb254e340f9..37c492486d3 100644 --- a/gnd/src/commands/test/schema.rs +++ b/gnd/src/commands/test/schema.rs @@ -98,7 +98,7 @@ pub struct LogEvent { pub params: serde_json::Map, /// Explicit tx hash, or generated as `keccak256(block_number || log_index)`. - #[serde(default)] + #[serde(default, rename = "txHash")] pub tx_hash: Option, } From b40d1fa5bb23365d6fb09809ff8aede31787dcb6 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Sat, 14 Mar 2026 17:45:46 +0200 Subject: [PATCH 3/9] gnd: add mock IPFS client for file data source testing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces the dummy IpfsRpcClient with a MockIpfsClient that serves pre-loaded CID → bytes from a "files" array in test JSON files. Missing CIDs are reported with a clear error instead of a 60-second timeout. Also adds IpfsResponse::for_test() helper and documents the unmocked eth_call timeout in the troubleshooting guide. 
--- gnd/docs/gnd-test.md | 24 +++++++++- gnd/src/commands/test/mock_ipfs.rs | 46 ++++++++++++++++++ gnd/src/commands/test/mod.rs | 3 +- gnd/src/commands/test/runner.rs | 77 ++++++++++++++++++++++++++---- gnd/src/commands/test/schema.rs | 63 ++++++++++++++++++++++++ graph/src/ipfs/client.rs | 10 ++++ 6 files changed, 212 insertions(+), 11 deletions(-) create mode 100644 gnd/src/commands/test/mock_ipfs.rs diff --git a/gnd/docs/gnd-test.md b/gnd/docs/gnd-test.md index 4a6fcd18425..cb60f8e616d 100644 --- a/gnd/docs/gnd-test.md +++ b/gnd/docs/gnd-test.md @@ -767,7 +767,7 @@ GraphQL queries → Assertions - **Real WASM runtime:** Uses `EthereumRuntimeAdapterBuilder` with real `ethereum.call` host function - **Pre-populated call cache:** `eth_call` responses are cached before indexing starts - **No IPFS for manifest:** Uses `FileLinkResolver` to load manifest/WASM from build directory -- **Dummy RPC adapter:** Registered at `http://0.0.0.0:0` for capability lookup; never actually called +- **Dummy RPC adapter:** Registered at `http://0.0.0.0:0` — exists so the runtime can resolve an adapter with the required capabilities. If a mapping makes an `ethereum.call` that has no matching mock in `ethCalls`, the call misses the cache and falls through to this dummy adapter. The connection is refused immediately (port 0 is invalid), which graph-node treats as a possible reorg and restarts the block stream. The indexer then loops until the 60-second test timeout. See [Unmocked eth_call](#unmocked-eth_call-causes-60-second-timeout) in Troubleshooting. ## Troubleshooting @@ -799,6 +799,28 @@ GraphQL queries → Assertions 2. Check function signature format: `"functionName(inputTypes)(returnTypes)"` 3. Ensure parameters are in correct order +### Unmocked eth_call Causes 60-Second Timeout + +**Cause:** A mapping handler calls `ethereum.call` (directly or via a generated contract binding) for a call that has no matching entry in `ethCalls`. 
The call misses the pre-populated cache and is forwarded to the dummy RPC adapter at `http://0.0.0.0:0`. The connection is refused immediately, but graph-node interprets connection errors as a possible chain reorganisation and restarts the block stream instead of failing. The indexer loops indefinitely until the test runner's 60-second timeout expires. + +**Symptom:** Test fails with `Sync timeout after 60s` with no indication of which call was missing. + +**Fix:** +1. Add the missing call to `ethCalls` in your test block: + ```json + "ethCalls": [ + { + "address": "0xContractAddress", + "function": "myFunction(uint256):(address)", + "params": ["42"], + "returns": ["0xSomeAddress"] + } + ] + ``` +2. If the call is not supposed to happen, check the mapping logic — a code path may be executing unexpectedly. + +**Known limitation:** There is currently no fail-fast error for unmocked calls. The only signal is the timeout. A future improvement will make the dummy adapter panic immediately on a cache miss with a descriptive message. + ### Block Handler Not Firing **Cause:** Block handlers auto-fire, but might be outside data source's active range. diff --git a/gnd/src/commands/test/mock_ipfs.rs b/gnd/src/commands/test/mock_ipfs.rs new file mode 100644 index 00000000000..4c31f958682 --- /dev/null +++ b/gnd/src/commands/test/mock_ipfs.rs @@ -0,0 +1,46 @@ +//! Mock IPFS client for `gnd test`. +//! +//! Replaces the real `IpfsRpcClient` with a map of pre-loaded CID → bytes. +//! Any CID not found in the map is sent to the `unresolved_tx` channel and +//! an error is returned so the `OffchainMonitor` retries with backoff. +//! After sync, the runner drains the channel and reports missing CIDs. 
+ +use std::collections::HashMap; +use std::sync::Arc; + +use async_trait::async_trait; +use graph::bytes::Bytes; +use graph::ipfs::{ + ContentPath, IpfsClient, IpfsError, IpfsMetrics, IpfsRequest, IpfsResponse, IpfsResult, +}; +use tokio::sync::mpsc::UnboundedSender; + +pub struct MockIpfsClient { + pub files: HashMap, + pub metrics: IpfsMetrics, + pub unresolved_tx: UnboundedSender, +} + +#[async_trait] +impl IpfsClient for MockIpfsClient { + fn metrics(&self) -> &IpfsMetrics { + &self.metrics + } + + async fn call(self: Arc, req: IpfsRequest) -> IpfsResult { + let path = match req { + IpfsRequest::Cat(p) | IpfsRequest::GetBlock(p) => p, + }; + + match self.files.get(&path) { + Some(bytes) => Ok(IpfsResponse::for_test(path, bytes.clone())), + None => { + let _ = self.unresolved_tx.send(path.clone()); + Err(IpfsError::ContentNotAvailable { + path, + reason: anyhow::anyhow!("CID not found in mock 'files'"), + }) + } + } + } +} diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs index 1fe4debc3d4..8bd66e73787 100644 --- a/gnd/src/commands/test/mod.rs +++ b/gnd/src/commands/test/mod.rs @@ -32,6 +32,7 @@ mod block_stream; mod eth_calls; mod matchstick; mod mock_chain; +mod mock_ipfs; mod noop; mod output; mod runner; @@ -170,7 +171,7 @@ pub async fn run_test(opt: TestOpt) -> Result<()> { } }; - match runner::run_single_test(&opt, &manifest_info, &test_file).await { + match runner::run_single_test(&opt, &manifest_info, &test_file, &path).await { Ok(result) => { output::print_test_result(&test_file.name, &result); if result.is_passed() { diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index a6be52f3ae3..63925f90a90 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -13,6 +13,7 @@ use super::assertion::run_assertions; use super::block_stream::StaticStreamBuilder; use super::mock_chain; +use super::mock_ipfs::MockIpfsClient; use super::noop::{NoopAdapterSelector, StaticBlockRefetcher}; 
use super::schema::{TestFile, TestResult}; use super::trigger::build_blocks_with_triggers; @@ -35,7 +36,7 @@ use graph::data::subgraph::schema::SubgraphError; use graph::endpoint::EndpointMetrics; use graph::env::EnvVars; use graph::firehose::{FirehoseEndpoint, FirehoseEndpoints, SubgraphLimit}; -use graph::ipfs::{IpfsMetrics, IpfsRpcClient, ServerAddress}; +use graph::ipfs::{ContentPath, IpfsMetrics}; use graph::prelude::{ DeploymentHash, LoggerFactory, NodeId, SubgraphCountMetric, SubgraphName, SubgraphRegistrar, SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, @@ -52,6 +53,7 @@ use graph_node::config::Config; use graph_node::manager::PanicSubscriptionManager; use graph_node::store_builder::StoreBuilder; use graph_store_postgres::{ChainHeadUpdateListener, ChainStore, Store, SubgraphStore}; +use std::collections::HashMap; use std::marker::PhantomData; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -256,6 +258,7 @@ pub async fn run_single_test( opt: &TestOpt, manifest_info: &ManifestInfo, test_file: &TestFile, + test_file_path: &Path, ) -> Result { // Warn (and short-circuit) when there are no assertions. if test_file.assertions.is_empty() { @@ -286,6 +289,24 @@ pub async fn run_single_test( &manifest_info.receipt_required_selectors, )?; + // Build mock IPFS file map. Fails fast on invalid CIDs or unreadable file paths. + let test_file_dir = test_file_path + .parent() + .map(|p| p.to_path_buf()) + .unwrap_or_else(|| PathBuf::from(".")); + + let mut mock_files: HashMap = HashMap::new(); + for entry in &test_file.files { + let path = ContentPath::new(&entry.cid) + .map_err(|e| anyhow!("Invalid CID '{}' in test files: {}", entry.cid, e))?; + let content = entry + .resolve(&test_file_dir) + .with_context(|| format!("Failed to resolve mock file for CID '{}'", entry.cid))?; + mock_files.insert(path, content); + } + + let (unresolved_tx, mut unresolved_rx) = tokio::sync::mpsc::unbounded_channel::(); + // Create the database for this test. 
For pgtemp, the `db` value must // stay alive for the duration of the test — dropping it destroys the database. let db = create_test_database(opt, &manifest_info.build_dir)?; @@ -296,7 +317,15 @@ pub async fn run_single_test( let chain = setup_chain(&logger, blocks.clone(), &stores).await?; - let ctx = setup_context(&logger, &stores, &chain, manifest_info).await?; + let ctx = setup_context( + &logger, + &stores, + &chain, + manifest_info, + mock_files, + unresolved_tx, + ) + .await?; // Populate eth_call cache with mock responses before starting indexer. // This ensures handlers can successfully retrieve mocked contract call results. @@ -330,7 +359,34 @@ pub async fn run_single_test( ) .await { - Ok(()) => run_assertions(&ctx, &test_file.assertions).await, + Ok(()) => { + // Drain any CIDs that were requested but not found in the mock. + // Deduplicate so each missing CID is listed once. + let mut unresolved: Vec = Vec::new(); + while let Ok(cid) = unresolved_rx.try_recv() { + if !unresolved.contains(&cid) { + unresolved.push(cid); + } + } + + if !unresolved.is_empty() { + let cid_list = unresolved + .iter() + .map(|p| format!(" - {}", p)) + .collect::>() + .join("\n"); + Ok(TestResult { + handler_error: Some(format!( + "File data source requested CID not found in mock 'files':\n{}\n\ + Add the missing CID(s) to the \"files\" array in your test JSON.", + cid_list + )), + assertions: vec![], + }) + } else { + run_assertions(&ctx, &test_file.assertions).await + } + } Err(subgraph_error) => { // The subgraph handler threw a fatal error during indexing. // Report it as a test failure without running assertions. 
@@ -614,6 +670,8 @@ async fn setup_context( stores: &TestStores, chain: &Arc, manifest_info: &ManifestInfo, + mock_files: HashMap, + unresolved_tx: tokio::sync::mpsc::UnboundedSender, ) -> Result { let build_dir = &manifest_info.build_dir; let manifest_path = &manifest_info.manifest_path; @@ -643,13 +701,14 @@ async fn setup_context( FileLinkResolver::new(Some(build_dir.to_path_buf()), aliases), ); - // IPFS client is required by the instance manager constructor but not used - // for manifest loading (FileLinkResolver handles that). + // Replace the real IPFS client with a mock that serves pre-loaded content. + // FileLinkResolver handles manifest loading; the mock handles file data sources. let ipfs_metrics = IpfsMetrics::new(&mock_registry); - let ipfs_client = Arc::new( - IpfsRpcClient::new_unchecked(ServerAddress::test_rpc_api(), ipfs_metrics, logger) - .context("Failed to create IPFS client")?, - ); + let ipfs_client = Arc::new(MockIpfsClient { + files: mock_files, + metrics: ipfs_metrics, + unresolved_tx, + }); let ipfs_service = ipfs_service( ipfs_client, diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs index 37c492486d3..b048ac52f4c 100644 --- a/gnd/src/commands/test/schema.rs +++ b/gnd/src/commands/test/schema.rs @@ -30,6 +30,7 @@ //! } //! ``` +use graph::bytes::Bytes; use serde::Deserialize; use serde_json::Value; use std::path::{Path, PathBuf}; @@ -39,6 +40,10 @@ use std::path::{Path, PathBuf}; pub struct TestFile { pub name: String, + /// Mock IPFS file contents keyed by CID. Used for file data sources. + #[serde(default)] + pub files: Vec, + /// Ordered sequence of mock blocks to index. #[serde(default)] pub blocks: Vec, @@ -48,6 +53,64 @@ pub struct TestFile { pub assertions: Vec, } +/// A mock IPFS file entry for file data source testing. +/// +/// Exactly one of `content` or `file` must be set. +#[derive(Debug, Clone, Deserialize)] +pub struct MockFile { + /// Syntactically valid IPFS CID (v0 `Qm...` or v1 `bafy...`). 
+    /// The CID does not need to be the actual hash of the content — the mock
+    /// ignores the hash relationship.
+    pub cid: String,
+
+    /// Inline UTF-8 content. Exactly one of `content` or `file` must be set.
+    #[serde(default)]
+    pub content: Option<String>,
+
+    /// Path to a file whose contents are loaded as raw bytes (no UTF-8
+    /// validation is performed).
+    /// Resolved relative to the test JSON file. Exactly one of `content` or
+    /// `file` must be set.
+    #[serde(default)]
+    pub file: Option<String>,
+}
+
+impl MockFile {
+    /// Resolve this entry to bytes, given the directory of the test JSON file.
+    ///
+    /// Fails if:
+    /// - neither `content` nor `file` is set
+    /// - both `content` and `file` are set
+    /// - the referenced `file` path cannot be read
+    pub fn resolve(&self, test_dir: &Path) -> anyhow::Result<Bytes> {
+        match (&self.content, &self.file) {
+            (Some(content), None) => Ok(Bytes::from(content.clone().into_bytes())),
+            (None, Some(file)) => {
+                let path = if Path::new(file).is_absolute() {
+                    PathBuf::from(file)
+                } else {
+                    test_dir.join(file)
+                };
+                let data = std::fs::read(&path).map_err(|e| {
+                    anyhow::anyhow!("Failed to read file '{}': {}", path.display(), e)
+                })?;
+                Ok(Bytes::from(data))
+            }
+            (Some(_), Some(_)) => {
+                anyhow::bail!(
+                    "MockFile entry for CID '{}' must have either 'content' or 'file', not both",
+                    self.cid
+                )
+            }
+            (None, None) => {
+                anyhow::bail!(
+                    "MockFile entry for CID '{}' must have either 'content' or 'file'",
+                    self.cid
+                )
+            }
+        }
+    }
+}
+
 #[derive(Debug, Clone, Deserialize)]
 pub struct TestBlock {
     /// Block number. If omitted, auto-increments starting from `start_block`
diff --git a/graph/src/ipfs/client.rs b/graph/src/ipfs/client.rs
index 06bf7aee99c..908a46c338e 100644
--- a/graph/src/ipfs/client.rs
+++ b/graph/src/ipfs/client.rs
@@ -196,6 +196,16 @@ pub struct IpfsResponse {
 }
 
 impl IpfsResponse {
+    /// Construct an `IpfsResponse` from pre-buffered bytes.
+    ///
+    /// Intended for mock `IpfsClient` implementations in tests.
+ pub fn for_test(path: ContentPath, bytes: Bytes) -> Self { + Self { + path, + response: reqwest::Response::from(http::Response::new(bytes)), + } + } + /// Reads and returns the response body. /// /// If the max size is specified and the response body is larger than the max size, From 86cca014024b915bbbda67e47c5199d5233ce51a Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Sat, 14 Mar 2026 20:16:20 +0200 Subject: [PATCH 4/9] gnd: add mock Arweave resolver for file/arweave data source testing Introduces `MockArweaveResolver` and the `arweaveFiles` schema field so `gnd test` can serve pre-loaded Arweave content without hitting the network. Unresolved tx IDs are collected and reported as a clear test failure, mirroring the existing IPFS mock behaviour. --- gnd/src/commands/test/mock_arweave.rs | 45 +++++++++++ gnd/src/commands/test/mod.rs | 1 + gnd/src/commands/test/runner.rs | 103 +++++++++++++++++++------- gnd/src/commands/test/schema.rs | 57 ++++++++++++++ 4 files changed, 180 insertions(+), 26 deletions(-) create mode 100644 gnd/src/commands/test/mock_arweave.rs diff --git a/gnd/src/commands/test/mock_arweave.rs b/gnd/src/commands/test/mock_arweave.rs new file mode 100644 index 00000000000..7b04cb60e4a --- /dev/null +++ b/gnd/src/commands/test/mock_arweave.rs @@ -0,0 +1,45 @@ +//! Mock Arweave resolver for `gnd test`. +//! +//! Replaces the real `ArweaveClient` with a map of pre-loaded txId → bytes. +//! Any txId not found in the map is sent to the `unresolved_tx` channel and +//! `ServerUnavailable` is returned, which causes the `PollingMonitor` to retry +//! with backoff. After sync, the runner drains the channel and reports missing +//! tx IDs as a clear test failure. 
+ +use std::collections::HashMap; + +use async_trait::async_trait; +use graph::bytes::Bytes; +use graph::components::link_resolver::{ArweaveClientError, ArweaveResolver, FileSizeLimit}; +use graph::data_source::offchain::Base64; +use tokio::sync::mpsc::UnboundedSender; + +#[derive(Debug)] +pub struct MockArweaveResolver { + pub files: HashMap, + pub unresolved_tx: UnboundedSender, +} + +#[async_trait] +impl ArweaveResolver for MockArweaveResolver { + async fn get(&self, file: &Base64) -> Result, ArweaveClientError> { + self.get_with_limit(file, &FileSizeLimit::Unlimited).await + } + + async fn get_with_limit( + &self, + file: &Base64, + _limit: &FileSizeLimit, + ) -> Result, ArweaveClientError> { + match self.files.get(file.as_str()) { + Some(bytes) => Ok(bytes.to_vec()), + None => { + let _ = self.unresolved_tx.send(file.as_str().to_owned()); + Err(ArweaveClientError::ServerUnavailable(format!( + "txId '{}' not found in mock 'arweaveFiles'", + file.as_str() + ))) + } + } + } +} diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs index 8bd66e73787..67e14377bf7 100644 --- a/gnd/src/commands/test/mod.rs +++ b/gnd/src/commands/test/mod.rs @@ -31,6 +31,7 @@ mod assertion; mod block_stream; mod eth_calls; mod matchstick; +mod mock_arweave; mod mock_chain; mod mock_ipfs; mod noop; diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 63925f90a90..1d6ec4dae58 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -12,6 +12,7 @@ use super::assertion::run_assertions; use super::block_stream::StaticStreamBuilder; +use super::mock_arweave::MockArweaveResolver; use super::mock_chain; use super::mock_ipfs::MockIpfsClient; use super::noop::{NoopAdapterSelector, StaticBlockRefetcher}; @@ -24,7 +25,7 @@ use graph::amp::FlightClient; use graph::blockchain::block_stream::BlockWithTriggers; use graph::blockchain::{BlockPtr, BlockchainMap, ChainIdentifier}; use graph::cheap_clone::CheapClone; -use 
graph::components::link_resolver::{ArweaveClient, FileLinkResolver}; +use graph::components::link_resolver::FileLinkResolver; use graph::components::metrics::MetricsRegistry; use graph::components::network_provider::{ AmpChainNames, ChainName, ProviderCheckStrategy, ProviderManager, @@ -81,6 +82,14 @@ fn make_test_logger(verbose: u8) -> Logger { } } +/// Mock file data passed to `setup_context`. +struct MockData { + ipfs_files: HashMap, + ipfs_unresolved_tx: tokio::sync::mpsc::UnboundedSender, + arweave_files: HashMap, + arweave_unresolved_tx: tokio::sync::mpsc::UnboundedSender, +} + struct TestStores { network_name: ChainName, /// Listens for chain head updates — needed by the Chain constructor. @@ -307,6 +316,20 @@ pub async fn run_single_test( let (unresolved_tx, mut unresolved_rx) = tokio::sync::mpsc::unbounded_channel::(); + let mut mock_arweave_files: HashMap = HashMap::new(); + for entry in &test_file.arweave_files { + let content = entry.resolve(&test_file_dir).with_context(|| { + format!( + "Failed to resolve mock Arweave file for txId '{}'", + entry.tx_id + ) + })?; + mock_arweave_files.insert(entry.tx_id.clone(), content); + } + + let (arweave_unresolved_tx, mut arweave_unresolved_rx) = + tokio::sync::mpsc::unbounded_channel::(); + // Create the database for this test. For pgtemp, the `db` value must // stay alive for the duration of the test — dropping it destroys the database. 
let db = create_test_database(opt, &manifest_info.build_dir)?; @@ -317,15 +340,14 @@ pub async fn run_single_test( let chain = setup_chain(&logger, blocks.clone(), &stores).await?; - let ctx = setup_context( - &logger, - &stores, - &chain, - manifest_info, - mock_files, - unresolved_tx, - ) - .await?; + let mock_data = MockData { + ipfs_files: mock_files, + ipfs_unresolved_tx: unresolved_tx, + arweave_files: mock_arweave_files, + arweave_unresolved_tx, + }; + + let ctx = setup_context(&logger, &stores, &chain, manifest_info, mock_data).await?; // Populate eth_call cache with mock responses before starting indexer. // This ensures handlers can successfully retrieve mocked contract call results. @@ -362,25 +384,51 @@ pub async fn run_single_test( Ok(()) => { // Drain any CIDs that were requested but not found in the mock. // Deduplicate so each missing CID is listed once. - let mut unresolved: Vec = Vec::new(); + let mut unresolved_ipfs: Vec = Vec::new(); while let Ok(cid) = unresolved_rx.try_recv() { - if !unresolved.contains(&cid) { - unresolved.push(cid); + if !unresolved_ipfs.contains(&cid) { + unresolved_ipfs.push(cid); } } - if !unresolved.is_empty() { - let cid_list = unresolved + let mut unresolved_arweave: Vec = Vec::new(); + while let Ok(tx_id) = arweave_unresolved_rx.try_recv() { + if !unresolved_arweave.contains(&tx_id) { + unresolved_arweave.push(tx_id); + } + } + + let mut missing_parts: Vec = Vec::new(); + + if !unresolved_ipfs.is_empty() { + let list = unresolved_ipfs .iter() .map(|p| format!(" - {}", p)) .collect::>() .join("\n"); + missing_parts.push(format!( + "IPFS CIDs not found in mock 'files':\n{}\n\ + Add the missing CID(s) to the \"files\" array in your test JSON.", + list + )); + } + + if !unresolved_arweave.is_empty() { + let list = unresolved_arweave + .iter() + .map(|id| format!(" - {}", id)) + .collect::>() + .join("\n"); + missing_parts.push(format!( + "Arweave tx IDs not found in mock 'arweaveFiles':\n{}\n\ + Add the missing txId(s) to 
the \"arweaveFiles\" array in your test JSON.", + list + )); + } + + if !missing_parts.is_empty() { Ok(TestResult { - handler_error: Some(format!( - "File data source requested CID not found in mock 'files':\n{}\n\ - Add the missing CID(s) to the \"files\" array in your test JSON.", - cid_list - )), + handler_error: Some(missing_parts.join("\n\n")), assertions: vec![], }) } else { @@ -670,8 +718,7 @@ async fn setup_context( stores: &TestStores, chain: &Arc, manifest_info: &ManifestInfo, - mock_files: HashMap, - unresolved_tx: tokio::sync::mpsc::UnboundedSender, + mock_data: MockData, ) -> Result { let build_dir = &manifest_info.build_dir; let manifest_path = &manifest_info.manifest_path; @@ -705,9 +752,9 @@ async fn setup_context( // FileLinkResolver handles manifest loading; the mock handles file data sources. let ipfs_metrics = IpfsMetrics::new(&mock_registry); let ipfs_client = Arc::new(MockIpfsClient { - files: mock_files, + files: mock_data.ipfs_files, metrics: ipfs_metrics, - unresolved_tx, + unresolved_tx: mock_data.ipfs_unresolved_tx, }); let ipfs_service = ipfs_service( @@ -717,9 +764,13 @@ async fn setup_context( env_vars.mappings.ipfs_request_limit, ); - let arweave_resolver = Arc::new(ArweaveClient::default()); + let arweave_resolver: Arc = + Arc::new(MockArweaveResolver { + files: mock_data.arweave_files, + unresolved_tx: mock_data.arweave_unresolved_tx, + }); let arweave_service = arweave_service( - arweave_resolver.cheap_clone(), + arweave_resolver, env_vars.mappings.ipfs_request_limit, graph::components::link_resolver::FileSizeLimit::MaxBytes( env_vars.mappings.max_ipfs_file_bytes as u64, diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs index b048ac52f4c..71e1aa4d250 100644 --- a/gnd/src/commands/test/schema.rs +++ b/gnd/src/commands/test/schema.rs @@ -44,6 +44,10 @@ pub struct TestFile { #[serde(default)] pub files: Vec, + /// Mock Arweave file contents keyed by transaction ID. Used for file/arweave data sources. 
+ #[serde(default, rename = "arweaveFiles")] + pub arweave_files: Vec, + /// Ordered sequence of mock blocks to index. #[serde(default)] pub blocks: Vec, @@ -74,6 +78,59 @@ pub struct MockFile { pub file: Option, } +/// A mock Arweave file entry for file/arweave data source testing. +/// +/// Exactly one of `content` or `file` must be set. +#[derive(Debug, Clone, Deserialize)] +pub struct MockArweaveFile { + /// Arweave transaction ID or bundle path (e.g. `"txid/filename.json"`). + /// No format validation — treated as an opaque string key. + #[serde(rename = "txId")] + pub tx_id: String, + + /// Inline UTF-8 content. Exactly one of `content` or `file` must be set. + #[serde(default)] + pub content: Option, + + /// Path to a file. Resolved relative to the test JSON file. + /// Exactly one of `content` or `file` must be set. + #[serde(default)] + pub file: Option, +} + +impl MockArweaveFile { + /// Resolve this entry to bytes, given the directory of the test JSON file. + /// + /// Fails if: + /// - neither `content` nor `file` is set + /// - both `content` and `file` are set + /// - the referenced `file` path cannot be read + pub fn resolve(&self, test_dir: &Path) -> anyhow::Result { + match (&self.content, &self.file) { + (Some(content), None) => Ok(graph::bytes::Bytes::from(content.clone().into_bytes())), + (None, Some(file)) => { + let path = if Path::new(file).is_absolute() { + PathBuf::from(file) + } else { + test_dir.join(file) + }; + let data = std::fs::read(&path).map_err(|e| { + anyhow::anyhow!("Failed to read file '{}': {}", path.display(), e) + })?; + Ok(graph::bytes::Bytes::from(data)) + } + (Some(_), Some(_)) => anyhow::bail!( + "MockArweaveFile entry for txId '{}' must have either 'content' or 'file', not both", + self.tx_id + ), + (None, None) => anyhow::bail!( + "MockArweaveFile entry for txId '{}' must have either 'content' or 'file'", + self.tx_id + ), + } + } +} + impl MockFile { /// Resolve this entry to bytes, given the directory of the test 
JSON file. /// From eac4ac1b2856cd26d16234026940fae58495c3e7 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Sat, 14 Mar 2026 21:31:12 +0200 Subject: [PATCH 5/9] gnd: add file data source tests and split fixture subgraphs Add IPFS and Arweave file data source test fixtures and split the monolithic gnd_test fixture into four focused subgraphs: - token/ ERC20 events, eth_call mocking, dynamic templates - blocks/ Block handlers (every, once, polling filters) - receipts/ Transaction receipts (receipt: true handlers) - file-data-sources/ IPFS and Arweave file data sources Each fixture is a standalone subgraph with its own schema, mappings, ABIs, and test JSON files. The Rust test harness is updated to use a generic setup_fixture(name) helper, with one test function per fixture. --- .../{subgraph => blocks}/abis/ERC20.json | 0 .../fixtures/gnd_test/blocks/package.json | 10 + .../fixtures/gnd_test/blocks/schema.graphql | 14 ++ .../{subgraph => blocks}/src/blocks.ts | 0 .../fixtures/gnd_test/blocks/subgraph.yaml | 64 +++++ .../{subgraph => blocks}/tests/blocks.json | 0 .../file-data-sources/abis/FileEvents.json | 28 +++ .../gnd_test/file-data-sources/package.json | 10 + .../gnd_test/file-data-sources/schema.graphql | 9 + .../gnd_test/file-data-sources/src/files.ts | 26 ++ .../gnd_test/file-data-sources/subgraph.yaml | 53 +++++ .../file-data-sources/tests/file_arweave.json | 36 +++ .../file-data-sources/tests/file_ipfs.json | 36 +++ .../gnd_test/receipts/abis/ERC20.json | 222 ++++++++++++++++++ .../fixtures/gnd_test/receipts/package.json | 10 + .../fixtures/gnd_test/receipts/schema.graphql | 13 + .../{subgraph => receipts}/src/receipts.ts | 2 +- .../fixtures/gnd_test/receipts/subgraph.yaml | 25 ++ .../tests/receipts.json | 0 .../fixtures/gnd_test/subgraph/package.json | 19 -- .../fixtures/gnd_test/subgraph/schema.graphql | 47 ---- .../fixtures/gnd_test/subgraph/subgraph.yaml | 145 ------------ .../fixtures/gnd_test/token/abis/ERC20.json | 222 ++++++++++++++++++ 
.../abis/TokenFactory.json | 0 .../fixtures/gnd_test/token/package.json | 10 + .../fixtures/gnd_test/token/schema.graphql | 18 ++ .../{subgraph => token}/src/factory.ts | 0 .../gnd_test/{subgraph => token}/src/token.ts | 0 .../fixtures/gnd_test/token/subgraph.yaml | 68 ++++++ .../{subgraph => token}/tests/failing.json | 0 .../{subgraph => token}/tests/templates.json | 0 .../{subgraph => token}/tests/transfer.json | 0 gnd/tests/gnd_test.rs | 162 +++++++++---- 33 files changed, 986 insertions(+), 263 deletions(-) rename gnd/tests/fixtures/gnd_test/{subgraph => blocks}/abis/ERC20.json (100%) create mode 100644 gnd/tests/fixtures/gnd_test/blocks/package.json create mode 100644 gnd/tests/fixtures/gnd_test/blocks/schema.graphql rename gnd/tests/fixtures/gnd_test/{subgraph => blocks}/src/blocks.ts (100%) create mode 100644 gnd/tests/fixtures/gnd_test/blocks/subgraph.yaml rename gnd/tests/fixtures/gnd_test/{subgraph => blocks}/tests/blocks.json (100%) create mode 100644 gnd/tests/fixtures/gnd_test/file-data-sources/abis/FileEvents.json create mode 100644 gnd/tests/fixtures/gnd_test/file-data-sources/package.json create mode 100644 gnd/tests/fixtures/gnd_test/file-data-sources/schema.graphql create mode 100644 gnd/tests/fixtures/gnd_test/file-data-sources/src/files.ts create mode 100644 gnd/tests/fixtures/gnd_test/file-data-sources/subgraph.yaml create mode 100644 gnd/tests/fixtures/gnd_test/file-data-sources/tests/file_arweave.json create mode 100644 gnd/tests/fixtures/gnd_test/file-data-sources/tests/file_ipfs.json create mode 100644 gnd/tests/fixtures/gnd_test/receipts/abis/ERC20.json create mode 100644 gnd/tests/fixtures/gnd_test/receipts/package.json create mode 100644 gnd/tests/fixtures/gnd_test/receipts/schema.graphql rename gnd/tests/fixtures/gnd_test/{subgraph => receipts}/src/receipts.ts (94%) create mode 100644 gnd/tests/fixtures/gnd_test/receipts/subgraph.yaml rename gnd/tests/fixtures/gnd_test/{subgraph => receipts}/tests/receipts.json (100%) delete mode 100644 
gnd/tests/fixtures/gnd_test/subgraph/package.json delete mode 100644 gnd/tests/fixtures/gnd_test/subgraph/schema.graphql delete mode 100644 gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml create mode 100644 gnd/tests/fixtures/gnd_test/token/abis/ERC20.json rename gnd/tests/fixtures/gnd_test/{subgraph => token}/abis/TokenFactory.json (100%) create mode 100644 gnd/tests/fixtures/gnd_test/token/package.json create mode 100644 gnd/tests/fixtures/gnd_test/token/schema.graphql rename gnd/tests/fixtures/gnd_test/{subgraph => token}/src/factory.ts (100%) rename gnd/tests/fixtures/gnd_test/{subgraph => token}/src/token.ts (100%) create mode 100644 gnd/tests/fixtures/gnd_test/token/subgraph.yaml rename gnd/tests/fixtures/gnd_test/{subgraph => token}/tests/failing.json (100%) rename gnd/tests/fixtures/gnd_test/{subgraph => token}/tests/templates.json (100%) rename gnd/tests/fixtures/gnd_test/{subgraph => token}/tests/transfer.json (100%) diff --git a/gnd/tests/fixtures/gnd_test/subgraph/abis/ERC20.json b/gnd/tests/fixtures/gnd_test/blocks/abis/ERC20.json similarity index 100% rename from gnd/tests/fixtures/gnd_test/subgraph/abis/ERC20.json rename to gnd/tests/fixtures/gnd_test/blocks/abis/ERC20.json diff --git a/gnd/tests/fixtures/gnd_test/blocks/package.json b/gnd/tests/fixtures/gnd_test/blocks/package.json new file mode 100644 index 00000000000..8f9d6327502 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/blocks/package.json @@ -0,0 +1,10 @@ +{ + "name": "gnd-test-blocks", + "version": "0.1.0", + "private": true, + "devDependencies": { + "@graphprotocol/graph-cli": "0.98.1", + "@graphprotocol/graph-ts": "0.38.2", + "assemblyscript": "0.19.23" + } +} diff --git a/gnd/tests/fixtures/gnd_test/blocks/schema.graphql b/gnd/tests/fixtures/gnd_test/blocks/schema.graphql new file mode 100644 index 00000000000..53154f2b1d3 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/blocks/schema.graphql @@ -0,0 +1,14 @@ +type Block @entity(immutable: true) { + id: Bytes! + number: BigInt! 
+} + +type OnceBlock @entity(immutable: true) { + id: Bytes! + msg: String! +} + +type PollingBlock @entity(immutable: true) { + id: Bytes! + number: BigInt! +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/src/blocks.ts b/gnd/tests/fixtures/gnd_test/blocks/src/blocks.ts similarity index 100% rename from gnd/tests/fixtures/gnd_test/subgraph/src/blocks.ts rename to gnd/tests/fixtures/gnd_test/blocks/src/blocks.ts diff --git a/gnd/tests/fixtures/gnd_test/blocks/subgraph.yaml b/gnd/tests/fixtures/gnd_test/blocks/subgraph.yaml new file mode 100644 index 00000000000..a5f727da28d --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/blocks/subgraph.yaml @@ -0,0 +1,64 @@ +specVersion: 1.3.0 +schema: + file: ./schema.graphql +dataSources: + - name: EveryBlock + kind: ethereum + network: arbitrum + source: + abi: ERC20 + address: "0x0000000000000000000000000000000000000000" + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/blocks.ts + entities: + - Block + abis: + - name: ERC20 + file: ./abis/ERC20.json + blockHandlers: + - handler: handleEveryBlock + - name: BlockOnce + kind: ethereum + network: arbitrum + source: + abi: ERC20 + address: "0x0000000000000000000000000000000000000000" + startBlock: 1000 + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/blocks.ts + entities: + - OnceBlock + abis: + - name: ERC20 + file: ./abis/ERC20.json + blockHandlers: + - handler: handleOnce + filter: + kind: once + - name: BlockPolling + kind: ethereum + network: arbitrum + source: + abi: ERC20 + address: "0x0000000000000000000000000000000000000000" + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/blocks.ts + entities: + - PollingBlock + abis: + - name: ERC20 + file: ./abis/ERC20.json + blockHandlers: + - handler: handlePolling + filter: + kind: polling + every: 5 diff --git a/gnd/tests/fixtures/gnd_test/subgraph/tests/blocks.json 
b/gnd/tests/fixtures/gnd_test/blocks/tests/blocks.json similarity index 100% rename from gnd/tests/fixtures/gnd_test/subgraph/tests/blocks.json rename to gnd/tests/fixtures/gnd_test/blocks/tests/blocks.json diff --git a/gnd/tests/fixtures/gnd_test/file-data-sources/abis/FileEvents.json b/gnd/tests/fixtures/gnd_test/file-data-sources/abis/FileEvents.json new file mode 100644 index 00000000000..8a1a3861982 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/file-data-sources/abis/FileEvents.json @@ -0,0 +1,28 @@ +[ + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "string", + "name": "cid", + "type": "string" + } + ], + "name": "IpfsFileCreated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "string", + "name": "txId", + "type": "string" + } + ], + "name": "ArweaveFileCreated", + "type": "event" + } +] diff --git a/gnd/tests/fixtures/gnd_test/file-data-sources/package.json b/gnd/tests/fixtures/gnd_test/file-data-sources/package.json new file mode 100644 index 00000000000..6154ce4bee6 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/file-data-sources/package.json @@ -0,0 +1,10 @@ +{ + "name": "gnd-test-file-data-sources", + "version": "0.1.0", + "private": true, + "devDependencies": { + "@graphprotocol/graph-cli": "0.98.1", + "@graphprotocol/graph-ts": "0.38.2", + "assemblyscript": "0.19.23" + } +} diff --git a/gnd/tests/fixtures/gnd_test/file-data-sources/schema.graphql b/gnd/tests/fixtures/gnd_test/file-data-sources/schema.graphql new file mode 100644 index 00000000000..1966b71c61c --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/file-data-sources/schema.graphql @@ -0,0 +1,9 @@ +type IpfsFile @entity { + id: ID! + content: String! +} + +type ArweaveFile @entity { + id: ID! + content: String! 
+} diff --git a/gnd/tests/fixtures/gnd_test/file-data-sources/src/files.ts b/gnd/tests/fixtures/gnd_test/file-data-sources/src/files.ts new file mode 100644 index 00000000000..a5bd9ee7e7e --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/file-data-sources/src/files.ts @@ -0,0 +1,26 @@ +import { dataSource, Bytes } from "@graphprotocol/graph-ts"; +import { + IpfsFileCreated, + ArweaveFileCreated, +} from "../generated/FileEvents/FileEvents"; +import { IpfsFile, ArweaveFile } from "../generated/schema"; + +export function handleIpfsFileCreated(event: IpfsFileCreated): void { + dataSource.create("IpfsFile", [event.params.cid]); +} + +export function handleArweaveFileCreated(event: ArweaveFileCreated): void { + dataSource.create("ArweaveFile", [event.params.txId]); +} + +export function handleIpfsFile(data: Bytes): void { + let entity = new IpfsFile(dataSource.stringParam()); + entity.content = data.toString(); + entity.save(); +} + +export function handleArweaveFile(data: Bytes): void { + let entity = new ArweaveFile(dataSource.stringParam()); + entity.content = data.toString(); + entity.save(); +} diff --git a/gnd/tests/fixtures/gnd_test/file-data-sources/subgraph.yaml b/gnd/tests/fixtures/gnd_test/file-data-sources/subgraph.yaml new file mode 100644 index 00000000000..3a898f9f8d6 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/file-data-sources/subgraph.yaml @@ -0,0 +1,53 @@ +specVersion: 1.3.0 +schema: + file: ./schema.graphql +dataSources: + - name: FileEvents + kind: ethereum/contract + network: arbitrum + source: + abi: FileEvents + address: "0x0000000000000000000000000000000000000002" + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/files.ts + entities: + - IpfsFile + - ArweaveFile + abis: + - name: FileEvents + file: ./abis/FileEvents.json + eventHandlers: + - event: IpfsFileCreated(string) + handler: handleIpfsFileCreated + - event: ArweaveFileCreated(string) + handler: handleArweaveFileCreated +templates: + - 
name: IpfsFile + kind: file/ipfs + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/files.ts + entities: + - IpfsFile + abis: + - name: FileEvents + file: ./abis/FileEvents.json + handler: handleIpfsFile + - name: ArweaveFile + kind: file/arweave + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/files.ts + entities: + - ArweaveFile + abis: + - name: FileEvents + file: ./abis/FileEvents.json + handler: handleArweaveFile diff --git a/gnd/tests/fixtures/gnd_test/file-data-sources/tests/file_arweave.json b/gnd/tests/fixtures/gnd_test/file-data-sources/tests/file_arweave.json new file mode 100644 index 00000000000..708ca1a5865 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/file-data-sources/tests/file_arweave.json @@ -0,0 +1,36 @@ +{ + "name": "Arweave file data source creates entity from fetched content", + "arweaveFiles": [ + { + "txId": "8APeQ5lW0-csTcBaGdPBDLAL2ci2AT9pTn2tppGPU_8", + "content": "Hello from Arweave" + } + ], + "blocks": [ + { + "events": [ + { + "address": "0x0000000000000000000000000000000000000002", + "event": "ArweaveFileCreated(string txId)", + "params": { + "txId": "8APeQ5lW0-csTcBaGdPBDLAL2ci2AT9pTn2tppGPU_8" + } + } + ] + }, + {} + ], + "assertions": [ + { + "query": "{ arweaveFiles { id content } }", + "expected": { + "arweaveFiles": [ + { + "id": "8APeQ5lW0-csTcBaGdPBDLAL2ci2AT9pTn2tppGPU_8", + "content": "Hello from Arweave" + } + ] + } + } + ] +} diff --git a/gnd/tests/fixtures/gnd_test/file-data-sources/tests/file_ipfs.json b/gnd/tests/fixtures/gnd_test/file-data-sources/tests/file_ipfs.json new file mode 100644 index 00000000000..8fee92571b4 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/file-data-sources/tests/file_ipfs.json @@ -0,0 +1,36 @@ +{ + "name": "IPFS file data source creates entity from fetched content", + "files": [ + { + "cid": "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG", + "content": "Hello from IPFS" + } + ], + 
"blocks": [ + { + "events": [ + { + "address": "0x0000000000000000000000000000000000000002", + "event": "IpfsFileCreated(string cid)", + "params": { + "cid": "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG" + } + } + ] + }, + {} + ], + "assertions": [ + { + "query": "{ ipfsFiles { id content } }", + "expected": { + "ipfsFiles": [ + { + "id": "QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG", + "content": "Hello from IPFS" + } + ] + } + } + ] +} diff --git a/gnd/tests/fixtures/gnd_test/receipts/abis/ERC20.json b/gnd/tests/fixtures/gnd_test/receipts/abis/ERC20.json new file mode 100644 index 00000000000..405d6b36486 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/receipts/abis/ERC20.json @@ -0,0 +1,222 @@ +[ + { + "constant": true, + "inputs": [], + "name": "name", + "outputs": [ + { + "name": "", + "type": "string" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_spender", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_from", + "type": "address" + }, + { + "name": "_to", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "decimals", + "outputs": [ + { + "name": "", + "type": "uint8" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": true, + "inputs": 
[ + { + "name": "_owner", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "name": "balance", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "symbol", + "outputs": [ + { + "name": "", + "type": "string" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_to", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "_owner", + "type": "address" + }, + { + "name": "_spender", + "type": "address" + } + ], + "name": "allowance", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "payable": true, + "stateMutability": "payable", + "type": "fallback" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "from", + "type": "address" + }, + { + "indexed": true, + "name": "to", + "type": "address" + }, + { + "indexed": false, + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + } +] diff --git a/gnd/tests/fixtures/gnd_test/receipts/package.json b/gnd/tests/fixtures/gnd_test/receipts/package.json new file mode 100644 index 00000000000..c04018dd36c --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/receipts/package.json @@ -0,0 +1,10 @@ +{ + "name": "gnd-test-receipts", + "version": "0.1.0", + 
"private": true, + "devDependencies": { + "@graphprotocol/graph-cli": "0.98.1", + "@graphprotocol/graph-ts": "0.38.2", + "assemblyscript": "0.19.23" + } +} diff --git a/gnd/tests/fixtures/gnd_test/receipts/schema.graphql b/gnd/tests/fixtures/gnd_test/receipts/schema.graphql new file mode 100644 index 00000000000..009205bf890 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/receipts/schema.graphql @@ -0,0 +1,13 @@ +type Transfer @entity(immutable: true) { + id: Bytes! + from: Bytes! + to: Bytes! + value: BigInt! +} + +type Approval @entity(immutable: true) { + id: Bytes! + owner: Bytes! + spender: Bytes! + value: BigInt! +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/src/receipts.ts b/gnd/tests/fixtures/gnd_test/receipts/src/receipts.ts similarity index 94% rename from gnd/tests/fixtures/gnd_test/subgraph/src/receipts.ts rename to gnd/tests/fixtures/gnd_test/receipts/src/receipts.ts index 7ee1291b576..3a7934c6d55 100644 --- a/gnd/tests/fixtures/gnd_test/subgraph/src/receipts.ts +++ b/gnd/tests/fixtures/gnd_test/receipts/src/receipts.ts @@ -1,4 +1,4 @@ -import { Transfer as TransferEvent } from '../generated/StandardToken/ERC20' +import { Transfer as TransferEvent } from '../generated/Receipts/ERC20' import { Transfer, Approval } from '../generated/schema' import { Bytes, ethereum, log } from "@graphprotocol/graph-ts"; diff --git a/gnd/tests/fixtures/gnd_test/receipts/subgraph.yaml b/gnd/tests/fixtures/gnd_test/receipts/subgraph.yaml new file mode 100644 index 00000000000..5913550c427 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/receipts/subgraph.yaml @@ -0,0 +1,25 @@ +specVersion: 1.3.0 +schema: + file: ./schema.graphql +dataSources: + - name: Receipts + kind: ethereum/contract + network: arbitrum + source: + abi: ERC20 + address: "0x000000000000000000000000000000000000000a" + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/receipts.ts + entities: + - Transfer + - Approval + abis: + - name: ERC20 + file: 
./abis/ERC20.json + eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleEventsWithReceipts + receipt: true diff --git a/gnd/tests/fixtures/gnd_test/subgraph/tests/receipts.json b/gnd/tests/fixtures/gnd_test/receipts/tests/receipts.json similarity index 100% rename from gnd/tests/fixtures/gnd_test/subgraph/tests/receipts.json rename to gnd/tests/fixtures/gnd_test/receipts/tests/receipts.json diff --git a/gnd/tests/fixtures/gnd_test/subgraph/package.json b/gnd/tests/fixtures/gnd_test/subgraph/package.json deleted file mode 100644 index 96118f38494..00000000000 --- a/gnd/tests/fixtures/gnd_test/subgraph/package.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "name": "gnd-example-subgraph", - "version": "0.1.0", - "private": true, - "scripts": { - "auth": "graph auth https://api.thegraph.com/deploy/", - "codegen": "graph codegen", - "build": "graph build", - "deploy": "graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ graphprotocol/erc20-subgraph", - "create-local": "graph create --node http://localhost:8020/ graphprotocol/erc20-subgraph", - "remove-local": "graph remove --node http://localhost:8020/ graphprotocol/erc20-subgraph", - "deploy-local": "graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 graphprotocol/erc20-subgraph" - }, - "devDependencies": { - "@graphprotocol/graph-cli": "0.98.1", - "@graphprotocol/graph-ts": "0.38.2", - "assemblyscript": "0.19.23" - } -} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/schema.graphql b/gnd/tests/fixtures/gnd_test/subgraph/schema.graphql deleted file mode 100644 index 37ec979870d..00000000000 --- a/gnd/tests/fixtures/gnd_test/subgraph/schema.graphql +++ /dev/null @@ -1,47 +0,0 @@ -type Account @entity(immutable: false) { - id: Bytes! # Address - balances: [Balance!]! @derivedFrom(field: "account") -} - -type Token @entity(immutable: true) { - id: Bytes! # Address - name: String! - symbol: String! - decimals: Int! 
-} - -type Balance @entity(immutable: false) { - id: Bytes! # Account address + token address - token: Token! - account: Account! - amount: BigInt -} - -type Block @entity(immutable: true) { - id: Bytes! - number: BigInt! -} - -type OnceBlock @entity(immutable: true) { - id: Bytes! - msg: String! -} - -type PollingBlock @entity(immutable: true) { - id: Bytes! - number: BigInt! -} - -type Transfer @entity(immutable: true) { - id: Bytes! - from: Bytes! - to: Bytes! - value: BigInt! -} - -type Approval @entity(immutable: true) { - id: Bytes! - owner: Bytes! - spender: Bytes! - value: BigInt! -} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml b/gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml deleted file mode 100644 index 46fe971c642..00000000000 --- a/gnd/tests/fixtures/gnd_test/subgraph/subgraph.yaml +++ /dev/null @@ -1,145 +0,0 @@ -specVersion: 1.3.0 -schema: - file: ./schema.graphql -dataSources: - - name: StandardToken - kind: ethereum/contract - network: arbitrum - source: - abi: ERC20 - address: "0x9623063377AD1B27544C965cCd7342f7EA7e88C7" - mapping: - kind: ethereum/events - apiVersion: 0.0.9 - language: wasm/assemblyscript - file: ./src/token.ts - entities: - - TransferEvent - abis: - - name: ERC20 - file: ./abis/ERC20.json - eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransfer - - name: Receipts - kind: ethereum/contract - network: arbitrum - source: - abi: ERC20 - address: "0x000000000000000000000000000000000000000a" - mapping: - kind: ethereum/events - apiVersion: 0.0.9 - language: wasm/assemblyscript - file: ./src/receipts.ts - entities: - - Transfer - - Approve - abis: - - name: ERC20 - file: ./abis/ERC20.json - eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleEventsWithReceipts - receipt: true - - name: EveryBlock - kind: ethereum - network: arbitrum - source: - abi: ERC20 - address: "0x0000000000000000000000000000000000000000" - mapping: - 
kind: ethereum/events - apiVersion: 0.0.9 - language: wasm/assemblyscript - file: ./src/blocks.ts - entities: - - Block - abis: - - name: ERC20 - file: ./abis/ERC20.json - blockHandlers: - - handler: handleEveryBlock - - name: BlockOnce - kind: ethereum - network: arbitrum - source: - abi: ERC20 - address: "0x0000000000000000000000000000000000000000" - startBlock: 1000 - mapping: - kind: ethereum/events - apiVersion: 0.0.9 - language: wasm/assemblyscript - file: ./src/blocks.ts - entities: - - BlockOnce - abis: - - name: ERC20 - file: ./abis/ERC20.json - blockHandlers: - - handler: handleOnce - filter: - kind: once - - name: BlockPolling - kind: ethereum - network: arbitrum - source: - abi: ERC20 - address: "0x0000000000000000000000000000000000000000" - mapping: - kind: ethereum/events - apiVersion: 0.0.9 - language: wasm/assemblyscript - file: ./src/blocks.ts - entities: - - PollingBlock - abis: - - name: ERC20 - file: ./abis/ERC20.json - blockHandlers: - - handler: handlePolling - filter: - kind: polling - every: 5 - - name: Factory - kind: ethereum/contract - network: arbitrum - source: - abi: TokenFactory - address: "0x0000000000000000000000000000000000000001" - startBlock: 2000 - mapping: - kind: ethereum/events - apiVersion: 0.0.9 - language: wasm/assemblyscript - file: ./src/factory.ts - entities: - - FactoryToken - abis: - - name: TokenFactory - file: ./abis/TokenFactory.json - - name: ERC20 - file: ./abis/ERC20.json - eventHandlers: - - event: FactoryTokenCreated(indexed address) - handler: handleTokenCreated -templates: - - name: FactoryToken - kind: ethereum/contract - network: arbitrum - source: - abi: ERC20 - mapping: - kind: ethereum/events - apiVersion: 0.0.9 - language: wasm/assemblyscript - file: ./src/token.ts - entities: - - FactoryTokenTransfer - abis: - - name: ERC20 - file: ./abis/ERC20.json - eventHandlers: - - event: Transfer(indexed address,indexed address,uint256) - handler: handleTransfer diff --git 
a/gnd/tests/fixtures/gnd_test/token/abis/ERC20.json b/gnd/tests/fixtures/gnd_test/token/abis/ERC20.json new file mode 100644 index 00000000000..405d6b36486 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/token/abis/ERC20.json @@ -0,0 +1,222 @@ +[ + { + "constant": true, + "inputs": [], + "name": "name", + "outputs": [ + { + "name": "", + "type": "string" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_spender", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "approve", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "totalSupply", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + "name": "_from", + "type": "address" + }, + { + "name": "_to", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "transferFrom", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "decimals", + "outputs": [ + { + "name": "", + "type": "uint8" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "_owner", + "type": "address" + } + ], + "name": "balanceOf", + "outputs": [ + { + "name": "balance", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": true, + "inputs": [], + "name": "symbol", + "outputs": [ + { + "name": "", + "type": "string" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "constant": false, + "inputs": [ + { + 
"name": "_to", + "type": "address" + }, + { + "name": "_value", + "type": "uint256" + } + ], + "name": "transfer", + "outputs": [ + { + "name": "", + "type": "bool" + } + ], + "payable": false, + "stateMutability": "nonpayable", + "type": "function" + }, + { + "constant": true, + "inputs": [ + { + "name": "_owner", + "type": "address" + }, + { + "name": "_spender", + "type": "address" + } + ], + "name": "allowance", + "outputs": [ + { + "name": "", + "type": "uint256" + } + ], + "payable": false, + "stateMutability": "view", + "type": "function" + }, + { + "payable": true, + "stateMutability": "payable", + "type": "fallback" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "owner", + "type": "address" + }, + { + "indexed": true, + "name": "spender", + "type": "address" + }, + { + "indexed": false, + "name": "value", + "type": "uint256" + } + ], + "name": "Approval", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "name": "from", + "type": "address" + }, + { + "indexed": true, + "name": "to", + "type": "address" + }, + { + "indexed": false, + "name": "value", + "type": "uint256" + } + ], + "name": "Transfer", + "type": "event" + } +] diff --git a/gnd/tests/fixtures/gnd_test/subgraph/abis/TokenFactory.json b/gnd/tests/fixtures/gnd_test/token/abis/TokenFactory.json similarity index 100% rename from gnd/tests/fixtures/gnd_test/subgraph/abis/TokenFactory.json rename to gnd/tests/fixtures/gnd_test/token/abis/TokenFactory.json diff --git a/gnd/tests/fixtures/gnd_test/token/package.json b/gnd/tests/fixtures/gnd_test/token/package.json new file mode 100644 index 00000000000..69ecc1c9311 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/token/package.json @@ -0,0 +1,10 @@ +{ + "name": "gnd-test-token", + "version": "0.1.0", + "private": true, + "devDependencies": { + "@graphprotocol/graph-cli": "0.98.1", + "@graphprotocol/graph-ts": "0.38.2", + "assemblyscript": "0.19.23" + } +} diff --git 
a/gnd/tests/fixtures/gnd_test/token/schema.graphql b/gnd/tests/fixtures/gnd_test/token/schema.graphql new file mode 100644 index 00000000000..4f4885fe848 --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/token/schema.graphql @@ -0,0 +1,18 @@ +type Token @entity(immutable: true) { + id: Bytes! # Address + name: String! + symbol: String! + decimals: Int! +} + +type Account @entity(immutable: false) { + id: Bytes! # Address + balances: [Balance!]! @derivedFrom(field: "account") +} + +type Balance @entity(immutable: false) { + id: Bytes! # Account address + token address + token: Token! + account: Account! + amount: BigInt +} diff --git a/gnd/tests/fixtures/gnd_test/subgraph/src/factory.ts b/gnd/tests/fixtures/gnd_test/token/src/factory.ts similarity index 100% rename from gnd/tests/fixtures/gnd_test/subgraph/src/factory.ts rename to gnd/tests/fixtures/gnd_test/token/src/factory.ts diff --git a/gnd/tests/fixtures/gnd_test/subgraph/src/token.ts b/gnd/tests/fixtures/gnd_test/token/src/token.ts similarity index 100% rename from gnd/tests/fixtures/gnd_test/subgraph/src/token.ts rename to gnd/tests/fixtures/gnd_test/token/src/token.ts diff --git a/gnd/tests/fixtures/gnd_test/token/subgraph.yaml b/gnd/tests/fixtures/gnd_test/token/subgraph.yaml new file mode 100644 index 00000000000..081b071704b --- /dev/null +++ b/gnd/tests/fixtures/gnd_test/token/subgraph.yaml @@ -0,0 +1,68 @@ +specVersion: 1.3.0 +schema: + file: ./schema.graphql +dataSources: + - name: StandardToken + kind: ethereum/contract + network: arbitrum + source: + abi: ERC20 + address: "0x9623063377AD1B27544C965cCd7342f7EA7e88C7" + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/token.ts + entities: + - Token + - Account + - Balance + abis: + - name: ERC20 + file: ./abis/ERC20.json + eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleTransfer + - name: Factory + kind: ethereum/contract + network: arbitrum + source: + abi: 
TokenFactory + address: "0x0000000000000000000000000000000000000001" + startBlock: 2000 + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/factory.ts + entities: + - Token + abis: + - name: TokenFactory + file: ./abis/TokenFactory.json + - name: ERC20 + file: ./abis/ERC20.json + eventHandlers: + - event: FactoryTokenCreated(indexed address) + handler: handleTokenCreated +templates: + - name: FactoryToken + kind: ethereum/contract + network: arbitrum + source: + abi: ERC20 + mapping: + kind: ethereum/events + apiVersion: 0.0.9 + language: wasm/assemblyscript + file: ./src/token.ts + entities: + - Token + - Account + - Balance + abis: + - name: ERC20 + file: ./abis/ERC20.json + eventHandlers: + - event: Transfer(indexed address,indexed address,uint256) + handler: handleTransfer diff --git a/gnd/tests/fixtures/gnd_test/subgraph/tests/failing.json b/gnd/tests/fixtures/gnd_test/token/tests/failing.json similarity index 100% rename from gnd/tests/fixtures/gnd_test/subgraph/tests/failing.json rename to gnd/tests/fixtures/gnd_test/token/tests/failing.json diff --git a/gnd/tests/fixtures/gnd_test/subgraph/tests/templates.json b/gnd/tests/fixtures/gnd_test/token/tests/templates.json similarity index 100% rename from gnd/tests/fixtures/gnd_test/subgraph/tests/templates.json rename to gnd/tests/fixtures/gnd_test/token/tests/templates.json diff --git a/gnd/tests/fixtures/gnd_test/subgraph/tests/transfer.json b/gnd/tests/fixtures/gnd_test/token/tests/transfer.json similarity index 100% rename from gnd/tests/fixtures/gnd_test/subgraph/tests/transfer.json rename to gnd/tests/fixtures/gnd_test/token/tests/transfer.json diff --git a/gnd/tests/gnd_test.rs b/gnd/tests/gnd_test.rs index c1c8fc79e8b..4d5bdb47511 100644 --- a/gnd/tests/gnd_test.rs +++ b/gnd/tests/gnd_test.rs @@ -1,19 +1,16 @@ //! Integration tests for `gnd test` — the mock-based subgraph test runner. //! -//! These tests verify that `gnd test` can: -//! 
- Build and run fixture subgraph tests end-to-end -//! - Execute individual test files -//! - Report correct pass/fail counts +//! Each fixture under `tests/fixtures/gnd_test/` covers one focused area: //! -//! The fixture subgraph at `tests/fixtures/gnd_test/subgraph/` covers: -//! - Event handling with eth_call mocking (transfer.json) -//! - Block handlers with various filters (blocks.json) -//! - Dynamic data source templates (templates.json) +//! - `token/` — ERC20 event handling, eth_call mocking, dynamic templates +//! - `blocks/` — Block handlers (`every`, `once`, `polling` filters) +//! - `receipts/` — Transaction receipts (`receipt: true` handlers) +//! - `file-data-sources/` — IPFS and Arweave file data sources //! //! # Prerequisites //! //! - Build the gnd binary: `cargo build -p gnd` -//! - AssemblyScript compiler (`asc`) in PATH +//! - AssemblyScript compiler (`asc`) in PATH or local `node_modules/.bin` //! - pnpm available for dependency installation //! //! # Running @@ -33,24 +30,35 @@ use std::process::Command; use tempfile::TempDir; use walkdir::WalkDir; -/// Copy the fixture subgraph into a fresh temp directory, install pnpm -/// dependencies, and run `gnd codegen`. Returns the temp dir handle (to -/// keep it alive) and the path to the prepared subgraph directory. -fn setup_fixture() -> (TempDir, PathBuf) { +// ============================================================================ +// Shared helpers +// ============================================================================ + +fn fixtures_root() -> PathBuf { + PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("tests") + .join("fixtures") + .join("gnd_test") +} + +/// Copy a named fixture into a fresh temp directory, install pnpm dependencies, +/// and run `gnd codegen`. Returns the temp dir handle (to keep it alive) and +/// the path to the prepared subgraph directory. 
+fn setup_fixture(name: &str) -> (TempDir, PathBuf) { let temp_dir = TempDir::new().expect("Failed to create temp directory"); - let subgraph_dir = temp_dir.path().join("subgraph"); + let subgraph_dir = temp_dir.path().join(name); fs::create_dir_all(&subgraph_dir).unwrap(); - let fixture = fixture_path(); + let fixture = fixtures_root().join(name); assert!( fixture.exists(), - "Fixture not found at {}", + "Fixture '{}' not found at {}", + name, fixture.display() ); copy_dir_recursive(&fixture, &subgraph_dir).expect("Failed to copy fixture to temp directory"); - // Install dependencies (graph-ts, graph-cli) let npm_output = Command::new("pnpm") .arg("install") .current_dir(&subgraph_dir) @@ -59,7 +67,8 @@ fn setup_fixture() -> (TempDir, PathBuf) { assert!( npm_output.status.success(), - "pnpm install failed in fixture:\nstdout: {}\nstderr: {}", + "pnpm install failed in fixture '{}':\nstdout: {}\nstderr: {}", + name, String::from_utf8_lossy(&npm_output.stdout), String::from_utf8_lossy(&npm_output.stderr), ); @@ -75,7 +84,8 @@ fn setup_fixture() -> (TempDir, PathBuf) { assert!( codegen_output.status.success(), - "gnd codegen failed in fixture:\nstdout: {}\nstderr: {}", + "gnd codegen failed in fixture '{}':\nstdout: {}\nstderr: {}", + name, String::from_utf8_lossy(&codegen_output.stdout), String::from_utf8_lossy(&codegen_output.stderr), ); @@ -83,7 +93,6 @@ fn setup_fixture() -> (TempDir, PathBuf) { (temp_dir, subgraph_dir) } -/// Get the path to the gnd binary. fn gnd_binary_path() -> PathBuf { let manifest_dir = env!("CARGO_MANIFEST_DIR"); PathBuf::from(manifest_dir) @@ -94,7 +103,6 @@ fn gnd_binary_path() -> PathBuf { .join("gnd") } -/// Verify the gnd binary exists, panic with a helpful message if not. fn verify_gnd_binary() -> PathBuf { let gnd_path = gnd_binary_path(); assert!( @@ -105,16 +113,6 @@ fn verify_gnd_binary() -> PathBuf { gnd_path } -/// Get the path to the gnd_test fixture subgraph. 
-fn fixture_path() -> PathBuf { - PathBuf::from(env!("CARGO_MANIFEST_DIR")) - .join("tests") - .join("fixtures") - .join("gnd_test") - .join("subgraph") -} - -/// Assert that `asc` (AssemblyScript compiler) is available in PATH or in local node_modules. fn verify_asc_available(subgraph_dir: &Path) { assert!( gnd::compiler::find_asc_binary(subgraph_dir).is_some(), @@ -124,7 +122,6 @@ fn verify_asc_available(subgraph_dir: &Path) { ); } -/// Copy a directory recursively. fn copy_dir_recursive(src: &Path, dst: &Path) -> std::io::Result<()> { for entry in WalkDir::new(src).min_depth(1) { let entry = entry?; @@ -140,8 +137,6 @@ fn copy_dir_recursive(src: &Path, dst: &Path) -> std::io::Result<()> { Ok(()) } -/// Run `gnd test` with the given args in the given directory. -/// Returns the Output (status, stdout, stderr). fn run_gnd_test(args: &[&str], cwd: &Path) -> std::process::Output { let gnd = verify_gnd_binary(); let mut cmd = Command::new(&gnd); @@ -160,39 +155,30 @@ fn run_gnd_test(args: &[&str], cwd: &Path) -> std::process::Output { } // ============================================================================ -// gnd test — run all fixture tests +// token — ERC20 events, eth_call mocking, dynamic templates // ============================================================================ #[test] -fn test_gnd_test_all() { - let (_temp_dir, subgraph_dir) = setup_fixture(); +fn test_token_transfer_and_templates() { + let (_temp_dir, subgraph_dir) = setup_fixture("token"); - // Run only the passing test files (exclude failing.json which is used by the negative test). 
let output = run_gnd_test( - &[ - "tests/transfer.json", - "tests/blocks.json", - "tests/templates.json", - ], + &["tests/transfer.json", "tests/templates.json"], &subgraph_dir, ); assert!( output.status.success(), - "gnd test failed with exit code: {:?}\nstdout: {}\nstderr: {}", - output.status.code(), + "gnd test failed for token fixture\nstdout: {}\nstderr: {}", String::from_utf8_lossy(&output.stdout), String::from_utf8_lossy(&output.stderr), ); } -// ============================================================================ -// gnd test — verify failure on wrong assertions -// ============================================================================ - +/// Verifies that `gnd test` exits with a non-zero code when an assertion fails. #[test] -fn test_gnd_test_failing_assertions() { - let (_temp_dir, subgraph_dir) = setup_fixture(); +fn test_token_failing_assertion() { + let (_temp_dir, subgraph_dir) = setup_fixture("token"); let output = run_gnd_test(&["tests/failing.json"], &subgraph_dir); @@ -203,3 +189,77 @@ fn test_gnd_test_failing_assertions() { String::from_utf8_lossy(&output.stderr), ); } + +// ============================================================================ +// blocks — block handlers with every/once/polling filters +// ============================================================================ + +#[test] +fn test_blocks() { + let (_temp_dir, subgraph_dir) = setup_fixture("blocks"); + + let output = run_gnd_test(&["tests/blocks.json"], &subgraph_dir); + + assert!( + output.status.success(), + "gnd test failed for blocks fixture\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), + ); +} + +// ============================================================================ +// receipts — receipt: true event handlers +// ============================================================================ + +#[test] +fn test_receipts() { + let (_temp_dir, subgraph_dir) = 
setup_fixture("receipts"); + + let output = run_gnd_test(&["tests/receipts.json"], &subgraph_dir); + + assert!( + output.status.success(), + "gnd test failed for receipts fixture\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), + ); +} + +// ============================================================================ +// file-data-sources — IPFS and Arweave file data sources +// ============================================================================ + +/// Verifies that an event handler can spawn a `file/ipfs` data source, the mock +/// IPFS client serves the pre-loaded content, and the file handler writes an +/// entity whose content matches the mocked bytes. +#[test] +fn test_file_ipfs() { + let (_temp_dir, subgraph_dir) = setup_fixture("file-data-sources"); + + let output = run_gnd_test(&["tests/file_ipfs.json"], &subgraph_dir); + + assert!( + output.status.success(), + "gnd test failed for file_ipfs.json\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), + ); +} + +/// Verifies that an event handler can spawn a `file/arweave` data source, the +/// mock Arweave resolver serves the pre-loaded content, and the file handler +/// writes an entity whose content matches the mocked bytes. 
+#[test] +fn test_file_arweave() { + let (_temp_dir, subgraph_dir) = setup_fixture("file-data-sources"); + + let output = run_gnd_test(&["tests/file_arweave.json"], &subgraph_dir); + + assert!( + output.status.success(), + "gnd test failed for file_arweave.json\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), + ); +} From c15b2a85cddab3494ba7c81af18f6c54cb6ec429 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Sat, 14 Mar 2026 21:50:29 +0200 Subject: [PATCH 6/9] gnd: trim verbose inline comments in test module --- gnd/src/commands/test/assertion.rs | 25 ++---- gnd/src/commands/test/block_stream.rs | 12 +-- gnd/src/commands/test/eth_calls.rs | 20 ++--- gnd/src/commands/test/mod.rs | 3 +- gnd/src/commands/test/noop.rs | 10 +-- gnd/src/commands/test/runner.rs | 112 ++++++++------------------ gnd/src/commands/test/schema.rs | 61 +++++--------- gnd/src/commands/test/trigger.rs | 65 ++++----------- 8 files changed, 91 insertions(+), 217 deletions(-) diff --git a/gnd/src/commands/test/assertion.rs b/gnd/src/commands/test/assertion.rs index 01cacabc8a9..8bb6f9ee302 100644 --- a/gnd/src/commands/test/assertion.rs +++ b/gnd/src/commands/test/assertion.rs @@ -77,13 +77,10 @@ async fn run_single_assertion( } } -/// Reorder `actual` arrays to align with `expected`'s element ordering. +/// Reorder `actual` arrays to match `expected`'s element order for cleaner diffs. /// -/// When a test fails, the raw diff can be misleading if array elements appear -/// in a different order — every line shows as changed even if only one field -/// differs. This function reorders `actual` so that elements are paired with -/// their closest match in `expected`, producing a diff that highlights only -/// real value differences. +/// Without this, out-of-order elements show every field as changed even when +/// only one field differs. 
pub(super) fn align_for_diff( expected: &serde_json::Value, actual: &serde_json::Value, @@ -132,12 +129,8 @@ pub(super) fn align_for_diff( } } -/// Score how similar two JSON values are for use in [`align_for_diff`]. -/// -/// For objects, counts the number of fields whose values are equal in both. -/// A matching `"id"` field is weighted heavily (+100) since it is the -/// strongest signal that two objects represent the same entity. -/// For all other value types, returns 1 if equal, 0 otherwise. +/// Score JSON similarity for [`align_for_diff`]. +/// Objects: matching `"id"` = 100, other equal fields = 1. Non-objects: 0 or 1. fn json_similarity(a: &serde_json::Value, b: &serde_json::Value) -> usize { match (a, b) { (serde_json::Value::Object(a_obj), serde_json::Value::Object(b_obj)) => { @@ -162,13 +155,9 @@ fn json_similarity(a: &serde_json::Value, b: &serde_json::Value) -> usize { } } -/// Compare two JSON values for equality (ignoring key ordering in objects). +/// Compare JSON values for equality, ignoring object key ordering. /// -/// Also handles string-vs-number coercion: GraphQL returns `BigInt` and -/// `BigDecimal` fields as JSON strings (e.g., `"1000000000000000000"`), -/// but test authors may write them as JSON numbers. This function treats -/// `String("123")` and `Number(123)` as equal when they represent the -/// same value. +/// Coerces string/number: `"123"` == `123` to handle GraphQL `BigInt`/`BigDecimal`. fn json_equal(a: &serde_json::Value, b: &serde_json::Value) -> bool { match (a, b) { (serde_json::Value::Null, serde_json::Value::Null) => true, diff --git a/gnd/src/commands/test/block_stream.rs b/gnd/src/commands/test/block_stream.rs index e07eb9aa104..cd8dd8642c3 100644 --- a/gnd/src/commands/test/block_stream.rs +++ b/gnd/src/commands/test/block_stream.rs @@ -63,21 +63,15 @@ impl BlockStreamBuilder for StaticStreamBuilder { } } -/// A `Stream` that synchronously yields pre-defined blocks one at a time. 
-/// -/// Each `poll_next` call returns the next block immediately (no async waiting). -/// When all blocks have been emitted, returns `None` to signal stream completion, -/// which tells the indexer that sync is done. +/// A `Stream` that yields pre-defined blocks synchronously. +/// Returns `None` when all blocks are emitted, signaling sync completion. struct StaticStream { blocks: Vec>, current_idx: usize, } impl StaticStream { - /// Create a new stream, optionally skipping past already-processed blocks. - /// - /// `skip_to`: If `Some(i)`, start from block `i+1` (block `i` was already processed). - /// If `None`, start from the beginning. + /// `skip_to`: if `Some(i)`, start from block `i+1` (block `i` already processed). fn new(blocks: Vec>, skip_to: Option) -> Self { Self { blocks, diff --git a/gnd/src/commands/test/eth_calls.rs b/gnd/src/commands/test/eth_calls.rs index 60883d70193..39ee0aa8315 100644 --- a/gnd/src/commands/test/eth_calls.rs +++ b/gnd/src/commands/test/eth_calls.rs @@ -1,9 +1,7 @@ -//! Pre-populates the eth_call cache with mock responses for `gnd test`. +//! Populates the eth_call cache with mock responses for `gnd test`. //! -//! Function signatures use graph-node's convention: `name(inputs):(outputs)` -//! e.g. `"balanceOf(address):(uint256)"`, `"getReserves():(uint112,uint112,uint32)"`. -//! Call data is encoded using the same path as production graph-node, so cache -//! IDs match exactly what the runtime generates. +//! Signatures use graph-node's `name(inputs):(outputs)` convention. +//! Encoding matches production graph-node so cache IDs align with the runtime. use super::schema::{MockEthCall, TestFile}; use super::trigger::json_to_sol_value; @@ -92,16 +90,8 @@ fn encode_return_value(function_sig: &str, returns: &[serde_json::Value]) -> Res .map_err(|e| anyhow!("Failed to encode return value: {}", e)) } -/// Convert a graph-node style function signature to alloy's expected format. 
-/// -/// Graph-node uses `name(inputs):(outputs)` while alloy expects -/// `name(inputs) returns (outputs)`. -/// -/// Examples: -/// - `"balanceOf(address):(uint256)"` → `"balanceOf(address) returns (uint256)"` -/// - `"name():(string)"` → `"name() returns (string)"` -/// - `"transfer(address,uint256)"` → `"transfer(address,uint256)"` (no change) -/// - `"balanceOf(address) returns (uint256)"` → unchanged (already alloy format) +/// Convert graph-node `name(inputs):(outputs)` to alloy `name(inputs) returns (outputs)`. +/// Passes through signatures already in alloy format or without outputs. fn to_alloy_signature(sig: &str) -> String { // If it already contains "returns", assume alloy format. if sig.contains(" returns ") { diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs index 67e14377bf7..e13a2fde6ea 100644 --- a/gnd/src/commands/test/mod.rs +++ b/gnd/src/commands/test/mod.rs @@ -112,8 +112,7 @@ pub async fn run_test(opt: TestOpt) -> Result<()> { return matchstick::run(&opt).await; } - // Build the subgraph first so the WASM and schema are available in build/. - // This mirrors what a user would do manually before running tests. + // Build the subgraph first (WASM and schema must be available in build/). if !opt.skip_build { step(Step::Generate, "Building subgraph"); let build_opt = crate::commands::BuildOpt { diff --git a/gnd/src/commands/test/noop.rs b/gnd/src/commands/test/noop.rs index b686177b795..069ab45bcbd 100644 --- a/gnd/src/commands/test/noop.rs +++ b/gnd/src/commands/test/noop.rs @@ -1,10 +1,8 @@ -//! Noop/stub trait implementations for the mock `Chain`. +//! Noop trait implementations for the mock `Chain`. //! -//! These types satisfy the trait bounds required by the `Chain` constructor -//! but are never called during normal test execution because: -//! - Triggers are provided directly via `StaticStreamBuilder` (no scanning needed) -//! - The real `EthereumRuntimeAdapterBuilder` is used for host functions -//! 
(ethereum.call, ethereum.getBalance, ethereum.hasCode), backed by the call cache +//! Satisfy `Chain` constructor bounds but are never called: +//! - Triggers come from `StaticStreamBuilder` (no scanning) +//! - Host functions use `EthereumRuntimeAdapterBuilder` with the eth_call cache use async_trait::async_trait; use graph::blockchain::block_stream::{BlockRefetcher, BlockWithTriggers, FirehoseCursor}; diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs index 1d6ec4dae58..bc86fe2726e 100644 --- a/gnd/src/commands/test/runner.rs +++ b/gnd/src/commands/test/runner.rs @@ -76,8 +76,7 @@ fn make_test_logger(verbose: u8) -> Logger { 0 => graph::log::discard(), 1 => graph::log::logger_with_levels(false, None), 2 => graph::log::logger_with_levels(true, None), - // "trace" is parsed by slog_envlogger::LogBuilder::parse() as a global - // level filter — equivalent to setting GRAPH_LOG=trace. + // "trace" sets GRAPH_LOG=trace globally via slog_envlogger. _ => graph::log::logger_with_levels(true, Some("trace")), } } @@ -106,14 +105,11 @@ pub(super) struct TestContext { pub(super) graphql_runner: Arc>, } -/// Pre-computed manifest data shared across all tests in a run. -/// -/// Loaded once to avoid redundant parsing. +/// Manifest data shared across all tests in a run. pub(super) struct ManifestInfo { pub build_dir: PathBuf, - /// Canonical path to the built manifest file (e.g., `build/subgraph.yaml`). - /// Registered as an alias for `hash` in `FileLinkResolver` so that - /// `clone_for_manifest` can resolve the Qm hash to a real filesystem path. + /// Built manifest path. Aliased to `hash` in `FileLinkResolver` so the Qm + /// hash resolves to a real filesystem path. pub manifest_path: PathBuf, pub network_name: ChainName, pub min_start_block: u64, @@ -127,10 +123,7 @@ pub(super) struct ManifestInfo { std::collections::HashSet, } -/// Compute a `DeploymentHash` from a path and seed. -/// -/// Produces `"Qm" + hex(sha1(path + '\0' + seed))`. 
The seed makes each run -/// produce a distinct hash so sequential runs never collide in the store. +/// Compute `"Qm" + hex(sha1(path + '\0' + seed))`. Seed ensures per-run uniqueness. fn deployment_hash_from_path_and_seed(path: &Path, seed: u128) -> Result { use sha1::{Digest, Sha1}; @@ -177,8 +170,7 @@ pub(super) fn load_manifest_info(opt: &TestOpt) -> Result { None }; - // Use Unix epoch millis as a per-run seed so each invocation gets a unique - // deployment hash and subgraph name, avoiding conflicts with previous runs. + // Per-run seed for unique deployment hash and subgraph name. let seed = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap_or_default() @@ -186,8 +178,7 @@ pub(super) fn load_manifest_info(opt: &TestOpt) -> Result { let hash = deployment_hash_from_path_and_seed(&built_manifest_path, seed)?; - // Derive subgraph name from the root directory (e.g., "my-subgraph" → "test/my-subgraph-"). - // Sanitize to alphanumeric + hyphens + underscores for SubgraphName compatibility. + // Derive subgraph name from the root dir, sanitized for SubgraphName compatibility. let root_dir_name = manifest_dir .canonicalize() .unwrap_or(manifest_dir.clone()) @@ -214,15 +205,10 @@ pub(super) fn load_manifest_info(opt: &TestOpt) -> Result { }) } -/// Collect the event selectors (topic0) for all handlers that declare `receipt: true`. +/// Collect topic0 selectors for handlers declaring `receipt: true`. /// -/// Covers both data sources and templates. Only logs whose topic0 appears in -/// this set will have a non-null receipt attached to their trigger. -/// -/// The selector is computed using the same method as graph-node's -/// `MappingEventHandler::topic0()`: strip `"indexed "` then all spaces, then -/// keccak256. This handles the manifest format `Transfer(indexed address,...)` -/// where `indexed` precedes the type rather than following it. +/// Covers data sources and templates. 
Mirrors `MappingEventHandler::topic0()`: +/// strip `"indexed "` and spaces, then keccak256. fn extract_receipt_required_selectors( manifest: &Manifest, ) -> std::collections::HashSet { @@ -252,8 +238,6 @@ fn extract_network_from_manifest(manifest: &Manifest) -> Result { } /// Extract the minimum `startBlock` across all data sources. -/// -/// Used to build `start_block_override` for bypassing on-chain validation. fn extract_start_block_from_manifest(manifest: &Manifest) -> Result { Ok(manifest .data_sources @@ -290,15 +274,14 @@ pub async fn run_single_test( } } - // Default block numbering starts at the manifest's startBlock so that - // test blocks without explicit numbers fall in the subgraph's indexed range. + // Start numbering at the manifest's startBlock so implicit blocks are in range. let blocks = build_blocks_with_triggers( test_file, manifest_info.min_start_block, &manifest_info.receipt_required_selectors, )?; - // Build mock IPFS file map. Fails fast on invalid CIDs or unreadable file paths. + // Build mock IPFS file map. let test_file_dir = test_file_path .parent() .map(|p| p.to_path_buf()) @@ -330,8 +313,7 @@ pub async fn run_single_test( let (arweave_unresolved_tx, mut arweave_unresolved_rx) = tokio::sync::mpsc::unbounded_channel::(); - // Create the database for this test. For pgtemp, the `db` value must - // stay alive for the duration of the test — dropping it destroys the database. + // `db` must stay alive — dropping it destroys a pgtemp database. let db = create_test_database(opt, &manifest_info.build_dir)?; let logger = make_test_logger(opt.verbose).new(o!("test" => test_file.name.clone())); @@ -349,8 +331,7 @@ pub async fn run_single_test( let ctx = setup_context(&logger, &stores, &chain, manifest_info, mock_data).await?; - // Populate eth_call cache with mock responses before starting indexer. - // This ensures handlers can successfully retrieve mocked contract call results. + // Populate eth_call cache before starting the indexer. 
super::eth_calls::populate_eth_call_cache( &logger, stores.chain_store.cheap_clone(), @@ -382,8 +363,7 @@ pub async fn run_single_test( .await { Ok(()) => { - // Drain any CIDs that were requested but not found in the mock. - // Deduplicate so each missing CID is listed once. + // Drain unresolved CIDs/tx IDs, deduplicating. let mut unresolved_ipfs: Vec = Vec::new(); while let Ok(cid) = unresolved_rx.try_recv() { if !unresolved_ipfs.contains(&cid) { @@ -436,8 +416,7 @@ pub async fn run_single_test( } } Err(subgraph_error) => { - // The subgraph handler threw a fatal error during indexing. - // Report it as a test failure without running assertions. + // Fatal handler error — skip assertions. Ok(TestResult { handler_error: Some(subgraph_error.message), assertions: vec![], @@ -445,15 +424,13 @@ pub async fn run_single_test( } }; - // Always stop the subgraph to ensure cleanup, even when wait_for_sync errors + // Stop the subgraph regardless of result. ctx.provider .clone() .stop_subgraph(ctx.deployment.clone()) .await; - // For persistent databases, clean up the deployment after the test so the - // database is left in a clean state. On failure, skip cleanup so the data - // is preserved for inspection. + // For persistent DBs: clean up on pass, preserve on failure for inspection. if db.needs_cleanup() { let test_passed = result.as_ref().map(|r| r.is_passed()).unwrap_or(false); if test_passed { @@ -476,9 +453,7 @@ pub async fn run_single_test( result } -/// Create the database for this test run. -/// -/// Returns `Temporary` (pgtemp, auto-dropped) or `Persistent` (--postgres-url). +/// Create the test database: pgtemp (auto-dropped) or persistent (`--postgres-url`). /// On non-Unix systems, `--postgres-url` is required. 
fn create_test_database(opt: &TestOpt, build_dir: &Path) -> Result { if let Some(url) = &opt.postgres_url { @@ -494,11 +469,8 @@ fn create_test_database(opt: &TestOpt, build_dir: &Path) -> Result ); } - // pgtemp sets `unix_socket_directories` to the data dir by default. - // On macOS the temp dir path can exceed the 104-byte Unix socket limit - // (e.g. /private/var/folders/.../build/pgtemp-xxx/pg_data_dir/.s.PGSQL.PORT), - // causing postgres to silently fail to start. Override to /tmp so the - // socket path stays short. Different port numbers prevent conflicts. + // Override unix_socket_directories to /tmp: macOS temp paths can exceed + // the 104-byte Unix socket limit, causing postgres to silently fail to start. let db = PgTempDBBuilder::new() .with_data_dir_prefix(build_dir) .persist_data(false) @@ -541,9 +513,8 @@ impl TestDatabase { } } - /// Persistent databases accumulate state across test runs and need - /// explicit post-test cleanup to remove each run's deployment. - /// Temporary databases are dropped automatically — no cleanup needed. + /// Returns true if the deployment must be cleaned up after the test. + /// Temporary databases are auto-dropped; persistent ones accumulate state. fn needs_cleanup(&self) -> bool { match self { #[cfg(unix)] @@ -611,10 +582,7 @@ ingestor = "default" }) } -/// Construct a mock Ethereum `Chain` with pre-built blocks. -/// -/// Uses `StaticStreamBuilder` for blocks, noops for unused adapters, -/// and a dummy firehose endpoint (never connected to). +/// Construct the mock Ethereum `Chain` with pre-built blocks. async fn setup_chain( logger: &Logger, blocks: Vec>, @@ -641,10 +609,8 @@ async fn setup_chain( let block_stream_builder: Arc> = Arc::new(StaticStreamBuilder { chain: blocks }); - // Create a dummy Ethereum adapter with archive capabilities. - // The adapter itself is never used for RPC — ethereum.call results come from - // the pre-populated call cache. 
But the RuntimeAdapter needs to resolve an - // adapter with matching capabilities before it can invoke the cache lookup. + // Dummy archive adapter — never used for RPC. The RuntimeAdapter needs one + // with matching capabilities to reach the eth_call cache. let endpoint_metrics = Arc::new(EndpointMetrics::mock()); let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone())); let transport = Transport::new_rpc( @@ -733,23 +699,20 @@ async fn setup_context( let subgraph_store = stores.network_store.subgraph_store(); - // Map the network name to our mock chain so graph-node routes triggers correctly. + // Route triggers to our mock chain. let mut blockchain_map = BlockchainMap::new(); blockchain_map.insert(stores.network_name.clone(), chain.clone()); let blockchain_map = Arc::new(blockchain_map); - // FileLinkResolver loads the manifest and WASM from the build directory - // instead of fetching from IPFS. The alias maps the Qm deployment hash to the - // actual manifest path so that clone_for_manifest can resolve it without - // treating the hash as a filesystem path. + // FileLinkResolver loads from build dir. Alias maps the Qm hash → manifest path + // so clone_for_manifest can resolve it without treating the hash as a path. let aliases = std::collections::HashMap::from([(hash.to_string(), manifest_path.to_path_buf())]); let link_resolver: Arc = Arc::new( FileLinkResolver::new(Some(build_dir.to_path_buf()), aliases), ); - // Replace the real IPFS client with a mock that serves pre-loaded content. - // FileLinkResolver handles manifest loading; the mock handles file data sources. + // Mock IPFS client for file data sources (FileLinkResolver handles the manifest). let ipfs_metrics = IpfsMetrics::new(&mock_registry); let ipfs_client = Arc::new(MockIpfsClient { files: mock_data.ipfs_files, @@ -821,8 +784,7 @@ async fn setup_context( mock_registry.clone(), )); - // The registrar handles subgraph naming and version management. 
- // Uses PanicSubscriptionManager because tests don't need GraphQL subscriptions. + // Uses PanicSubscriptionManager — tests don't need GraphQL subscriptions. let panicking_subscription_manager = Arc::new(PanicSubscriptionManager {}); let subgraph_registrar = Arc::new(graph_core::subgraph::SubgraphRegistrar::new( &logger_factory, @@ -840,8 +802,7 @@ async fn setup_context( SubgraphRegistrar::create_subgraph(subgraph_registrar.as_ref(), subgraph_name.clone()).await?; - // Deploy the subgraph version (loads manifest, compiles WASM, creates schema tables). - // start_block_override bypasses on-chain block validation when startBlock > 0. + // Deploy the subgraph. start_block_override bypasses on-chain block validation. let deployment = SubgraphRegistrar::create_subgraph_version( subgraph_registrar.as_ref(), subgraph_name.clone(), @@ -884,17 +845,14 @@ async fn cleanup( Ok(()) } -/// Poll until the subgraph reaches `stop_block` or fails. -/// -/// Returns `Ok(())` on success or `Err(SubgraphError)` on fatal error or timeout. +/// Poll until the subgraph reaches `stop_block`, returning `Err` on fatal error or timeout. async fn wait_for_sync( logger: &Logger, store: Arc, deployment: &DeploymentLocator, stop_block: BlockPtr, ) -> Result<(), SubgraphError> { - // NOTE: Hardcoded timeout/interval - could be made configurable via env var - // or CLI flag for slow subgraphs or faster iteration during development. + // Hardcoded; could be made configurable via env var or CLI flag. const MAX_WAIT: Duration = Duration::from_secs(60); const WAIT_TIME: Duration = Duration::from_millis(500); @@ -923,7 +881,7 @@ async fn wait_for_sync( info!(logger, "Sync progress"; "current" => block_ptr.number, "target" => stop_block.number); - // Check if the subgraph hit a fatal error (e.g., handler panic, deterministic error). + // Check for fatal errors. 
let status = store.status_for_id(deployment.id).await; if let Some(fatal_error) = status.fatal_error { return Err(fatal_error); diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs index 71e1aa4d250..81bc81807d9 100644 --- a/gnd/src/commands/test/schema.rs +++ b/gnd/src/commands/test/schema.rs @@ -1,10 +1,8 @@ -//! JSON schema types for test files and result types. +//! JSON schema types for test files. //! -//! Test files are JSON documents that describe a sequence of mock blockchain -//! blocks with triggers (log events) and GraphQL assertions to validate the -//! resulting entity state after indexing. Block triggers are auto-injected -//! for every block (both `Start` and `End` types) so block handlers with any -//! filter (`once`, `polling`, or none) fire correctly without explicit config. +//! Test files describe mock blockchain blocks with triggers and GraphQL assertions. +//! Block triggers (Start/End) are auto-injected so block handlers with any filter +//! (`once`, `polling`, or none) fire without explicit config. //! //! ```json //! { @@ -62,18 +60,14 @@ pub struct TestFile { /// Exactly one of `content` or `file` must be set. #[derive(Debug, Clone, Deserialize)] pub struct MockFile { - /// Syntactically valid IPFS CID (v0 `Qm...` or v1 `bafy...`). - /// The CID does not need to be the actual hash of the content — the mock - /// ignores the hash relationship. + /// IPFS CID (`Qm...` or `bafy...`). The mock ignores hash/content relationship. pub cid: String, /// Inline UTF-8 content. Exactly one of `content` or `file` must be set. #[serde(default)] pub content: Option, - /// Path to a file whose contents are loaded as UTF-8. - /// Resolved relative to the test JSON file. Exactly one of `content` or - /// `file` must be set. + /// File path, resolved relative to the test JSON. One of `content` or `file` required. 
#[serde(default)] pub file: Option, } @@ -92,19 +86,13 @@ pub struct MockArweaveFile { #[serde(default)] pub content: Option, - /// Path to a file. Resolved relative to the test JSON file. - /// Exactly one of `content` or `file` must be set. + /// File path, resolved relative to the test JSON. One of `content` or `file` required. #[serde(default)] pub file: Option, } impl MockArweaveFile { - /// Resolve this entry to bytes, given the directory of the test JSON file. - /// - /// Fails if: - /// - neither `content` nor `file` is set - /// - both `content` and `file` are set - /// - the referenced `file` path cannot be read + /// Resolve to bytes. Exactly one of `content` or `file` must be set. pub fn resolve(&self, test_dir: &Path) -> anyhow::Result { match (&self.content, &self.file) { (Some(content), None) => Ok(graph::bytes::Bytes::from(content.clone().into_bytes())), @@ -132,12 +120,7 @@ impl MockArweaveFile { } impl MockFile { - /// Resolve this entry to bytes, given the directory of the test JSON file. - /// - /// Fails if: - /// - neither `content` nor `file` is set - /// - both `content` and `file` are set - /// - the referenced `file` path cannot be read + /// Resolve to bytes. Exactly one of `content` or `file` must be set. pub fn resolve(&self, test_dir: &Path) -> anyhow::Result { match (&self.content, &self.file) { (Some(content), None) => Ok(Bytes::from(content.clone().into_bytes())), @@ -180,8 +163,7 @@ pub struct TestBlock { #[serde(default)] pub hash: Option, - /// Unix timestamp in seconds. If omitted, defaults to the block number - /// (monotonically increasing, chain-agnostic). + /// Unix timestamp in seconds. Defaults to the block number if omitted. #[serde(default)] pub timestamp: Option, @@ -200,20 +182,15 @@ pub struct LogEvent { /// Contract address that emitted the event (checksummed or lowercase hex). pub address: String, - /// Full event signature including parameter names and `indexed` keywords. 
- /// Example: `"Transfer(address indexed from, address indexed to, uint256 value)"` - /// - /// The signature is parsed to determine: - /// - topic0 (keccak256 hash of the canonical signature) - /// - Which parameters are indexed (become topics) vs non-indexed (become data) + /// Full event signature with parameter names and `indexed` keywords. + /// e.g. `"Transfer(address indexed from, address indexed to, uint256 value)"` pub event: String, - /// Event parameter values keyed by name. Values are JSON strings/numbers - /// that get converted to the appropriate Solidity type: - /// - Addresses: hex string `"0x1234..."` - /// - Integers: string `"1000000000000000000"` or number `1000` + /// Parameter values keyed by name. JSON → Solidity type: + /// - Addresses: `"0x1234..."` + /// - Integers: `"1000000000000000000"` or `1000` /// - Booleans: `true` / `false` - /// - Bytes: hex string `"0xdeadbeef"` + /// - Bytes: `"0xdeadbeef"` #[serde(default)] pub params: serde_json::Map, @@ -271,7 +248,7 @@ pub struct AssertionFailure { pub actual: Value, } -/// Parse a JSON test file. NOTE: Only validates JSON schema, not semantic correctness. +/// Parse a JSON test file (validates schema only, not semantic correctness). pub fn parse_test_file(path: &Path) -> anyhow::Result { let content = std::fs::read_to_string(path) .map_err(|e| anyhow::anyhow!("Failed to read test file {}: {}", path.display(), e))?; @@ -279,7 +256,8 @@ pub fn parse_test_file(path: &Path) -> anyhow::Result { .map_err(|e| anyhow::anyhow!("Failed to parse test file {}: {}", path.display(), e)) } -/// Discover `*.json` / `*.test.json` test files in a directory (recursive). Skips entries starting with non-alphanumeric characters. +/// Recursively discover `*.json` / `*.test.json` files. +/// Skips entries whose name starts with a non-alphanumeric character. 
pub fn discover_test_files(dir: &Path) -> anyhow::Result> { let mut files = Vec::new(); @@ -301,7 +279,6 @@ fn discover_recursive(dir: &Path, files: &mut Vec) -> anyhow::Result<() None => continue, }; - // Skip entries whose name starts with a non-alphanumeric character. if !name.starts_with(|c: char| c.is_alphanumeric()) { continue; } diff --git a/gnd/src/commands/test/trigger.rs b/gnd/src/commands/test/trigger.rs index b375d8bd0d3..8af4272a071 100644 --- a/gnd/src/commands/test/trigger.rs +++ b/gnd/src/commands/test/trigger.rs @@ -23,12 +23,9 @@ use graph_chain_ethereum::Chain; use std::collections::HashMap; use std::sync::Arc; -/// Convert test blocks into `BlockWithTriggers`, chained by parent hash. -/// Block numbers auto-increment from `start_block` when not explicit. -/// -/// `receipt_required_selectors` is the set of event selectors (topic0) for -/// handlers that declare `receipt: true`. Only matching logs get a non-null -/// receipt; all others receive `None`, mirroring production behaviour. +/// Build `BlockWithTriggers` from test blocks, chained by parent hash. +/// Numbers auto-increment from `start_block` when omitted. +/// Only logs whose topic0 is in `receipt_required_selectors` get a non-null receipt. pub fn build_blocks_with_triggers( test_file: &TestFile, start_block: u64, @@ -49,9 +46,7 @@ pub fn build_blocks_with_triggers( .context("Invalid block hash")? .unwrap_or_else(|| keccak256(number.to_be_bytes())); - // Default: use block number as timestamp (seconds since epoch). - // Avoids assuming a chain-specific block time and prevents future timestamps - // on chains with high block numbers (e.g. Arbitrum). + // Default timestamp = block number — avoids chain-specific block times. let timestamp = test_block.timestamp.unwrap_or(number); let mut triggers = Vec::new(); @@ -67,10 +62,7 @@ pub fn build_blocks_with_triggers( // Pass 2: build one mock receipt per tx-hash group. 
let receipts = build_receipts_by_tx(number, hash, &event_logs); - // Pass 3: create one log trigger per event. - // Only attach a receipt when the log's event selector (topic0) belongs - // to a handler that declared `receipt: true` in the manifest — matching - // production behaviour where handlers without that flag receive null. + // Pass 3: one log trigger per event. Receipt only for `receipt: true` handlers. for (tx_hash, full_log) in &event_logs { let needs_receipt = full_log .topics() @@ -83,10 +75,7 @@ pub fn build_blocks_with_triggers( ))); } - // Auto-inject block triggers for every block so that block handlers - // with any filter fire correctly: - // - Start: matches `once` handlers (at start_block) and initialization handlers - // - End: matches unfiltered and `polling` handlers + // Inject Start/End block triggers so `once`, `polling`, and unfiltered handlers fire. ensure!( number <= i32::MAX as u64, "block number {} exceeds i32::MAX", @@ -112,10 +101,8 @@ pub fn build_blocks_with_triggers( Ok(blocks) } -/// Parse a test event into `(tx_hash, Arc)` without building a trigger. -/// -/// The tx_hash is either taken from `trigger.tx_hash` or auto-generated as -/// `keccak256(block_number || log_index)`, which is unique per event. +/// Parse a test event into `(tx_hash, Arc)`. +/// tx_hash from the event or generated as `keccak256(block_number || log_index)`. fn prepare_event_log( block_number: u64, block_hash: B256, @@ -126,7 +113,7 @@ fn prepare_event_log( let (topics, data) = encode_event_log(&trigger.event, &trigger.params)?; - // Generate deterministic tx hash if not provided: keccak256(block_number || log_index). + // Deterministic tx hash: keccak256(block_number || log_index). let tx_hash = trigger .tx_hash .as_ref() @@ -155,12 +142,8 @@ fn prepare_event_log( Ok((tx_hash, full_log)) } -/// Build one mock receipt per unique tx_hash from a block's event logs. 
-/// -/// Events sharing the same `tx_hash` share a receipt whose `logs` contains -/// all of their logs in declaration order. Events without an explicit -/// `tx_hash` each have a unique auto-generated hash, so they each get their -/// own single-log receipt. +/// Build one receipt per unique tx_hash. Logs sharing a tx_hash are grouped; +/// events without an explicit tx_hash each get their own receipt. fn build_receipts_by_tx( block_number: u64, block_hash: B256, @@ -218,28 +201,15 @@ fn build_receipts_by_tx( receipts } -/// Encode event parameters into EVM log topics and data using `alloy::json_abi::Event::parse()`. -/// -/// Given a human-readable event signature like: -/// `"Transfer(address indexed from, address indexed to, uint256 value)"` -/// and parameter values like: -/// `{"from": "0xaaaa...", "to": "0xbbbb...", "value": "1000"}` -/// -/// Produces: -/// - topics[0] = keccak256("Transfer(address,address,uint256)") (the event selector) -/// - topics[1] = left-padded `from` address (indexed) -/// - topics[2] = left-padded `to` address (indexed) -/// - data = ABI-encoded `value` as uint256 (non-indexed) +/// ABI-encode event params into EVM log (topics, data). /// -/// Indexed parameters become topics (max 3 after topic0), non-indexed parameters -/// are ABI-encoded together as the log data. +/// topic0 = keccak256 of the canonical signature; indexed params → topics; +/// non-indexed params → ABI-encoded data. pub fn encode_event_log( event_sig: &str, params: &serde_json::Map, ) -> Result<(Vec, Bytes)> { - // Event::parse expects "event EventName(...)" format. - // If the user already wrote "event Transfer(...)" use as-is, - // otherwise prepend "event ". + // Event::parse requires "event " prefix — add if missing. 
let sig_with_prefix = if event_sig.trim_start().starts_with("event ") { event_sig.to_string() } else { @@ -377,8 +347,7 @@ pub fn json_to_sol_value(sol_type: &DynSolType, value: &serde_json::Value) -> Re len )); } - // DynSolValue::FixedBytes always wraps a B256 (32 bytes) plus the actual - // byte count. Right-zero-pad the input to fill the full 32 bytes. + // DynSolValue::FixedBytes wraps a full B256 — right-zero-pad. let mut padded = [0u8; 32]; padded[..bytes.len()].copy_from_slice(&bytes); Ok(DynSolValue::FixedBytes(B256::from(padded), *len)) @@ -448,7 +417,7 @@ fn sol_value_to_topic(value: &DynSolValue) -> Result { Ok(B256::from(bytes)) } DynSolValue::FixedBytes(b, _) => Ok(*b), - // Dynamic types are hashed per Solidity spec — the original value cannot be recovered from the topic. + // Dynamic types are hashed per Solidity spec. DynSolValue::Bytes(b) => Ok(keccak256(b)), DynSolValue::String(s) => Ok(keccak256(s.as_bytes())), _ => Err(anyhow!("Cannot convert {:?} to topic", value)), From aa03c1d231958d26b611d14dc74f61e00e895622 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Thu, 19 Mar 2026 21:23:26 +0200 Subject: [PATCH 7/9] gnd: replace eth_call cache with mock transport layer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the pre-populated eth_call cache approach with a mock Alloy transport that intercepts RPC calls at the transport level. This makes tests exercise the real production code path end-to-end. 
- Add MockTransport serving eth_call, eth_getBalance, eth_getCode - Add TestRuntimeAdapterBuilder to convert PossibleReorg → Deterministic - Add getBalanceCalls/hasCodeCalls to test JSON schema - Unmocked RPC calls now fail immediately with descriptive errors - Disable RPC retries in test mode for fast failure --- Cargo.lock | 1 + gnd/Cargo.toml | 1 + gnd/src/commands/test/eth_calls.rs | 70 +----- gnd/src/commands/test/mock_runtime.rs | 77 +++++++ gnd/src/commands/test/mock_transport.rs | 285 ++++++++++++++++++++++++ gnd/src/commands/test/mod.rs | 2 + gnd/src/commands/test/runner.rs | 61 ++--- gnd/src/commands/test/schema.rs | 23 +- gnd/src/main.rs | 9 +- 9 files changed, 439 insertions(+), 90 deletions(-) create mode 100644 gnd/src/commands/test/mock_runtime.rs create mode 100644 gnd/src/commands/test/mock_transport.rs diff --git a/Cargo.lock b/Cargo.lock index 8c44f8809b1..352b0b07da0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3699,6 +3699,7 @@ dependencies = [ "thiserror 2.0.18", "tokio", "tokio-util", + "tower 0.5.2", "url", "walkdir", "wasmparser 0.118.2", diff --git a/gnd/Cargo.toml b/gnd/Cargo.toml index 2dcb8a1fdcf..5e4b2fd51e2 100644 --- a/gnd/Cargo.toml +++ b/gnd/Cargo.toml @@ -31,6 +31,7 @@ graph-store-postgres = { path = "../store/postgres" } # Test command dependencies hex = "0.4" async-trait = { workspace = true } +tower = { workspace = true } # Direct dependencies from current dev.rs anyhow = { workspace = true } diff --git a/gnd/src/commands/test/eth_calls.rs b/gnd/src/commands/test/eth_calls.rs index 39ee0aa8315..4b9dbddfaf8 100644 --- a/gnd/src/commands/test/eth_calls.rs +++ b/gnd/src/commands/test/eth_calls.rs @@ -1,26 +1,16 @@ -//! Populates the eth_call cache with mock responses for `gnd test`. -//! -//! Signatures use graph-node's `name(inputs):(outputs)` convention. -//! Encoding matches production graph-node so cache IDs align with the runtime. +//! ABI encoding helpers for mock Ethereum call data. 
-use super::schema::{MockEthCall, TestFile}; use super::trigger::json_to_sol_value; use anyhow::{anyhow, Context, Result}; use graph::abi::FunctionExt as GraphFunctionExt; -use graph::blockchain::block_stream::BlockWithTriggers; -use graph::blockchain::BlockPtr; -use graph::components::store::EthereumCallCache; -use graph::data::store::ethereum::call; use graph::prelude::alloy::dyn_abi::{DynSolType, FunctionExt as AlloyFunctionExt}; use graph::prelude::alloy::json_abi::Function; -use graph::prelude::alloy::primitives::Address; -use graph::slog::Logger; -use graph_chain_ethereum::Chain; -use graph_store_postgres::ChainStore; -use std::sync::Arc; /// ABI-encode a function call (selector + params) using graph-node's encoding path. -fn encode_function_call(function_sig: &str, params: &[serde_json::Value]) -> Result> { +pub(super) fn encode_function_call( + function_sig: &str, + params: &[serde_json::Value], +) -> Result> { let alloy_sig = to_alloy_signature(function_sig); let function = Function::parse(&alloy_sig).map_err(|e| { anyhow!( @@ -55,7 +45,10 @@ fn encode_function_call(function_sig: &str, params: &[serde_json::Value]) -> Res } /// ABI-encode function return values (no selector prefix). -fn encode_return_value(function_sig: &str, returns: &[serde_json::Value]) -> Result> { +pub(super) fn encode_return_value( + function_sig: &str, + returns: &[serde_json::Value], +) -> Result> { let alloy_sig = to_alloy_signature(function_sig); let function = Function::parse(&alloy_sig).map_err(|e| { anyhow!( @@ -92,7 +85,7 @@ fn encode_return_value(function_sig: &str, returns: &[serde_json::Value]) -> Res /// Convert graph-node `name(inputs):(outputs)` to alloy `name(inputs) returns (outputs)`. /// Passes through signatures already in alloy format or without outputs. -fn to_alloy_signature(sig: &str) -> String { +pub(super) fn to_alloy_signature(sig: &str) -> String { // If it already contains "returns", assume alloy format. 
if sig.contains(" returns ") { return sig.to_string(); @@ -108,49 +101,6 @@ fn to_alloy_signature(sig: &str) -> String { } } -/// Populate the eth_call cache from test block mock calls before indexing starts. -pub async fn populate_eth_call_cache( - logger: &Logger, - chain_store: Arc, - blocks: &[BlockWithTriggers], - test_file: &TestFile, -) -> Result<()> { - for (block_data, test_block) in blocks.iter().zip(&test_file.blocks) { - let block_ptr = block_data.ptr(); - - for eth_call in &test_block.eth_calls { - populate_single_call(logger, chain_store.clone(), &block_ptr, eth_call).await?; - } - } - Ok(()) -} - -async fn populate_single_call( - logger: &Logger, - chain_store: Arc, - block_ptr: &BlockPtr, - eth_call: &MockEthCall, -) -> Result<()> { - let address: Address = eth_call.address.parse()?; - - let encoded_call = encode_function_call(ð_call.function, ð_call.params)?; - - let request = call::Request::new(address, encoded_call, 0); - - let retval = if eth_call.reverts { - call::Retval::Null - } else { - let encoded_return = encode_return_value(ð_call.function, ð_call.returns)?; - call::Retval::Value(encoded_return.into()) - }; - - chain_store - .set_call(logger, request, block_ptr.clone(), retval) - .await?; - - Ok(()) -} - #[cfg(test)] mod tests { use super::*; diff --git a/gnd/src/commands/test/mock_runtime.rs b/gnd/src/commands/test/mock_runtime.rs new file mode 100644 index 00000000000..150662faa74 --- /dev/null +++ b/gnd/src/commands/test/mock_runtime.rs @@ -0,0 +1,77 @@ +//! Wraps Ethereum host functions to convert `PossibleReorg` errors into +//! `Deterministic` ones so they surface immediately instead of causing +//! infinite restart loops. 
+ +use anyhow::anyhow; +use graph::blockchain::{self, ChainIdentifier, HostFn, HostFnCtx}; +use graph::data_source; +use graph::futures03::FutureExt; +use graph::prelude::EthereumCallCache; +use graph::runtime::HostExportError; +use graph_chain_ethereum::chain::{EthereumRuntimeAdapterBuilder, RuntimeAdapterBuilder}; +use graph_chain_ethereum::network::EthereumNetworkAdapters; +use graph_chain_ethereum::Chain; +use std::sync::Arc; + +const WRAPPED_HOST_FNS: &[&str] = &["ethereum.call", "ethereum.getBalance", "ethereum.hasCode"]; + +pub struct TestRuntimeAdapterBuilder; + +impl RuntimeAdapterBuilder for TestRuntimeAdapterBuilder { + fn build( + &self, + eth_adapters: Arc, + call_cache: Arc, + chain_identifier: Arc, + ) -> Arc> { + let real_adapter = + EthereumRuntimeAdapterBuilder {}.build(eth_adapters, call_cache, chain_identifier); + Arc::new(TestRuntimeAdapter { real_adapter }) + } +} + +struct TestRuntimeAdapter { + real_adapter: Arc>, +} + +impl TestRuntimeAdapter { + /// Convert `PossibleReorg` → `Deterministic` for a single host function. + fn wrap_possible_reorg(real: HostFn) -> HostFn { + let real_func = real.func.clone(); + let name = real.name; + HostFn { + name, + func: Arc::new(move |ctx: HostFnCtx<'_>, wasm_ptr: u32| { + let real_func = real_func.clone(); + async move { + match real_func(ctx, wasm_ptr).await { + Ok(result) => Ok(result), + Err(HostExportError::PossibleReorg(e)) => { + Err(HostExportError::Deterministic(anyhow!( + "{}. Add mock data to your test JSON.", + e + ))) + } + Err(other) => Err(other), + } + } + .boxed() + }), + } + } +} + +impl blockchain::RuntimeAdapter for TestRuntimeAdapter { + fn host_fns(&self, ds: &data_source::DataSource) -> Result, anyhow::Error> { + let mut fns = self.real_adapter.host_fns(ds)?; + + // PossibleReorg → Deterministic for mock-backed host functions. 
+        for hf in &mut fns {
+            if WRAPPED_HOST_FNS.contains(&hf.name) {
+                *hf = Self::wrap_possible_reorg(hf.clone());
+            }
+        }
+
+        Ok(fns)
+    }
+}
diff --git a/gnd/src/commands/test/mock_transport.rs b/gnd/src/commands/test/mock_transport.rs
new file mode 100644
index 00000000000..4db45addeb9
--- /dev/null
+++ b/gnd/src/commands/test/mock_transport.rs
@@ -0,0 +1,285 @@
+//! Mock Alloy transport that serves `eth_getBalance`, `eth_getCode`, and
+//! `eth_call` from test JSON mock data. Unmocked RPC calls fail immediately.
+
+use super::eth_calls::{encode_function_call, encode_return_value};
+use super::schema::TestFile;
+use anyhow::{Context, Result};
+use graph::blockchain::block_stream::BlockWithTriggers;
+use graph::prelude::alloy::primitives::{Address, B256, U256};
+use graph::prelude::alloy::rpc::json_rpc::{RequestPacket, ResponsePacket};
+use graph::prelude::alloy::transports::{TransportError, TransportErrorKind, TransportFut};
+use graph_chain_ethereum::Chain;
+use std::collections::HashMap;
+use std::sync::Arc;
+use std::task::{Context as TaskContext, Poll};
+use tower::Service;
+
+#[derive(Debug)]
+enum EthCallResult {
+    Success(Vec<u8>),
+    Revert,
+}
+
+#[derive(Debug)]
+struct Inner {
+    get_balance: HashMap<(Address, B256), U256>,
+    get_code: HashMap<(Address, B256), bool>,
+    eth_calls: HashMap<(Address, Vec<u8>, B256), EthCallResult>,
+}
+
+/// Mock RPC transport backed by test JSON data. Cheap to clone (`Arc` inner).
+#[derive(Clone)]
+pub struct MockTransport {
+    inner: Arc<Inner>,
+}
+
+impl MockTransport {
+    /// Build from test file mock data, keyed by built block hashes.
+    pub fn new(test_file: &TestFile, blocks: &[BlockWithTriggers<Chain>]) -> Result<Self> {
+        let mut get_balance = HashMap::new();
+        let mut get_code = HashMap::new();
+        let mut eth_calls = HashMap::new();
+
+        for (test_block, built_block) in test_file.blocks.iter().zip(blocks.iter()) {
+            let block_hash = built_block.ptr().hash.as_b256();
+
+            for mock_balance in &test_block.get_balance_calls {
+                let address: Address = mock_balance
+                    .address
+                    .parse()
+                    .context("Invalid address in getBalanceCalls mock")?;
+                let value: U256 = mock_balance.value.parse().context(
+                    "Invalid value in getBalanceCalls mock — expected decimal Wei string",
+                )?;
+                get_balance.insert((address, block_hash), value);
+            }
+
+            for mock_code in &test_block.has_code_calls {
+                let address: Address = mock_code
+                    .address
+                    .parse()
+                    .context("Invalid address in hasCodeCalls mock")?;
+                get_code.insert((address, block_hash), mock_code.has_code);
+            }
+
+            for eth_call in &test_block.eth_calls {
+                let address: Address = eth_call
+                    .address
+                    .parse()
+                    .context("Invalid address in ethCalls mock")?;
+                let calldata = encode_function_call(&eth_call.function, &eth_call.params)?;
+                let result = if eth_call.reverts {
+                    EthCallResult::Revert
+                } else {
+                    let return_data = encode_return_value(&eth_call.function, &eth_call.returns)?;
+                    EthCallResult::Success(return_data)
+                };
+                eth_calls.insert((address, calldata, block_hash), result);
+            }
+        }
+
+        Ok(Self {
+            inner: Arc::new(Inner {
+                get_balance,
+                get_code,
+                eth_calls,
+            }),
+        })
+    }
+
+    /// Successful JSON-RPC response.
+    fn success_response(
+        id: &serde_json::Value,
+        result: serde_json::Value,
+    ) -> Result<ResponsePacket, TransportError> {
+        let response = serde_json::json!({
+            "jsonrpc": "2.0",
+            "id": id,
+            "result": result,
+        });
+        serde_json::from_value(response).map_err(|e| {
+            TransportErrorKind::custom_str(&format!("Failed to build mock response: {}", e))
+        })
+    }
+
+    /// JSON-RPC error response (for reverts).
+    fn error_response(
+        id: &serde_json::Value,
+        code: i64,
+        message: &str,
+    ) -> Result<ResponsePacket, TransportError> {
+        let response = serde_json::json!({
+            "jsonrpc": "2.0",
+            "id": id,
+            "error": {
+                "code": code,
+                "message": message,
+            },
+        });
+        serde_json::from_value(response).map_err(|e| {
+            TransportErrorKind::custom_str(&format!("Failed to build mock error response: {}", e))
+        })
+    }
+
+    /// Extract `(address, block_hash)` from `[address, {"blockHash": hash}]` params.
+    fn parse_address_and_block_hash(params: &serde_json::Value) -> Result<(Address, B256), String> {
+        let arr = params.as_array().ok_or("Expected array params")?;
+
+        let address_str = arr
+            .first()
+            .and_then(|v| v.as_str())
+            .ok_or("Missing address param")?;
+        let address: Address = address_str
+            .parse()
+            .map_err(|e| format!("Invalid address '{}': {}", address_str, e))?;
+
+        let block_hash_str = arr
+            .get(1)
+            .and_then(|v| v.get("blockHash"))
+            .and_then(|v| v.as_str())
+            .ok_or("Missing blockHash in params")?;
+        let block_hash: B256 = block_hash_str
+            .parse()
+            .map_err(|e| format!("Invalid blockHash '{}': {}", block_hash_str, e))?;
+
+        Ok((address, block_hash))
+    }
+
+    fn handle_get_balance(
+        &self,
+        params: &serde_json::Value,
+        id: &serde_json::Value,
+    ) -> Result<ResponsePacket, TransportError> {
+        let (address, block_hash) = Self::parse_address_and_block_hash(params)
+            .map_err(|e| TransportErrorKind::custom_str(&format!("eth_getBalance: {}", e)))?;
+
+        match self.inner.get_balance.get(&(address, block_hash)) {
+            Some(value) => Self::success_response(id, serde_json::json!(format!("0x{:x}", value))),
+            None => Err(TransportErrorKind::custom_str(&format!(
+                "gnd test: no mock getBalance entry for address {} at block hash {}. 
\
+                 Add a 'getBalanceCalls' entry to this block in your test JSON.",
+                address, block_hash
+            ))),
+        }
+    }
+
+    fn handle_get_code(
+        &self,
+        params: &serde_json::Value,
+        id: &serde_json::Value,
+    ) -> Result<ResponsePacket, TransportError> {
+        let (address, block_hash) = Self::parse_address_and_block_hash(params)
+            .map_err(|e| TransportErrorKind::custom_str(&format!("eth_getCode: {}", e)))?;
+
+        match self.inner.get_code.get(&(address, block_hash)) {
+            // graph-node checks `code.len() > 2`; "0xff" is the minimal truthy value.
+            Some(true) => Self::success_response(id, serde_json::json!("0xff")),
+            Some(false) => Self::success_response(id, serde_json::json!("0x")),
+            None => Err(TransportErrorKind::custom_str(&format!(
+                "gnd test: no mock hasCode entry for address {} at block hash {}. \
+                 Add a 'hasCodeCalls' entry to this block in your test JSON.",
+                address, block_hash
+            ))),
+        }
+    }
+
+    fn handle_eth_call(
+        &self,
+        params: &serde_json::Value,
+        id: &serde_json::Value,
+    ) -> Result<ResponsePacket, TransportError> {
+        let arr = params
+            .as_array()
+            .ok_or_else(|| TransportErrorKind::custom_str("eth_call: expected array params"))?;
+
+        let tx = arr.first().ok_or_else(|| {
+            TransportErrorKind::custom_str("eth_call: missing transaction object")
+        })?;
+
+        let to_str = tx
+            .get("to")
+            .and_then(|v| v.as_str())
+            .ok_or_else(|| TransportErrorKind::custom_str("eth_call: missing 'to' field"))?;
+        let address: Address = to_str.parse().map_err(|e| {
+            TransportErrorKind::custom_str(&format!("eth_call: invalid 'to' address: {}", e))
+        })?;
+
+        let input_str = tx
+            .get("input")
+            .and_then(|v| v.as_str())
+            .ok_or_else(|| TransportErrorKind::custom_str("eth_call: missing 'input' field"))?;
+        let input =
+            hex::decode(input_str.strip_prefix("0x").unwrap_or(input_str)).map_err(|e| {
+                TransportErrorKind::custom_str(&format!("eth_call: invalid 'input' hex: {}", e))
+            })?;
+
+        let block_hash_str = arr
+            .get(1)
+            .and_then(|v| v.get("blockHash"))
+            .and_then(|v| v.as_str())
+            .ok_or_else(|| 
TransportErrorKind::custom_str("eth_call: missing blockHash"))?;
+        let block_hash: B256 = block_hash_str.parse().map_err(|e| {
+            TransportErrorKind::custom_str(&format!("eth_call: invalid blockHash: {}", e))
+        })?;
+
+        match self.inner.eth_calls.get(&(address, input, block_hash)) {
+            Some(EthCallResult::Success(data)) => {
+                Self::success_response(id, serde_json::json!(format!("0x{}", hex::encode(data))))
+            }
+            Some(EthCallResult::Revert) => Self::error_response(id, 3, "execution reverted"),
+            None => Err(TransportErrorKind::custom_str(&format!(
+                "gnd test: unmocked eth_call to {} at block hash {}. \
+                 Add a matching 'ethCalls' entry to this block in your test JSON.",
+                address, block_hash
+            ))),
+        }
+    }
+}
+
+impl Service<RequestPacket> for MockTransport {
+    type Response = ResponsePacket;
+    type Error = TransportError;
+    type Future = TransportFut<'static>;
+
+    fn poll_ready(&mut self, _cx: &mut TaskContext<'_>) -> Poll<Result<(), Self::Error>> {
+        Poll::Ready(Ok(()))
+    }
+
+    fn call(&mut self, request: RequestPacket) -> Self::Future {
+        let transport = self.clone();
+        Box::pin(async move {
+            let req = match &request {
+                RequestPacket::Single(req) => req,
+                RequestPacket::Batch(_) => {
+                    return Err(TransportErrorKind::custom_str(
+                        "gnd test: batch RPC requests are not supported by MockTransport",
+                    ));
+                }
+            };
+
+            let method = req.method();
+            let params: serde_json::Value = req
+                .params()
+                .map(|p| serde_json::from_str(p.get()))
+                .transpose()
+                .map_err(|e| {
+                    TransportErrorKind::custom_str(&format!("Failed to parse params: {}", e))
+                })?
+                .unwrap_or(serde_json::Value::Null);
+
+            let id = serde_json::to_value(req.id()).map_err(|e| {
+                TransportErrorKind::custom_str(&format!("Failed to serialize request id: {}", e))
+            })?;
+
+            match method {
+                "eth_getBalance" => transport.handle_get_balance(&params, &id),
+                "eth_getCode" => transport.handle_get_code(&params, &id),
+                "eth_call" => transport.handle_eth_call(&params, &id),
+                _ => Err(TransportErrorKind::custom_str(&format!(
+                    "gnd test: unmocked RPC method '{}'. Add mock data to your test JSON file.",
+                    method
+                ))),
+            }
+        })
+    }
+}
diff --git a/gnd/src/commands/test/mod.rs b/gnd/src/commands/test/mod.rs
index e13a2fde6ea..a2f0d413495 100644
--- a/gnd/src/commands/test/mod.rs
+++ b/gnd/src/commands/test/mod.rs
@@ -34,6 +34,8 @@ mod matchstick;
 mod mock_arweave;
 mod mock_chain;
 mod mock_ipfs;
+mod mock_runtime;
+mod mock_transport;
 mod noop;
 mod output;
 mod runner;
diff --git a/gnd/src/commands/test/runner.rs b/gnd/src/commands/test/runner.rs
index bc86fe2726e..0b5a9302141 100644
--- a/gnd/src/commands/test/runner.rs
+++ b/gnd/src/commands/test/runner.rs
@@ -15,6 +15,8 @@ use super::block_stream::StaticStreamBuilder;
 use super::mock_arweave::MockArweaveResolver;
 use super::mock_chain;
 use super::mock_ipfs::MockIpfsClient;
+use super::mock_runtime::TestRuntimeAdapterBuilder;
+use super::mock_transport::MockTransport;
 use super::noop::{NoopAdapterSelector, StaticBlockRefetcher};
 use super::schema::{TestFile, TestResult};
 use super::trigger::build_blocks_with_triggers;
@@ -43,7 +45,6 @@ use graph::prelude::{
     SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode,
 };
 use graph::slog::{info, o, Logger};
-use graph_chain_ethereum::chain::EthereumRuntimeAdapterBuilder;
use graph_chain_ethereum::network::{EthereumNetworkAdapter, EthereumNetworkAdapters};
 use graph_chain_ethereum::{
     Chain, EthereumAdapter, NodeCapabilities, ProviderEthRpcMetrics, Transport,
@@ -320,7 +321,8 @@ pub async fn run_single_test(
     let stores = setup_stores(&logger, &db, 
&manifest_info.network_name).await?;
 
-    let chain = setup_chain(&logger, blocks.clone(), &stores).await?;
+    let mock_transport = MockTransport::new(test_file, &blocks)?;
+    let chain = setup_chain(&logger, blocks.clone(), &stores, mock_transport).await?;
 
     let mock_data = MockData {
         ipfs_files: mock_files,
@@ -331,16 +333,7 @@ pub async fn run_single_test(
 
     let ctx = setup_context(&logger, &stores, &chain, manifest_info, mock_data).await?;
 
-    // Populate eth_call cache before starting the indexer.
-    super::eth_calls::populate_eth_call_cache(
-        &logger,
-        stores.chain_store.cheap_clone(),
-        &blocks,
-        test_file,
-    )
-    .await?;
-
-    // Determine the target block — the indexer will process until it reaches this.
+    // The indexer will process until it reaches this block.
     let stop_block = if blocks.is_empty() {
         mock_chain::genesis_ptr()
     } else {
@@ -418,7 +411,7 @@ pub async fn run_single_test(
         Err(subgraph_error) => {
             // Fatal handler error — skip assertions.
             Ok(TestResult {
-                handler_error: Some(subgraph_error.message),
+                handler_error: Some(format_handler_error(&subgraph_error)),
                 assertions: vec![],
             })
         }
@@ -587,11 +580,12 @@ async fn setup_chain(
     logger: &Logger,
     blocks: Vec<BlockWithTriggers<Chain>>,
     stores: &TestStores,
+    mock_transport: MockTransport,
 ) -> Result<Arc<Chain>> {
     let mock_registry = Arc::new(MetricsRegistry::mock());
     let logger_factory = LoggerFactory::new(logger.clone(), None, mock_registry.clone());
 
-    // Dummy firehose endpoint — required by Chain constructor but never used.
+    // Dummy firehose endpoint — required by Chain constructor, never used.
     let firehose_endpoints = FirehoseEndpoints::for_testing(vec![Arc::new(FirehoseEndpoint::new(
         "",
         "http://0.0.0.0:0",
@@ -606,21 +600,13 @@ async fn setup_chain(
     let client = Arc::new(graph::blockchain::client::ChainClient::<Chain>::new_firehose(firehose_endpoints));
 
-    let block_stream_builder: Arc<dyn BlockStreamBuilder<Chain>> =
-        Arc::new(StaticStreamBuilder { chain: blocks });
-
-    // Dummy archive adapter — never used for RPC. 
The RuntimeAdapter needs one
-    // with matching capabilities to reach the eth_call cache.
     let endpoint_metrics = Arc::new(EndpointMetrics::mock());
     let provider_metrics = Arc::new(ProviderEthRpcMetrics::new(mock_registry.clone()));
-    let transport = Transport::new_rpc(
-        graph::url::Url::parse("http://0.0.0.0:0").unwrap(),
-        graph::http::HeaderMap::new(),
-        endpoint_metrics.clone(),
-        "",
-        false, // no_eip2718
-        graph_chain_ethereum::Compression::None,
-    );
+    let rpc_client = graph::prelude::alloy::rpc::client::RpcClient::new(mock_transport, true);
+    let transport = Transport::RPC(rpc_client);
+
+    let block_stream_builder: Arc<dyn BlockStreamBuilder<Chain>> =
+        Arc::new(StaticStreamBuilder { chain: blocks });
 
     let dummy_adapter = Arc::new(
         EthereumAdapter::new(
             logger.clone(),
@@ -668,7 +654,7 @@ async fn setup_chain(
         Arc::new(NoopAdapterSelector {
             _phantom: PhantomData,
         }),
-        Arc::new(EthereumRuntimeAdapterBuilder {}),
+        Arc::new(TestRuntimeAdapterBuilder),
         eth_adapters,
         graph::prelude::ENV_VARS.reorg_threshold(),
         graph::prelude::ENV_VARS.ingestor_polling_interval,
@@ -845,6 +831,25 @@ async fn cleanup(
     Ok(())
 }
 
+/// Format a `SubgraphError` for user-facing output.
+fn format_handler_error(err: &SubgraphError) -> String {
+    let mut parts = Vec::new();
+
+    if let Some(handler) = &err.handler {
+        parts.push(format!("in {handler}"));
+    }
+    if let Some(block_ptr) = &err.block_ptr {
+        parts.push(format!("at block #{}", block_ptr.number));
+    }
+
+    let location = parts.join(" ");
+    if location.is_empty() {
+        err.message.clone()
+    } else {
+        format!("{location}: {}", err.message)
+    }
+}
+
 /// Poll until the subgraph reaches `stop_block`, returning `Err` on fatal error or timeout. 
async fn wait_for_sync(
     logger: &Logger,
diff --git a/gnd/src/commands/test/schema.rs b/gnd/src/commands/test/schema.rs
index 81bc81807d9..d684e98fff6 100644
--- a/gnd/src/commands/test/schema.rs
+++ b/gnd/src/commands/test/schema.rs
@@ -171,9 +171,14 @@ pub struct TestBlock {
     #[serde(default)]
     pub events: Vec<MockEvent>,
 
-    /// Mock contract call responses pre-cached before the test runs.
     #[serde(default, rename = "ethCalls")]
     pub eth_calls: Vec<MockEthCall>,
+
+    #[serde(default, rename = "getBalanceCalls")]
+    pub get_balance_calls: Vec<MockBalance>,
+
+    #[serde(default, rename = "hasCodeCalls")]
+    pub has_code_calls: Vec<MockCode>,
 }
 
 /// A mock Ethereum event log.
@@ -211,6 +216,22 @@ pub struct MockEthCall {
     pub reverts: bool,
 }
 
+/// Mock `ethereum.getBalance()` response.
+#[derive(Debug, Clone, Deserialize)]
+pub struct MockBalance {
+    pub address: String,
+    /// Wei as a decimal string.
+    pub value: String,
+}
+
+/// Mock `ethereum.hasCode()` response.
+#[derive(Debug, Clone, Deserialize)]
+pub struct MockCode {
+    pub address: String,
+    #[serde(rename = "hasCode")]
+    pub has_code: bool,
+}
+
 #[derive(Debug, Clone, Deserialize)]
 pub struct Assertion {
     pub query: String,
diff --git a/gnd/src/main.rs b/gnd/src/main.rs
index 351dd9d220a..0cac355a062 100644
--- a/gnd/src/main.rs
+++ b/gnd/src/main.rs
@@ -177,7 +177,14 @@ async fn main() -> Result<()> {
         Commands::Remove(remove_opt) => run_remove(remove_opt).await,
         Commands::Auth(auth_opt) => run_auth(auth_opt),
         Commands::Publish(publish_opt) => run_publish(publish_opt).await,
-        Commands::Test(test_opt) => run_test(test_opt).await,
+        Commands::Test(test_opt) => {
+            // Disable RPC retries so unmocked calls fail immediately.
+            // SAFETY: single-threaded here, before the Tokio runtime is built. 
+ unsafe { + std::env::set_var("GRAPH_ETHEREUM_REQUEST_RETRIES", "0"); + } + run_test(test_opt).await + } Commands::Clean(clean_opt) => run_clean(clean_opt), Commands::Completions(completions_opt) => generate_completions(completions_opt), }; From 65708a5ba15fc1e8cee31c9ef0b3ec6ac1cd0dd0 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Thu, 19 Mar 2026 21:23:48 +0200 Subject: [PATCH 8/9] gnd: update test docs for mock transport and file data sources - Document getBalanceCalls and hasCodeCalls mock fields - Document IPFS and Arweave file data source mocking - Update architecture section to describe mock transport - Update troubleshooting for immediate unmocked call errors - Update known limitations table --- gnd/docs/gnd-test.md | 155 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 122 insertions(+), 33 deletions(-) diff --git a/gnd/docs/gnd-test.md b/gnd/docs/gnd-test.md index cb60f8e616d..6fcc118fc29 100644 --- a/gnd/docs/gnd-test.md +++ b/gnd/docs/gnd-test.md @@ -22,7 +22,7 @@ gnd test --matchstick Tests are JSON files that define: - Mock blockchain blocks with events -- Mock `eth_call` responses +- Mock Ethereum RPC responses (`eth_call`, `eth_getBalance`, `eth_getCode`) - GraphQL assertions to validate entity state Place test files in a `tests/` directory with `.json` or `.test.json` extension. @@ -82,6 +82,8 @@ Place test files in a `tests/` directory with `.json` or `.test.json` extension. 
| `baseFeePerGas` | No | None (pre-EIP-1559) | Base fee in wei | | `events` | No | Empty array | Log events in this block | | `ethCalls` | No | Empty array | Mock `eth_call` responses | +| `getBalanceCalls` | No | Empty array | Mock `eth_getBalance` responses for `ethereum.getBalance()` | +| `hasCodeCalls` | No | Empty array | Mock `eth_getCode` responses for `ethereum.hasCode()` | ### Empty Blocks @@ -359,7 +361,7 @@ Mock contract calls made from mapping handlers using `contract.call()`: | `function` | Yes | Full signature: `"functionName(inputTypes)(returnTypes)"` | | `params` | Yes | Array of input parameters (as strings) | | `returns` | Yes | Array of return values (as strings, ignored if `reverts: true`) | -| `reverts` | No | Default `false`. If `true`, the call is cached as `Retval::Null` | +| `reverts` | No | Default `false`. If `true`, the mock transport returns an RPC error | ### Function Signature Format @@ -414,6 +416,110 @@ From the ERC20 test: } ``` +## ethereum.getBalance() Mocking + +Mock balance lookups made from mapping handlers using `ethereum.getBalance()`: + +```json +{ + "getBalanceCalls": [ + { + "address": "0xaaaa000000000000000000000000000000000000", + "value": "1000000000000000000" + } + ] +} +``` + +### getBalanceCalls Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `address` | Yes | Account address (checksummed or lowercase hex) | +| `value` | Yes | Balance in Wei as a decimal string | + +## ethereum.hasCode() Mocking + +Mock code existence checks made from mapping handlers using `ethereum.hasCode()`: + +```json +{ + "hasCodeCalls": [ + { + "address": "0x1234000000000000000000000000000000000000", + "hasCode": true + } + ] +} +``` + +### hasCodeCalls Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `address` | Yes | Contract address (checksummed or lowercase hex) | +| `hasCode` | Yes | Whether the address has deployed bytecode | + +## File Data Sources + +Mock IPFS and 
Arweave file contents for file data source handlers. Files are defined at the top level of the test JSON (not inside blocks). + +### IPFS Files + +```json +{ + "name": "File data source test", + "files": [ + { + "cid": "QmExample...", + "content": "{\"name\": \"Token\", \"description\": \"A token\"}" + }, + { + "cid": "QmAnother...", + "file": "fixtures/metadata.json" + } + ], + "blocks": [...], + "assertions": [...] +} +``` + +#### files Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `cid` | Yes | IPFS CID (`Qm...` or `bafy...`). The mock ignores hash/content relationship | +| `content` | One of `content`/`file` | Inline UTF-8 content | +| `file` | One of `content`/`file` | File path, resolved relative to the test JSON | + +### Arweave Files + +```json +{ + "name": "Arweave data source test", + "arweaveFiles": [ + { + "txId": "abc123", + "content": "{\"name\": \"Token\"}" + }, + { + "txId": "def456/metadata.json", + "file": "fixtures/arweave-data.json" + } + ], + "blocks": [...], + "assertions": [...] +} +``` + +#### arweaveFiles Fields + +| Field | Required | Description | +|-------|----------|-------------| +| `txId` | Yes | Arweave transaction ID or bundle path (e.g. `"txid/filename.json"`) | +| `content` | One of `content`/`file` | Inline UTF-8 content | +| `file` | One of `content`/`file` | File path, resolved relative to the test JSON | + ## Assertions GraphQL queries to validate the indexed entity state after processing all blocks. @@ -647,10 +753,12 @@ my-subgraph/ |---------|--------| | Log events | ✅ Supported | | Block handlers (all filters) | ✅ Supported | -| eth_call mocking | ✅ Supported | +| `eth_call` mocking | ✅ Supported | +| `ethereum.getBalance()` mocking | ✅ Supported | +| `ethereum.hasCode()` mocking | ✅ Supported | | Dynamic/template data sources | ✅ Supported | | Transaction receipts (`receipt: true`) | ⚠️ Partial — `receipt.logs` is populated and grouped by `txHash`; other fields (gas, from, to, etc.) 
are hardcoded stubs (see [Transaction Receipts](#transaction-receipts)) | -| File data sources / IPFS mocking | ❌ Not implemented | +| File data sources (IPFS + Arweave) | ✅ Supported | | Call triggers (traces) | ❌ Not implemented | | `--json` CI output | ❌ Not implemented | | Parallel test execution | ❌ Not implemented | @@ -764,10 +872,8 @@ GraphQL queries → Assertions **Key design principles:** - **Isolated database per test:** Each test gets a pgtemp database dropped on completion (default), or a shared persistent database with post-test cleanup (`--postgres-url`) -- **Real WASM runtime:** Uses `EthereumRuntimeAdapterBuilder` with real `ethereum.call` host function -- **Pre-populated call cache:** `eth_call` responses are cached before indexing starts +- **Mock transport layer:** A mock Alloy transport serves `eth_call`, `eth_getBalance`, and `eth_getCode` from test JSON data. All three flow through the real production code path — only the transport returns mock responses. Unmocked RPC calls fail immediately with a descriptive error. - **No IPFS for manifest:** Uses `FileLinkResolver` to load manifest/WASM from build directory -- **Dummy RPC adapter:** Registered at `http://0.0.0.0:0` — exists so the runtime can resolve an adapter with the required capabilities. If a mapping makes an `ethereum.call` that has no matching mock in `ethCalls`, the call misses the cache and falls through to this dummy adapter. The connection is refused immediately (port 0 is invalid), which graph-node treats as a possible reorg and restarts the block stream. The indexer then loops until the 60-second test timeout. See [Unmocked eth_call](#unmocked-eth_call-causes-60-second-timeout) in Troubleshooting. ## Troubleshooting @@ -790,36 +896,19 @@ GraphQL queries → Assertions 2. Simplify mapping logic 3. Check for infinite loops in handler code -### eth_call Returns Wrong Value - -**Cause:** Call cache miss — no matching mock in `ethCalls`. - -**Fix:** -1. 
Verify `address`, `function`, and `params` exactly match the call from your mapping -2. Check function signature format: `"functionName(inputTypes)(returnTypes)"` -3. Ensure parameters are in correct order - -### Unmocked eth_call Causes 60-Second Timeout +### Unmocked RPC Call -**Cause:** A mapping handler calls `ethereum.call` (directly or via a generated contract binding) for a call that has no matching entry in `ethCalls`. The call misses the pre-populated cache and is forwarded to the dummy RPC adapter at `http://0.0.0.0:0`. The connection is refused immediately, but graph-node interprets connection errors as a possible chain reorganisation and restarts the block stream instead of failing. The indexer loops indefinitely until the test runner's 60-second timeout expires. +**Cause:** A mapping handler calls `ethereum.call`, `ethereum.getBalance`, or `ethereum.hasCode` for a call that has no matching mock entry. -**Symptom:** Test fails with `Sync timeout after 60s` with no indication of which call was missing. +**Symptom:** Test fails immediately with a descriptive error like: +``` +gnd test: unmocked eth_call to 0x1234... at block hash 0xabcd... +Add a matching 'ethCalls' entry to this block in your test JSON. +``` **Fix:** -1. Add the missing call to `ethCalls` in your test block: - ```json - "ethCalls": [ - { - "address": "0xContractAddress", - "function": "myFunction(uint256):(address)", - "params": ["42"], - "returns": ["0xSomeAddress"] - } - ] - ``` -2. If the call is not supposed to happen, check the mapping logic — a code path may be executing unexpectedly. - -**Known limitation:** There is currently no fail-fast error for unmocked calls. The only signal is the timeout. A future improvement will make the dummy adapter panic immediately on a cache miss with a descriptive message. +1. Add the missing mock to the appropriate field in your test block (`ethCalls`, `getBalanceCalls`, or `hasCodeCalls`) +2. 
If the call is not supposed to happen, check the mapping logic — a code path may be executing unexpectedly ### Block Handler Not Firing From 296a709573e2b8a378562163f0672e48ffe7c9e6 Mon Sep 17 00:00:00 2001 From: Maksim Dimitrov Date: Thu, 19 Mar 2026 23:33:21 +0200 Subject: [PATCH 9/9] gnd: Separate tests and fix retry count env var value Signed-off-by: Maksim Dimitrov --- gnd/src/main.rs | 2 +- gnd/tests/gnd_test.rs | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/gnd/src/main.rs b/gnd/src/main.rs index 0cac355a062..d4c90f72ba3 100644 --- a/gnd/src/main.rs +++ b/gnd/src/main.rs @@ -181,7 +181,7 @@ async fn main() -> Result<()> { // Disable RPC retries so unmocked calls fail immediately. // SAFETY: single-threaded here, before the Tokio runtime is built. unsafe { - std::env::set_var("GRAPH_ETHEREUM_REQUEST_RETRIES", "0"); + std::env::set_var("GRAPH_ETHEREUM_REQUEST_RETRIES", "1"); } run_test(test_opt).await } diff --git a/gnd/tests/gnd_test.rs b/gnd/tests/gnd_test.rs index 4d5bdb47511..74f9d4ce522 100644 --- a/gnd/tests/gnd_test.rs +++ b/gnd/tests/gnd_test.rs @@ -159,13 +159,24 @@ fn run_gnd_test(args: &[&str], cwd: &Path) -> std::process::Output { // ============================================================================ #[test] -fn test_token_transfer_and_templates() { +fn test_token_transfer() { let (_temp_dir, subgraph_dir) = setup_fixture("token"); - let output = run_gnd_test( - &["tests/transfer.json", "tests/templates.json"], - &subgraph_dir, + let output = run_gnd_test(&["tests/transfer.json"], &subgraph_dir); + + assert!( + output.status.success(), + "gnd test failed for token fixture\nstdout: {}\nstderr: {}", + String::from_utf8_lossy(&output.stdout), + String::from_utf8_lossy(&output.stderr), ); +} + +#[test] +fn test_templates() { + let (_temp_dir, subgraph_dir) = setup_fixture("token"); + + let output = run_gnd_test(&["tests/templates.json"], &subgraph_dir); assert!( output.status.success(),