state_getKeysPaged - JSON-RPC Method

Description

Returns storage keys matching a prefix with pagination support. This JSON-RPC method is essential for querying large storage maps without overwhelming the node or client with massive result sets. It allows iterating through storage keys in manageable chunks.

Parameters

Parameter   Type     Required   Description
prefix      string   Yes        Hex-encoded storage key prefix to match
count       number   Yes        Maximum number of keys to return
startKey    string   No         Hex-encoded key to start from (exclusive); used for pagination
blockHash   string   No         Block hash to query at; if omitted, the latest block is used
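
The optional parameters are passed positionally after prefix and count. Below is a minimal sketch of a call that pins the query to a specific block; the endpoint URL and block hash are placeholders, not real values.

import requests

RPC_URL = "https://api-moonriver.n.dwellir.com/YOUR_API_KEY"  # placeholder endpoint

payload = {
    "jsonrpc": "2.0",
    "method": "state_getKeysPaged",
    "params": [
        "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9",  # prefix
        10,              # count
        None,            # startKey: null means start from the first matching key
        "0xBLOCK_HASH",  # blockHash: hypothetical placeholder; drop it to query the latest block
    ],
    "id": 1,
}

response = requests.post(RPC_URL, json=payload)
print(response.json()["result"])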

Returns

Field    Type    Description
result   array   Array of hex-encoded storage keys matching the prefix

Request Example

{
  "jsonrpc": "2.0",
  "method": "state_getKeysPaged",
  "params": [
    "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9",
    10,
    null
  ],
  "id": 1
}

Response Example

{
  "jsonrpc": "2.0",
  "result": [
    "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da900000001",
    "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da900000002",
    "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da900000003"
  ],
  "id": 1
}
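
Every returned key starts with the queried prefix; the remaining bytes identify the individual storage entry (their exact layout depends on the map's hasher). A small sketch of splitting a key from the response above:

prefix = "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9"
key = "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da900000001"

suffix = key[len(prefix):]  # bytes after the prefix identify the entry
print(suffix)               # -> 00000001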

Code Examples

import requests
import json
from typing import List, Optional

class StorageKeyPaginator:
    def __init__(self, rpc_url: str, api_key: str):
        self.rpc_url = rpc_url
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }

    def get_keys_paged(
        self,
        prefix: str,
        count: int,
        start_key: Optional[str] = None,
        block_hash: Optional[str] = None
    ) -> List[str]:
        """Fetch a single page of storage keys matching the prefix."""
        params = [prefix, count, start_key]
        if block_hash:
            params.append(block_hash)

        payload = {
            "jsonrpc": "2.0",
            "method": "state_getKeysPaged",
            "params": params,
            "id": 1
        }

        response = requests.post(
            self.rpc_url,
            headers=self.headers,
            data=json.dumps(payload)
        )
        return response.json()["result"]

    def iterate_all_keys(self, prefix: str, page_size: int = 100):
        """Generator that yields all keys matching prefix"""
        start_key = None

        while True:
            keys = self.get_keys_paged(prefix, page_size, start_key)

            if not keys:
                break

            for key in keys:
                yield key

            if len(keys) < page_size:
                break

            # The last key of this page becomes the (exclusive) start of the next one
            start_key = keys[-1]

    def get_all_keys(self, prefix: str, page_size: int = 100) -> List[str]:
        """Get all keys as a list"""
        return list(self.iterate_all_keys(prefix, page_size))

# Usage example
paginator = StorageKeyPaginator(
    "https://api-moonriver.n.dwellir.com/YOUR_API_KEY",
    "YOUR_API_KEY"
)

# Get validator keys with pagination
validator_prefix = "0x5f3e4907f716ac89b6347d15ececedca9320c2dc4f5d7af5b320b04e2d1a3ff3"

# Method 1: Get all at once
all_validators = paginator.get_all_keys(validator_prefix, page_size=50)
print(f"Total validators: {len(all_validators)}")

# Method 2: Process in batches
for i, key in enumerate(paginator.iterate_all_keys(validator_prefix, page_size=10)):
    if i >= 100:  # Process first 100 only
        break
    print(f"Validator {i}: {key[-64:]}")  # Print account part

Memory-Efficient Processing

// Process large datasets without loading all into memory

// Minimal helper assumed by the example below: it wraps the state_getKeysPaged
// call using the same placeholder endpoint as the Python example above.
const RPC_URL = "https://api-moonriver.n.dwellir.com/YOUR_API_KEY";

async function getKeysPagedRPC(prefix, count, startKey) {
  const response = await fetch(RPC_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      jsonrpc: "2.0",
      method: "state_getKeysPaged",
      params: [prefix, count, startKey],
      id: 1
    })
  });
  const { result } = await response.json();
  return result;
}

async function processLargeStorage(prefix, processor, batchSize = 100) {
  let startKey = null;
  let totalProcessed = 0;

  while (true) {
    const keys = await getKeysPagedRPC(prefix, batchSize, startKey);

    if (keys.length === 0) break;

    // Process batch
    for (const key of keys) {
      await processor(key);
      totalProcessed++;
    }

    console.log(`Processed batch of ${keys.length}, total: ${totalProcessed}`);

    if (keys.length < batchSize) break;

    startKey = keys[keys.length - 1];

    // Optional: Add delay to avoid overwhelming the node
    await new Promise(resolve => setTimeout(resolve, 100));
  }

  return totalProcessed;
}

// Example processor
const analyzeAccounts = async (key) => {
  // Extract the trailing 32 bytes of the key (the account portion)
  const address = key.slice(-64);
  // Do something with the address
};

// System.Account storage prefix (the same prefix used in the request example above)
const accountPrefix = "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9";

const total = await processLargeStorage(accountPrefix, analyzeAccounts, 50);
console.log(`Processed ${total} accounts`);

Use Cases

  1. Large-Scale Analysis: Process millions of storage entries
  2. Data Export: Export blockchain data in batches (see the sketch after this list)
  3. Memory Management: Handle large datasets with limited memory
  4. Progressive Loading: Load data progressively in UIs
  5. Background Processing: Process storage in background tasks
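
As a concrete example of the data-export use case, the following sketch writes every key under a prefix to a file one page at a time. It reuses the StorageKeyPaginator instance from the code examples above; the prefix and output filename are placeholders.

prefix = "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9"

# Stream keys to disk page by page instead of holding them all in memory
# (assumes the `paginator` instance created in the code examples above)
with open("storage_keys.txt", "w") as out:  # hypothetical output file
    for count, key in enumerate(paginator.iterate_all_keys(prefix, page_size=500), start=1):
        out.write(key + "\n")
        if count % 1000 == 0:
            print(f"Exported {count} keys")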

Notes

  • The startKey parameter is exclusive (results start after this key); see the sketch below this list
  • Results are returned in lexicographical order
  • Empty result indicates no more keys available
  • Consider rate limiting when processing large datasets
  • Use appropriate page sizes based on your use case (10-1000)
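
To make the exclusive startKey behaviour concrete, here is a minimal sketch that fetches two consecutive pages and checks that they do not overlap; it reuses the paginator instance and the System prefix from the examples above.

prefix = "0x26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da9"

# Fetch one page, then continue from its last key
# (assumes the `paginator` instance created in the code examples above)
page_one = paginator.get_keys_paged(prefix, 10)
page_two = paginator.get_keys_paged(prefix, 10, start_key=page_one[-1])

# startKey is exclusive, so the last key of page one is never repeated
assert page_one[-1] not in page_two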