tweaks for core

Mike Stepanovic 2025-05-02 10:49:35 -06:00
parent eabf956b38
commit b63b7e4546
20 changed files with 122 additions and 253 deletions

View File

@ -1,5 +1,5 @@
{% macro standard_predicate(
input_column = 'block_number'
input_column = 'block_id'
) -%}
{%- set database_name = target.database -%}
{%- set schema_name = generate_schema_name(
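For context, the model configs later in this commit pass this macro as an incremental predicate, so the new default means the generated filter targets block_id instead of block_number. A minimal sketch of such a config, illustrative only and mirroring the gold models further down:

{{ config(
    materialized = 'incremental',
    incremental_strategy = 'delete+insert',
    unique_key = 'blocks_id',
    cluster_by = ['block_timestamp::DATE'],
    incremental_predicates = [fsc_ibc.standard_predicate()]
) }}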

View File

@ -1,14 +1,12 @@
{% macro terra_vars() %}
{% set vars = {
'API_INTEGRATION_PROD': 'aws_ton_api_prod_v2',
'API_INTEGRATION_DEV': 'aws_ton_api_dev_v2',
'GLOBAL_PROJECT_NAME': 'terra',
'GLOBAL_NODE_PROVIDER': 'quicknode',
'GLOBAL_NODE_VAULT_PATH': 'Vault/prod/terra/quicknode/mainnet',
'GLOBAL_NODE_URL': '{service}/{Authentication}',
'GLOBAL_WRAPPED_NATIVE_ASSET_ADDRESS': '0x82af49447d8a07e3bd95bd0d56f35241523fbab1',
'MAIN_SL_BLOCKS_PER_HOUR': 14200,
'MAIN_PRICES_NATIVE_SYMBOLS': 'ETH'
'GLOBAL_WRAPPED_NATIVE_ASSET_ADDRESS': '',
'MAIN_SL_BLOCKS_PER_HOUR': ,
'MAIN_PRICES_NATIVE_SYMBOLS': 'LUNA'
} %}
{{ return(vars) }}
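A hypothetical consumer of this macro might read the returned mapping as sketched below (the local variable names are illustrative; note that MAIN_SL_BLOCKS_PER_HOUR is left without a value above, so one must be supplied before the dict will render):

{# Illustrative usage: fetch the project variables and pull out a single value #}
{% set project_vars = terra_vars() %}
{% set node_provider = project_vars['GLOBAL_NODE_PROVIDER'] %}
{{ log('node provider: ' ~ node_provider, info=True) }}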

View File

@ -32,53 +32,9 @@
{% set ns.MAIN_GHA_TEST_DAILY_CRON = get_var('MAIN_GHA_TEST_DAILY_CRON', none) %}
{% set ns.MAIN_GHA_TEST_INTRADAY_CRON = get_var('MAIN_GHA_TEST_INTRADAY_CRON', none) %}
{% set ns.MAIN_GHA_TEST_MONTHLY_CRON = get_var('MAIN_GHA_TEST_MONTHLY_CRON', none) %}
{% set ns.MAIN_GHA_HEAL_MODELS_CRON = get_var('MAIN_GHA_HEAL_MODELS_CRON', none) %}
{% set ns.MAIN_GHA_FULL_OBSERVABILITY_CRON = get_var('MAIN_GHA_FULL_OBSERVABILITY_CRON', none) %}
{% set ns.MAIN_GHA_DEV_REFRESH_CRON = get_var('MAIN_GHA_DEV_REFRESH_CRON', none) %}
{% set ns.MAIN_GHA_STREAMLINE_DECODER_HISTORY_CRON = get_var('MAIN_GHA_STREAMLINE_DECODER_HISTORY_CRON', none) %}
{# Core Variables #}
{% set ns.MAIN_CORE_RECEIPTS_BY_HASH_ENABLED = get_var('MAIN_CORE_RECEIPTS_BY_HASH_ENABLED', false) %}
{% set ns.MAIN_CORE_TRACES_ARB_MODE = ns.GLOBAL_PROJECT_NAME.upper() == 'ARBITRUM' %}
{% set ns.MAIN_CORE_TRACES_SEI_MODE = ns.GLOBAL_PROJECT_NAME.upper() == 'SEI' %}
{% set ns.MAIN_CORE_TRACES_KAIA_MODE = ns.GLOBAL_PROJECT_NAME.upper() == 'KAIA' %}
{# Core Silver Variables #}
{% set ns.MAIN_CORE_SILVER_RECEIPTS_UNIQUE_KEY = 'tx_hash' if ns.MAIN_CORE_RECEIPTS_BY_HASH_ENABLED else 'block_number' %}
{% set ns.MAIN_CORE_SILVER_RECEIPTS_SOURCE_NAME = 'RECEIPTS_BY_HASH' if ns.MAIN_CORE_RECEIPTS_BY_HASH_ENABLED else 'RECEIPTS' %}
{% set ns.MAIN_CORE_SILVER_RECEIPTS_POST_HOOK = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(tx_hash, block_number)" if ns.MAIN_CORE_RECEIPTS_BY_HASH_ENABLED else "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(array_index, block_number)" %}
{% set ns.MAIN_CORE_SILVER_CONFIRM_BLOCKS_FULL_RELOAD_ENABLED = get_var('MAIN_CORE_SILVER_CONFIRM_BLOCKS_FULL_RELOAD_ENABLED', false) %}
{% set ns.MAIN_CORE_SILVER_TRACES_FULL_RELOAD_ENABLED = get_var('MAIN_CORE_SILVER_TRACES_FULL_RELOAD_ENABLED', false) %}
{% set ns.MAIN_CORE_SILVER_TRACES_FR_MAX_BLOCK = get_var('MAIN_CORE_SILVER_TRACES_FR_MAX_BLOCK', 1000000) %}
{% set ns.MAIN_CORE_SILVER_TRACES_FULL_RELOAD_BLOCKS_PER_RUN = get_var('MAIN_CORE_SILVER_TRACES_FULL_RELOAD_BLOCKS_PER_RUN', 1000000) %}
{% set ns.MAIN_CORE_SILVER_TRACES_PARTITION_KEY_ENABLED = get_var('MAIN_CORE_SILVER_TRACES_PARTITION_KEY_ENABLED', true) %}
{# Core Gold Variables #}
{% set ns.MAIN_CORE_GOLD_FACT_TRANSACTIONS_UNIQUE_KEY = 'tx_hash' if ns.MAIN_CORE_RECEIPTS_BY_HASH_ENABLED else 'block_number' %}
{% set ns.MAIN_CORE_GOLD_EZ_NATIVE_TRANSFERS_UNIQUE_KEY = 'tx_hash' if ns.MAIN_CORE_RECEIPTS_BY_HASH_ENABLED else 'block_number' %}
{% set ns.MAIN_CORE_GOLD_EZ_NATIVE_TRANSFERS_PRICES_START_DATE = get_var('MAIN_CORE_GOLD_EZ_NATIVE_TRANSFERS_PRICES_START_DATE','2024-01-01') %}
{% set ns.MAIN_CORE_GOLD_EZ_TOKEN_TRANSFERS_UNIQUE_KEY = 'tx_hash' if ns.MAIN_CORE_RECEIPTS_BY_HASH_ENABLED else 'block_number' %}
{% set ns.MAIN_CORE_GOLD_FACT_EVENT_LOGS_UNIQUE_KEY = 'tx_hash' if ns.MAIN_CORE_RECEIPTS_BY_HASH_ENABLED else 'block_number' %}
{% set ns.MAIN_CORE_GOLD_TRACES_FULL_RELOAD_ENABLED = get_var('MAIN_CORE_GOLD_TRACES_FULL_RELOAD_ENABLED', false) %}
{% set ns.MAIN_CORE_GOLD_TRACES_FR_MAX_BLOCK = get_var('MAIN_CORE_GOLD_TRACES_FR_MAX_BLOCK', 1000000) %}
{% set ns.MAIN_CORE_GOLD_TRACES_FULL_RELOAD_BLOCKS_PER_RUN = get_var('MAIN_CORE_GOLD_TRACES_FULL_RELOAD_BLOCKS_PER_RUN', 1000000) %}
{% set ns.MAIN_CORE_GOLD_TRACES_TX_STATUS_ENABLED = get_var('MAIN_CORE_GOLD_TRACES_TX_STATUS_ENABLED', false) %}
{% set ns.MAIN_CORE_GOLD_TRACES_SCHEMA_NAME = get_var('MAIN_CORE_GOLD_TRACES_SCHEMA_NAME', 'silver') %}
{% if ns.MAIN_CORE_RECEIPTS_BY_HASH_ENABLED %}
{% if ns.MAIN_CORE_TRACES_SEI_MODE %}
{% set ns.MAIN_CORE_GOLD_TRACES_UNIQUE_KEY = "concat(block_number, '-', tx_hash)" %}
{% else %}
{% set ns.MAIN_CORE_GOLD_TRACES_UNIQUE_KEY = "concat(block_number, '-', tx_position)" %}
{% endif %}
{% else %}
{% set ns.MAIN_CORE_GOLD_TRACES_UNIQUE_KEY = "block_number" %}
{% endif %}
{# Main Streamline Variables #}
{% set ns.MAIN_SL_BLOCKS_PER_HOUR = get_var('MAIN_SL_BLOCKS_PER_HOUR', 1) %}
@ -89,95 +45,24 @@
{% set ns.MAIN_SL_CHAINHEAD_DELAY_MINUTES = get_var('MAIN_SL_CHAINHEAD_DELAY_MINUTES', 3) %}
{% set ns.MAIN_SL_BLOCK_LOOKBACK_ENABLED = get_var('MAIN_SL_BLOCK_LOOKBACK_ENABLED', true) %}
{# SL Blocks Transactions Variables #}
{% set ns.MAIN_SL_BLOCKS_TRANSACTIONS_REALTIME_SQL_LIMIT = get_var('MAIN_SL_BLOCKS_TRANSACTIONS_REALTIME_SQL_LIMIT', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_BLOCKS_TRANSACTIONS_REALTIME_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_BLOCKS_TRANSACTIONS_REALTIME_PRODUCER_BATCH_SIZE', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_BLOCKS_TRANSACTIONS_REALTIME_WORKER_BATCH_SIZE = get_var('MAIN_SL_BLOCKS_TRANSACTIONS_REALTIME_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_BLOCKS_TRANSACTIONS_REALTIME_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_BLOCKS_TRANSACTIONS_REALTIME_ASYNC_CONCURRENT_REQUESTS', 100) %}
{# SL Blocks Variables #}
{% set ns.MAIN_SL_BLOCKS_REALTIME_SQL_LIMIT = get_var('MAIN_SL_BLOCKS_REALTIME_SQL_LIMIT', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_BLOCKS_REALTIME_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_BLOCKS_REALTIME_PRODUCER_BATCH_SIZE', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_BLOCKS_REALTIME_WORKER_BATCH_SIZE = get_var('MAIN_SL_BLOCKS_REALTIME_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_BLOCKS_REALTIME_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_BLOCKS_REALTIME_ASYNC_CONCURRENT_REQUESTS', 100) %}
{% set ns.MAIN_SL_BLOCKS_TRANSACTIONS_HISTORY_SQL_LIMIT = get_var('MAIN_SL_BLOCKS_TRANSACTIONS_HISTORY_SQL_LIMIT', 1000 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_BLOCKS_TRANSACTIONS_HISTORY_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_BLOCKS_TRANSACTIONS_HISTORY_PRODUCER_BATCH_SIZE', 10 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_BLOCKS_TRANSACTIONS_HISTORY_WORKER_BATCH_SIZE = get_var('MAIN_SL_BLOCKS_TRANSACTIONS_HISTORY_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_BLOCKS_TRANSACTIONS_HISTORY_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_BLOCKS_TRANSACTIONS_HISTORY_ASYNC_CONCURRENT_REQUESTS', 10) %}
{# SL Transactions Variables #}
{% set ns.MAIN_SL_TRANSACTIONS_REALTIME_SQL_LIMIT = get_var('MAIN_SL_TRANSACTIONS_REALTIME_SQL_LIMIT', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_TRANSACTIONS_REALTIME_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_TRANSACTIONS_REALTIME_PRODUCER_BATCH_SIZE', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_TRANSACTIONS_REALTIME_WORKER_BATCH_SIZE = get_var('MAIN_SL_TRANSACTIONS_REALTIME_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_TRANSACTIONS_REALTIME_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_TRANSACTIONS_REALTIME_ASYNC_CONCURRENT_REQUESTS', 100) %}
{# SL Receipts Variables #}
{% set ns.MAIN_SL_RECEIPTS_REALTIME_SQL_LIMIT = get_var('MAIN_SL_RECEIPTS_REALTIME_SQL_LIMIT', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_RECEIPTS_REALTIME_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_RECEIPTS_REALTIME_PRODUCER_BATCH_SIZE', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_RECEIPTS_REALTIME_WORKER_BATCH_SIZE = get_var('MAIN_SL_RECEIPTS_REALTIME_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_RECEIPTS_REALTIME_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_RECEIPTS_REALTIME_ASYNC_CONCURRENT_REQUESTS', 100) %}
{# SL Transaction Counts Variables #}
{% set ns.MAIN_SL_TX_COUNTS_REALTIME_SQL_LIMIT = get_var('MAIN_SL_TX_COUNTS_REALTIME_SQL_LIMIT', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR * ns.MAIN_SL_TRANSACTIONS_PER_BLOCK) %}
{% set ns.MAIN_SL_TX_COUNTS_REALTIME_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_TX_COUNTS_REALTIME_PRODUCER_BATCH_SIZE', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR * ns.MAIN_SL_TRANSACTIONS_PER_BLOCK) %}
{% set ns.MAIN_SL_TX_COUNTS_REALTIME_WORKER_BATCH_SIZE = get_var('MAIN_SL_TX_COUNTS_REALTIME_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR * ns.MAIN_SL_TRANSACTIONS_PER_BLOCK) %}
{% set ns.MAIN_SL_TX_COUNTS_REALTIME_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_TX_COUNTS_REALTIME_ASYNC_CONCURRENT_REQUESTS', 100) %}
{% set ns.MAIN_SL_RECEIPTS_HISTORY_SQL_LIMIT = get_var('MAIN_SL_RECEIPTS_HISTORY_SQL_LIMIT', 1000 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_RECEIPTS_HISTORY_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_RECEIPTS_HISTORY_PRODUCER_BATCH_SIZE', 10 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_RECEIPTS_HISTORY_WORKER_BATCH_SIZE = get_var('MAIN_SL_RECEIPTS_HISTORY_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_RECEIPTS_HISTORY_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_RECEIPTS_HISTORY_ASYNC_CONCURRENT_REQUESTS', 10) %}
{# SL Receipts By Hash Variables #}
{% set ns.MAIN_SL_RECEIPTS_BY_HASH_REALTIME_SQL_LIMIT = get_var('MAIN_SL_RECEIPTS_BY_HASH_REALTIME_SQL_LIMIT', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR * ns.MAIN_SL_TRANSACTIONS_PER_BLOCK) %}
{% set ns.MAIN_SL_RECEIPTS_BY_HASH_REALTIME_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_RECEIPTS_BY_HASH_REALTIME_PRODUCER_BATCH_SIZE', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR * ns.MAIN_SL_TRANSACTIONS_PER_BLOCK) %}
{% set ns.MAIN_SL_RECEIPTS_BY_HASH_REALTIME_WORKER_BATCH_SIZE = get_var('MAIN_SL_RECEIPTS_BY_HASH_REALTIME_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR * ns.MAIN_SL_TRANSACTIONS_PER_BLOCK) %}
{% set ns.MAIN_SL_RECEIPTS_BY_HASH_REALTIME_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_RECEIPTS_BY_HASH_REALTIME_ASYNC_CONCURRENT_REQUESTS', 100) %}
{% set ns.MAIN_SL_RECEIPTS_BY_HASH_REALTIME_TXNS_MODEL_ENABLED = get_var('MAIN_SL_RECEIPTS_BY_HASH_REALTIME_TXNS_MODEL_ENABLED', true) %}
{% set ns.MAIN_SL_RECEIPTS_BY_HASH_HISTORY_SQL_LIMIT = get_var('MAIN_SL_RECEIPTS_BY_HASH_HISTORY_SQL_LIMIT', 1000 * ns.MAIN_SL_BLOCKS_PER_HOUR * ns.MAIN_SL_TRANSACTIONS_PER_BLOCK) %}
{% set ns.MAIN_SL_RECEIPTS_BY_HASH_HISTORY_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_RECEIPTS_BY_HASH_HISTORY_PRODUCER_BATCH_SIZE', 10 * ns.MAIN_SL_BLOCKS_PER_HOUR * ns.MAIN_SL_TRANSACTIONS_PER_BLOCK) %}
{% set ns.MAIN_SL_RECEIPTS_BY_HASH_HISTORY_WORKER_BATCH_SIZE = get_var('MAIN_SL_RECEIPTS_BY_HASH_HISTORY_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR * ns.MAIN_SL_TRANSACTIONS_PER_BLOCK) %}
{% set ns.MAIN_SL_RECEIPTS_BY_HASH_HISTORY_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_RECEIPTS_BY_HASH_HISTORY_ASYNC_CONCURRENT_REQUESTS', 10) %}
{# SL Traces Variables #}
{% set ns.MAIN_SL_TRACES_REALTIME_SQL_LIMIT = get_var('MAIN_SL_TRACES_REALTIME_SQL_LIMIT', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_TRACES_REALTIME_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_TRACES_REALTIME_PRODUCER_BATCH_SIZE', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_TRACES_REALTIME_WORKER_BATCH_SIZE = get_var('MAIN_SL_TRACES_REALTIME_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_TRACES_REALTIME_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_TRACES_REALTIME_ASYNC_CONCURRENT_REQUESTS', 100) %}
{% set ns.MAIN_SL_TRACES_REALTIME_REQUEST_START_BLOCK = get_var('MAIN_SL_TRACES_REALTIME_REQUEST_START_BLOCK', none) %}
{% set ns.MAIN_SL_TRACES_HISTORY_SQL_LIMIT = get_var('MAIN_SL_TRACES_HISTORY_SQL_LIMIT', 1000 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_TRACES_HISTORY_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_TRACES_HISTORY_PRODUCER_BATCH_SIZE', 10 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_TRACES_HISTORY_WORKER_BATCH_SIZE = get_var('MAIN_SL_TRACES_HISTORY_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_TRACES_HISTORY_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_TRACES_HISTORY_ASYNC_CONCURRENT_REQUESTS', 10) %}
{% set ns.MAIN_SL_TRACES_HISTORY_REQUEST_START_BLOCK = get_var('MAIN_SL_TRACES_HISTORY_REQUEST_START_BLOCK', none) %}
{# SL Confirm Blocks Variables #}
{% set ns.MAIN_SL_CONFIRM_BLOCKS_REALTIME_SQL_LIMIT = get_var('MAIN_SL_CONFIRM_BLOCKS_REALTIME_SQL_LIMIT', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_CONFIRM_BLOCKS_REALTIME_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_CONFIRM_BLOCKS_REALTIME_PRODUCER_BATCH_SIZE', 2 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_CONFIRM_BLOCKS_REALTIME_WORKER_BATCH_SIZE = get_var('MAIN_SL_CONFIRM_BLOCKS_REALTIME_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_CONFIRM_BLOCKS_REALTIME_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_CONFIRM_BLOCKS_REALTIME_ASYNC_CONCURRENT_REQUESTS', 100) %}
{% set ns.MAIN_SL_CONFIRM_BLOCKS_HISTORY_SQL_LIMIT = get_var('MAIN_SL_CONFIRM_BLOCKS_HISTORY_SQL_LIMIT', 1000 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_CONFIRM_BLOCKS_HISTORY_PRODUCER_BATCH_SIZE = get_var('MAIN_SL_CONFIRM_BLOCKS_HISTORY_PRODUCER_BATCH_SIZE', 10 * ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_CONFIRM_BLOCKS_HISTORY_WORKER_BATCH_SIZE = get_var('MAIN_SL_CONFIRM_BLOCKS_HISTORY_WORKER_BATCH_SIZE', ns.MAIN_SL_BLOCKS_PER_HOUR) %}
{% set ns.MAIN_SL_CONFIRM_BLOCKS_HISTORY_ASYNC_CONCURRENT_REQUESTS = get_var('MAIN_SL_CONFIRM_BLOCKS_HISTORY_ASYNC_CONCURRENT_REQUESTS', 10) %}
{# Decoder SL Variables #}
{% set ns.DECODER_SL_TESTING_LIMIT = get_var('DECODER_SL_TESTING_LIMIT', none) %}
{% set ns.DECODER_SL_NEW_BUILD_ENABLED = get_var('DECODER_SL_NEW_BUILD_ENABLED', false) %}
{# SL Decoded Logs Variables #}
{% set ns.DECODER_SL_DECODED_LOGS_REALTIME_EXTERNAL_TABLE = get_var('DECODER_SL_DECODED_LOGS_REALTIME_EXTERNAL_TABLE', 'decoded_logs') %}
{% set ns.DECODER_SL_DECODED_LOGS_REALTIME_SQL_LIMIT = get_var('DECODER_SL_DECODED_LOGS_REALTIME_SQL_LIMIT', 10000000) %}
{% set ns.DECODER_SL_DECODED_LOGS_REALTIME_PRODUCER_BATCH_SIZE = get_var('DECODER_SL_DECODED_LOGS_REALTIME_PRODUCER_BATCH_SIZE', 5000000) %}
{% set ns.DECODER_SL_DECODED_LOGS_REALTIME_WORKER_BATCH_SIZE = get_var('DECODER_SL_DECODED_LOGS_REALTIME_WORKER_BATCH_SIZE',500000) %}
{% set ns.DECODER_SL_DECODED_LOGS_HISTORY_EXTERNAL_TABLE = get_var('DECODER_SL_DECODED_LOGS_HISTORY_EXTERNAL_TABLE', 'decoded_logs_history') %}
{% set ns.DECODER_SL_DECODED_LOGS_HISTORY_SQL_LIMIT = get_var('DECODER_SL_DECODED_LOGS_HISTORY_SQL_LIMIT', 8000000) %}
{% set ns.DECODER_SL_DECODED_LOGS_HISTORY_PRODUCER_BATCH_SIZE = get_var('DECODER_SL_DECODED_LOGS_HISTORY_PRODUCER_BATCH_SIZE', 400000) %}
{% set ns.DECODER_SL_DECODED_LOGS_HISTORY_WORKER_BATCH_SIZE = get_var('DECODER_SL_DECODED_LOGS_HISTORY_WORKER_BATCH_SIZE', 100000) %}
{% set ns.DECODER_SL_DECODED_LOGS_HISTORY_WAIT_SECONDS = get_var('DECODER_SL_DECODED_LOGS_HISTORY_WAIT_SECONDS', 60) %}
{# SL Contract ABIs Variables #}
{% set ns.DECODER_SL_CONTRACT_ABIS_REALTIME_SQL_LIMIT = get_var('DECODER_SL_CONTRACT_ABIS_REALTIME_SQL_LIMIT', 100) %}
{% set ns.DECODER_SL_CONTRACT_ABIS_REALTIME_PRODUCER_BATCH_SIZE = get_var('DECODER_SL_CONTRACT_ABIS_REALTIME_PRODUCER_BATCH_SIZE', 1) %}
{% set ns.DECODER_SL_CONTRACT_ABIS_REALTIME_WORKER_BATCH_SIZE = get_var('DECODER_SL_CONTRACT_ABIS_REALTIME_WORKER_BATCH_SIZE', 1) %}
{% set ns.DECODER_SL_CONTRACT_ABIS_INTERACTION_COUNT = get_var('DECODER_SL_CONTRACT_ABIS_INTERACTION_COUNT', 50) %}
{% set ns.DECODER_SL_CONTRACT_ABIS_EXPLORER_URL = get_var('DECODER_SL_CONTRACT_ABIS_EXPLORER_URL', '') %}
{% set ns.DECODER_SL_CONTRACT_ABIS_EXPLORER_URL_SUFFIX = get_var('DECODER_SL_CONTRACT_ABIS_EXPLORER_URL_SUFFIX', '') %}
{% set ns.DECODER_SL_CONTRACT_ABIS_EXPLORER_VAULT_PATH = get_var('DECODER_SL_CONTRACT_ABIS_EXPLORER_VAULT_PATH', '') %}
{% set ns.DECODER_SL_CONTRACT_ABIS_BRONZE_TABLE_ENABLED = get_var('DECODER_SL_CONTRACT_ABIS_BRONZE_TABLE_ENABLED', false) %}
{# ABIs Silver Variables #}
{% set ns.DECODER_SILVER_CONTRACT_ABIS_EXPLORER_NAME = get_var('DECODER_SILVER_CONTRACT_ABIS_EXPLORER_NAME', '') %}
{% set ns.DECODER_SILVER_CONTRACT_ABIS_ETHERSCAN_ENABLED = get_var('DECODER_SILVER_CONTRACT_ABIS_ETHERSCAN_ENABLED', false) %}
{% set ns.DECODER_SILVER_CONTRACT_ABIS_RESULT_ENABLED = get_var('DECODER_SILVER_CONTRACT_ABIS_RESULT_ENABLED', false) %}
{# Observability Variables #}
{% set ns.MAIN_OBSERV_FULL_TEST_ENABLED = get_var('MAIN_OBSERV_FULL_TEST_ENABLED', false) %}
{% set ns.MAIN_OBSERV_BLOCKS_EXCLUSION_LIST_ENABLED = get_var('MAIN_OBSERV_BLOCKS_EXCLUSION_LIST_ENABLED', false) %}
@ -199,15 +84,7 @@
{# Scores Variables #}
{% set ns.SCORES_FULL_RELOAD_ENABLED = get_var('SCORES_FULL_RELOAD_ENABLED', false) %}
{% set ns.SCORES_LIMIT_DAYS = get_var('SCORES_LIMIT_DAYS', 30) %}
{# NFT Variables #}
{% set ns.MAIN_NFT_TRANSFERS_UNIQUE_KEY = 'tx_hash' if ns.MAIN_CORE_RECEIPTS_BY_HASH_ENABLED else 'block_number' %}
{# Vertex Variables #}
{% set ns.CURATED_VERTEX_OFFCHAIN_EXCHANGE_CONTRACT = get_var('CURATED_VERTEX_OFFCHAIN_EXCHANGE_CONTRACT', '') %}
{% set ns.CURATED_VERTEX_CLEARINGHOUSE_CONTRACT = get_var('CURATED_VERTEX_CLEARINGHOUSE_CONTRACT', '') %}
{% set ns.CURATED_VERTEX_TOKEN_MAPPING = get_var('CURATED_VERTEX_TOKEN_MAPPING', {}) %}
{# Return the entire namespace as a dictionary #}
{{ return(ns) }}
{% endmacro %}
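The streamline models later in this commit read these settings through a vars object (for example vars.MAIN_SL_BLOCKS_REALTIME_SQL_LIMIT and vars.GLOBAL_NODE_VAULT_PATH). A minimal sketch of that pattern, assuming the macro above is exposed as fsc_ibc.vars() (the actual macro name is not visible in this hunk):

{# Illustrative only: pull a limit for a streamline model from the returned namespace #}
{% set vars = fsc_ibc.vars() %}
SELECT
    block_id
FROM
    {{ ref('streamline__blocks') }}
LIMIT {{ vars.MAIN_SL_BLOCKS_REALTIME_SQL_LIMIT }}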

View File

@ -3,7 +3,7 @@
source_version,
partition_function,
balances,
block_number,
block_id,
uses_receipts_by_hash
) %}
@ -32,14 +32,14 @@
r.block_timestamp :: TIMESTAMP AS block_timestamp
{% endif %}
{% if block_number %},
{% if block_id %},
COALESCE(
s.value :"BLOCK_NUMBER" :: STRING,
s.value :"block_id" :: STRING,
s.metadata :request :"data" :id :: STRING,
PARSE_JSON(
s.metadata :request :"data"
) :id :: STRING
) :: INT AS block_number
) :: INT AS block_id
{% endif %}
{% if uses_receipts_by_hash %},
s.value :"TX_HASH" :: STRING AS tx_hash
@ -57,9 +57,9 @@
{% if balances %}
JOIN {{ ref('_block_ranges') }}
r
ON r.block_number = COALESCE(
s.value :"BLOCK_NUMBER" :: INT,
s.value :"block_number" :: INT
ON r.block_id = COALESCE(
s.value :"block_id" :: INT,
s.value :"block_id" :: INT
)
{% endif %}
WHERE
@ -74,7 +74,7 @@
partition_function,
partition_join_key,
balances,
block_number,
block_id,
uses_receipts_by_hash
) %}
@ -103,15 +103,15 @@ SELECT
r.block_timestamp :: TIMESTAMP AS block_timestamp
{% endif %}
{% if block_number %},
{% if block_id %},
COALESCE(
s.value :"BLOCK_NUMBER" :: STRING,
s.value :"block_number" :: STRING,
s.value :"block_id" :: STRING,
s.value :"block_id" :: STRING,
s.metadata :request :"data" :id :: STRING,
PARSE_JSON(
s.metadata :request :"data"
) :id :: STRING
) :: INT AS block_number
) :: INT AS block_id
{% endif %}
{% if uses_receipts_by_hash %},
s.value :"TX_HASH" :: STRING AS tx_hash
@ -129,9 +129,9 @@ FROM
{% if balances %}
JOIN {{ ref('_block_ranges') }}
r
ON r.block_number = COALESCE(
s.value :"BLOCK_NUMBER" :: INT,
s.value :"block_number" :: INT
ON r.block_id = COALESCE(
s.value :"block_id" :: INT,
s.value :"block_id" :: INT
)
{% endif %}
WHERE

View File

@ -9,19 +9,20 @@
incremental_strategy = 'delete+insert',
unique_key = 'blocks_id',
cluster_by = ['block_timestamp::DATE'],
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_number)",
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_id)",
incremental_predicates = [fsc_ibc.standard_predicate()],
tags = ['gold', 'core', 'phase_2']
) }}
SELECT
block_number,
blockchain,
block_id,
block_timestamp,
chain_id,
tx_count,
proposer_address,
validator_hash,
{{ dbt_utils.generate_surrogate_key(['block_number']) }} AS fact_blocks_id,
{{ dbt_utils.generate_surrogate_key(['block_id']) }} AS fact_blocks_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp,
'{{ invocation_id }}' AS _invocation_id

View File

@ -9,13 +9,13 @@
incremental_strategy = 'delete+insert',
unique_key = 'msg_attributes_id',
cluster_by = ['block_timestamp::DATE'],
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_number, tx_id)",
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_id, tx_id)",
incremental_predicates = [fsc_ibc.standard_predicate()],
tags = ['gold', 'core', 'phase_2']
) }}
SELECT
block_number,
block_id,
block_timestamp,
tx_id,
tx_succeeded,

View File

@ -9,13 +9,13 @@
incremental_strategy = 'delete+insert',
unique_key = 'transactions_id',
cluster_by = ['block_timestamp::DATE'],
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_number, tx_id)",
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_id, tx_id)",
incremental_predicates = [fsc_ibc.standard_predicate()],
tags = ['gold', 'core', 'phase_2']
) }}
SELECT
block_number,
block_id,
block_timestamp,
tx_id,
tx_succeeded,
@ -24,8 +24,8 @@ SELECT
':',
msg_sub_group
) AS msg_group,
msg_index,
msg_type,
msg_index,
msg,
{{ dbt_utils.generate_surrogate_key(['tx_id', 'msg_index']) }} AS fact_msgs_id,
SYSDATE() AS inserted_timestamp,

View File

@ -10,23 +10,23 @@
unique_key = 'transactions_id',
cluster_by = ['block_timestamp::DATE'],
incremental_predicates = [fsc_ibc.standard_predicate()],
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_number, tx_id)",
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_id, tx_id)",
tags = ['gold', 'core', 'phase_2']
) }}
SELECT
block_number,
block_id,
block_timestamp,
tx_id,
tx_from,
tx_succeeded,
codespace,
fee,
fee_denom,
gas_used,
gas_wanted,
tx_id,
{# tx_from, #}
tx_succeeded,
tx_code,
tx_log,
{# fee, #}
{# fee_denom, #}
gas_used,
gas_wanted,
{{ dbt_utils.generate_surrogate_key(['tx_id']) }} AS fact_transactions_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp,

View File

@ -14,7 +14,8 @@
WITH bronze_blocks AS (
SELECT
block_number,
'{{ vars.GLOBAL_PROJECT_NAME }}' AS blockchain,
block_id,
COALESCE(
DATA :result :block :header :time :: TIMESTAMP,
DATA :block :header :time :: TIMESTAMP,
@ -58,7 +59,7 @@ WITH bronze_blocks AS (
{% endif %}
)
SELECT
block_number,
block_id,
block_timestamp,
chain_id,
tx_count,
@ -66,13 +67,13 @@ SELECT
validator_hash,
header,
_inserted_timestamp,
{{ dbt_utils.generate_surrogate_key(['chain_id', 'block_number']) }} AS blocks_id,
{{ dbt_utils.generate_surrogate_key(['chain_id', 'block_id']) }} AS blocks_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp,
'{{ invocation_id }}' AS _invocation_id
FROM
bronze_blocks
QUALIFY ROW_NUMBER() over (
PARTITION BY chain_id, block_number
PARTITION BY chain_id, block_id
ORDER BY _inserted_timestamp DESC
) = 1

View File

@ -14,7 +14,7 @@
WITH silver_msgs AS (
SELECT
block_number,
block_id,
block_timestamp,
tx_id,
tx_succeeded,
@ -51,7 +51,7 @@ WITH silver_msgs AS (
{% endif %}
)
SELECT
block_number,
block_id,
block_timestamp,
tx_id,
tx_succeeded,

View File

@ -15,7 +15,7 @@
WITH bronze_msgs AS (
SELECT
transactions.block_number,
transactions.block_id,
transactions.block_timestamp,
transactions.tx_id,
transactions.gas_used,
@ -105,7 +105,7 @@ GROUPING AS (
),
msgs AS (
SELECT
block_number,
block_id,
block_timestamp,
bronze_msgs.tx_id,
tx_succeeded,
@ -137,7 +137,7 @@ msgs AS (
AND bronze_msgs.msg_index = b.msg_index
)
SELECT
block_number,
block_id,
block_timestamp,
tx_id,
tx_succeeded,

View File

@ -15,7 +15,7 @@
WITH bronze_transactions AS (
SELECT
block_number,
block_id,
COALESCE(
DATA :hash,
f.value :hash
@ -40,17 +40,6 @@ WITH bronze_transactions AS (
DATA :tx_result :code,
f.value :tx_result :code
) :: INT AS tx_code,
CASE
WHEN NULLIF(
tx_code,
0
) IS NOT NULL THEN FALSE
ELSE TRUE
END AS tx_succeeded,
COALESCE(
DATA :tx_result :events,
f.value :tx_result :events
) AS msgs,
COALESCE(
TRY_PARSE_JSON(
COALESCE(
@ -69,12 +58,12 @@ WITH bronze_transactions AS (
END AS DATA,
partition_key,
COALESCE(
transactions.value :BLOCK_NUMBER_REQUESTED,
transactions.value :block_id_REQUESTED,
REPLACE(
metadata :request :params [0],
'tx.height='
)
) AS block_number_requested,
) AS block_id_requested,
inserted_timestamp AS _inserted_timestamp
FROM
{% if is_incremental() %}
@ -98,22 +87,25 @@ WITH bronze_transactions AS (
{% endif %}
)
SELECT
block_number,
block_id,
block_timestamp,
codespace,
tx_id,
tx_index,
codespace,
tx_log,
tx_succeeded,
{# tx_from, #}
{# fee, #}
{# fee_denom, #}
gas_used,
gas_wanted,
tx_code,
tx_succeeded,
msgs,
tx_log,
DATA,
partition_key,
block_number_requested,
block_id_requested,
_inserted_timestamp,
{{ dbt_utils.generate_surrogate_key(
['block_number_requested', 'tx_id']
['block_id_requested', 'tx_id']
) }} AS transactions_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp,
@ -121,6 +113,6 @@ SELECT
FROM
bronze_transactions
QUALIFY(ROW_NUMBER() over (
PARTITION BY block_number_requested, tx_id
PARTITION BY block_id_requested, tx_id
ORDER BY _inserted_timestamp DESC)
) = 1

View File

@ -5,16 +5,16 @@
{{ config (
materialized = "incremental",
incremental_strategy = 'merge',
unique_key = "block_number",
cluster_by = "ROUND(block_number, -3)",
unique_key = "block_id",
cluster_by = "ROUND(block_id, -3)",
merge_exclude_columns = ["inserted_timestamp"],
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_number)"
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_id)"
) }}
SELECT
DATA :result :block :header :height :: INT AS block_number,
DATA :result :block :header :height :: INT AS block_id,
{{ dbt_utils.generate_surrogate_key(
['block_number']
['block_id']
) }} AS complete_blocks_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp,
@ -35,6 +35,6 @@ WHERE
{{ ref('bronze__streamline_blocks_fr') }}
{% endif %}
qualify(ROW_NUMBER() over (PARTITION BY block_number
qualify(ROW_NUMBER() over (PARTITION BY block_id
ORDER BY
inserted_timestamp DESC)) = 1

View File

@ -7,23 +7,23 @@
materialized = "incremental",
incremental_strategy = 'merge',
unique_key = "complete_transactions_id",
cluster_by = "ROUND(block_number, -3)",
cluster_by = "ROUND(block_id, -3)",
merge_exclude_columns = ["inserted_timestamp"],
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_number)"
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_id)"
) }}
SELECT
COALESCE(
VALUE :BLOCK_NUMBER_REQUESTED,
VALUE :block_id_REQUESTED,
DATA :height,
VALUE :data :result :txs [0] :height
) :: INT AS block_number,
) :: INT AS block_id,
COALESCE(
VALUE :PAGE_NUMBER,
metadata :request :params [2]
) :: INT AS page_number,
{{ dbt_utils.generate_surrogate_key(
['block_number','page_number']
['block_id','page_number']
) }} AS complete_transactions_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp,

View File

@ -6,17 +6,17 @@
{{ config (
materialized = "incremental",
incremental_strategy = 'merge',
unique_key = "block_number",
cluster_by = "ROUND(block_number, -3)",
unique_key = "block_id",
cluster_by = "ROUND(block_id, -3)",
merge_exclude_columns = ["inserted_timestamp"],
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_number)"
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION on equality(block_id)"
) }}
SELECT
VALUE :BLOCK_NUMBER :: INT AS block_number,
VALUE :block_id :: INT AS block_id,
DATA :result :total_count :: INT AS tx_count,
{{ dbt_utils.generate_surrogate_key(
['block_number']
['block_id']
) }} AS complete_tx_counts_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp,
@ -33,11 +33,11 @@ WHERE
FROM
{{ this }}
)
AND block_number NOT IN (21208991)
AND block_id NOT IN (21208991)
{% else %}
{{ ref('bronze__streamline_tx_counts_fr') }}
{% endif %}
qualify(ROW_NUMBER() over (PARTITION BY block_number
qualify(ROW_NUMBER() over (PARTITION BY block_id
ORDER BY
inserted_timestamp DESC)) = 1

View File

@ -8,17 +8,17 @@
WITH blocks AS (
SELECT
block_number
block_id
FROM
{{ ref('streamline__blocks') }}
EXCEPT
SELECT
block_number
block_id
FROM
{{ ref('streamline__blocks_complete') }}
)
SELECT
ROUND(block_number, -4) :: INT AS partition_key,
ROUND(block_id, -4) :: INT AS partition_key,
{{ target.database }}.live.udf_api(
'POST',
'{{ vars.GLOBAL_NODE_URL }}',
@ -27,17 +27,17 @@ SELECT
'fsc-quantum-state', 'streamline'
),
OBJECT_CONSTRUCT(
'id', block_number,
'id', block_id,
'jsonrpc', '2.0',
'method', 'block',
'params', ARRAY_CONSTRUCT(block_number :: STRING)
'params', ARRAY_CONSTRUCT(block_id :: STRING)
),
'{{ vars.GLOBAL_NODE_VAULT_PATH }}'
) AS request
FROM
blocks
ORDER BY
block_number
block_id
LIMIT {{ vars.MAIN_SL_BLOCKS_REALTIME_SQL_LIMIT }}

View File

@ -1,7 +1,7 @@
WITH blocks AS (
SELECT
A.block_number,
A.block_id,
tx_count
FROM
{{ ref('streamline__tx_counts_complete') }} A
@ -26,7 +26,7 @@ numbers AS (
),
blocks_with_page_numbers AS (
SELECT
tt.block_number :: INT AS block_number,
tt.block_id :: INT AS block_id,
n.n AS page_number
FROM
blocks tt
@ -39,14 +39,14 @@ numbers AS (
END
EXCEPT
SELECT
block_number,
block_id,
page_number
FROM
{{ ref('streamline__transactions_complete') }}
)
SELECT
ROUND(
block_number,
block_id,
-3
) :: INT AS partition_key,
{{ target.database }}.live.udf_api(
@ -57,11 +57,11 @@ numbers AS (
'fsc-quantum-state', 'streamline'
),
OBJECT_CONSTRUCT(
'id', block_number,
'id', block_id,
'jsonrpc', '2.0',
'method', 'tx_search',
'params', ARRAY_CONSTRUCT(
'tx.height=' || block_number :: STRING,
'tx.height=' || block_id :: STRING,
TRUE,
page_number :: STRING,
'100',
@ -71,11 +71,11 @@ numbers AS (
'{{ vars.GLOBAL_NODE_VAULT_PATH }}'
) AS request,
page_number,
block_number AS block_number_requested
block_id AS block_id_requested
FROM
blocks_with_page_numbers
ORDER BY
block_number
block_id
LIMIT {{ vars.MAIN_SL_TRANSACTIONS_REALTIME_SQL_LIMIT }}

View File

@ -11,39 +11,39 @@
WITH blocks AS (
SELECT
block_number
block_id
FROM
{{ ref('streamline__blocks') }}
EXCEPT
SELECT
block_number
block_id
FROM
{{ ref('streamline__tx_counts_complete') }}
),
{# retry AS (
SELECT
NULL AS A.block_number
NULL AS A.block_id
FROM
{{ ref('streamline__complete_tx_counts') }} A
JOIN {{ ref('silver__blockchain') }}
b
ON A.block_number = b.block_id
ON A.block_id = b.block_id
WHERE
A.tx_count <> b.num_txs
),
#}
combo AS (
SELECT
block_number
block_id
FROM
blocks {# UNION
SELECT
block_number
block_id
FROM
retry #}
)
SELECT
ROUND(block_number, -3) :: INT AS partition_key,
ROUND(block_id, -3) :: INT AS partition_key,
{{ target.database }}.live.udf_api(
'POST',
'{{ vars.GLOBAL_NODE_URL }}',
@ -52,11 +52,11 @@ SELECT
'fsc-quantum-state', 'streamline'
),
OBJECT_CONSTRUCT(
'id', block_number,
'id', block_id,
'jsonrpc', '2.0',
'method', 'tx_search',
'params', ARRAY_CONSTRUCT(
'tx.height=' || block_number :: STRING, TRUE,
'tx.height=' || block_id :: STRING, TRUE,
'1',
'1',
'asc'
@ -64,11 +64,11 @@ SELECT
),
'{{ vars.GLOBAL_NODE_VAULT_PATH }}'
) AS request,
block_number
block_id
FROM
combo
ORDER BY
block_number
block_id
{# Streamline Function Call #}
{% if execute %}

View File

@ -4,7 +4,7 @@
) }}
SELECT
_id AS block_number
_id AS block_id
FROM
{{ source(
'crosschain_silver',
@ -14,7 +14,7 @@ WHERE
_id >= 5200791
AND _id <= (
SELECT
MAX(block_number)
MAX(block_id)
FROM
{{ ref('streamline__chainhead') }}
)

View File

@ -24,4 +24,4 @@ SELECT
[]
),
'<VAULT_SECRET_PATH_HERE>'
) :data :result :sync_info :latest_block_height :: INT AS block_number
) :data :result :sync_info :latest_block_height :: INT AS block_id