An 5714/traces2 hotswap avaxdexalot (#299)

* delete workflow and comment out fix

* add

* add avalanche

* add dexalot

* add tests

* delete old

* tests

* avax

* tests

---------

Co-authored-by: Austin <austin@flipsidecrypto.com>
Authored by Sam on 2025-01-31 00:01:11 +08:00, committed by GitHub
parent 56355d3d72
commit ffa43f3a23
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
51 changed files with 1207 additions and 1843 deletions

View File

@@ -1,50 +0,0 @@
name: dbt_run_traces_fix
run-name: dbt_run_traces_fix
on:
workflow_dispatch:
inputs:
use_xl_env:
description: "Use the 2xl environment"
type: boolean
schedule:
# every 15 minutes (see https://crontab.guru)
- cron: "*/15 * * * *"
env:
DBT_PROFILES_DIR: ./
ACCOUNT: "${{ vars.ACCOUNT }}"
ROLE: "${{ vars.ROLE }}"
USER: "${{ vars.USER }}"
PASSWORD: "${{ secrets.PASSWORD }}"
REGION: "${{ vars.REGION }}"
DATABASE: "${{ vars.DATABASE }}"
WAREHOUSE: "${{ vars.WAREHOUSE }}"
SCHEMA: "${{ vars.SCHEMA }}"
concurrency:
group: ${{ github.workflow }}
jobs:
run_dbt_jobs:
runs-on: ubuntu-latest
environment:
name: ${{ github.event_name == 'workflow_dispatch' && inputs.use_xl_env && 'workflow_prod_2xl' || 'workflow_prod_backfill' }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.10"
cache: "pip"
- name: install dependencies
run: |
pip install -r requirements.txt
dbt deps
- name: run traces fix model
run: |
dbt run -m "avalanche_models,tag:traces_fix"

View File

@@ -1,50 +0,0 @@
name: dbt_run_traces_fix
run-name: dbt_run_traces_fix
on:
workflow_dispatch:
inputs:
use_xl_env:
description: "Use the 2xl environment"
type: boolean
schedule:
# every 15 minutes (see https://crontab.guru)
- cron: "*/15 * * * *"
env:
DBT_PROFILES_DIR: ./
ACCOUNT: "${{ vars.ACCOUNT }}"
ROLE: "${{ vars.ROLE }}"
USER: "${{ vars.USER }}"
PASSWORD: "${{ secrets.PASSWORD }}"
REGION: "${{ vars.REGION }}"
DATABASE: "${{ vars.DATABASE }}"
WAREHOUSE: "${{ vars.WAREHOUSE }}"
SCHEMA: "${{ vars.SCHEMA }}"
concurrency:
group: ${{ github.workflow }}
jobs:
run_dbt_jobs:
runs-on: ubuntu-latest
environment:
name: ${{ github.event_name == 'workflow_dispatch' && inputs.use_xl_env && 'workflow_prod_2xl' || 'workflow_prod_backfill' }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.10"
cache: "pip"
- name: install dependencies
run: |
pip install -r requirements.txt
dbt deps
- name: run traces fix model
run: |
dbt run -m "avalanche_models,tag:traces_fix_dexalot"

View File

@@ -3,10 +3,17 @@
incremental_strategy = 'delete+insert',
unique_key = ['block_number'],
cluster_by = "block_timestamp::date",
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION ON EQUALITY(block_number,tx_hash,from_address,to_address,trace_address,type,identifier), SUBSTRING(input,output,type,trace_address,identifier,error_reason,revert_reason)",
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION ON EQUALITY(block_number,tx_hash,from_address,to_address,trace_address,type), SUBSTRING(input,output,type,trace_address,error_reason,revert_reason)",
full_refresh = false,
tags = ['dexalot_non_realtime']
) }}
{# {{ fsc_evm.gold_traces_v2(
full_reload_start_block = 25000000,
full_reload_blocks = 2000000,
schema_name = 'silver_dexalot',
uses_tx_status = TRUE
) }}
#}
WITH silver_traces AS (
SELECT
@@ -23,17 +30,13 @@ WITH silver_traces AS (
WHERE
1 = 1
{% if is_incremental() and not var(
'RELOAD_TRACES',
) %}
{% if is_incremental() and not full_reload_mode %}
AND modified_timestamp > (
SELECT
MAX(modified_timestamp)
FROM
{{ this }}
) {% elif is_incremental() and var(
'RELOAD_TRACES',
) %}
) {% elif is_incremental() and full_reload_mode %}
AND block_number BETWEEN (
SELECT
MAX(
@@ -46,12 +49,12 @@ AND (
SELECT
MAX(
block_number
) + 1000000
) + 2000000
FROM
{{ this }}
)
{% else %}
AND block_number <= 23000000
AND block_number <= 25000000
{% endif %}
),
sub_traces AS (
@@ -112,7 +115,8 @@ trace_index_sub_traces AS (
number_array ASC
) - 1 AS trace_index,
b.trace_json,
b.traces_id
b.traces_id,
b.source
FROM
silver_traces b
LEFT JOIN sub_traces s
@@ -148,9 +152,10 @@ error_logic AS (
LEFT JOIN errored_traces b1
ON b0.block_number = b1.block_number
AND b0.tx_position = b1.tx_position
AND b0.trace_address LIKE CONCAT(
AND b0.trace_address RLIKE CONCAT(
'^',
b1.trace_address,
'_%'
'(_[0-9]+)*$'
)
LEFT JOIN errored_traces b2
ON b0.block_number = b2.block_number
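A note on the LIKE-to-RLIKE switch above (my reading of the change): in a LIKE pattern the '_' is a single-character wildcard, so an errored trace at address '1' would also flag the unrelated trace '10_3' ('1', any one character, anything). The anchored regex matches only the errored trace itself and its genuine descendants. A hedged sketch with hypothetical addresses:

SELECT candidate,
       candidate RLIKE '^1(_[0-9]+)*$' AS is_self_or_descendant
FROM (VALUES ('1'), ('1_0'), ('1_0_2'), ('10_3')) AS t(candidate);
-- '1', '1_0', '1_0_2' -> TRUE; '10_3' -> FALSE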
@@ -182,7 +187,6 @@ aggregated_errors AS (
sub_traces,
number_array,
trace_index,
trace_json AS DATA,
trace_succeeded,
trace_json :error :: STRING AS error_reason,
trace_json :revertReason :: STRING AS revert_reason,
@@ -212,11 +216,6 @@ aggregated_errors AS (
trace_json :input :: STRING AS input,
trace_json :output :: STRING AS output,
trace_json :type :: STRING AS TYPE,
concat_ws(
'_',
TYPE,
trace_address
) AS identifier,
traces_id
FROM
trace_index_sub_traces
@@ -234,11 +233,10 @@ aggregated_errors AS (
t.origin_function_signature,
t.from_address AS origin_from_address,
t.to_address AS origin_to_address,
t.tx_status AS tx_succeeded,
f.tx_position,
f.trace_index,
f.from_address,
f.to_address,
f.from_address AS from_address,
f.to_address AS to_address,
f.value_hex,
f.value_precise_raw,
f.value_precise,
@@ -248,14 +246,13 @@ aggregated_errors AS (
f.input,
f.output,
f.type,
f.identifier,
f.sub_traces,
f.error_reason,
f.revert_reason,
f.data,
f.traces_id,
f.trace_succeeded,
f.trace_address
f.trace_address,
t.tx_status AS tx_succeeded
FROM
json_traces f
LEFT OUTER JOIN {{ ref('silver_dexalot__transactions') }}
@@ -263,9 +260,7 @@ aggregated_errors AS (
ON f.tx_position = t.position
AND f.block_number = t.block_number
{% if is_incremental() and not var(
'RELOAD_TRACES',
) %}
{% if is_incremental() and not full_reload_mode %}
AND t.modified_timestamp >= (
SELECT
DATEADD('hour', -24, MAX(modified_timestamp))
@@ -275,6 +270,14 @@ AND t.modified_timestamp >= (
)
{% if is_incremental() %},
overflow_blocks AS (
SELECT
DISTINCT block_number
FROM
silver_traces
WHERE
source = 'overflow'
),
heal_missing_data AS (
SELECT
t.block_number,
@@ -283,7 +286,6 @@ heal_missing_data AS (
txs.origin_function_signature,
txs.from_address AS origin_from_address,
txs.to_address AS origin_to_address,
txs.tx_status AS tx_succeeded,
t.tx_position,
t.trace_index,
t.from_address,
@@ -297,17 +299,17 @@ heal_missing_data AS (
t.input,
t.output,
t.type,
t.identifier,
t.sub_traces,
t.error_reason,
t.revert_reason,
t.fact_traces_id AS traces_id,
t.trace_succeeded,
t.trace_address
t.trace_address,
txs.tx_status AS tx_succeeded
FROM
{{ this }}
t
INNER JOIN {{ ref('silver_dexalot__transactions') }}
JOIN {{ ref('silver_dexalot__transactions') }}
txs
ON t.tx_position = txs.position
AND t.block_number = txs.block_number
@@ -325,7 +327,6 @@ all_traces AS (
origin_function_signature,
origin_from_address,
origin_to_address,
tx_succeeded,
tx_position,
trace_index,
from_address,
@@ -339,13 +340,12 @@ all_traces AS (
input,
output,
TYPE,
identifier,
sub_traces,
error_reason,
revert_reason,
traces_id,
trace_succeeded,
trace_address
trace_address,
tx_succeeded
FROM
incremental_traces
@@ -358,7 +358,6 @@ SELECT
origin_function_signature,
origin_from_address,
origin_to_address,
tx_succeeded,
tx_position,
trace_index,
from_address,
@@ -372,15 +371,44 @@ SELECT
input,
output,
TYPE,
identifier,
sub_traces,
error_reason,
revert_reason,
traces_id,
trace_succeeded,
trace_address
trace_address,
tx_succeeded
FROM
heal_missing_data
UNION ALL
SELECT
block_number,
tx_hash,
block_timestamp,
origin_function_signature,
origin_from_address,
origin_to_address,
tx_position,
trace_index,
from_address,
to_address,
value_hex,
value_precise_raw,
value_precise,
VALUE,
gas,
gas_used,
input,
output,
TYPE,
sub_traces,
error_reason,
revert_reason,
trace_succeeded,
trace_address,
tx_succeeded
FROM
{{ this }}
JOIN overflow_blocks USING (block_number)
{% endif %}
)
SELECT
@@ -388,28 +416,27 @@ SELECT
block_timestamp,
tx_hash,
tx_position,
origin_function_signature,
origin_from_address,
origin_to_address,
trace_index,
from_address,
to_address,
input,
output,
TYPE,
trace_address,
sub_traces,
VALUE,
value_precise_raw,
value_precise,
value_hex,
gas,
gas_used,
input,
output,
tx_succeeded,
origin_from_address,
origin_to_address,
origin_function_signature,
trace_succeeded,
error_reason,
revert_reason,
sub_traces,
TYPE,
trace_address,
identifier,
tx_succeeded,
{{ dbt_utils.generate_surrogate_key(
['tx_hash', 'trace_index']
) }} AS fact_traces_id,

View File

@@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_dexalot_block_lookback") }}
)
SELECT
*
FROM
{{ ref('dexalot__fact_blocks') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_dexalot_block_lookback") }}
)
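These recent-test rewrites drop the pass-through CTE, inline the lookback ref, and tighten >= to > so the boundary block itself is no longer re-tested. Assuming _dexalot_block_lookback exposes a single block_number from roughly three days back, each view now reduces to the shape:

SELECT * FROM <tested_model> WHERE block_number > (SELECT block_number FROM <lookback_model>);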

View File

@@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_dexalot_block_lookback") }}
)
SELECT
*
FROM
{{ ref('dexalot__fact_event_logs') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_dexalot_block_lookback") }}
)

View File

@@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_dexalot_block_lookback") }}
)
SELECT
*
FROM
{{ ref('dexalot__fact_traces') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_dexalot_block_lookback") }}
)

View File

@@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_dexalot_block_lookback") }}
)
SELECT
*
FROM
{{ ref('dexalot__fact_transactions') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_dexalot_block_lookback") }}
)

View File

@@ -1,448 +0,0 @@
{{ config (
materialized = "incremental",
incremental_strategy = 'delete+insert',
unique_key = ['block_number'],
cluster_by = "block_timestamp::date",
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION ON EQUALITY(block_number,tx_hash,from_address,to_address,trace_address,type), SUBSTRING(input,output,type,trace_address,error_reason,revert_reason)",
full_refresh = false,
tags = ['dexalot_non_realtime']
) }}
{# {{ fsc_evm.gold_traces_v2(
full_reload_start_block = 25000000,
full_reload_blocks = 2000000,
schema_name = 'silver_dexalot',
uses_tx_status = TRUE
) }}
#}
WITH silver_traces AS (
SELECT
block_number,
tx_position,
trace_address,
parent_trace_address,
trace_address_array,
trace_json,
traces_id,
'regular' AS source
FROM
{{ ref('silver_dexalot__traces') }}
WHERE
1 = 1
{% if is_incremental() and not full_reload_mode %}
AND modified_timestamp > (
SELECT
MAX(modified_timestamp)
FROM
{{ this }}
) {% elif is_incremental() and full_reload_mode %}
AND block_number BETWEEN (
SELECT
MAX(
block_number
)
FROM
{{ this }}
)
AND (
SELECT
MAX(
block_number
) + 2000000
FROM
{{ this }}
)
{% else %}
AND block_number <= 25000000
{% endif %}
),
sub_traces AS (
SELECT
block_number,
tx_position,
parent_trace_address,
COUNT(*) AS sub_traces
FROM
silver_traces
GROUP BY
block_number,
tx_position,
parent_trace_address
),
trace_index_array AS (
SELECT
block_number,
tx_position,
trace_address,
ARRAY_AGG(flat_value) AS number_array
FROM
(
SELECT
block_number,
tx_position,
trace_address,
IFF(
VALUE :: STRING = 'ORIGIN',
-1,
VALUE :: INT
) AS flat_value
FROM
silver_traces,
LATERAL FLATTEN (
input => trace_address_array
)
)
GROUP BY
block_number,
tx_position,
trace_address
),
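-- Illustration of the ordering trick above (hypothetical addresses; this relies
-- on Snowflake's element-wise array comparison): mapping 'ORIGIN' to -1 makes
-- the top-level call sort before its children, giving depth-first order:
--   'ORIGIN' -> [-1],  '0' -> [0],  '0_1' -> [0, 1],  '2' -> [2]
-- so the ROW_NUMBER() - 1 below assigns trace_index 0, 1, 2, 3 respectively.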
trace_index_sub_traces AS (
SELECT
b.block_number,
b.tx_position,
b.trace_address,
IFNULL(
sub_traces,
0
) AS sub_traces,
number_array,
ROW_NUMBER() over (
PARTITION BY b.block_number,
b.tx_position
ORDER BY
number_array ASC
) - 1 AS trace_index,
b.trace_json,
b.traces_id,
b.source
FROM
silver_traces b
LEFT JOIN sub_traces s
ON b.block_number = s.block_number
AND b.tx_position = s.tx_position
AND b.trace_address = s.parent_trace_address
JOIN trace_index_array n
ON b.block_number = n.block_number
AND b.tx_position = n.tx_position
AND b.trace_address = n.trace_address
),
errored_traces AS (
SELECT
block_number,
tx_position,
trace_address,
trace_json
FROM
trace_index_sub_traces
WHERE
trace_json :error :: STRING IS NOT NULL
),
error_logic AS (
SELECT
b0.block_number,
b0.tx_position,
b0.trace_address,
b0.trace_json :error :: STRING AS error,
b1.trace_json :error :: STRING AS any_error,
b2.trace_json :error :: STRING AS origin_error
FROM
trace_index_sub_traces b0
LEFT JOIN errored_traces b1
ON b0.block_number = b1.block_number
AND b0.tx_position = b1.tx_position
AND b0.trace_address RLIKE CONCAT(
'^',
b1.trace_address,
'(_[0-9]+)*$'
)
LEFT JOIN errored_traces b2
ON b0.block_number = b2.block_number
AND b0.tx_position = b2.tx_position
AND b2.trace_address = 'ORIGIN'
),
aggregated_errors AS (
SELECT
block_number,
tx_position,
trace_address,
error,
IFF(MAX(any_error) IS NULL
AND error IS NULL
AND origin_error IS NULL, TRUE, FALSE) AS trace_succeeded
FROM
error_logic
GROUP BY
block_number,
tx_position,
trace_address,
error,
origin_error),
json_traces AS (
SELECT
block_number,
tx_position,
trace_address,
sub_traces,
number_array,
trace_index,
trace_succeeded,
trace_json :error :: STRING AS error_reason,
trace_json :revertReason :: STRING AS revert_reason,
trace_json :from :: STRING AS from_address,
trace_json :to :: STRING AS to_address,
IFNULL(
trace_json :value :: STRING,
'0x0'
) AS value_hex,
IFNULL(
utils.udf_hex_to_int(
trace_json :value :: STRING
),
'0'
) AS value_precise_raw,
utils.udf_decimal_adjust(
value_precise_raw,
18
) AS value_precise,
value_precise :: FLOAT AS VALUE,
utils.udf_hex_to_int(
trace_json :gas :: STRING
) :: INT AS gas,
utils.udf_hex_to_int(
trace_json :gasUsed :: STRING
) :: INT AS gas_used,
trace_json :input :: STRING AS input,
trace_json :output :: STRING AS output,
trace_json :type :: STRING AS TYPE,
traces_id
FROM
trace_index_sub_traces
JOIN aggregated_errors USING (
block_number,
tx_position,
trace_address
)
),
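-- For intuition on the value conversion above (a hedged sketch; the example
-- value is hypothetical and 18 decimals is assumed, as the
-- udf_decimal_adjust(..., 18) call implies):
--   trace_json:value  = '0xde0b6b3a7640000'
--   value_precise_raw -> '1000000000000000000'
--   value_precise     -> '1.000000000000000000'
--   VALUE             -> 1.0 (FLOAT)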
incremental_traces AS (
SELECT
f.block_number,
t.tx_hash,
t.block_timestamp,
t.origin_function_signature,
t.from_address AS origin_from_address,
t.to_address AS origin_to_address,
f.tx_position,
f.trace_index,
f.from_address AS from_address,
f.to_address AS to_address,
f.value_hex,
f.value_precise_raw,
f.value_precise,
f.value,
f.gas,
f.gas_used,
f.input,
f.output,
f.type,
f.sub_traces,
f.error_reason,
f.revert_reason,
f.traces_id,
f.trace_succeeded,
f.trace_address,
t.tx_status AS tx_succeeded
FROM
json_traces f
LEFT OUTER JOIN {{ ref('silver_dexalot__transactions') }}
t
ON f.tx_position = t.position
AND f.block_number = t.block_number
{% if is_incremental() and not full_reload_mode %}
AND t.modified_timestamp >= (
SELECT
DATEADD('hour', -24, MAX(modified_timestamp))
FROM
{{ this }})
{% endif %}
)
{% if is_incremental() %},
overflow_blocks AS (
SELECT
DISTINCT block_number
FROM
silver_traces
WHERE
source = 'overflow'
),
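-- Interpretation note, not from the source: with incremental_strategy =
-- 'delete+insert' keyed on block_number, any block that is rewritten must be
-- reinserted in full. The UNION against {{ this }} below re-reads the rows
-- already loaded for blocks whose traces arrived via the 'overflow' source,
-- so the delete phase does not drop them.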
heal_missing_data AS (
SELECT
t.block_number,
txs.tx_hash,
txs.block_timestamp,
txs.origin_function_signature,
txs.from_address AS origin_from_address,
txs.to_address AS origin_to_address,
t.tx_position,
t.trace_index,
t.from_address,
t.to_address,
t.value_hex,
t.value_precise_raw,
t.value_precise,
t.value,
t.gas,
t.gas_used,
t.input,
t.output,
t.type,
t.sub_traces,
t.error_reason,
t.revert_reason,
t.fact_traces_id AS traces_id,
t.trace_succeeded,
t.trace_address,
txs.tx_status AS tx_succeeded
FROM
{{ this }}
t
JOIN {{ ref('silver_dexalot__transactions') }}
txs
ON t.tx_position = txs.position
AND t.block_number = txs.block_number
WHERE
t.tx_hash IS NULL
OR t.block_timestamp IS NULL
OR t.tx_succeeded IS NULL
)
{% endif %},
all_traces AS (
SELECT
block_number,
tx_hash,
block_timestamp,
origin_function_signature,
origin_from_address,
origin_to_address,
tx_position,
trace_index,
from_address,
to_address,
value_hex,
value_precise_raw,
value_precise,
VALUE,
gas,
gas_used,
input,
output,
TYPE,
sub_traces,
error_reason,
revert_reason,
trace_succeeded,
trace_address,
tx_succeeded
FROM
incremental_traces
{% if is_incremental() %}
UNION ALL
SELECT
block_number,
tx_hash,
block_timestamp,
origin_function_signature,
origin_from_address,
origin_to_address,
tx_position,
trace_index,
from_address,
to_address,
value_hex,
value_precise_raw,
value_precise,
VALUE,
gas,
gas_used,
input,
output,
TYPE,
sub_traces,
error_reason,
revert_reason,
trace_succeeded,
trace_address,
tx_succeeded
FROM
heal_missing_data
UNION ALL
SELECT
block_number,
tx_hash,
block_timestamp,
origin_function_signature,
origin_from_address,
origin_to_address,
tx_position,
trace_index,
from_address,
to_address,
value_hex,
value_precise_raw,
value_precise,
VALUE,
gas,
gas_used,
input,
output,
TYPE,
sub_traces,
error_reason,
revert_reason,
trace_succeeded,
trace_address,
tx_succeeded
FROM
{{ this }}
JOIN overflow_blocks USING (block_number)
{% endif %}
)
SELECT
block_number,
block_timestamp,
tx_hash,
tx_position,
trace_index,
from_address,
to_address,
input,
output,
TYPE,
trace_address,
sub_traces,
VALUE,
value_precise_raw,
value_precise,
value_hex,
gas,
gas_used,
origin_from_address,
origin_to_address,
origin_function_signature,
trace_succeeded,
error_reason,
revert_reason,
tx_succeeded,
{{ dbt_utils.generate_surrogate_key(
['tx_hash', 'trace_index']
) }} AS fact_traces_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp
FROM
all_traces qualify(ROW_NUMBER() over(PARTITION BY block_number, tx_position, trace_index
ORDER BY
modified_timestamp DESC, block_timestamp DESC nulls last)) = 1
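The trailing QUALIFY is the dedup step: it keeps exactly one row per (block_number, tx_position, trace_index), preferring the most recently modified row. The same pattern in isolation (hypothetical table t):

SELECT *
FROM t
QUALIFY ROW_NUMBER() OVER (
    PARTITION BY block_number, tx_position, trace_index
    ORDER BY modified_timestamp DESC, block_timestamp DESC NULLS LAST
) = 1;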

View File

@@ -7,10 +7,150 @@
full_refresh = false,
tags = ['dexalot_non_realtime']
) }}
{{ fsc_evm.silver_traces_v1(
full_reload_start_block = 25000000,
full_reload_blocks = 2000000,
schema_name = 'bronze_dexalot',
use_partition_key = true
{# {{ fsc_evm.silver_traces_v1(
full_reload_start_block = 25000000,
full_reload_blocks = 2000000,
schema_name = 'bronze_dexalot',
use_partition_key = TRUE
) }}
#}
WITH bronze_traces AS (
SELECT
block_number,
partition_key,
VALUE :array_index :: INT AS tx_position,
DATA :result AS full_traces,
_inserted_timestamp
FROM
{% if is_incremental() and not full_reload_mode %}
{{ ref('bronze_dexalot__streamline_traces') }}
WHERE
_inserted_timestamp >= (
SELECT
MAX(_inserted_timestamp) _inserted_timestamp
FROM
{{ this }}
)
AND DATA :result IS NOT NULL {% elif is_incremental() and full_reload_mode %}
{{ ref('bronze_dexalot__streamline_fr_traces') }}
WHERE
partition_key BETWEEN (
SELECT
MAX(partition_key) - 100000
FROM
{{ this }}
)
AND (
SELECT
MAX(partition_key) + 2000000
FROM
{{ this }}
)
{% else %}
{{ ref('bronze_dexalot__streamline_fr_traces') }}
WHERE
partition_key <= 25000000
{% endif %}
qualify(ROW_NUMBER() over (PARTITION BY block_number, tx_position
ORDER BY
_inserted_timestamp DESC)) = 1
),
flatten_traces AS (
SELECT
block_number,
tx_position,
partition_key,
IFF(
path IN (
'result',
'result.value',
'result.type',
'result.to',
'result.input',
'result.gasUsed',
'result.gas',
'result.from',
'result.output',
'result.error',
'result.revertReason',
'result.time',
'gasUsed',
'gas',
'type',
'to',
'from',
'value',
'input',
'error',
'output',
'time',
'revertReason'
),
'ORIGIN',
REGEXP_REPLACE(REGEXP_REPLACE(path, '[^0-9]+', '_'), '^_|_$', '')
) AS trace_address,
_inserted_timestamp,
OBJECT_AGG(
key,
VALUE
) AS trace_json,
CASE
WHEN trace_address = 'ORIGIN' THEN NULL
WHEN POSITION(
'_' IN trace_address
) = 0 THEN 'ORIGIN'
ELSE REGEXP_REPLACE(
trace_address,
'_[0-9]+$',
'',
1,
1
)
END AS parent_trace_address,
SPLIT(
trace_address,
'_'
) AS trace_address_array
FROM
bronze_traces txs,
TABLE(
FLATTEN(
input => PARSE_JSON(
txs.full_traces
),
recursive => TRUE
)
) f
WHERE
f.index IS NULL
AND f.key != 'calls'
AND f.path != 'result'
GROUP BY
block_number,
tx_position,
partition_key,
trace_address,
_inserted_timestamp
)
SELECT
block_number,
tx_position,
trace_address,
parent_trace_address,
trace_address_array,
trace_json,
partition_key,
_inserted_timestamp,
{{ dbt_utils.generate_surrogate_key(
['block_number', 'tx_position', 'trace_address']
) }} AS traces_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp,
'{{ invocation_id }}' AS _invocation_id
FROM
flatten_traces qualify(ROW_NUMBER() over(PARTITION BY traces_id
ORDER BY
_inserted_timestamp DESC)) = 1
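The trace_address derivation in this model turns a recursive FLATTEN path into an underscore-delimited address, and parent_trace_address strips the last segment. A worked example (hypothetical path):

SELECT
    REGEXP_REPLACE(REGEXP_REPLACE('result.calls[0].calls[2]', '[^0-9]+', '_'), '^_|_$', '') AS trace_address,
    -- first replace yields '_0_2_', stripping edge underscores yields '0_2'
    REGEXP_REPLACE('0_2', '_[0-9]+$', '', 1, 1) AS parent_trace_address; -- '0'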

View File

@@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_dexalot_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver_dexalot__blocks') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_dexalot_block_lookback") }}
)

View File

@@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_dexalot_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver_dexalot__logs') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_dexalot_block_lookback") }}
)

View File

@@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_dexalot_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver_dexalot__receipts') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_dexalot_block_lookback") }}
)

View File

@@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_dexalot_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver_dexalot__traces') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_dexalot_block_lookback") }}
)

View File

@@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_dexalot_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver_dexalot__transactions') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_dexalot_block_lookback") }}
)

View File

@@ -1,12 +1,11 @@
{{ config (
materialized = "incremental",
incremental_strategy = 'delete+insert',
unique_key = ["block_number", "tx_position", "trace_address"],
tags = ['traces_fix_dexalot']
{# {{ config (
materialized = "incremental",
incremental_strategy = 'delete+insert',
unique_key = ["block_number", "tx_position", "trace_address"],
tags = ['traces_fix_dexalot']
) }}
{% set batch_query %}
SELECT
MAX(next_batch_id) AS next_batch_id
FROM
@@ -161,4 +160,4 @@ aggregated_errors AS (
prod_trace_succeeded
FROM
batch
CROSS JOIN final_errors
CROSS JOIN final_errors #}

View File

@@ -1,43 +1,490 @@
{{ config(
materialized = 'view',
persist_docs ={ "relation": true,
"columns": true }
{{ config (
materialized = "incremental",
incremental_strategy = 'delete+insert',
unique_key = "block_number",
incremental_predicates = [fsc_evm.standard_predicate()],
cluster_by = "block_timestamp::date",
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION",
tags = ['non_realtime'],
full_refresh = false
) }}
{# {{ fsc_evm.gold_traces_v1(
full_reload_start_block = 5000000,
full_reload_blocks = 2000000
) }}
#}
WITH silver_traces AS (
SELECT
block_number,
tx_position,
trace_address,
parent_trace_address,
trace_address_array,
trace_json,
traces_id,
'regular' AS source
FROM
{{ ref('silver__traces') }}
WHERE
1 = 1
{% if is_incremental() and not full_reload_mode %}
AND modified_timestamp > (
SELECT
MAX(modified_timestamp)
FROM
{{ this }}
) {% elif is_incremental() and full_reload_mode %}
AND block_number BETWEEN (
SELECT
MAX(
block_number
)
FROM
{{ this }}
)
AND (
SELECT
MAX(
block_number
) + 2000000
FROM
{{ this }}
)
{% else %}
AND block_number <= 5000000
{% endif %}
),
sub_traces AS (
SELECT
block_number,
tx_position,
parent_trace_address,
COUNT(*) AS sub_traces
FROM
silver_traces
GROUP BY
block_number,
tx_position,
parent_trace_address
),
trace_index_array AS (
SELECT
block_number,
tx_position,
trace_address,
ARRAY_AGG(flat_value) AS number_array
FROM
(
SELECT
block_number,
tx_position,
trace_address,
IFF(
VALUE :: STRING = 'ORIGIN',
-1,
VALUE :: INT
) AS flat_value
FROM
silver_traces,
LATERAL FLATTEN (
input => trace_address_array
)
)
GROUP BY
block_number,
tx_position,
trace_address
),
trace_index_sub_traces AS (
SELECT
b.block_number,
b.tx_position,
b.trace_address,
IFNULL(
sub_traces,
0
) AS sub_traces,
number_array,
ROW_NUMBER() over (
PARTITION BY b.block_number,
b.tx_position
ORDER BY
number_array ASC
) - 1 AS trace_index,
b.trace_json,
b.traces_id,
b.source
FROM
silver_traces b
LEFT JOIN sub_traces s
ON b.block_number = s.block_number
AND b.tx_position = s.tx_position
AND b.trace_address = s.parent_trace_address
JOIN trace_index_array n
ON b.block_number = n.block_number
AND b.tx_position = n.tx_position
AND b.trace_address = n.trace_address
),
errored_traces AS (
SELECT
block_number,
tx_position,
trace_address,
trace_json
FROM
trace_index_sub_traces
WHERE
trace_json :error :: STRING IS NOT NULL
),
error_logic AS (
SELECT
b0.block_number,
b0.tx_position,
b0.trace_address,
b0.trace_json :error :: STRING AS error,
b1.trace_json :error :: STRING AS any_error,
b2.trace_json :error :: STRING AS origin_error
FROM
trace_index_sub_traces b0
LEFT JOIN errored_traces b1
ON b0.block_number = b1.block_number
AND b0.tx_position = b1.tx_position
AND b0.trace_address RLIKE CONCAT(
'^',
b1.trace_address,
'(_[0-9]+)*$'
)
LEFT JOIN errored_traces b2
ON b0.block_number = b2.block_number
AND b0.tx_position = b2.tx_position
AND b2.trace_address = 'ORIGIN'
),
aggregated_errors AS (
SELECT
block_number,
tx_position,
trace_address,
error,
IFF(MAX(any_error) IS NULL
AND error IS NULL
AND origin_error IS NULL, TRUE, FALSE) AS trace_succeeded
FROM
error_logic
GROUP BY
block_number,
tx_position,
trace_address,
error,
origin_error),
json_traces AS (
SELECT
block_number,
tx_position,
trace_address,
sub_traces,
number_array,
trace_index,
trace_json AS DATA,
trace_succeeded,
trace_json :error :: STRING AS error_reason,
trace_json :revertReason :: STRING AS revert_reason,
trace_json :from :: STRING AS from_address,
trace_json :to :: STRING AS to_address,
IFNULL(
trace_json :value :: STRING,
'0x0'
) AS value_hex,
IFNULL(
utils.udf_hex_to_int(
trace_json :value :: STRING
),
'0'
) AS value_precise_raw,
utils.udf_decimal_adjust(
value_precise_raw,
18
) AS value_precise,
value_precise :: FLOAT AS VALUE,
utils.udf_hex_to_int(
trace_json :gas :: STRING
) :: INT AS gas,
utils.udf_hex_to_int(
trace_json :gasUsed :: STRING
) :: INT AS gas_used,
trace_json :input :: STRING AS input,
trace_json :output :: STRING AS output,
trace_json :type :: STRING AS TYPE,
concat_ws(
'_',
TYPE,
trace_address
) AS identifier,
IFF(
trace_succeeded,
'SUCCESS',
'FAIL'
) AS trace_status,
traces_id
FROM
trace_index_sub_traces
JOIN aggregated_errors USING (
block_number,
tx_position,
trace_address
)
),
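-- Hedged illustration of the legacy fields this version still carries
-- (values hypothetical): for TYPE = 'CALL' and trace_address = '0_1',
--   identifier   -> 'CALL_0_1'  (concat_ws('_', TYPE, trace_address))
--   trace_status -> 'SUCCESS' when trace_succeeded, otherwise 'FAIL'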
incremental_traces AS (
SELECT
f.block_number,
t.tx_hash,
t.block_timestamp,
t.origin_function_signature,
t.from_address AS origin_from_address,
t.to_address AS origin_to_address,
t.tx_status,
f.tx_position,
f.trace_index,
f.from_address AS from_address,
f.to_address AS to_address,
f.value_hex,
f.value_precise_raw,
f.value_precise,
f.value,
f.gas,
f.gas_used,
f.input,
f.output,
f.type,
f.identifier,
f.sub_traces,
f.error_reason,
f.revert_reason,
f.trace_status,
f.data,
f.traces_id,
f.trace_succeeded,
f.trace_address,
IFF(
t.tx_status = 'SUCCESS',
TRUE,
FALSE
) AS tx_succeeded
FROM
json_traces f
LEFT OUTER JOIN {{ ref('silver__transactions') }}
t
ON f.tx_position = t.position
AND f.block_number = t.block_number
{% if is_incremental() and not full_reload_mode %}
AND t.modified_timestamp >= (
SELECT
DATEADD('hour', -24, MAX(modified_timestamp))
FROM
{{ this }})
{% endif %}
)
{% if is_incremental() %},
overflow_blocks AS (
SELECT
DISTINCT block_number
FROM
silver_traces
WHERE
source = 'overflow'
),
heal_missing_data AS (
SELECT
t.block_number,
txs.tx_hash,
txs.block_timestamp,
txs.origin_function_signature,
txs.from_address AS origin_from_address,
txs.to_address AS origin_to_address,
txs.tx_status,
t.tx_position,
t.trace_index,
t.from_address,
t.to_address,
t.value_hex,
t.value_precise_raw,
t.value_precise,
t.value,
t.gas,
t.gas_used,
t.input,
t.output,
t.type,
t.identifier,
t.sub_traces,
t.error_reason,
t.revert_reason,
t.trace_status,
t.data,
t.fact_traces_id AS traces_id,
t.trace_succeeded,
t.trace_address,
IFF(
txs.tx_status = 'SUCCESS',
TRUE,
FALSE
) AS tx_succeeded
FROM
{{ this }}
t
JOIN {{ ref('silver__transactions') }}
txs
ON t.tx_position = txs.position
AND t.block_number = txs.block_number
WHERE
t.tx_hash IS NULL
OR t.block_timestamp IS NULL
OR t.tx_status IS NULL
)
{% endif %},
all_traces AS (
SELECT
block_number,
tx_hash,
block_timestamp,
origin_function_signature,
origin_from_address,
origin_to_address,
tx_status,
tx_position,
trace_index,
from_address,
to_address,
value_hex,
value_precise_raw,
value_precise,
VALUE,
gas,
gas_used,
input,
output,
TYPE,
identifier,
sub_traces,
error_reason,
revert_reason,
trace_status,
DATA,
trace_succeeded,
trace_address,
tx_succeeded
FROM
incremental_traces
{% if is_incremental() %}
UNION ALL
SELECT
tx_hash,
block_number,
tx_hash,
block_timestamp,
origin_function_signature,
origin_from_address,
origin_to_address,
tx_status,
tx_position,
trace_index,
from_address,
to_address,
avax_value AS VALUE,
avax_value_precise_raw AS value_precise_raw,
avax_value_precise AS value_precise,
value_hex,
value_precise_raw,
value_precise,
VALUE,
gas,
gas_used,
input,
output,
TYPE,
identifier,
sub_traces,
error_reason,
revert_reason,
trace_status,
DATA,
trace_succeeded,
trace_address,
tx_succeeded
FROM
heal_missing_data
UNION ALL
SELECT
block_number,
tx_hash,
block_timestamp,
origin_function_signature,
origin_from_address,
origin_to_address,
tx_status,
tx_position,
trace_index,
from_address,
to_address,
value_hex,
value_precise_raw,
value_precise,
VALUE,
gas,
gas_used,
input,
output,
TYPE,
identifier,
sub_traces,
error_reason,
revert_reason,
trace_status,
DATA,
trace_succeeded,
trace_address,
tx_succeeded
FROM
{{ this }}
JOIN overflow_blocks USING (block_number)
{% endif %}
)
SELECT
block_number,
block_timestamp,
tx_hash,
tx_position,
trace_index,
from_address,
to_address,
input,
output,
TYPE,
trace_address,
sub_traces,
VALUE,
value_precise_raw,
value_precise,
value_hex,
gas,
gas_used,
origin_from_address,
origin_to_address,
origin_function_signature,
trace_succeeded,
error_reason,
revert_reason,
tx_succeeded,
identifier,
DATA,
tx_status,
sub_traces,
trace_status,
error_reason,
trace_index,
COALESCE (
traces_id,
{{ dbt_utils.generate_surrogate_key(
['tx_hash', 'trace_index']
) }}
) AS fact_traces_id,
COALESCE(
inserted_timestamp,
'2000-01-01'
) AS inserted_timestamp,
COALESCE(
modified_timestamp,
'2000-01-01'
) AS modified_timestamp
{{ dbt_utils.generate_surrogate_key(
['tx_hash', 'trace_index']
) }} AS fact_traces_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp
FROM
{{ ref('silver__traces') }}
all_traces qualify(ROW_NUMBER() over(PARTITION BY block_number, tx_position, trace_index
ORDER BY
modified_timestamp DESC, block_timestamp DESC nulls last)) = 1

View File

@@ -1,25 +1,37 @@
version: 2
models:
- name: core__fact_traces
description: '{{ doc("avax_traces_table_doc") }}'
description: '{{ doc("avax_traces_table_doc") }}'
columns:
- name: BLOCK_NUMBER
description: '{{ doc("avax_traces_block_no") }}'
description: '{{ doc("avax_traces_block_no") }}'
- name: BLOCK_TIMESTAMP
description: '{{ doc("avax_traces_blocktime") }}'
- name: TX_HASH
description: '{{ doc("avax_traces_tx_hash") }}'
description: '{{ doc("avax_traces_tx_hash") }}'
- name: TX_POSITION
description: The position of the transaction within the block.
- name: TRACE_INDEX
description: The index of the trace within the transaction.
- name: FROM_ADDRESS
description: '{{ doc("avax_traces_from") }}'
- name: TO_ADDRESS
description: '{{ doc("avax_traces_to") }}'
- name: AVAX_VALUE
description: '{{ doc("amount_deprecation") }}'
- name: AVAX_VALUE_PRECISE_RAW
description: '{{ doc("amount_deprecation") }}'
- name: AVAX_VALUE_PRECISE
description: '{{ doc("amount_deprecation") }}'
- name: INPUT
description: '{{ doc("avax_traces_input") }}'
- name: OUTPUT
description: '{{ doc("avax_traces_output") }}'
- name: TYPE
description: '{{ doc("avax_traces_type") }}'
- name: TRACE_ADDRESS
description: This field represents the position of the trace within the transaction.
- name: SUB_TRACES
description: '{{ doc("avax_traces_sub") }}'
- name: IDENTIFIER
description: '{{ doc("avax_traces_identifier") }}'
- name: DATA
description: '{{ doc("avax_traces_call_data") }}'
- name: VALUE
description: '{{ doc("avax_traces_value") }}'
- name: VALUE_PRECISE_RAW
@@ -30,33 +42,27 @@ models:
description: '{{ doc("avax_traces_gas") }}'
- name: GAS_USED
description: '{{ doc("avax_traces_gas_used") }}'
- name: INPUT
description: '{{ doc("avax_traces_input") }}'
- name: OUTPUT
description: '{{ doc("avax_traces_output") }}'
- name: TYPE
description: '{{ doc("avax_traces_type") }}'
- name: IDENTIFIER
description: '{{ doc("avax_traces_identifier") }}'
- name: DATA
description: '{{ doc("avax_traces_call_data") }}'
- name: TX_STATUS
description: '{{ doc("avax_tx_status") }}'
- name: SUB_TRACES
description: '{{ doc("avax_traces_sub") }}'
- name: ORIGIN_FROM_ADDRESS
description: The from address at the transaction level.
- name: ORIGIN_TO_ADDRESS
description: The to address at the transaction level.
- name: ORIGIN_FUNCTION_SIGNATURE
description: The function signature at the transaction level.
- name: TRACE_STATUS
description: The status of the trace, either `SUCCESS` or `FAIL`
- name: TRACE_SUCCEEDED
description: Whether the trace succeeded or failed
- name: ERROR_REASON
description: The reason for the trace failure, if any.
- name: TRACE_INDEX
description: The index of the trace within the transaction.
- name: REVERT_REASON
description: The reason for the trace revert, if any.
- name: TX_STATUS
description: '{{ doc("avax_tx_status") }}'
- name: TX_SUCCEEDED
description: Whether the transaction succeeded or failed
- name: FACT_TRACES_ID
description: '{{ doc("pk") }}'
description: '{{ doc("pk") }}'
- name: INSERTED_TIMESTAMP
description: '{{ doc("inserted_timestamp") }}'
description: '{{ doc("inserted_timestamp") }}'
- name: MODIFIED_TIMESTAMP
description: '{{ doc("modified_timestamp") }}'
description: '{{ doc("modified_timestamp") }}'

View File

@@ -0,0 +1,9 @@
{{ config (
materialized = "view",
tags = ['full_test']
) }}
SELECT
*
FROM
{{ ref('core__fact_traces') }}

View File

@@ -0,0 +1,120 @@
version: 2
models:
- name: test_gold__fact_traces_full
description: "This is a view used to test all of the gold fact traces model."
tests:
- dbt_utils.unique_combination_of_columns:
combination_of_columns:
- TX_HASH
- TRACE_INDEX
- fsc_utils.sequence_gaps:
partition_by:
- TX_HASH
column_name: TRACE_INDEX
where: BLOCK_TIMESTAMP < CURRENT_DATE - 1 AND TX_HASH IS NOT NULL
columns:
- name: BLOCK_NUMBER
tests:
- not_null
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- NUMBER
- FLOAT
- name: BLOCK_TIMESTAMP
tests:
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: hour
interval: 2
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- TIMESTAMP_LTZ
- TIMESTAMP_NTZ
- name: TX_HASH
tests:
- not_null
- dbt_expectations.expect_column_values_to_match_regex:
regex: 0[xX][0-9a-fA-F]+
- name: TX_POSITION
tests:
- not_null
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- NUMBER
- FLOAT
- name: TRACE_INDEX
tests:
- not_null
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- NUMBER
- FLOAT
- name: FROM_ADDRESS
tests:
- not_null:
where: TYPE <> 'SELFDESTRUCT'
- dbt_expectations.expect_column_values_to_match_regex:
regex: 0[xX][0-9a-fA-F]+
- name: TO_ADDRESS
tests:
- dbt_expectations.expect_column_values_to_match_regex:
regex: 0[xX][0-9a-fA-F]+
where: TO_ADDRESS IS NOT NULL
- name: INPUT
tests:
- not_null
- name: TYPE
tests:
- not_null
- name: TRACE_ADDRESS
tests:
- not_null
- name: SUB_TRACES
tests:
- not_null
- name: VALUE
tests:
- not_null
- name: VALUE_PRECISE_RAW
tests:
- not_null
- name: VALUE_PRECISE
tests:
- not_null
- name: VALUE_HEX
tests:
- not_null
- name: GAS
tests:
- not_null
- name: GAS_USED
tests:
- not_null
- name: ORIGIN_FROM_ADDRESS
tests:
- not_null
- name: ORIGIN_FUNCTION_SIGNATURE
tests:
- not_null
- name: TRACE_SUCCEEDED
tests:
- not_null
- name: TX_SUCCEEDED
tests:
- not_null
- name: FACT_TRACES_ID
tests:
- not_null
- name: INSERTED_TIMESTAMP
tests:
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: hour
interval: 2
- name: MODIFIED_TIMESTAMP
tests:
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: hour
interval: 2
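The two model-level tests are the interesting ones: unique_combination_of_columns asserts (TX_HASH, TRACE_INDEX) is a key, and sequence_gaps asserts trace_index has no gaps within a transaction. A sketch of the kind of query the gap test implies (an assumption about fsc_utils.sequence_gaps, not its actual source):

SELECT
    tx_hash,
    trace_index,
    LAG(trace_index) OVER (PARTITION BY tx_hash ORDER BY trace_index) AS prev_index
FROM test_gold__fact_traces_full
WHERE tx_hash IS NOT NULL
QUALIFY trace_index - prev_index > 1; -- any surviving row is a gap and fails the test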

View File

@@ -0,0 +1,16 @@
{{ config (
materialized = "view",
tags = ['recent_test']
) }}
SELECT
*
FROM
{{ ref('core__fact_traces') }}
WHERE
block_number > (
SELECT
block_number
FROM
{{ ref('_block_lookback') }}
)

View File

@@ -0,0 +1,120 @@
version: 2
models:
- name: test_gold__fact_traces_recent
description: "This is a view used to test the last three days of fact traces."
tests:
- dbt_utils.unique_combination_of_columns:
combination_of_columns:
- TX_HASH
- TRACE_INDEX
- fsc_utils.sequence_gaps:
partition_by:
- TX_HASH
column_name: TRACE_INDEX
where: TX_HASH IS NOT NULL
columns:
- name: BLOCK_NUMBER
tests:
- not_null
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- NUMBER
- FLOAT
- name: BLOCK_TIMESTAMP
tests:
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: hour
interval: 2
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- TIMESTAMP_LTZ
- TIMESTAMP_NTZ
- name: TX_HASH
tests:
- not_null
- dbt_expectations.expect_column_values_to_match_regex:
regex: 0[xX][0-9a-fA-F]+
- name: TX_POSITION
tests:
- not_null
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- NUMBER
- FLOAT
- name: TRACE_INDEX
tests:
- not_null
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- NUMBER
- FLOAT
- name: FROM_ADDRESS
tests:
- not_null:
where: TYPE <> 'SELFDESTRUCT'
- dbt_expectations.expect_column_values_to_match_regex:
regex: 0[xX][0-9a-fA-F]+
- name: TO_ADDRESS
tests:
- dbt_expectations.expect_column_values_to_match_regex:
regex: 0[xX][0-9a-fA-F]+
where: TO_ADDRESS IS NOT NULL
- name: INPUT
tests:
- not_null
- name: TYPE
tests:
- not_null
- name: TRACE_ADDRESS
tests:
- not_null
- name: SUB_TRACES
tests:
- not_null
- name: VALUE
tests:
- not_null
- name: VALUE_PRECISE_RAW
tests:
- not_null
- name: VALUE_PRECISE
tests:
- not_null
- name: VALUE_HEX
tests:
- not_null
- name: GAS
tests:
- not_null
- name: GAS_USED
tests:
- not_null
- name: ORIGIN_FROM_ADDRESS
tests:
- not_null
- name: ORIGIN_FUNCTION_SIGNATURE
tests:
- not_null
- name: TRACE_SUCCEEDED
tests:
- not_null
- name: TX_SUCCEEDED
tests:
- not_null
- name: FACT_TRACES_ID
tests:
- not_null
- name: INSERTED_TIMESTAMP
tests:
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: hour
interval: 2
- name: MODIFIED_TIMESTAMP
tests:
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: hour
interval: 2

View File

@@ -81,7 +81,7 @@ broken_blocks AS (
FROM
{{ ref("silver__transactions") }}
tx
LEFT JOIN {{ ref("silver__traces") }}
LEFT JOIN {{ ref("core__fact_traces") }}
tr USING (
block_number,
tx_hash

View File

@@ -11,9 +11,9 @@ WITH base AS (
from_address,
to_address,
MIN(block_number) AS start_block,
MAX(_inserted_timestamp) AS _inserted_timestamp
MAX(modified_timestamp) AS _inserted_timestamp
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
WHERE
TYPE = 'DELEGATECALL'
AND trace_status = 'SUCCESS'
@@ -21,7 +21,7 @@ WITH base AS (
AND from_address != to_address -- exclude self-calls
{% if is_incremental() %}
AND _inserted_timestamp >= (
AND modified_timestamp >= (
SELECT
MAX(_inserted_timestamp) - INTERVAL '24 hours'
FROM
@@ -99,5 +99,6 @@ FROM
FINAL f
JOIN {{ ref('silver__created_contracts') }} C
ON f.contract_address = C.created_contract_address
JOIN {{ ref('silver__created_contracts') }} p
ON f.proxy_address = p.created_contract_address
JOIN {{ ref('silver__created_contracts') }}
p
ON f.proxy_address = p.created_contract_address

View File

@@ -473,7 +473,7 @@ valid_traces AS (
SELECT
base_address
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
JOIN function_mapping
ON function_sig = LEFT(
input,

View File

@@ -13,7 +13,7 @@ SELECT
to_address AS created_contract_address,
from_address AS creator_address,
input AS created_contract_input,
_inserted_timestamp,
modified_timestamp AS _inserted_timestamp,
{{ dbt_utils.generate_surrogate_key(
['to_address']
) }} AS created_contracts_id,
@@ -21,7 +21,7 @@ SELECT
SYSDATE() AS modified_timestamp,
'{{ invocation_id }}' AS _invocation_id
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
WHERE
TYPE ILIKE 'create%'
AND to_address IS NOT NULL

View File

@@ -1,490 +0,0 @@
{{ config (
materialized = "incremental",
incremental_strategy = 'delete+insert',
unique_key = "block_number",
incremental_predicates = [fsc_evm.standard_predicate()],
cluster_by = "block_timestamp::date",
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION",
tags = ['non_realtime'],
full_refresh = false
) }}
{# {{ fsc_evm.gold_traces_v1(
full_reload_start_block = 5000000,
full_reload_blocks = 2000000
) }}
#}
WITH silver_traces AS (
SELECT
block_number,
tx_position,
trace_address,
parent_trace_address,
trace_address_array,
trace_json,
traces_id,
'regular' AS source
FROM
{{ ref('silver__traces2') }}
WHERE
1 = 1
{% if is_incremental() and not full_reload_mode %}
AND modified_timestamp > (
SELECT
MAX(modified_timestamp)
FROM
{{ this }}
) {% elif is_incremental() and full_reload_mode %}
AND block_number BETWEEN (
SELECT
MAX(
block_number
)
FROM
{{ this }}
)
AND (
SELECT
MAX(
block_number
) + 2000000
FROM
{{ this }}
)
{% else %}
AND block_number <= 5000000
{% endif %}
),
sub_traces AS (
SELECT
block_number,
tx_position,
parent_trace_address,
COUNT(*) AS sub_traces
FROM
silver_traces
GROUP BY
block_number,
tx_position,
parent_trace_address
),
trace_index_array AS (
SELECT
block_number,
tx_position,
trace_address,
ARRAY_AGG(flat_value) AS number_array
FROM
(
SELECT
block_number,
tx_position,
trace_address,
IFF(
VALUE :: STRING = 'ORIGIN',
-1,
VALUE :: INT
) AS flat_value
FROM
silver_traces,
LATERAL FLATTEN (
input => trace_address_array
)
)
GROUP BY
block_number,
tx_position,
trace_address
),
trace_index_sub_traces AS (
SELECT
b.block_number,
b.tx_position,
b.trace_address,
IFNULL(
sub_traces,
0
) AS sub_traces,
number_array,
ROW_NUMBER() over (
PARTITION BY b.block_number,
b.tx_position
ORDER BY
number_array ASC
) - 1 AS trace_index,
b.trace_json,
b.traces_id,
b.source
FROM
silver_traces b
LEFT JOIN sub_traces s
ON b.block_number = s.block_number
AND b.tx_position = s.tx_position
AND b.trace_address = s.parent_trace_address
JOIN trace_index_array n
ON b.block_number = n.block_number
AND b.tx_position = n.tx_position
AND b.trace_address = n.trace_address
),
errored_traces AS (
SELECT
block_number,
tx_position,
trace_address,
trace_json
FROM
trace_index_sub_traces
WHERE
trace_json :error :: STRING IS NOT NULL
),
error_logic AS (
SELECT
b0.block_number,
b0.tx_position,
b0.trace_address,
b0.trace_json :error :: STRING AS error,
b1.trace_json :error :: STRING AS any_error,
b2.trace_json :error :: STRING AS origin_error
FROM
trace_index_sub_traces b0
LEFT JOIN errored_traces b1
ON b0.block_number = b1.block_number
AND b0.tx_position = b1.tx_position
AND b0.trace_address RLIKE CONCAT(
'^',
b1.trace_address,
'(_[0-9]+)*$'
)
LEFT JOIN errored_traces b2
ON b0.block_number = b2.block_number
AND b0.tx_position = b2.tx_position
AND b2.trace_address = 'ORIGIN'
),
aggregated_errors AS (
SELECT
block_number,
tx_position,
trace_address,
error,
IFF(MAX(any_error) IS NULL
AND error IS NULL
AND origin_error IS NULL, TRUE, FALSE) AS trace_succeeded
FROM
error_logic
GROUP BY
block_number,
tx_position,
trace_address,
error,
origin_error),
json_traces AS (
SELECT
block_number,
tx_position,
trace_address,
sub_traces,
number_array,
trace_index,
trace_json AS DATA,
trace_succeeded,
trace_json :error :: STRING AS error_reason,
trace_json :revertReason :: STRING AS revert_reason,
trace_json :from :: STRING AS from_address,
trace_json :to :: STRING AS to_address,
IFNULL(
trace_json :value :: STRING,
'0x0'
) AS value_hex,
IFNULL(
utils.udf_hex_to_int(
trace_json :value :: STRING
),
'0'
) AS value_precise_raw,
utils.udf_decimal_adjust(
value_precise_raw,
18
) AS value_precise,
value_precise :: FLOAT AS VALUE,
utils.udf_hex_to_int(
trace_json :gas :: STRING
) :: INT AS gas,
utils.udf_hex_to_int(
trace_json :gasUsed :: STRING
) :: INT AS gas_used,
trace_json :input :: STRING AS input,
trace_json :output :: STRING AS output,
trace_json :type :: STRING AS TYPE,
concat_ws(
'_',
TYPE,
trace_address
) AS identifier,
IFF(
trace_succeeded,
'SUCCESS',
'FAIL'
) AS trace_status,
traces_id
FROM
trace_index_sub_traces
JOIN aggregated_errors USING (
block_number,
tx_position,
trace_address
)
),
incremental_traces AS (
SELECT
f.block_number,
t.tx_hash,
t.block_timestamp,
t.origin_function_signature,
t.from_address AS origin_from_address,
t.to_address AS origin_to_address,
t.tx_status,
f.tx_position,
f.trace_index,
f.from_address AS from_address,
f.to_address AS to_address,
f.value_hex,
f.value_precise_raw,
f.value_precise,
f.value,
f.gas,
f.gas_used,
f.input,
f.output,
f.type,
f.identifier,
f.sub_traces,
f.error_reason,
f.revert_reason,
f.trace_status,
f.data,
f.traces_id,
f.trace_succeeded,
f.trace_address,
IFF(
t.tx_status = 'SUCCESS',
TRUE,
FALSE
) AS tx_succeeded
FROM
json_traces f
LEFT OUTER JOIN {{ ref('silver__transactions') }}
t
ON f.tx_position = t.position
AND f.block_number = t.block_number
{% if is_incremental() and not full_reload_mode %}
AND t.modified_timestamp >= (
SELECT
DATEADD('hour', -24, MAX(modified_timestamp))
FROM
{{ this }})
{% endif %}
)
{% if is_incremental() %},
overflow_blocks AS (
SELECT
DISTINCT block_number
FROM
silver_traces
WHERE
source = 'overflow'
),
heal_missing_data AS (
SELECT
t.block_number,
txs.tx_hash,
txs.block_timestamp,
txs.origin_function_signature,
txs.from_address AS origin_from_address,
txs.to_address AS origin_to_address,
txs.tx_status,
t.tx_position,
t.trace_index,
t.from_address,
t.to_address,
t.value_hex,
t.value_precise_raw,
t.value_precise,
t.value,
t.gas,
t.gas_used,
t.input,
t.output,
t.type,
t.identifier,
t.sub_traces,
t.error_reason,
t.revert_reason,
t.trace_status,
t.data,
t.fact_traces_id AS traces_id,
t.trace_succeeded,
t.trace_address,
IFF(
txs.tx_status = 'SUCCESS',
TRUE,
FALSE
) AS tx_succeeded
FROM
{{ this }}
t
JOIN {{ ref('silver__transactions') }}
txs
ON t.tx_position = txs.position
AND t.block_number = txs.block_number
WHERE
t.tx_hash IS NULL
OR t.block_timestamp IS NULL
OR t.tx_status IS NULL
)
{% endif %},
all_traces AS (
SELECT
block_number,
tx_hash,
block_timestamp,
origin_function_signature,
origin_from_address,
origin_to_address,
tx_status,
tx_position,
trace_index,
from_address,
to_address,
value_hex,
value_precise_raw,
value_precise,
VALUE,
gas,
gas_used,
input,
output,
TYPE,
identifier,
sub_traces,
error_reason,
revert_reason,
trace_status,
DATA,
trace_succeeded,
trace_address,
tx_succeeded
FROM
incremental_traces
{% if is_incremental() %}
UNION ALL
SELECT
block_number,
tx_hash,
block_timestamp,
origin_function_signature,
origin_from_address,
origin_to_address,
tx_status,
tx_position,
trace_index,
from_address,
to_address,
value_hex,
value_precise_raw,
value_precise,
VALUE,
gas,
gas_used,
input,
output,
TYPE,
identifier,
sub_traces,
error_reason,
revert_reason,
trace_status,
DATA,
trace_succeeded,
trace_address,
tx_succeeded
FROM
heal_missing_data
UNION ALL
SELECT
block_number,
tx_hash,
block_timestamp,
origin_function_signature,
origin_from_address,
origin_to_address,
tx_status,
tx_position,
trace_index,
from_address,
to_address,
value_hex,
value_precise_raw,
value_precise,
VALUE,
gas,
gas_used,
input,
output,
TYPE,
identifier,
sub_traces,
error_reason,
revert_reason,
trace_status,
DATA,
trace_succeeded,
trace_address,
tx_succeeded
FROM
{{ this }}
JOIN overflow_blocks USING (block_number)
{% endif %}
)
SELECT
block_number,
block_timestamp,
tx_hash,
tx_position,
trace_index,
from_address,
to_address,
input,
output,
TYPE,
trace_address,
sub_traces,
VALUE,
value_precise_raw,
value_precise,
value_hex,
gas,
gas_used,
origin_from_address,
origin_to_address,
origin_function_signature,
trace_succeeded,
error_reason,
revert_reason,
tx_succeeded,
identifier,
DATA,
tx_status,
trace_status,
{{ dbt_utils.generate_surrogate_key(
['tx_hash', 'trace_index']
) }} AS fact_traces_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp
FROM
all_traces qualify(ROW_NUMBER() over(PARTITION BY block_number, tx_position, trace_index
ORDER BY
modified_timestamp DESC, block_timestamp DESC nulls last)) = 1

View File

@@ -15,17 +15,26 @@ WITH avax_base AS (
block_timestamp,
from_address,
to_address,
avax_value,
VALUE AS avax_value,
identifier,
_call_id,
concat_ws(
'-',
block_number,
tx_position,
CONCAT(
TYPE,
'_',
trace_address
)
) AS _call_id,
input,
_INSERTED_TIMESTAMP,
avax_value_precise_raw,
avax_value_precise,
modified_timestamp AS _INSERTED_TIMESTAMP,
value_precise_raw AS avax_value_precise_raw,
value_precise AS avax_value_precise,
tx_position,
trace_index
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
WHERE
avax_value > 0
AND tx_status = 'SUCCESS'
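Because identifier no longer exists on core__fact_traces, this hunk rebuilds _call_id from its parts. A worked example with hypothetical values, block_number 100, tx_position 2, TYPE 'CALL', trace_address '0_1':

SELECT concat_ws('-', 100, 2, CONCAT('CALL', '_', '0_1')) AS _call_id; -- '100-2-CALL_0_1'

which matches the old concat_ws('-', block_number, tx_position, identifier) form.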

View File

@@ -31,10 +31,10 @@ function_calls AS (
SELECT
to_address AS contract_address,
COUNT(*) AS function_call_count,
MAX(_inserted_timestamp) AS max_inserted_timestamp_traces,
MAX(modified_timestamp) AS max_inserted_timestamp_traces,
MAX(block_number) AS latest_call_block
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
WHERE
tx_status = 'SUCCESS'
AND trace_status = 'SUCCESS'
@@ -43,7 +43,7 @@ function_calls AS (
AND input <> '0x'
{% if is_incremental() %}
AND _inserted_timestamp > (
AND modified_timestamp > (
SELECT
MAX(max_inserted_timestamp_traces)
FROM

View File

@@ -3,393 +3,13 @@
materialized = "incremental",
incremental_strategy = 'delete+insert',
unique_key = "block_number",
cluster_by = "block_timestamp::date, _inserted_timestamp::date",
cluster_by = ['modified_timestamp::DATE','partition_key'],
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION",
full_refresh = false,
tags = ['non_realtime']
) }}
WITH bronze_traces AS (
SELECT
block_number,
VALUE :array_index :: INT AS tx_position,
DATA :result AS full_traces,
_inserted_timestamp
FROM
{% if is_incremental() %}
{{ ref('bronze__traces') }}
WHERE
_inserted_timestamp >= (
SELECT
MAX(_inserted_timestamp) _inserted_timestamp
FROM
{{ this }}
)
AND DATA :result IS NOT NULL
{% else %}
{{ ref('bronze__traces_fr') }}
WHERE
_partition_by_block_id <= 5000000
AND DATA :result IS NOT NULL
{% endif %}
qualify(ROW_NUMBER() over (PARTITION BY block_number, tx_position
ORDER BY
_inserted_timestamp DESC)) = 1
),
flatten_traces AS (
SELECT
block_number,
tx_position,
IFF(
path IN (
'result',
'result.value',
'result.type',
'result.to',
'result.input',
'result.gasUsed',
'result.gas',
'result.from',
'result.output',
'result.error',
'result.revertReason',
'gasUsed',
'gas',
'type',
'to',
'from',
'value',
'input',
'error',
'output',
'revertReason'
),
'ORIGIN',
REGEXP_REPLACE(REGEXP_REPLACE(path, '[^0-9]+', '_'), '^_|_$', '')
) AS trace_address,
_inserted_timestamp,
OBJECT_AGG(
key,
VALUE
) AS trace_json,
CASE
WHEN trace_address = 'ORIGIN' THEN NULL
WHEN POSITION(
'_' IN trace_address
) = 0 THEN 'ORIGIN'
ELSE REGEXP_REPLACE(
trace_address,
'_[0-9]+$',
'',
1,
1
)
END AS parent_trace_address,
SPLIT(
trace_address,
'_'
) AS str_array
FROM
bronze_traces txs,
TABLE(
FLATTEN(
input => PARSE_JSON(
txs.full_traces
),
recursive => TRUE
)
) f
WHERE
f.index IS NULL
AND f.key != 'calls'
AND f.path != 'result'
GROUP BY
block_number,
tx_position,
trace_address,
_inserted_timestamp
),
sub_traces AS (
SELECT
block_number,
tx_position,
parent_trace_address,
COUNT(*) AS sub_traces
FROM
flatten_traces
GROUP BY
block_number,
tx_position,
parent_trace_address
),
num_array AS (
SELECT
block_number,
tx_position,
trace_address,
ARRAY_AGG(flat_value) AS num_array
FROM
(
SELECT
block_number,
tx_position,
trace_address,
IFF(
VALUE :: STRING = 'ORIGIN',
-1,
VALUE :: INT
) AS flat_value
FROM
flatten_traces,
LATERAL FLATTEN (
input => str_array
)
)
GROUP BY
block_number,
tx_position,
trace_address
),
cleaned_traces AS (
SELECT
b.block_number,
b.tx_position,
b.trace_address,
IFNULL(
sub_traces,
0
) AS sub_traces,
num_array,
ROW_NUMBER() over (
PARTITION BY b.block_number,
b.tx_position
ORDER BY
num_array ASC
) - 1 AS trace_index,
trace_json,
b._inserted_timestamp
FROM
flatten_traces b
LEFT JOIN sub_traces s
ON b.block_number = s.block_number
AND b.tx_position = s.tx_position
AND b.trace_address = s.parent_trace_address
JOIN num_array n
ON b.block_number = n.block_number
AND b.tx_position = n.tx_position
AND b.trace_address = n.trace_address
),
final_traces AS (
SELECT
tx_position,
trace_index,
block_number,
trace_address,
trace_json :error :: STRING AS error_reason,
trace_json :from :: STRING AS from_address,
trace_json :to :: STRING AS to_address,
IFNULL(
utils.udf_hex_to_int(
trace_json :value :: STRING
),
'0'
) AS avax_value_precise_raw,
utils.udf_decimal_adjust(
avax_value_precise_raw,
18
) AS avax_value_precise,
avax_value_precise :: FLOAT AS avax_value,
utils.udf_hex_to_int(
trace_json :gas :: STRING
) :: INT AS gas,
utils.udf_hex_to_int(
trace_json :gasUsed :: STRING
) :: INT AS gas_used,
trace_json :input :: STRING AS input,
trace_json :output :: STRING AS output,
trace_json :type :: STRING AS TYPE,
concat_ws(
'_',
TYPE,
trace_address
) AS identifier,
concat_ws(
'-',
block_number,
tx_position,
identifier
) AS _call_id,
_inserted_timestamp,
trace_json AS DATA,
sub_traces
FROM
cleaned_traces
),
new_records AS (
SELECT
f.block_number,
t.tx_hash,
t.block_timestamp,
t.tx_status,
f.tx_position,
f.trace_index,
f.from_address,
f.to_address,
f.avax_value_precise_raw,
f.avax_value_precise,
f.avax_value,
f.gas,
f.gas_used,
f.input,
f.output,
f.type,
f.identifier,
f.sub_traces,
f.error_reason,
IFF(
f.error_reason IS NULL,
'SUCCESS',
'FAIL'
) AS trace_status,
f.data,
IFF(
t.tx_hash IS NULL
OR t.block_timestamp IS NULL
OR t.tx_status IS NULL,
TRUE,
FALSE
) AS is_pending,
f._call_id,
f._inserted_timestamp
FROM
final_traces f
LEFT OUTER JOIN {{ ref('silver__transactions') }}
t
ON f.tx_position = t.position
AND f.block_number = t.block_number
{% if is_incremental() %}
AND t._INSERTED_TIMESTAMP >= (
SELECT
DATEADD('hour', -24, MAX(_inserted_timestamp))
FROM
{{ this }})
{% endif %}
)
{% if is_incremental() %},
missing_data AS (
SELECT
t.block_number,
txs.tx_hash,
txs.block_timestamp,
txs.tx_status,
t.tx_position,
t.trace_index,
t.from_address,
t.to_address,
t.avax_value_precise_raw,
t.avax_value_precise,
t.avax_value,
t.gas,
t.gas_used,
t.input,
t.output,
t.type,
t.identifier,
t.sub_traces,
t.error_reason,
t.trace_status,
t.data,
FALSE AS is_pending,
t._call_id,
GREATEST(
t._inserted_timestamp,
txs._inserted_timestamp
) AS _inserted_timestamp
FROM
{{ this }}
t
INNER JOIN {{ ref('silver__transactions') }}
txs
ON t.tx_position = txs.position
AND t.block_number = txs.block_number
WHERE
t.is_pending
)
{% endif %},
FINAL AS (
SELECT
block_number,
tx_hash,
block_timestamp,
tx_status,
tx_position,
trace_index,
from_address,
to_address,
avax_value_precise_raw,
avax_value_precise,
avax_value,
gas,
gas_used,
input,
output,
TYPE,
identifier,
sub_traces,
error_reason,
trace_status,
DATA,
is_pending,
_call_id,
_inserted_timestamp
FROM
new_records
{% if is_incremental() %}
UNION
SELECT
block_number,
tx_hash,
block_timestamp,
tx_status,
tx_position,
trace_index,
from_address,
to_address,
avax_value_precise_raw,
avax_value_precise,
avax_value,
gas,
gas_used,
input,
output,
TYPE,
identifier,
sub_traces,
error_reason,
trace_status,
DATA,
is_pending,
_call_id,
_inserted_timestamp
FROM
missing_data
{% endif %}
)
SELECT
*,
{{ dbt_utils.generate_surrogate_key(
['tx_hash', 'trace_index']
) }} AS traces_id,
SYSDATE() AS inserted_timestamp,
SYSDATE() AS modified_timestamp,
'{{ invocation_id }}' AS _invocation_id
FROM
FINAL qualify(ROW_NUMBER() over(PARTITION BY block_number, tx_position, trace_index
ORDER BY
_inserted_timestamp DESC, is_pending ASC)) = 1
{{ silver_traces_v1(
full_reload_start_block = 5000000,
full_reload_blocks = 3000000,
use_partition_key = true
) }}

View File

@@ -1,15 +0,0 @@
-- depends_on: {{ ref('bronze__traces') }}
{{ config (
materialized = "incremental",
incremental_strategy = 'delete+insert',
unique_key = "block_number",
cluster_by = ['modified_timestamp::DATE','partition_key'],
post_hook = "ALTER TABLE {{ this }} ADD SEARCH OPTIMIZATION",
full_refresh = false,
tags = ['non_realtime']
) }}
{{ silver_traces_v1(
full_reload_start_block = 5000000,
full_reload_blocks = 3000000,
use_partition_key = true
) }}

View File

@@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver__blocks') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_block_lookback") }}
)
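Each recent-test view in this commit gets the same simplification: the one-row last_3_days CTE is inlined as a direct subquery on _block_lookback, and the bound tightens from >= to > so the lookback block itself is excluded. The resulting view body, sketched in full (the config block is only partially visible in these hunks; materialized = 'view' is an assumption):

{{ config (
    materialized = 'view',
    tags = ['recent_test']
) }}

SELECT
    *
FROM
    {{ ref('silver__blocks') }}
WHERE
    -- strictly greater: start just above the lookback block
    block_number > (
        SELECT
            block_number
        FROM
            {{ ref("_block_lookback") }}
    )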
View File
@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver__confirmed_blocks') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_block_lookback") }}
)
View File
@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver__decoded_logs') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_block_lookback") }}
)
View File
@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver__logs') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_block_lookback") }}
)
View File
@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver__receipts') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_block_lookback") }}
)
View File
@ -1,58 +1,52 @@
version: 2
models:
- name: test_silver__traces_full
description: "This is a view used to test the full silver traces model."
tests:
- dbt_utils.unique_combination_of_columns:
combination_of_columns:
- BLOCK_NUMBER
- TX_POSITION
- TRACE_INDEX
- TRACES_ID
columns:
- name: BLOCK_NUMBER
tests:
- not_null
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- NUMBER
- FLOAT
- name: BLOCK_TIMESTAMP
- not_null
- name: TX_POSITION
tests:
- not_null:
where: NOT IS_PENDING
- not_null
- name: TRACE_ADDRESS
tests:
- not_null
- name: TRACE_JSON
tests:
- not_null
- name: _INSERTED_TIMESTAMP
tests:
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: day
interval: 1
datepart: hour
interval: 2
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- TIMESTAMP_LTZ
- TIMESTAMP_NTZ
- name: TX_HASH
tests:
- not_null:
where: NOT IS_PENDING
- dbt_expectations.expect_column_values_to_match_regex:
regex: 0[xX][0-9a-fA-F]+
- name: FROM_ADDRESS
tests:
- not_null:
where: TYPE <> 'SELFDESTRUCT'
- dbt_expectations.expect_column_values_to_match_regex:
regex: 0[xX][0-9a-fA-F]+
- name: TO_ADDRESS
tests:
- dbt_expectations.expect_column_values_to_match_regex:
regex: 0[xX][0-9a-fA-F]+
where: TO_ADDRESS IS NOT NULL
- name: IDENTIFIER
- name: INSERTED_TIMESTAMP
tests:
- not_null
- name: AVAX_VALUE
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: hour
interval: 2
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- TIMESTAMP_LTZ
- TIMESTAMP_NTZ
- name: MODIFIED_TIMESTAMP
tests:
- not_null
- name: GAS
tests:
- not_null
- name: GAS_USED
tests:
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: hour
interval: 2
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- TIMESTAMP_LTZ
- TIMESTAMP_NTZ
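Net effect of the schema-test rewrite: the source-era columns (TRACE_ADDRESS, TRACE_JSON, _INSERTED_TIMESTAMP) drop out in favor of the gold audit columns, and freshness tightens from one day to two hours. The tightened check in isolation, using the same dbt_expectations syntax as the file above:

- name: MODIFIED_TIMESTAMP
  tests:
    - not_null
    - dbt_expectations.expect_row_values_to_have_recent_data:
        datepart: hour
        interval: 2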
View File
@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver__traces') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_block_lookback") }}
)
View File
@ -1,34 +1,52 @@
version: 2
models:
- name: test_silver__traces_recent
description: "This is a view used to test the last three days of traces."
tests:
- dbt_utils.unique_combination_of_columns:
combination_of_columns:
- BLOCK_NUMBER
- TX_POSITION
- TRACE_INDEX
- TRACES_ID
columns:
- name: BLOCK_NUMBER
tests:
- not_null
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- NUMBER
- FLOAT
- name: BLOCK_TIMESTAMP
- not_null
- name: TX_POSITION
tests:
- not_null:
where: NOT IS_PENDING
- not_null
- name: TRACE_ADDRESS
tests:
- not_null
- name: TRACE_JSON
tests:
- not_null
- name: _INSERTED_TIMESTAMP
tests:
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: day
interval: 1
datepart: hour
interval: 2
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- TIMESTAMP_LTZ
- TIMESTAMP_NTZ
- name: TX_HASH
- name: INSERTED_TIMESTAMP
tests:
- not_null:
where: NOT IS_PENDING
- dbt_expectations.expect_column_values_to_match_regex:
regex: 0[xX][0-9a-fA-F]+
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: hour
interval: 2
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- TIMESTAMP_LTZ
- TIMESTAMP_NTZ
- name: MODIFIED_TIMESTAMP
tests:
- not_null
- dbt_expectations.expect_row_values_to_have_recent_data:
datepart: hour
interval: 2
- dbt_expectations.expect_column_values_to_be_in_type_list:
column_type_list:
- TIMESTAMP_LTZ
- TIMESTAMP_NTZ
View File
@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver__transactions') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
{{ ref("_block_lookback") }}
)
View File
@ -3,21 +3,14 @@
tags = ['recent_test']
) }}
WITH last_3_days AS (
SELECT
block_number
FROM
{{ ref("_block_lookback") }}
)
SELECT
*
FROM
{{ ref('silver__transfers') }}
WHERE
block_number >= (
block_number > (
SELECT
block_number
FROM
last_3_days
)
{{ ref("_block_lookback") }}
)
View File
@ -1,12 +1,11 @@
{{ config (
materialized = "incremental",
incremental_strategy = 'delete+insert',
unique_key = ["block_number", "tx_position", "trace_address"],
tags = ['traces_fix']
{# {{ config (
materialized = "incremental",
incremental_strategy = 'delete+insert',
unique_key = ["block_number", "tx_position", "trace_address"],
tags = ['traces_fix']
) }}
{% set batch_query %}
SELECT
MAX(next_batch_id) AS next_batch_id
FROM
@ -161,4 +160,4 @@ aggregated_errors AS (
prod_trace_succeeded
FROM
batch
CROSS JOIN final_errors
CROSS JOIN final_errors #}
View File
@ -13,10 +13,19 @@ WITH base_contracts AS (
block_timestamp,
from_address,
to_address AS contract_address,
_call_id,
_inserted_timestamp
concat_ws(
'-',
block_number,
tx_position,
CONCAT(
TYPE,
'_',
trace_address
)
) AS _call_id,
modified_timestamp AS _inserted_timestamp
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
WHERE
from_address = LOWER('0x808d7c71ad2ba3FA531b068a2417C63106BC0949')
AND TYPE ILIKE 'create%'
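This model and the curated models that follow switch their reads from silver__traces to core__fact_traces, so the silver-only helper columns are rebuilt inline: _inserted_timestamp is aliased from modified_timestamp, and _call_id is reassembled as block_number-tx_position-<type>_<trace_address> (the old silver identifier). A worked example with hypothetical values:

SELECT
    concat_ws(
        '-',
        12345678,                      -- block_number
        3,                             -- tx_position
        CONCAT('CREATE', '_', '0_1')   -- type || '_' || trace_address
    ) AS _call_id;                     -- => '12345678-3-CREATE_0_1'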
View File
@ -14,15 +14,24 @@ WITH contract_deployments AS (
block_timestamp,
from_address AS deployer_address,
to_address AS contract_address,
_call_id,
_inserted_timestamp,
concat_ws(
'-',
block_number,
tx_position,
CONCAT(
TYPE,
'_',
trace_address
)
) AS _call_id,
modified_timestamp AS _inserted_timestamp,
ROW_NUMBER() over (
ORDER BY
contract_address
) AS row_num
FROM
{{ ref(
'silver__traces'
'core__fact_traces'
) }}
WHERE
-- curve contract deployers
View File
@ -13,9 +13,9 @@ WITH contract_deployments AS (
block_timestamp,
from_address AS deployer_address,
to_address AS contract_address,
_inserted_timestamp
modified_timestamp AS _inserted_timestamp
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
WHERE
from_address IN (
'0x05fb0089bec6d00b2f01f4096eb0e0488c79cd91',
View File
@ -13,9 +13,9 @@ WITH contract_deployments AS (
block_timestamp,
from_address AS deployer_address,
to_address AS contract_address,
_inserted_timestamp
modified_timestamp AS _inserted_timestamp
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
WHERE
from_address = '0x416a7989a964c9ed60257b064efc3a30fe6bf2ee'
AND TYPE ILIKE 'create%'
@ -44,4 +44,4 @@ SELECT
contract_address AS pool_address,
_inserted_timestamp
FROM
contract_deployments
contract_deployments
View File
@ -6,15 +6,14 @@
tags = ['reorg','curated']
) }}
WITH
contracts AS (
WITH contracts AS (
SELECT
*
FROM
{{ ref('silver__contracts') }}
),
log_pull AS (
SELECT
l.tx_hash,
l.block_number,
@ -26,15 +25,13 @@ log_pull AS (
l._inserted_timestamp,
l._log_id
FROM
{{ ref('silver__logs') }} l
LEFT JOIN
contracts c
ON
l.contract_address = c.contract_address
{{ ref('silver__logs') }}
l
LEFT JOIN contracts C
ON l.contract_address = C.contract_address
WHERE
topics [0] :: STRING = '0x7ac369dbd14fa5ea3f473ed67cc9d598964a77501540ba6751eb0b3decf5870d'
AND
TOKEN_NAME LIKE '%Benqi %'
AND token_name LIKE '%Benqi %'
{% if is_incremental() %}
AND l._inserted_timestamp >= (
@ -53,7 +50,7 @@ traces_pull AS (
from_address AS token_address,
to_address AS underlying_asset
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
WHERE
tx_hash IN (
SELECT
@ -72,8 +69,8 @@ contract_pull AS (
token_name,
token_symbol,
token_decimals,
CASE
WHEN TOKEN_NAME = 'Benqi AVAX' THEN '0xb31f66aa3c1e785363f0875a1b74e27b85fd66c7'
CASE
WHEN token_name = 'Benqi AVAX' THEN '0xb31f66aa3c1e785363f0875a1b74e27b85fd66c7'
ELSE t.underlying_asset
END AS underlying_asset,
l._inserted_timestamp,
@ -104,4 +101,4 @@ FROM
LEFT JOIN contracts C
ON C.contract_address = l.underlying_asset
WHERE
l.token_name IS NOT NULL
l.token_name IS NOT NULL
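The traces_pull CTE recovers each market's underlying asset from the market-creation transaction's traces, reading from_address as the qiToken and to_address as the token it wraps (an inference from the column aliases; the macro-level intent is not shown here). The native-AVAX market has no underlying contract to call, hence the special case pinning Benqi AVAX to WAVAX:

CASE
    WHEN token_name = 'Benqi AVAX'
        THEN '0xb31f66aa3c1e785363f0875a1b74e27b85fd66c7'  -- WAVAX stands in for native AVAX
    ELSE t.underlying_asset                                -- from the trace's to_address
END AS underlying_asset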
View File
@ -6,15 +6,14 @@
tags = ['reorg','curated']
) }}
WITH
contracts AS (
WITH contracts AS (
SELECT
*
FROM
{{ ref('silver__contracts') }}
),
log_pull AS (
SELECT
l.tx_hash,
l.block_number,
@ -26,16 +25,13 @@ log_pull AS (
l._inserted_timestamp,
l._log_id
FROM
{{ ref('silver__logs') }} l
LEFT JOIN
contracts c
ON
l.contract_address = c.contract_address
{{ ref('silver__logs') }}
l
LEFT JOIN contracts C
ON l.contract_address = C.contract_address
WHERE
topics [0] :: STRING = '0x17d6db5ffe5a3d1c3d7a98194dca4f7d808d621e6ff9077ed574513d553a2a85'
AND
TOKEN_NAME LIKE '%Banker Joe %'
AND token_name LIKE '%Banker Joe %'
{% if is_incremental() %}
AND l._inserted_timestamp >= (
@ -54,7 +50,7 @@ traces_pull AS (
from_address AS token_address,
to_address AS underlying_asset
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
WHERE
tx_hash IN (
SELECT
@ -102,4 +98,4 @@ FROM
LEFT JOIN contracts C
ON C.contract_address = l.underlying_asset
WHERE
l.token_name IS NOT NULL
l.token_name IS NOT NULL
View File
@ -184,7 +184,7 @@ old_native_transfers AS (
trace_index,
from_address,
to_address,
avax_value,
VALUE AS avax_value,
avax_value * pow(
10,
18
@ -200,7 +200,7 @@ old_native_transfers AS (
0
) AS intra_grouping
FROM
{{ ref('silver__traces') }}
{{ ref('core__fact_traces') }}
WHERE
block_timestamp :: DATE >= (
SELECT
@ -250,13 +250,13 @@ old_native_transfers AS (
AND trace_status = 'SUCCESS'
{% if is_incremental() %}
AND _inserted_timestamp >= (
AND modified_timestamp >= (
SELECT
MAX(_inserted_timestamp) - INTERVAL '12 hours'
FROM
{{ this }}
)
AND _inserted_timestamp >= SYSDATE() - INTERVAL '7 day'
AND modified_timestamp >= SYSDATE() - INTERVAL '7 day'
{% endif %}
),
old_native_labels AS (
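Two mechanical swaps in this hunk: core__fact_traces exposes the chain-agnostic VALUE column, re-aliased to avax_value, and the incremental filter now reads the source's modified_timestamp. The alias can be referenced by the very next expression because Snowflake permits lateral column aliases within a single select list:

SELECT
    VALUE AS avax_value,                         -- gold column, chain-agnostic name
    avax_value * pow(10, 18) AS avax_value_raw   -- same-select reference to the alias (name hypothetical)
FROM
    {{ ref('core__fact_traces') }}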
View File
@ -1,2 +1,2 @@
-- depends_on: {{ ref('test_silver__transactions_full') }}
{{ missing_txs(ref("test_silver__traces_full")) }}
{{ missing_txs(ref("test_gold__fact_traces_full")) }}
View File
@ -0,0 +1,2 @@
-- depends_on: {{ ref('test_silver__transactions_recent') }}
{{ recent_missing_txs(ref("test_gold__fact_traces_recent")) }}
View File
@ -1,2 +0,0 @@
-- depends_on: {{ ref('test_silver__transactions_recent') }}
{{ recent_missing_txs(ref("test_silver__traces_recent")) }}
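Both completeness tests now point at the gold-backed views. The missing_txs and recent_missing_txs macro bodies are not shown in this commit; in spirit they surface transactions with no trace rows, along these lines (a sketch, not the actual macro):

SELECT
    txs.tx_hash
FROM
    {{ ref('test_silver__transactions_recent') }} txs
    LEFT JOIN {{ ref('test_gold__fact_traces_recent') }} tr
    ON txs.block_number = tr.block_number
    AND txs.tx_hash = tr.tx_hash
WHERE
    tr.tx_hash IS NULL

As a dbt data test, any returned row counts as a failure.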