mirror of
https://github.com/FlipsideCrypto/thorchain-models.git
synced 2026-02-06 13:37:10 +00:00
initial commit
This commit is contained in:
parent
eea736e38d
commit
397bede0fa
29
README.md
29
README.md
@ -1,15 +1,30 @@
|
||||
Welcome to your new dbt project!
|
||||
## Profile Set Up
|
||||
|
||||
### Using the starter project
|
||||
|
||||
Try running the following commands:
|
||||
- dbt run
|
||||
- dbt test
|
||||
#### Use the following within profiles.yml
|
||||
----
|
||||
|
||||
```yml
|
||||
thorchain:
|
||||
target: dev
|
||||
outputs:
|
||||
dev:
|
||||
type: snowflake
|
||||
account: <ACCOUNT>
|
||||
role: <ROLE>
|
||||
user: <USERNAME>
|
||||
password: <PASSWORD>
|
||||
region: <REGION>
|
||||
database: THORCHAIN_DEV
|
||||
warehouse: <WAREHOUSE>
|
||||
schema: silver
|
||||
threads: 4
|
||||
client_session_keep_alive: False
|
||||
query_tag: <TAG>
|
||||
```
|
||||
|
||||
### Resources:
|
||||
- Learn more about dbt [in the docs](https://docs.getdbt.com/docs/introduction)
|
||||
- Check out [Discourse](https://discourse.getdbt.com/) for commonly asked questions and answers
|
||||
- Join the [chat](https://community.getdbt.com/) on Slack for live discussions and support
|
||||
- Find [dbt events](https://events.getdbt.com) near you
|
||||
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
|
||||
- Check out [the blog](https://blog.getdbt.com/) for the latest news on dbt's development and best practices
|
||||
@ -7,7 +7,7 @@ version: '1.0.0'
|
||||
config-version: 2
|
||||
|
||||
# This setting configures which "profile" dbt uses for this project.
|
||||
profile: 'thorchain_models'
|
||||
profile: 'thorchain'
|
||||
|
||||
# These configurations specify where dbt should look for different types of files.
|
||||
# The `model-paths` config, for example, states that models in this project can be
|
||||
@ -32,7 +32,10 @@ clean-targets: # directories to be removed by `dbt clean`
|
||||
# as tables. These settings can be overridden in the individual model files
|
||||
# using the `{{ config(...) }}` macro.
|
||||
models:
|
||||
thorchain_models:
|
||||
# Config indicated by + and applies to all files under models/example/
|
||||
example:
|
||||
+materialized: view
|
||||
+copy_grants: true
|
||||
+persist_docs:
|
||||
relation: true
|
||||
columns: true
|
||||
|
||||
vars:
|
||||
"dbt_date:time_zone": GMT
|
||||
6
macros/create_sps.sql
Normal file
6
macros/create_sps.sql
Normal file
@ -0,0 +1,6 @@
|
||||
{% macro create_sps() %}
    {# Creates the _internal schema and installs the prod-clone stored
       procedure, but only when running against the prod database. #}
    {# FIXME(review): the guard checks 'ALGORAND' but this is the
       thorchain-models project — looks copy-pasted from algorand-models;
       confirm whether it should be 'THORCHAIN'. As written, the SP is
       never created when targeting a thorchain database. #}
    {% if target.database == 'ALGORAND' %}
        CREATE schema IF NOT EXISTS _internal;
        {{ sp_create_prod_clone('_internal') }};
    {% endif %}
{% endmacro %}
|
||||
7
macros/create_udfs.sql
Normal file
7
macros/create_udfs.sql
Normal file
@ -0,0 +1,7 @@
|
||||
{% macro create_udfs() %}
    {# Builds the DDL for the project's UDFs into one statement block and
       executes it. #}
    {# Fix: the original invoked the identical macro twice back-to-back,
       which just re-created the same UDF; the duplicate call is removed. #}
    {# FIXME(review): the UDF factory is named create_algorand_udf_* inside
       the thorchain-models project — confirm the intended macro name. #}
    {% set sql %}
        {{ create_algorand_udf_bulk_get_tx_info() }};
    {% endset %}
    {% do run_query(sql) %}
{% endmacro %}
|
||||
10
macros/run_sp_create_prod_clone.sql
Normal file
10
macros/run_sp_create_prod_clone.sql
Normal file
@ -0,0 +1,10 @@
|
||||
{% macro run_sp_create_prod_clone() %}
    {# Calls the create_prod_clone stored procedure (installed by
       sp_create_prod_clone) to rebuild the dev clone of the prod database
       and hand ownership to the internal_dev role. #}
    {# FIXME(review): all three identifiers reference algorand inside the
       thorchain-models project — looks copy-pasted from algorand-models;
       confirm whether thorchain / thorchain_dev were intended. #}
    {% set clone_query %}
        call algorand._internal.create_prod_clone(
            'algorand',
            'algorand_dev',
            'internal_dev'
        );
    {% endset %}
    {% do run_query(clone_query) %}
{% endmacro %}
|
||||
66
macros/sp_create_prod_clone.sql
Normal file
66
macros/sp_create_prod_clone.sql
Normal file
@ -0,0 +1,66 @@
|
||||
{% macro sp_create_prod_clone(target_schema) -%}
{#
    Emits DDL for a Snowflake JavaScript stored procedure that clones a
    source (prod) database into a destination database and transfers
    ownership of every schema, table, function and procedure in the clone
    to the given role. All work happens inside one transaction and is
    rolled back on any error.
#}

create or replace procedure {{ target_schema }}.create_prod_clone(source_db_name string, destination_db_name string, role_name string)
returns boolean
language javascript
execute as caller
as
$$
// NOTE: procedure arguments are exposed to the JS runtime as upper-case
// bind names (SOURCE_DB_NAME etc.) by Snowflake.
snowflake.execute({sqlText: `BEGIN TRANSACTION;`});
try {
    // Replace any previous clone wholesale, then clone from source.
    snowflake.execute({sqlText: `DROP DATABASE IF EXISTS ${DESTINATION_DB_NAME}`});
    snowflake.execute({sqlText: `CREATE DATABASE ${DESTINATION_DB_NAME} CLONE ${SOURCE_DB_NAME}`});
    snowflake.execute({sqlText: `DROP SCHEMA ${DESTINATION_DB_NAME}._INTERNAL`}); /* this only needs to be in prod */

    // Hand over ownership of every schema that carries explicit grants
    // (grantor IS NOT NULL filters to explicitly granted privileges).
    var existing_schemas = snowflake.execute({sqlText: `SELECT table_schema
        FROM ${DESTINATION_DB_NAME}.INFORMATION_SCHEMA.TABLE_PRIVILEGES
        WHERE grantor IS NOT NULL
        GROUP BY 1;`});

    while (existing_schemas.next()) {
        var schema = existing_schemas.getColumnValue(1)
        snowflake.execute({sqlText: `GRANT OWNERSHIP ON SCHEMA ${DESTINATION_DB_NAME}.${schema} TO ROLE ${ROLE_NAME} COPY CURRENT GRANTS;`});
    }

    // Same for individual tables.
    var existing_tables = snowflake.execute({sqlText: `SELECT table_schema, table_name
        FROM ${DESTINATION_DB_NAME}.INFORMATION_SCHEMA.TABLE_PRIVILEGES
        WHERE grantor IS NOT NULL
        GROUP BY 1,2;`});

    while (existing_tables.next()) {
        var schema = existing_tables.getColumnValue(1)
        var table_name = existing_tables.getColumnValue(2)
        snowflake.execute({sqlText: `GRANT OWNERSHIP ON TABLE ${DESTINATION_DB_NAME}.${schema}.${table_name} TO ROLE ${ROLE_NAME} COPY CURRENT GRANTS;`});
    }

    // Functions/procedures must be addressed with their argument signature;
    // the regexp rebuilds "(VARCHAR,NUMBER,...)" from INFORMATION_SCHEMA.
    // NOTE(review): these use REVOKE CURRENT GRANTS while schemas/tables
    // above use COPY CURRENT GRANTS — confirm the asymmetry is intentional.
    var existing_functions = snowflake.execute({sqlText: `SELECT function_schema, function_name, concat('(',array_to_string(regexp_substr_all(argument_signature, 'VARCHAR|NUMBER|FLOAT|ARRAY|VARIANT|OBJECT|DOUBLE'),','),')') as argument_signature
        FROM ${DESTINATION_DB_NAME}.INFORMATION_SCHEMA.FUNCTIONS;`});

    while (existing_functions.next()) {
        var schema = existing_functions.getColumnValue(1)
        var function_name = existing_functions.getColumnValue(2)
        var argument_signature = existing_functions.getColumnValue(3)
        snowflake.execute({sqlText: `GRANT OWNERSHIP ON FUNCTION ${DESTINATION_DB_NAME}.${schema}.${function_name}${argument_signature} to role ${ROLE_NAME} REVOKE CURRENT GRANTS;`});
    }

    var existing_procedures = snowflake.execute({sqlText: `SELECT procedure_schema, procedure_name, concat('(',array_to_string(regexp_substr_all(argument_signature, 'VARCHAR|NUMBER|FLOAT|ARRAY|VARIANT|OBJECT|DOUBLE'),','),')') as argument_signature
        FROM ${DESTINATION_DB_NAME}.INFORMATION_SCHEMA.PROCEDURES;`});

    while (existing_procedures.next()) {
        var schema = existing_procedures.getColumnValue(1)
        var procedure_name = existing_procedures.getColumnValue(2)
        var argument_signature = existing_procedures.getColumnValue(3)
        snowflake.execute({sqlText: `GRANT OWNERSHIP ON PROCEDURE ${DESTINATION_DB_NAME}.${schema}.${procedure_name}${argument_signature} to role ${ROLE_NAME} REVOKE CURRENT GRANTS;`});
    }

    // Finally transfer the database itself and commit.
    snowflake.execute({sqlText: `GRANT OWNERSHIP ON DATABASE ${DESTINATION_DB_NAME} TO ROLE ${ROLE_NAME};`})
    snowflake.execute({sqlText: `COMMIT;`});
} catch (err) {
    snowflake.execute({sqlText: `ROLLBACK;`});
    throw(err);
}

return true
$$

{%- endmacro %}
|
||||
58
macros/tests/date_gaps.sql
Normal file
58
macros/tests/date_gaps.sql
Normal file
@ -0,0 +1,58 @@
|
||||
{% macro date_gaps(
    table,
    partition_by,
    column
) %}
    {# Test macro: returns rows where consecutive values of date `column`
       (optionally per `partition_by` group) are more than one day apart.
       Extra positional args (varargs) are objects with start_date/end_date
       describing known gap ranges to exclude from the results. #}
    {%- set partition_sql = partition_by | join(", ") -%}
    {%- set previous_column = "prev_" ~ column -%}
    WITH source AS (
        SELECT
            {{ partition_sql + "," if partition_sql }}
            {{ column }},
            LAG(
                {{ column }},
                1
            ) over (
                {{ "PARTITION BY " ~ partition_sql if partition_sql }}
                ORDER BY
                    {{ column }} ASC
            ) AS {{ previous_column }}
        FROM
            {{ table }}
    )
    SELECT
        {{ partition_sql + "," if partition_sql }}
        {{ previous_column }},
        {{ column }},
        DATEDIFF(
            days,
            {{ previous_column }},
            {{ column }}
        ) - 1 AS gap
    FROM
        source
    {% if varargs -%}
    -- Spine of all excluded dates, anti-joined below so known gaps are
    -- not reported.
    LEFT JOIN (
        {% for x in varargs %}
            (
                {{ dbt_utils.date_spine(
                    datepart = "day",
                    start_date = x.start_date,
                    end_date = x.end_date
                ) }}
            )
            {{- "UNION ALL" if not loop.last -}}
        {% endfor %}
    ) exclude
    -- Fix: was `source.day = exclude.date_day`, which hard-coded the
    -- column name and broke for any `column` not literally named "day".
    ON source.{{ column }} = exclude.date_day
    {%- endif %}
    WHERE
        DATEDIFF(
            days,
            {{ previous_column }},
            {{ column }}
        ) > 1
        -- Fix: was `AND source.day != exclude.date_day`. After a LEFT JOIN
        -- that predicate is FALSE for matched rows and NULL for unmatched
        -- ones, so it filtered out EVERY row; IS NULL is the correct
        -- anti-join (keep only gaps not covered by the exclusion spine).
        {{ "AND exclude.date_day IS NULL" if varargs }}
    ORDER BY
        gap DESC
{% endmacro %}
|
||||
10
macros/tests/negative_one.sql
Normal file
10
macros/tests/negative_one.sql
Normal file
@ -0,0 +1,10 @@
|
||||
{% test negative_one(model, column_name) %}
{# Generic dbt test: fails when any row holds the sentinel value '-1'
   in the given column. #}
SELECT *
FROM {{ model }}
WHERE {{ column_name }} = '-1'
{% endtest %}
|
||||
34
macros/tests/sequence_distinct_gaps.sql
Normal file
34
macros/tests/sequence_distinct_gaps.sql
Normal file
@ -0,0 +1,34 @@
|
||||
{% macro sequence_distinct_gaps(
    table,
    column
) %}
    {# Test macro: reports gaps in the distinct, ascending values of a
       sequence column (e.g. block heights); gap = difference - 1. #}
    {# Fix: the original referenced an undefined `partition_by` variable
       (leftover copied from sequence_gaps) — it is not a parameter of this
       macro, `undefined | join(", ")` breaks rendering, and the inner
       SELECT DISTINCT exposes only `column` anyway. Those leftovers are
       removed. #}
    {%- set previous_column = "prev_" ~ column -%}
    WITH source AS (
        SELECT
            {{ column }},
            LAG(
                {{ column }},
                1
            ) over (
                ORDER BY
                    {{ column }} ASC
            ) AS {{ previous_column }}
        FROM (
            SELECT DISTINCT {{ column }} FROM {{ table }}
        )
    )
    SELECT
        {{ previous_column }},
        {{ column }},
        {{ column }} - {{ previous_column }} - 1 AS gap
    FROM
        source
    WHERE
        {{ column }} - {{ previous_column }} <> 1
    ORDER BY
        gap DESC
{% endmacro %}
|
||||
35
macros/tests/sequence_gaps.sql
Normal file
35
macros/tests/sequence_gaps.sql
Normal file
@ -0,0 +1,35 @@
|
||||
{% macro sequence_gaps(
    table,
    partition_by,
    column
) %}
{# Test macro: flags rows where `column` does not increase by exactly 1
   relative to the previous row, optionally within `partition_by` groups;
   gap = difference - 1, largest gaps first. #}
{%- set partition_cols = partition_by | join(", ") -%}
{%- set prev_col = "prev_" ~ column -%}
WITH source AS (
    SELECT
        {{ partition_cols + "," if partition_cols }}
        {{ column }},
        LAG({{ column }}, 1) over (
            {{ "PARTITION BY " ~ partition_cols if partition_cols }}
            ORDER BY {{ column }} ASC
        ) AS {{ prev_col }}
    FROM {{ table }}
)
SELECT
    {{ partition_cols + "," if partition_cols }}
    {{ prev_col }},
    {{ column }},
    {{ column }} - {{ prev_col }} - 1 AS gap
FROM source
WHERE {{ column }} - {{ prev_col }} <> 1
ORDER BY gap DESC
{% endmacro %}
|
||||
52
macros/tests/sequence_gaps_buffered_look_back.sql
Normal file
52
macros/tests/sequence_gaps_buffered_look_back.sql
Normal file
@ -0,0 +1,52 @@
|
||||
{% macro sequence_gaps_buffered_look_back(
    table,
    partition_by,
    column,
    delayed_column,
    delayed_period
) %}
    {# Like sequence_gaps, but only flags gaps older than
       MAX(delayed_column) - delayed_period, giving late-arriving data a
       grace window before a gap counts as a test failure. #}
    {%- set partition_sql = partition_by | join(", ") -%}
    {%- set previous_column = "prev_" ~ column -%}
    WITH source AS (
        SELECT
            {{ partition_sql + "," if partition_sql }}
            {{ column }},
            LAG(
                {{ column }},
                1
            ) over (
                {{ "PARTITION BY " ~ partition_sql if partition_sql }}
                ORDER BY
                    {{ column }} ASC
            ) AS {{ previous_column }},
            {# FIXME(review): this LAG is aliased to the same name as the
               source column, so every downstream reference to
               delayed_column reads the LAGGED value, not the current one —
               presumably "prev_" ~ delayed_column was intended; confirm
               before relying on this test. #}
            LAG(
                {{ delayed_column }},
                1
            ) over (
                {{ "PARTITION BY " ~ partition_sql if partition_sql }}
                ORDER BY
                    {{ column }} ASC
            ) AS {{ delayed_column }}
        FROM
            {{ table }}
    )
    SELECT
        {{ partition_sql + "," if partition_sql }}
        {{ previous_column }},
        {{ column }},
        {{ column }} - {{ previous_column }}
        - 1 AS gap
    FROM
        source
    WHERE
        {{ column }} - {{ previous_column }} <> 1
        AND
        {# Grace window: compare against the newest delayed_column value in
           the already-built model ({{ this }}), minus the buffer period. #}
        {{ delayed_column }} < (
            SELECT
                MAX(
                    {{ delayed_column }}
                )
            FROM
                {{ this }}
        ) - INTERVAL '{{ delayed_period }}'
{% endmacro %}
|
||||
@ -1,27 +0,0 @@
|
||||
|
||||
-- dbt starter example model (removed by this commit).
/*
    Welcome to your first dbt model!
    Did you know that you can also configure models directly within SQL files?
    This will override configurations stated in dbt_project.yml

    Try changing "table" to "view" below
*/

{{ config(materialized='table') }}

with source_data as (

    -- Two-row inline fixture: one real id and one null id, so the
    -- starter tests have something to check.
    select 1 as id
    union all
    select null as id

)

select *
from source_data

/*
    Uncomment the line below to remove records with null `id` values
*/

-- where id is not null
|
||||
@ -1,6 +0,0 @@
|
||||
|
||||
-- dbt starter example model (removed by this commit).
-- Use the `ref` function to select from other models

select *
from {{ ref('my_first_dbt_model') }}
where id = 1
|
||||
@ -1,21 +0,0 @@
|
||||
|
||||
version: 2
|
||||
|
||||
models:
|
||||
- name: my_first_dbt_model
|
||||
description: "A starter dbt model"
|
||||
columns:
|
||||
- name: id
|
||||
description: "The primary key for this table"
|
||||
tests:
|
||||
- unique
|
||||
- not_null
|
||||
|
||||
- name: my_second_dbt_model
|
||||
description: "A starter dbt model"
|
||||
columns:
|
||||
- name: id
|
||||
description: "The primary key for this table"
|
||||
tests:
|
||||
- unique
|
||||
- not_null
|
||||
55
models/sources.yml
Normal file
55
models/sources.yml
Normal file
@ -0,0 +1,55 @@
|
||||
version: 2
|
||||
|
||||
sources:
|
||||
- name: shared
|
||||
database: flipside_prod_db
|
||||
schema: silver
|
||||
tables:
|
||||
- name: prices_v2
|
||||
- name: thorchain_midgard
|
||||
database: flipside_prod_db
|
||||
schema: BRONZE_MIDGARD_2_9_0_20220724
|
||||
tables:
|
||||
- name: midgard_active_vault_events
|
||||
- name: midgard_add_events
|
||||
- name: midgard_asgard_fund_yggdrasil_events
|
||||
- name: midgard_block_log
|
||||
- name: midgard_block_pool_depths
|
||||
- name: midgard_bond_events
|
||||
- name: bond_events_pk_count
|
||||
- name: midgard_constants
|
||||
- name: midgard_errata_events
|
||||
- name: midgard_fee_events
|
||||
- name: fee_events_pk_count
|
||||
- name: midgard_gas_events
|
||||
- name: midgard_inactive_vault_events
|
||||
- name: midgard_message_events
|
||||
- name: midgard_new_node_events
|
||||
- name: midgard_outbound_events
|
||||
- name: midgard_pending_liquidity_events
|
||||
- name: midgard_pool_balance_change_events
|
||||
- name: midgard_pool_events
|
||||
- name: midgard_refund_events
|
||||
- name: midgard_reserve_events
|
||||
- name: midgard_rewards_event_entries
|
||||
- name: midgard_rewards_events
|
||||
- name: midgard_set_ip_address_events
|
||||
- name: midgard_set_mimir_events
|
||||
- name: midgard_set_node_keys_events
|
||||
- name: midgard_set_version_events
|
||||
- name: midgard_slash_events
|
||||
- name: midgard_slash_points_events
|
||||
- name: midgard_stake_events
|
||||
- name: midgard_swap_events
|
||||
- name: midgard_switch_events
|
||||
- name: switch_events_pk_count
|
||||
- name: midgard_thorname_change_events
|
||||
- name: midgard_transfer_events
|
||||
- name: midgard_unstake_events
|
||||
- name: midgard_update_node_account_status_events
|
||||
- name: midgard_validator_request_leave_events
|
||||
- name: crosschain
|
||||
database: flipside_prod_db
|
||||
schema: silver_crosschain
|
||||
tables:
|
||||
- name: address_labels
|
||||
9
packages.yml
Normal file
9
packages.yml
Normal file
@ -0,0 +1,9 @@
|
||||
packages:
|
||||
- package: dbt-labs/dbt_utils
|
||||
version: [">=0.8.0", "<0.9.0"]
|
||||
- package: dbt-labs/dbt_external_tables
|
||||
version: 0.8.0
|
||||
- package: Snowflake-Labs/dbt_constraints
|
||||
version: [">=0.4.0"]
|
||||
- package: calogica/dbt_expectations
|
||||
version: [">=0.4.0", "<0.6.0"]
|
||||
Loading…
Reference in New Issue
Block a user