still need to test - but...

realizing we need auto_paginate_query in ShroomDK lol.
Carlos R. Mercado 2022-07-27 11:16:48 -04:00
parent 498b66d8c1
commit 1e62df1aea
2 changed files with 85 additions and 32 deletions

View File

@@ -0,0 +1,83 @@
#' Time-Weighted Token Balance per Address
#'
#' Scores each holder of a token by how long (in blocks) they held a qualifying
#' balance within a block window, optionally weighted by the size of that balance,
#' using Flipside's flipside_prod_db.tokenflow_eth.tokens_balance_diffs table via ShroomDK.
#' For example, holding 10 tokens from block 100 to block 200 with amount weighting
#' contributes 10 * (200 - 100) / 10000 = 0.1 time points.
#'
#' @param token_address Contract address of the token to score.
#' @param min_tokens Minimum post-change balance for a balance change to count. Default 0.0001.
#' @param block_min First block of the measurement window. Default 0.
#' @param block_max Last block of the measurement window (the eligibility cutoff block).
#' @param amount_weighting If TRUE (default), weight holding periods by the balance held; if FALSE, all holders' time counts equally.
#' @param api_key Flipside ShroomDK API key.
#'
#' @return Data frame of address, token_address, and _timepoints (sum of weight * blocks held, divided by 10,000), sorted descending.
#' @export
#'
#' @examples
#' \dontrun{
#' # Illustrative call: substitute a real token contract, cutoff block, and API key.
#' address_time_weighted_token_balance(token_address = "0x...",
#'                                     block_max = 15000000,
#'                                     api_key = readLines("api_key.txt"))
#' }
address_time_weighted_token_balance <- function(token_address, min_tokens = 0.0001,
                                                block_min = 0,
                                                block_max,
                                                amount_weighting = TRUE,
                                                api_key){
  # Weight each holding period by the balance held (NEW_VALUE) or treat all holders equally.
  weight <- ifelse(amount_weighting, "NEW_VALUE", "1")
  query <- "
WITH block_tracked AS (
SELECT BLOCK as block,
TX_HASH AS hash,
TOKEN_ADDRESS as token_address,
SYMBOL AS symbol,
HOLDER as address,
PREV_VALUE as old_value,
CURR_VALUE as new_value,
-- A holder's most recent balance change has no later row, so default the lag to the
-- cutoff block for airdrop eligibility (_BLOCK_MAX_) rather than 0.
lag(block, 1, _BLOCK_MAX_) over (partition by address, token_address order by block DESC) as holder_next_block
FROM flipside_prod_db.tokenflow_eth.tokens_balance_diffs
WHERE TOKEN_ADDRESS = '_QUERY_TOKENS_' AND
BLOCK >= _BLOCK_MIN_ AND
BLOCK <= _BLOCK_MAX_ AND
new_value >= _MINVAL_
ORDER BY address desc, BLOCK desc),
time_points AS (
-- scale down time points by 10K to reduce integer overflow risk
-- use 1 for any amount, otherwise use NEW_VALUE
SELECT *, (_WEIGHT_ * (holder_next_block - block) )/10000 as time_points
FROM block_tracked
)
-- Aggregation here assumes no minimum required points.
SELECT address, token_address, sum(time_points) as _timepoints
FROM time_points
GROUP BY address, token_address
ORDER BY _timepoints DESC;
"
}
query <- gsub(pattern = "_BLOCK_MIN_",
replacement = block_min,
x = query,
fixed = TRUE)
query <- gsub(pattern = "_BLOCK_MAX_",
replacement = block_max,
x = query,
fixed = TRUE)
query <- gsub(pattern = "_QUERY_TOKENS_",
replacement = token_address,
x = query,
fixed = TRUE)
query <- gsub(pattern = "_MINVAL_",
replacement = min_tokens,
x = query,
fixed = TRUE)
query <- gsub(pattern = "_WEIGHT_",
replacement = weight,
x = query,
fixed = TRUE)
amount_holding <- shroomDK::auto_paginate_query(query, api_key)
return(amount_holding)
}

View File

@@ -72,37 +72,7 @@ FROM token_holder
x = query,
fixed = TRUE)
- qtoken <- shroomDK::create_query_token(query = query, api_key = api_key)
- res <- shroomDK::get_query_from_token(qtoken$token, api_key = api_key)
- amount_holding <- shroomDK::clean_query(res)
+ amount_holding <- shroomDK::auto_paginate_query(query, api_key)
- # Handle Pagination via ShroomDK
- # up to 1M rows max
- # get 100,000 rows at a time
- # stop when the most recent page < 100,000 items.
- # otherwise stop at 1M total rows.
- # NOTE: in the future, if we allow > 1M rows, will need to update this.
- if(nrow(amount_holding) == 100000){
-   warning("Checking for additional pages of data...")
-   for(i in 2:10){
-     temp_page <- clean_query(
-       shroomDK::get_query_from_token(qtoken$token,
-                                      api_key = api_key,
-                                      page_number = i)
-     )
-     amount_holding <- rbind.data.frame(amount_holding, temp_page)
-     if(nrow(temp_page) < 100000 | i == 10){
-       # done
-       return(amount_holding)
-     } else {
-       # continue
-     }
-   }
- } else {
-   return(amount_holding)
- }
return(amount_holding)
}
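
The commit message notes that shroomDK does not yet provide auto_paginate_query(). A minimal sketch of such a helper, assembled from the create_query_token / get_query_from_token / clean_query calls and the 100,000-rows-per-page, 10-page (1M row) limit in the loop removed above, might look like this; page_size and max_pages are illustrative defaults, not part of shroomDK:

auto_paginate_query <- function(query, api_key, page_size = 100000, max_pages = 10) {
  # Rough sketch, not the shipped implementation: submit the query once, then pull
  # result pages until a page comes back with fewer than page_size rows.
  qtoken <- shroomDK::create_query_token(query = query, api_key = api_key)
  last_page <- shroomDK::clean_query(
    shroomDK::get_query_from_token(qtoken$token, api_key = api_key)
  )
  result <- last_page
  i <- 1
  while (nrow(last_page) == page_size && i < max_pages) {
    i <- i + 1
    last_page <- shroomDK::clean_query(
      shroomDK::get_query_from_token(qtoken$token,
                                     api_key = api_key,
                                     page_number = i)
    )
    result <- rbind.data.frame(result, last_page)
  }
  result
}

With a helper along these lines, both call sites collapse to the single shroomDK::auto_paginate_query(query, api_key) line added in this commit.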