filter updates

This commit is contained in:
flipside-kellen 2022-02-11 11:34:32 -08:00
parent cbf99c3fa5
commit 85795bdeca
15 changed files with 2626 additions and 1200 deletions

View File

@ -3,6 +3,7 @@ import os
import math
import json
import pandas as pd
from scrape_sol_nfts import clean_name
import snowflake.connector
os.chdir('/Users/kellenblumberg/git/nft-deal-score')
@ -235,6 +236,80 @@ def levana():
with open('./data/metadata/levana_dragon_eggs/{}.txt'.format(i), 'w') as outfile:
outfile.write(json.dumps(newd))
def solana():
    """Build nft_metadata upload payloads for each Solana collection.

    Joins scraped rarity/mint data with token metadata and writes JSON
    batches to ./data/metadata/<collection>/<i>.txt in the shape expected
    by the {database_name}.silver.nft_metadata Snowflake sink.
    """
    mints = pd.read_csv('./data/solana_rarities.csv')
    collection_info = pd.read_csv('./data/collection_info.csv')
    metadata = pd.read_csv('./data/metadata.csv')
    tokens = pd.read_csv('./data/tokens.csv')
    # normalize token_id to string so the merges line up
    tokens['token_id'] = tokens.token_id.astype(str)
    metadata['token_id'] = metadata.token_id.astype(str)
    metadata = metadata.merge(tokens)
    metadata = metadata.merge(collection_info)
    metadata['token_id'] = metadata.clean_token_id.fillna(metadata.token_id)
    # computed rank features are not raw traits; keep them out of token_metadata
    metadata = metadata[~metadata.feature_name.isin(['nft_rank','adj_nft_rank_0','adj_nft_rank_1','adj_nft_rank_2'])]
    metadata['token_id'] = metadata.token_id.astype(int)
    mints['token_id'] = mints.token_id.astype(int)
    mints['collection'] = mints.collection.apply(lambda x: clean_name(x) )
    # NOTE(review): [2:] skips the first two collections — presumably already
    # uploaded; confirm before re-running.
    for collection in metadata.collection.unique()[2:]:
        print(collection)
        mdf = metadata[metadata.collection == collection]
        results = []
        for token_id in mdf.token_id.unique():
            cur = mdf[mdf.token_id == token_id]
            m = mints[(mints.collection == collection) & (mints.token_id == token_id) ]
            if not len(m):
                # no mint address scraped for this token; skip it
                print(token_id)
                continue
            mint_address = m.mint_address.values[0]
            # collapse the per-feature rows into one {feature_name: feature_value} dict
            token_metadata = {}
            row = None
            for row in cur.iterrows():
                row = row[1]
                token_metadata[row['feature_name']] = row['feature_value']
            # `row` is the last metadata row; collection-level columns
            # (contract_address, image_url, ...) are identical on every row
            d = {
                'commission_rate': None
                , 'mint_address': mint_address
                , 'contract_address': row['contract_address']
                , 'contract_name': row['collection']
                , 'created_at_block_id': 0
                , 'created_at_timestamp': str(row['created_at_timestamp'])
                , 'created_at_tx_id': ''
                , 'creator_address': row['contract_address']
                , 'creator_name': row['collection']
                , 'image_url': row['image_url']
                , 'project_name': row['collection']
                # was listed twice (str and int); keep the int form only
                , 'token_id': int(token_id)
                , 'token_metadata': token_metadata
                , 'token_metadata_uri': row['image_url']
                , 'token_name': row['collection']
            }
            results.append(d)
        print('Uploading {} results'.format(len(results)))
        # write the payload out in batches of n records
        n = 50
        r = math.ceil(len(results) / n)
        for i in range(r):
            newd = {
                "model": {
                    "blockchain": "solana",
                    "sinks": [
                        {
                            "destination": "{database_name}.silver.nft_metadata",
                            "type": "snowflake",
                            "unique_key": "blockchain || contract_address || token_id"
                        }
                    ],
                },
                # was results[(i*n):((i*n)+r)] — sliced r rows per batch instead
                # of n, dropping/duplicating records whenever r != n
                "results": results[(i * n):((i * n) + n)]
            }
            with open('./data/metadata/{}/{}.txt'.format(collection, i), 'w') as outfile:
                outfile.write(json.dumps(newd))
def bayc():
with open('./data/bayc.json') as f:
j = json.load(f)

View File

@ -50,7 +50,7 @@ for c in m_df.collection.unique():
print(m_df[(m_df.token_id=='1') & (m_df.collection == 'Solana Monkey Business')])
print(m_df[(m_df.token_id=='10') & (m_df.collection == 'Aurory')])
for c in [ 'nft_rank','adj_nft_rank_0','adj_nft_rank_1' ]:
for c in [ 'nft_rank','adj_nft_rank_0','adj_nft_rank_1','adj_nft_rank_2' ]:
cur = rarities[[ 'collection','token_id',c ]].rename(columns={c: 'feature_value'})
cur['feature_name'] = c
m_df = m_df[ m_df.feature_name != c ]

View File

@ -17,6 +17,19 @@ from selenium.webdriver.common.keys import Keys
os.chdir('/Users/kellenblumberg/git/nft-deal-score')
os.environ['PATH'] += os.pathsep + '/Users/kellenblumberg/shared/'
# Updates
# Final updates to NTR App
# Helped gather mint_address data and metadata for solana hackathon
# Updated NFT Deal score model to enable easy addition to
# Accomplishments
# Version 1.0 of NTR app is now live at https://rstudio-connect.flipside.kitchen/ntr/ thanks to @eric
# Problems Encountered
# Still waiting for Harmony data to be released (was hoping it would be ready early this week)
# Priorities
# Assist with Solana <3 week where needed ()
# Build DeFi Kingdoms query
# Concerns
# browser = webdriver.Chrome()
# old = pd.read_csv('./data/tokens.csv')
@ -380,9 +393,31 @@ def scrape_recent_sales():
del o_sales['tmp']
o_sales.to_csv('./data/sales.csv', index=False)
def scrape_solanafloor():
    """Scrape the solanafloor.com collection table into ./data/sf_projects.csv.

    Writes one row per project with an ``is_lite`` flag (1 when the row text
    contains "Lite"). Relies on the module-level selenium ``browser``.
    """
    browser.get('https://solanafloor.com/')
    soup = BeautifulSoup(browser.page_source)
    # the collection table is an ag-grid; the second matching container
    # holds the data rows (the first is a different grid on the page)
    d1 = soup.find_all('div', class_='ag-center-cols-clipper')
    d1 = d1[1]
    rows1 = d1.find_all('div', class_='ag-row')
    data = []
    for r in rows1:
        cell1 = r.find_all('div', class_='ag-cell')
        a = cell1[0].find_all('a')[0]
        # project slug is the last path segment of the collection link
        project = re.split('/', a.attrs['href'])[-1]
        data += [[ project, int('Lite' in cell1[0].text) ]]
    df = pd.DataFrame(data, columns=['project','is_lite'])
    df.to_csv('./data/sf_projects.csv', index=False)
def scrape_listings(browser, collections = [ 'aurory','thugbirdz','smb','degenapes','peskypenguinclub' ], alerted = [], is_listings = True):
print('Scraping solanafloor listings...')
data = []
m_data = []
# collections = [ 'aurory','thugbirdz','meerkatmillionaires','aurory','degenapes' ]
# collections = [ 'aurory','thugbirdz','smb','degenapes' ]
# collections = [ 'smb' ]
@ -391,7 +426,11 @@ def scrape_listings(browser, collections = [ 'aurory','thugbirdz','smb','degenap
, 'degenapes': 'degen-ape-academy'
, 'peskypenguinclub': 'pesky-penguins'
}
collection = 'smb'
collections = ['the-suites']
sf_projects = pd.read_csv('./data/sf_projects.csv')
old = pd.read_csv('./data/solana_rarities.csv')
collections = sf_projects[(sf_projects.to_scrape==1) & (sf_projects.is_lite==0) & (-sf_projects.collection.isin(old.collection.unique()))].collection.unique()
collection = 'portals'
for collection in collections:
if collection == 'boryokudragonz':
continue
@ -410,6 +449,7 @@ def scrape_listings(browser, collections = [ 'aurory','thugbirdz','smb','degenap
page += 1
for j in [20, 30, 30, 30, 30, 30, 30, 30] * 1:
for _ in range(1):
pass
soup = BeautifulSoup(browser.page_source)
# for row in browser.find_elements_by_class_name('ag-row'):
# cells = row.find_elements_by_class_name('ag-cell')
@ -419,12 +459,16 @@ def scrape_listings(browser, collections = [ 'aurory','thugbirdz','smb','degenap
# data += [[ collection, token_id, price ]]
d0 = soup.find_all('div', class_='ag-pinned-left-cols-container')
d1 = soup.find_all('div', class_='ag-center-cols-clipper')
h1 = soup.find_all('div', class_='ag-header-row')
if not len(d0) or not len(d1):
continue
d0 = d0[0]
d1 = d1[0]
h1 = h1[1]
rows0 = d0.find_all('div', class_='ag-row')
rows1 = d1.find_all('div', class_='ag-row')
hs1 = h1.find_all('div', class_='ag-header-cell')
hs1 = [ x.text.strip() for x in hs1 ]
for k in range(len(rows0)):
# for row in soup.find_all('div', class_='ag-row'):
# # print(row.text)
@ -432,6 +476,7 @@ def scrape_listings(browser, collections = [ 'aurory','thugbirdz','smb','degenap
cell1 = rows1[k].find_all('div', class_='ag-cell')
if len(cell1) > 2:
token_id = cell0[0].text
mint_address = re.split('/', cell0[0].find_all('a')[0].attrs['href'])[-1] if len(cell0[0].find_all('a')) else None
price = cell1[2 if is_listings else 0].text
if len(token_id) and len(price):
# token_id = int(token_id[0].text)
@ -443,7 +488,12 @@ def scrape_listings(browser, collections = [ 'aurory','thugbirdz','smb','degenap
if not price and is_listings:
continue
if not token_id in seen:
data += [[ collection, token_id, price ]]
if not is_listings:
data += [[ collection, token_id, mint_address, price ]]
for l in range(len(hs1)):
m_data += [[ collection, token_id, mint_address, hs1[l], cell1[l].text.strip() ]]
else:
data += [[ collection, token_id, price ]]
seen.append(token_id)
# else:
# print(row.text)
@ -459,12 +509,25 @@ def scrape_listings(browser, collections = [ 'aurory','thugbirdz','smb','degenap
else:
has_more = False
break
if not is_listings:
old = pd.read_csv('./data/solana_rarities.csv')
rarities = pd.DataFrame(data, columns=['collection','token_id','nft_rank']).drop_duplicates()
rarities = rarities.append(old).drop_duplicates()
print(rarities.groupby('collection').token_id.count())
rarities.to_csv('./data/solana_rarities.csv', index=False)
if not is_listings:
old = pd.read_csv('./data/solana_rarities.csv')
rarities = pd.DataFrame(data, columns=['collection','token_id','mint_address','nft_rank']).drop_duplicates()
rarities = rarities.append(old).drop_duplicates()
rarities = rarities[-rarities.collection.isin(rem)]
print(rarities.groupby('collection').token_id.count().reset_index().sort_values('token_id'))
rarities.to_csv('./data/solana_rarities.csv', index=False)
old = pd.read_csv('./data/sf_metadata.csv')
metadata = pd.DataFrame(m_data, columns=['collection','token_id','mint_address','feature_name','feature_value']).drop_duplicates()
metadata = metadata[ -metadata.feature_name.isin(['Rank *','Owner','Listed On','Price','USD','Buy Link']) ]
metadata = metadata.append(old).drop_duplicates()
metadata.feature_name.unique()
g = metadata[[ 'collection','token_id' ]].drop_duplicates().groupby('collection').token_id.count().reset_index().sort_values('token_id')
rem = g[g.token_id<99].collection.unique()
metadata = metadata[-metadata.collection.isin(rem)]
print(g)
# g.to_csv('~/Downloads')
metadata.to_csv('./data/sf_metadata.csv', index=False)
old = pd.read_csv('./data/listings.csv')
listings = pd.DataFrame(data, columns=['collection','token_id','price']).drop_duplicates()
@ -894,6 +957,108 @@ def scratch():
o_sales.head()
o_sales.to_csv('./data/md_sales.csv', index=False)
def create_mint_csv():
    """Assemble the per-collection mint snapshots under ./data/mints into one frame.

    Mirrors the tail of scrape_mints(): each metaboss snapshot file is named
    after its update authority, which is mapped back to the collection name.
    Returns the combined DataFrame (mint_address, update_authority, collection).

    NOTE(review): reads the module-level ``d`` {collection: update_authority}
    mapping — confirm it is populated before calling.
    """
    mints = pd.DataFrame()
    # invert the collection -> update_authority map
    auth_to_mint = {}
    for collection, update_authority in d.items():
        auth_to_mint[update_authority] = collection
    for fname in [ './data/mints/'+f for f in os.listdir('./data/mints') ]:
        # skip non-snapshot files (a leftover `pass` previously made this
        # loop body effectively unreachable dead code)
        if not '.json' in fname:
            continue
        with open(fname, 'r') as f:
            j = json.load(f)
        cur = pd.DataFrame(j)
        if len(cur):
            cur.columns = ['mint_address']
            cur['update_authority'] = re.split('/|_', fname)[3]
            # was re.split(...)[3] again (the authority, not the collection);
            # map the authority back to its collection name
            cur['collection'] = cur.update_authority.apply(lambda x: auth_to_mint[x] )
            # rows were never accumulated, so the function produced nothing
            mints = mints.append(cur)
    return mints
def scrape_how_rare_is():
    """Scrape token image URLs from howrare.is and refresh ./data/tokens.csv.

    Walks the paginated rank listing for each configured collection and
    replaces those collections' rows in tokens.csv. Relies on the
    module-level selenium ``browser``.
    """
    # collection slug -> number of rank pages to walk
    d = {
        'degenapes': 40
        ,'aurory': 40
    }
    data = []
    for collection, num_pages in d.items():
        for page in range(num_pages):
            if len(data):
                # progress breadcrumb: last row scraped so far
                print(data[-1])
            url = 'https://howrare.is/{}/?page={}&ids=&sort_by=rank'.format(collection, page)
            browser.get(url)
            sleep(0.1)
            soup = BeautifulSoup(browser.page_source)
            for div in soup.find_all('div', class_='featured_item_img'):
                image_url = div.find_all('img')[0].attrs['src']
                # token id is the second-to-last path segment of the item link
                token_id = re.split('/', div.find_all('a')[0].attrs['href'])[-2]
                data += [[ collection, token_id, image_url ]]
    df = pd.DataFrame(data, columns=['collection','token_id','image_url'])
    df['collection'] = df.collection.apply(lambda x: clean_name(x) )
    df['clean_token_id'] = df.token_id
    df['chain'] = 'Solana'
    # replace any existing rows for the scraped collections, keep the rest
    tokens = pd.read_csv('./data/tokens.csv')
    tokens = tokens[-tokens.collection.isin(df.collection.unique())]
    tokens = tokens.append(df)
    tokens.to_csv('./data/tokens.csv', index=False)
def scrape_mints():
    """Snapshot mint addresses per collection via metaboss and rebuild
    ./data/solana_mints.csv.

    Steps: merge manually-gathered mint addresses into the existing snapshot,
    run `metaboss snapshot mints` for collections not yet covered, then fold
    every snapshot file under ./data/mints back into one frame.
    """
    nft_mint_addresses = pd.read_csv('./data/nft_mint_addresses.csv')
    nft_mint_addresses['collection'] = nft_mint_addresses.collection.apply(lambda x: clean_name(x) )
    solana_nfts = pd.read_csv('./data/solana_nfts.csv')
    solana_nfts = solana_nfts[solana_nfts.update_authority.notnull()]
    solana_nfts = solana_nfts[solana_nfts.collection != 'Boryoku Baby Dragonz']
    print(solana_nfts.groupby('update_authority').collection.count().reset_index().sort_values('collection', ascending=0).head(10))
    nft_mint_addresses = nft_mint_addresses.merge( solana_nfts )
    # overlay the manually-gathered mint addresses onto the existing snapshot
    mints = pd.read_csv('./data/solana_mints.csv')
    mints = mints[-mints.collection.isin(nft_mint_addresses.collection.unique())]
    mints = mints.append(nft_mint_addresses[list(mints.columns)])
    seen = list(mints.update_authority.unique())
    rpc = 'https://red-cool-wildflower.solana-mainnet.quiknode.pro/a1674d4ab875dd3f89b34863a86c0f1931f57090/'
    # collection -> update_authority lookup
    d = {}
    for row in solana_nfts.iterrows():
        row = row[1]
        d[row['collection']] = row['update_authority']
    remaining = sorted(solana_nfts[-solana_nfts.collection.isin(mints.collection.unique())].collection.unique())
    print('{}'.format(len(remaining)))
    for collection in remaining:
        update_authority = d[collection]
        if update_authority in seen or collection in [ 'Solana Monkey Business','Thugbirdz','Degenerate Ape Academy','Pesky Penguins','Aurory' ]:
            print('Seen '+collection)
            continue
        else:
            print('Working on '+collection)
            # throttle between snapshots to be gentle on the RPC endpoint
            sleep(.10 * 60)
            os.system('metaboss -r {} -t 300 snapshot mints --update-authority {} --output ~/git/nft-deal-score/data/mints '.format(rpc, update_authority))
    # fold every snapshot file (named by update authority) back into one frame
    mints = pd.DataFrame()
    auth_to_mint = {}
    for collection, update_authority in d.items():
        auth_to_mint[update_authority] = collection
    for fname in [ './data/mints/'+f for f in os.listdir('./data/mints') ]:
        if not '.json' in fname:
            continue
        with open(fname, 'r') as f:
            j = json.load(f)
        cur = pd.DataFrame(j)
        if len(cur):
            cur.columns = ['mint_address']
            cur['update_authority'] = re.split('/|_', fname)[3]
            cur['collection'] = cur.update_authority.apply(lambda x: auth_to_mint[x] )
            mints = mints.append(cur)
    g = mints.groupby('collection').update_authority.count().reset_index()
    # debug dump of per-collection counts; harmless but review before keeping
    g.to_csv('~/Downloads/tmp.csv', index=False)
    mints.to_csv('./data/solana_mints.csv', index=False)
# scrape_listings(['smb'])
# alerted = []
# for i in range(1):

View File

@ -0,0 +1,10 @@
"use strict";
exports.__esModule = true;
exports.hello = void 0;
var world = 'world';
function hello(world) {
if (world === void 0) { world = 'world'; }
return "Hello ".concat(world, "! ");
}
exports.hello = hello;
console.log("Hello!");

View File

@ -0,0 +1,117 @@
import { Connection, clusterApiUrl, PublicKey } from '@solana/web3.js';
// import bs58 from 'bs58';
const connection = new Connection(clusterApiUrl('mainnet-beta'));
const MAX_NAME_LENGTH = 32;
const MAX_URI_LENGTH = 200;
const MAX_SYMBOL_LENGTH = 10;
const MAX_CREATOR_LEN = 32 + 1 + 1;
const MAX_CREATOR_LIMIT = 5;
const MAX_DATA_SIZE = 4 + MAX_NAME_LENGTH + 4 + MAX_SYMBOL_LENGTH + 4 + MAX_URI_LENGTH + 2 + 1 + 4 + MAX_CREATOR_LIMIT * MAX_CREATOR_LEN;
const MAX_METADATA_LEN = 1 + 32 + 32 + MAX_DATA_SIZE + 1 + 1 + 9 + 172;
const CREATOR_ARRAY_START = 1 + 32 + 32 + 4 + MAX_NAME_LENGTH + 4 + MAX_URI_LENGTH + 4 + MAX_SYMBOL_LENGTH + 2 + 1 + 4;
console.log(`MAX_METADATA_LEN = ${MAX_METADATA_LEN}`);
// const TOKEN_METADATA_PROGRAM = new PublicKey('cndy3Z4yapfJBmL3ShUp5exZKqR3z33thTzeNMm2gRZ');
const candyMachineId = new PublicKey('trshC9cTgL3BPXoAbp5w9UfnUMWEJx5G61vUijXPMLH');
// const getMintAddresses = async (firstCreatorAddress: PublicKey) => {
// const metadataAccounts = await connection.getProgramAccounts(
// TOKEN_METADATA_PROGRAM,
// {
// // The mint address is located at byte 33 and lasts for 32 bytes.
// dataSlice: { offset: 33, length: 32 },
// filters: [
// // Only get Metadata accounts.
// { dataSize: MAX_METADATA_LEN },
// // Filter using the first creator.
// {
// memcmp: {
// offset: 1,
// bytes: firstCreatorAddress.toBase58(),
// },
// },
// ],
// },
// );
// return metadataAccounts.map((metadataAccountInfo) => (
// // bs58.encode(metadataAccountInfo.account.data)
// (metadataAccountInfo.account.data)
// ));
// };
// getMintAddresses(candyMachineId);
// import { Connection, clusterApiUrl, PublicKey } from '@solana/web3.js';
// import bs58 from 'bs58';
// const connection = new Connection(clusterApiUrl('mainnet-beta'));
// const MAX_NAME_LENGTH = 32;
// const MAX_URI_LENGTH = 200;
// const MAX_SYMBOL_LENGTH = 10;
// const MAX_CREATOR_LEN = 32 + 1 + 1;
// const MAX_CREATOR_LIMIT = 5;
// const MAX_DATA_SIZE = 4 + MAX_NAME_LENGTH + 4 + MAX_SYMBOL_LENGTH + 4 + MAX_URI_LENGTH + 2 + 1 + 4 + MAX_CREATOR_LIMIT * MAX_CREATOR_LEN;
// const MAX_METADATA_LEN = 1 + 32 + 32 + MAX_DATA_SIZE + 1 + 1 + 9 + 172;
// const CREATOR_ARRAY_START = 1 + 32 + 32 + 4 + MAX_NAME_LENGTH + 4 + MAX_URI_LENGTH + 4 + MAX_SYMBOL_LENGTH + 2 + 1 + 4;
// On-chain program ids (mainnet-beta).
const TOKEN_METADATA_PROGRAM = new PublicKey('metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s');
const CANDY_MACHINE_V2_PROGRAM = new PublicKey('cndy3Z4yapfJBmL3ShUp5exZKqR3z33thTzeNMm2gRZ');
// const candyMachineId = new PublicKey('ENTER_YOUR_CANDY_MACHINE_ID_HERE');
/**
 * Fetch all SPL Token accounts (dataSize 165) whose account data at byte
 * offset 1 matches the hard-coded pubkey, and return the raw accounts.
 * NOTE(review): this queries the SPL Token program, not the metadata
 * program — confirm the offset/pubkey filter is the intended one.
 */
const getMintAddresses = async () => {
  const metadataAccounts = await connection.getProgramAccounts(
    // TOKEN_METADATA_PROGRAM,
    new PublicKey('TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA'),
    {
      // The mint address is located at byte 33 and lasts for 32 bytes.
      // dataSlice: { offset: 33, length: 32 },
      filters: [
        { dataSize: 165 },
        {
          memcmp: {
            offset: 1,
            bytes: new PublicKey('trshC9cTgL3BPXoAbp5w9UfnUMWEJx5G61vUijXPMLH').toBase58(),
          },
        },
      ],
    },
  );
  // the map(...) that followed this return was unreachable dead code; removed
  return metadataAccounts;
};
/**
 * Derive the candy-machine creator PDA for the given candy machine id.
 * Returns the [address, bump] pair from findProgramAddress.
 */
const getCandyMachineCreator = async (candyMachine: PublicKey): Promise<[PublicKey, number]> => {
  const seeds = [Buffer.from('candy_machine'), candyMachine.toBuffer()];
  return PublicKey.findProgramAddress(seeds, CANDY_MACHINE_V2_PROGRAM);
};
// Script entry point: fetch the matching accounts and dump them to stdout.
(async () => {
  // const candyMachineCreator = await getCandyMachineCreator(candyMachineId);
  // console.log(`candyMachineCreator`);
  // console.log(candyMachineCreator.toString());
  const accounts = await getMintAddresses();
  console.log(`a`);
  console.log(accounts);
})();

View File

@ -0,0 +1,30 @@
import { Connection } from '@metaplex/js';
import { Metadata } from '@metaplex-foundation/mpl-token-metadata';
import { PublicKey } from '@solana/web3.js';
// One-off script: resolve a token mint's metadata PDA via the Metaplex SDK
// and print the on-chain metadata for inspection.
(async () => {
const connection = new Connection('mainnet-beta');
// const tokenMint = '9ARngHhVaCtH5JFieRdSS5Y8cdZk2TMF4tfGSWFB9iSK';
const tokenMint = '5XKoz4nuPFU78jcEVREMZoh9kKsYnCvrTAmpRzvVdJp1';
// PDA derived deterministically from the mint address
const metadataPDA = await Metadata.getPDA(new PublicKey(tokenMint));
// Metadata.getCandyMachineCreator()
// Metadata.getPDA()
const tokenMetadata = await Metadata.load(connection, metadataPDA);
console.log(tokenMetadata.data);
// Example of the structure printed above:
/*
MetadataData {
key: 4,
updateAuthority: '9uBX3ASjxWvNBAD1xjbVaKA74mWGZys3RGSF7DdeDD3F',
mint: '9ARngHhVaCtH5JFieRdSS5Y8cdZk2TMF4tfGSWFB9iSK',
data: MetadataDataData {
name: 'SMB #1355',
symbol: 'SMB',
uri: 'https://arweave.net/3wXyF1wvK6ARJ_9ue-O58CMuXrz5nyHEiPFQ6z5q02E',
sellerFeeBasisPoints: 500,
creators: [ [Creator] ]
},
primarySaleHappened: 1,
isMutable: 1
}
*/
})();

View File

@ -0,0 +1,57 @@
import { Connection, clusterApiUrl, PublicKey } from '@solana/web3.js';
// import bs58 from 'bs58';
const connection = new Connection(clusterApiUrl('mainnet-beta'));
const MAX_NAME_LENGTH = 32;
const MAX_URI_LENGTH = 200;
const MAX_SYMBOL_LENGTH = 10;
const MAX_CREATOR_LEN = 32 + 1 + 1;
const MAX_CREATOR_LIMIT = 5;
const MAX_DATA_SIZE = 4 + MAX_NAME_LENGTH + 4 + MAX_SYMBOL_LENGTH + 4 + MAX_URI_LENGTH + 2 + 1 + 4 + MAX_CREATOR_LIMIT * MAX_CREATOR_LEN;
const MAX_METADATA_LEN = 1 + 32 + 32 + MAX_DATA_SIZE + 1 + 1 + 9 + 172;
const CREATOR_ARRAY_START = 1 + 32 + 32 + 4 + MAX_NAME_LENGTH + 4 + MAX_URI_LENGTH + 4 + MAX_SYMBOL_LENGTH + 2 + 1 + 4;
// const TOKEN_METADATA_PROGRAM = new PublicKey('metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s');
const TOKEN_METADATA_PROGRAM = new PublicKey('TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA');
const candyMachineId = new PublicKey('8mNmf15xNrMFQLNSNrHxxswy7a1NfaSFwXHkVUPeMWwU');
// Fetch program accounts of size 165 whose data at byte offset 1 matches a
// hard-coded pubkey, and return the raw accounts.
// NOTE(review): `firstCreatorAddress` is currently ignored — the memcmp
// filter uses a hard-coded key and offset instead of the commented-out
// CREATOR_ARRAY_START/first-creator filter. Confirm that is intentional.
const getMintAddresses = async (firstCreatorAddress: PublicKey) => {
const metadataAccounts = await connection.getProgramAccounts(
TOKEN_METADATA_PROGRAM,
{
// The mint address is located at byte 33 and lasts for 32 bytes.
// dataSlice: { offset: 33, length: 32 },
filters: [
// Only get Metadata accounts.
// { dataSize: MAX_METADATA_LEN },
{ dataSize: 165 },
// Filter using the first creator.
{
memcmp: {
// offset: CREATOR_ARRAY_START,
// bytes: firstCreatorAddress.toBase58(),
offset: 1,
bytes: new PublicKey('4FYjfa71puV4PD12cyqXotu6z2FhLiqFSHjEfYiFLnbj').toBase58(),
},
},
],
},
);
return metadataAccounts;
// return metadataAccounts.map((metadataAccountInfo) => (
// bs58.encode(metadataAccountInfo.account.data)
// ));
};
// Entry point: fetch accounts for the configured candy machine and print
// them along with their count.
(async () => {
const accounts = await getMintAddresses(candyMachineId);
console.log(`a`);
console.log(accounts);
console.log(accounts.length);
})();

View File

@ -0,0 +1,101 @@
{
"compilerOptions": {
/* Visit https://aka.ms/tsconfig.json to read more about this file */
/* Projects */
// "incremental": true, /* Enable incremental compilation */
// "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */
// "tsBuildInfoFile": "./", /* Specify the folder for .tsbuildinfo incremental compilation files. */
// "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects */
// "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */
// "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */
/* Language and Environment */
"target": "es2016", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
// "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
// "jsx": "preserve", /* Specify what JSX code is generated. */
// "experimentalDecorators": true, /* Enable experimental support for TC39 stage 2 draft decorators. */
// "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */
// "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h' */
// "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
// "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using `jsx: react-jsx*`.` */
// "reactNamespace": "", /* Specify the object invoked for `createElement`. This only applies when targeting `react` JSX emit. */
// "noLib": true, /* Disable including any library files, including the default lib.d.ts. */
// "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */
/* Modules */
"module": "commonjs", /* Specify what module code is generated. */
// "rootDir": "./", /* Specify the root folder within your source files. */
// "moduleResolution": "node", /* Specify how TypeScript looks up a file from a given module specifier. */
// "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
// "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
// "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
// "typeRoots": [], /* Specify multiple folders that act like `./node_modules/@types`. */
// "types": [], /* Specify type package names to be included without being referenced in a source file. */
// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
// "resolveJsonModule": true, /* Enable importing .json files */
// "noResolve": true, /* Disallow `import`s, `require`s or `<reference>`s from expanding the number of files TypeScript should add to a project. */
/* JavaScript Support */
// "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */
// "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */
// "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from `node_modules`. Only applicable with `allowJs`. */
/* Emit */
// "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
// "declarationMap": true, /* Create sourcemaps for d.ts files. */
// "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
"sourceMap": true, /* Create source map files for emitted JavaScript files. */
// "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If `declaration` is true, also designates a file that bundles all .d.ts output. */
"outDir": "dist", /* Specify an output folder for all emitted files. */
// "removeComments": true, /* Disable emitting comments. */
// "noEmit": true, /* Disable emitting files from a compilation. */
// "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
// "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types */
// "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
// "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */
// "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
// "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
// "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */
// "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
// "newLine": "crlf", /* Set the newline character for emitting files. */
// "stripInternal": true, /* Disable emitting declarations that have `@internal` in their JSDoc comments. */
// "noEmitHelpers": true, /* Disable generating custom helper functions like `__extends` in compiled output. */
// "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */
// "preserveConstEnums": true, /* Disable erasing `const enum` declarations in generated code. */
// "declarationDir": "./", /* Specify the output directory for generated declaration files. */
// "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */
/* Interop Constraints */
// "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */
// "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */
"esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables `allowSyntheticDefaultImports` for type compatibility. */
// "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
"forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */
/* Type Checking */
"strict": true, /* Enable all strict type-checking options. */
// "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied `any` type.. */
// "strictNullChecks": true, /* When type checking, take into account `null` and `undefined`. */
// "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */
// "strictBindCallApply": true, /* Check that the arguments for `bind`, `call`, and `apply` methods match the original function. */
// "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */
// "noImplicitThis": true, /* Enable error reporting when `this` is given the type `any`. */
// "useUnknownInCatchVariables": true, /* Type catch clause variables as 'unknown' instead of 'any'. */
// "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */
// "noUnusedLocals": true, /* Enable error reporting when a local variables aren't read. */
// "noUnusedParameters": true, /* Raise an error when a function parameter isn't read */
// "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */
// "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */
// "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */
// "noUncheckedIndexedAccess": true, /* Include 'undefined' in index signature results */
// "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */
// "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type */
// "allowUnusedLabels": true, /* Disable error reporting for unused labels. */
// "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */
/* Completeness */
// "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */
"skipLibCheck": true /* Skip type checking all .d.ts files. */
}
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -69,20 +69,21 @@ sales.price.max()
def add_model_sales():
sales = pd.read_csv('./data/sales.csv').rename(columns={'sale_date':'block_timestamp'})
print(sales.groupby('collection').token_id.count())
sales.token_id.unique()
sales.groupby('collection').token_id.count()
sales[sales.collection == 'Galactic Punks']
del sales['tx_id']
old = pd.read_csv('./data/pred_price copy.csv').rename(columns={'rank':'nft_rank'})
old = pd.read_csv('./data/pred_price.csv').rename(columns={'rank':'nft_rank'})
old = pd.read_csv('./data/pred_price copy.csv').rename(columns={'rank':'nft_rank'})
old.groupby('collection').token_id.count()
sales['token_id'] = sales.token_id.astype(int).astype(str)
old['token_id'] = old.token_id.astype(str)
sales = sales.merge( old[['collection','token_id','nft_rank']] )
sales.groupby('collection').token_id.count()
sales.head()
sales['block_timestamp'] = sales.block_timestamp.apply(lambda x: str(x)[:19] )
sales['price'] = sales.price.apply(lambda x: round(x, 2))
print(sales.groupby('collection').token_id.count())
sales.to_csv('./data/model_sales.csv', index=False)
@ -105,13 +106,6 @@ def update_token_ids():
df = pd.read_csv('./data/{}.csv'.format(c))
df['token_id'] = df.token_id.apply(lambda x: str(int(float(x))) )
df['tmp'] = df.token_id.apply(lambda x: (str(x)[:5]))
df[(df.collection == 'Galactic Punks') & (df.price == 99)]
df[(df.collection == 'Galactic Punks') & (df.price == 99) & (df.tx_id == 'B57DB0555DED1D9593765EB9EF09796068268B91CF211CC5BF445AA0006205EC')]
df[(df.collection == 'Galactic Punks') & (df.price == 99) & (df.tx_id == 'B57DB0555DED1D9593765EB9EF09796068268B91CF211CC5BF445AA0006205EC')].token_id.values
tokens[(tokens.collection == 'Galactic Punks') ].token_id.values
tokens[(tokens.collection == 'Galactic Punks') & (tokens.token_id == '25984997114855597728010029317878710272') ].token_id.values
tokens[(tokens.token_id == '25984997114855597728010029317878710272') ].token_id.values
tokens[(tokens.token_id == '"25984997114855597728010029317878710272"') ].token_id.values
df['tmp'] = df.token_id.apply(lambda x: x[:10] )
tokens['tmp'] = tokens.token_id.apply(lambda x: x[:10] )
len(tokens)
@ -152,5 +146,6 @@ def update_token_ids():
df[df.collection == 'Galactic Punks']
print(df.groupby('collection').token_id.count() )
df.to_csv('./data/{}.csv'.format(c), index=False)
update_token_ids()
update_token_ids()
add_model_sales()

View File

@ -11,6 +11,10 @@ library(shinyjs)
require(dplyr)
library(htmlwidgets)
library(reactable)
# library(promises)
# library(future)
# plan(multisession)
plotly.style <- list(
plot_bgcolor = "rgba(0, 0, 0, 0)",

View File

@ -1,6 +1,8 @@
server <- function(input, output, session) {
load('data.Rdata')
metadata <- unique(attributes[, list(collection, feature_name, feature_value)])
SD_MULT = 3
SD_SCALE = 1.95
@ -56,6 +58,343 @@ server <- function(input, output, session) {
)
})
# Numeric-range filter inputs for the Historical Sales table.
# Generates the four free-text rank bounds (deal-score rank min/max and
# rarity rank min/max) in one loop instead of four copy-pasted blocks.
# Output ids (maxnftrankinput2, minnftrankinput2, maxrarityrankinput2,
# minrarityrankinput2) and input ids (maxnftrank2, minnftrank2,
# maxrarityrank2, minrarityrank2) are unchanged from the hand-written
# originals, so ui.R and getSalesData() keep working as before.
for (rank_name in c('maxnftrank', 'minnftrank', 'maxrarityrank', 'minrarityrank')) {
  local({
    # freeze the loop variable so each renderUI closure keeps its own copy
    nm <- rank_name
    output[[paste0(nm, 'input2')]] <- renderUI({
      textInput(
        inputId = paste0(nm, '2')
        , label = NULL
        , width = "100%"
      )
    })
  })
}
# Attribute-value dropdowns for the Historical Sales filters.
# The selected collection's sorted attribute names (getMetadataColumns())
# drive up to 18 selectInputs: slot i shows the values of the i-th
# attribute plus an 'Any' wildcard, and renders nothing (NULL) when the
# collection has fewer than i attributes. Replaces 18 byte-identical
# copy-pasted renderUI blocks; output ids filter1select..filter18select
# and input ids filter1..filter18 are unchanged.
for (filter_idx in 1:18) {
  local({
    # freeze the loop index so each closure renders its own slot
    i <- filter_idx
    output[[paste0('filter', i, 'select')]] <- renderUI({
      selected <- getCollection()
      name <- getMetadataColumns()
      if(length(name) < i) {
        return(NULL)
      }
      name <- name[i]
      # every value this collection has for the attribute, plus a wildcard
      m <- metadata[ collection == eval(selected) & feature_name == eval(name) ]
      choices <- c('Any', sort(m$feature_value))
      selectInput(
        inputId = paste0('filter', i)
        , label = NULL
        , selected = 'Any'
        , choices = choices
        , width = "100%"
      )
    })
  })
}
output$collectionselect <- renderUI({
choices <- sort(unique(pred_price$collection))
selectInput(
@ -128,7 +467,40 @@ server <- function(input, output, session) {
cur_0 <- pred_price[collection == eval(selected) ]
cur_1 <- cur_0[ token_id == eval(as.numeric(input$tokenid)) ]
if (nrow(cur_1)) {
t <- paste0("Market Rank #", format(cur_1$rk[1], big.mark=",")," / ",format(nrow(cur_0), big.mark=","))
t <- paste0("Deal Score Rank #", format(cur_1$rk[1], big.mark=",")," / ",format(nrow(cur_0), big.mark=","))
}
}
paste0(t)
})
# Text summary under the sales filters: mean price (and mean premium over
# the floor) of the 100 most recent sales matching the current filters.
# Fixes: removed leftover debug print() calls; guard against the NULL that
# getSalesData() returns before a collection is chosen (nrow(NULL) is NULL,
# and if(NULL) errors the render).
output$salesAverage <- renderText({
  data <- getSalesData()
  t <- ''
  if (!is.null(data) && nrow(data)) {
    p <- format(round(mean(head(data$price, 100)), 1), big.mark=',')
    f <- format(round(mean(head(data$vs_floor, 100)), 1), big.mark=',')
    t <- paste0(p, ' $SOL (+', f, ' vs the floor)')
  }
  paste0(t)
})
output$rarityrank <- renderText({
id <- getTokenId()
selected <- getCollection()
chain <- getChain()
t <- ""
if( length(id) == 0 | length(selected) == 0 ) {
return(t)
}
if (!is.na(id) & !is.na(selected)) {
cur_0 <- pred_price[collection == eval(selected) ]
cur_1 <- cur_0[ token_id == eval(as.numeric(input$tokenid)) ]
if (nrow(cur_1)) {
a <- ifelse( chain == 'Solana', 'HowRare', 'NotFoundTerra' )
t <- paste0(a, " Rank #", format(cur_1$nft_rank[1], big.mark=",")," / ",format(nrow(cur_0), big.mark=","))
}
}
paste0(t)
@ -213,7 +585,7 @@ server <- function(input, output, session) {
return(head(attributes, 0))
}
cur <- attributes[ token_id == eval(as.numeric(id)) & collection == eval(selected) ]
# cur <- merge( cur, feature_values[collection == eval(selected), list(feature_name, feature_value, pred_vs_baseline, pct_vs_baseline) ], all.x=TRUE )
cur <- merge( cur, feature_values[collection == eval(selected), list(feature_name, feature_value, pct_vs_baseline) ], all.x=TRUE )
cur <- cur[order(rarity)]
# floor <- getFloors()[2]
# log_coef <- coefsdf[ collection == eval(selected) ]$log_coef[1]
@ -228,9 +600,9 @@ server <- function(input, output, session) {
# mult <- ratio / s
# cur[, pct_vs_baseline := pct_vs_baseline * eval(mult) ]
# }
cur[, vs_baseline := 0 ]
cur[, pred_vs_baseline := 0 ]
cur[, vs_baseline := 0 ]
# cur[, vs_baseline := 0 ]
# cur[, pred_vs_baseline := 0 ]
# cur[, vs_baseline := 0 ]
# cur[, vs_baseline := round((pred_vs_baseline * eval(lin_coef)) + (pct_vs_baseline * eval(floor) * eval(log_coef) ), 1) ]
# cur[, pred_vs_baseline := round(pred_vs_baseline, 1) ]
# cur[, vs_baseline := round(pred_vs_baseline + (pct_vs_baseline * eval(floor)), 1) ]
@ -246,7 +618,7 @@ server <- function(input, output, session) {
# reactable(data[, list( feature, value, rarity, vs_baseline, pred_vs_baseline, pct_vs_baseline )],
# data <- data[, list( feature, value, rarity, pct_vs_baseline )]
data <- data[, list( feature_name, feature_value, rarity )]
data <- data[, list( feature_name, feature_value, rarity, pct_vs_baseline )]
reactable(data,
defaultColDef = colDef(
headerStyle = list(background = "#10151A")
@ -256,16 +628,16 @@ server <- function(input, output, session) {
outlined = FALSE,
columns = list(
feature_name = colDef(name = "Attribute", align = "left"),
feature_value = colDef(name = "Value", align = "left"),
rarity = colDef(name = "Rarity", align = "left")
# pct_vs_baseline = colDef(
# name="Value", header=with_tooltip("Value", "The estimated price impact of this feature vs the floor")
# , html = TRUE
# , align = "left"
# , cell = function(x) {
# htmltools::tags$span(paste0('+', format(round(x*1000)/10, digits=4, decimal.mark=".", big.mark=","), '%'))
# }
# )
feature_value = colDef(name = "Name", align = "left"),
rarity = colDef(name = "Rarity", align = "left"),
pct_vs_baseline = colDef(
name="General Price Impact", header=with_tooltip("General Price Impact", "The estimated price impact of this feature vs the floor")
, html = TRUE
, align = "left"
, cell = function(x) {
htmltools::tags$span(paste0('+', format(round(x*1000)/10, digits=4, decimal.mark=".", big.mark=","), '%'))
}
)
)
)
})
@ -328,41 +700,221 @@ server <- function(input, output, session) {
)
})
output$salestable <- renderReactable({
# Restrict `data` to tokens whose i-th attribute equals `val`.
# A missing input (length 0, e.g. an unrendered filter), the 'Any'
# wildcard, or a collection with fewer than i attributes all leave
# `data` untouched.
getFilteredSalesData <- function(data, selected, val, i) {
  if (length(val) == 0) {
    return(data)
  }
  if (val == 'Any') {
    return(data)
  }
  att <- getMetadataColumns()
  if (length(att) < i) {
    return(data)
  }
  att <- att[i]
  # token ids in this collection carrying the requested attribute value;
  # the inner-join merge drops every sale of any other token
  include <- attributes[collection == eval(selected) & feature_name == eval(att) & feature_value == eval(val), list(token_id) ]
  merge(data, include)
}
# Build the per-collection sales table (one row per sale, newest first)
# that backs the Historical Sales reactable: joins deal-score rank (rk),
# token image, wide-format attribute columns, and formatted price/floor
# labels onto the raw sales.
# NOTE(review): appears written for the future/promises path (see the
# commented-out %...>% pipeline in output$salestable) — confirm whether it
# is still called anywhere.
getSalesDataFn <- function(selected, sales, tokens, pred_price, attributes) {
data <- sales[ collection == eval(selected)]
# deal-score rank for each token in this collection
m <- pred_price[collection == eval(selected), list(token_id, rk)]
data <- merge(data, m, all.x=TRUE)
data <- merge(data, tokens[collection == eval(selected), list(collection, token_id, image_url)], all.x=T )
data <- data[, list( token_id, image_url, block_timestamp, price, pred, mn_20, nft_rank, rk )]
data <- data[order(-block_timestamp)]
# premium paid over the trailing floor estimate, floored at 0
data[, vs_floor := pmax(0, price - mn_20) ]
# one column per attribute (wide format) for display/filtering
m <- dcast(attributes[collection == eval(selected), list(token_id, feature_name, feature_value)], token_id ~ feature_name, value.var='feature_value')
names <- colnames(m)
data <- merge(data, m, all.x=TRUE)
data <- data[order(-block_timestamp)]
# clamp the displayed floor to the sale price so it never exceeds it
data[, mn_20 := pmin(mn_20, price) ]
data[, mn_20_label := paste0(format(round(mn_20, 1), scientific = FALSE, digits=2, decimal.mark=".", big.mark=","))]
data[, price_label := paste0(format(price, scientific = FALSE, digits=2, decimal.mark=".", big.mark=","))]
# keep only the date portion of the timestamp for display
data[, block_timestamp := substr(block_timestamp, 1, 10) ]
return(data)
}
# Reactive data for the Historical Sales table: all sales of the selected
# collection, joined with deal-score rank (rk) and token image, narrowed
# by the four rank-range text inputs and the per-attribute dropdown
# filters, newest sale first. Returns NULL until a collection is chosen.
# Cleanup of merge artifacts: removed an initial `data` that was
# immediately overwritten, a reactable() built mid-reactive whose result
# was discarded, and an unused `names` local; collapsed 20 copy-pasted
# getFilteredSalesData calls into a loop.
getSalesData <- reactive({
  selected <- getCollection()
  if( length(selected) == 0 ) {
    return(NULL)
  }
  data <- sales[ collection == eval(selected)]
  # deal-score rank per token in this collection
  m <- pred_price[collection == eval(selected), list(token_id, rk)]
  data <- merge(data, m, all.x=TRUE)
  # NOTE(review): these wide clean_name columns are dropped by the column
  # selection below, and the attribute filters join on token_id only —
  # presumably removable, but kept to preserve behavior; confirm and delete.
  m <- dcast(attributes[collection == eval(selected), list(token_id, feature_name, clean_name)], token_id ~ feature_name, value.var='clean_name')
  data <- merge(data, m, all.x=TRUE)
  # rank-range filters: a blank input means "no bound"
  if(input$maxnftrank2 != '') {
    data <- data[ rk <= eval(as.numeric(input$maxnftrank2)) ]
  }
  if(input$minnftrank2 != '') {
    data <- data[ rk >= eval(as.numeric(input$minnftrank2)) ]
  }
  if(input$maxrarityrank2 != '') {
    data <- data[ nft_rank <= eval(as.numeric(input$maxrarityrank2)) ]
  }
  if(input$minrarityrank2 != '') {
    data <- data[ nft_rank >= eval(as.numeric(input$minrarityrank2)) ]
  }
  # attribute dropdowns; slots beyond the collection's attribute count (and
  # the unrendered filter19/filter20 inputs, which come back NULL) are
  # no-ops inside getFilteredSalesData
  for (filter_idx in 1:20) {
    data <- getFilteredSalesData(data, selected, input[[paste0('filter', filter_idx)]], filter_idx)
  }
  data <- data[order(-block_timestamp)]
  data <- merge(data, tokens[collection == eval(selected), list(collection, token_id, image_url)], all.x=T )
  data <- data[, list( token_id, image_url, block_timestamp, price, pred, mn_20, nft_rank, rk )]
  data <- data[order(-block_timestamp)]
  # premium paid over the trailing-20-sale floor estimate, floored at 0
  data[, vs_floor := pmax(0, price - mn_20) ]
  # wide attribute columns (feature_value) for display in the table
  m <- dcast(attributes[collection == eval(selected), list(token_id, feature_name, feature_value)], token_id ~ feature_name, value.var='feature_value')
  data <- merge(data, m, all.x=TRUE)
  data <- data[order(-block_timestamp)]
  # clamp the displayed floor to the sale price so it never exceeds it
  data[, mn_20 := pmin(mn_20, price) ]
  data[, mn_20_label := paste0(format(round(mn_20, 1), scientific = FALSE, digits=2, decimal.mark=".", big.mark=","))]
  data[, price_label := paste0(format(price, scientific = FALSE, digits=2, decimal.mark=".", big.mark=","))]
  # show the date portion only
  data[, block_timestamp := substr(block_timestamp, 1, 10) ]
  return(data)
})
# Sorted, de-duplicated attribute names for the selected collection;
# drives both the filter labels and the filter dropdowns.
getMetadataColumns <- reactive({
  current <- getCollection()
  feature_names <- unique(metadata[ collection == eval(current), list(feature_name) ])$feature_name
  sort(feature_names)
})
# Label for filter slot i: the i-th attribute name of the selected
# collection, or '' when the collection has fewer than i attributes.
getFilterText <- function(i) {
  cols <- getMetadataColumns()
  if (length(cols) < i) {
    return('')
  }
  cols[i]
}
# Filter labels for the Historical Sales section: output$filter1 ..
# output$filter20 each show the matching attribute name via
# getFilterText(i) (or '' when the collection has fewer attributes).
# One loop replaces 20 identical copy-pasted renderText blocks; the
# output ids are unchanged so ui.R's textOutput('filterN') still binds.
for (filter_idx in 1:20) {
  local({
    # freeze the loop index so each closure renders its own slot
    i <- filter_idx
    output[[paste0('filter', i)]] <- renderText({
      paste0(getFilterText(i))
    })
  })
}
# Historical Sales table: renders the filtered sales from getSalesData().
output$salestable <- renderReactable({
  if (length(getCollection()) == 0) {
    return(NULL)
  }
  data <- getSalesData()
  # render token <img> tags only for the first 100 rows to keep the
  # widget responsive on large collections
  render_token_image <- function(value, index) {
    if (index > 100) {
      return(NULL)
    }
    htmltools::tags$img(src = value)
  }
  sale_columns <- list(
    token_id = colDef(name = "Token ID", align = "left"),
    image_url = colDef(name = "Token", align = "left", cell = render_token_image),
    block_timestamp = colDef(name = "Sale Date", align = "left"),
    price_label = colDef(name = "Price", align = "left"),
    pred = colDef(name = "Fair Market Price", align = "left"),
    rk = colDef(name = "Deal Score Rank", align = "left"),
    nft_rank = colDef(name = "Rarity Rank", align = "left"),
    mn_20_label = colDef(name = "Floor Price", align = "left")
  )
  reactable(
    data,
    defaultColDef = colDef(headerStyle = list(background = "#10151A")),
    borderless = TRUE,
    outlined = FALSE,
    searchable = FALSE,
    columns = sale_columns
  )
})
getPriceDistributionData <- reactive({
@ -538,6 +1090,9 @@ server <- function(input, output, session) {
df <- merge(df, tokens[collection == eval(selected), list(collection, token_id, image_url)] )
tuple <- getConvertedPrice()
floors <- getFloors()
print('getListingData')
print(tuple)
print(floors)
df[, pred_price_0 := pred_price ]
df[, pred_price := pred_price + eval(tuple[1]) + ( eval(tuple[2]) * pred_price / eval(floors[1]) ) ]
@ -550,7 +1105,7 @@ server <- function(input, output, session) {
df[, pred_price := paste0(format(round(pred_price, 1), digits=3, decimal.mark=".", big.mark=",")) ]
df <- df[, list(image_url, token_id, price, pred_price, deal_score, rk)]
m <- dcast(attributes[collection == eval(selected)], collection + token_id ~ feature_name, value.var='clean_name')
m <- dcast(attributes[collection == eval(selected)], collection + token_id ~ feature_name, value.var='feature_value')
df <- merge(df, m, all.x=TRUE)
df[, collection := NULL]
df <- df[order(-deal_score)]
@ -564,6 +1119,8 @@ server <- function(input, output, session) {
if( nrow(df) == 0 ) {
return(NULL)
}
print('head(df)')
print(head(df))
df <- df[ deal_score >= 10 ]
df[, hover_text := paste0('<b>#',token_id,'</b><br>Listing Price: ',price,'<br>Fair Market Price: ',pred_price,'<br>Deal Score: ',deal_score) ]
f <- min(df[price > 0]$price)
@ -706,7 +1263,10 @@ server <- function(input, output, session) {
if (name == 'solana-monkey-business') name <- 'smb'
if (name == 'degen-ape-academy') name <- 'degenapes'
href <- paste0('https://howrare.is/',name,'/',id)
url <- span("*Rarity from ", a("howrare.is", href=href)," used in the model")
cur_0 <- pred_price[collection == eval(selected) ]
cur_1 <- cur_0[ token_id == eval(as.numeric(input$tokenid)) ]
url <- span("*Rarity from ", a("howrare.is", href=href),paste0(" (rank #",format(cur_1$nft_rank[1], big.mark = ','),") used in the model"))
HTML(paste(url))
})
@ -755,14 +1315,18 @@ server <- function(input, output, session) {
filterable = TRUE,
outlined = FALSE,
columns = list(
image_url = colDef(name = "Token", align = "left", cell = function(value) {
htmltools::tags$img(src=value)
image_url = colDef(name = "Token", align = "left", cell = function(value, index) {
if(index <= 100) {
htmltools::tags$img(src=value)
} else {
return(NULL)
}
}),
token_id = colDef(name = "Token ID", align = "left"),
price = colDef(name = "Listed Price", align = "left"),
pred_price = colDef(name = "Fair Market Price", align = "left"),
deal_score = colDef(name = "Deal Score", align = "left"),
rk = colDef(name = "Market Rank", align = "left")
rk = colDef(name = "Deal Score Rank", align = "left")
),
searchable = FALSE
)

132
viz/ui.R
View File

@ -78,13 +78,16 @@ fluidPage(
div(class = "subtitle", textOutput("tokenrank", inline=TRUE), icon(class="padding-left-5", id="rank-tooltip", "info-circle") )
, bsTooltip(id = "rank-tooltip", title = "Dynamic value rank based on the estimated fair market price modeled from historical sales. Model and rank will update periodically as we get more sales data.", placement = "bottom", trigger = "hover")
)
, div(
div(class = "subtitle", textOutput("rarityrank", inline=TRUE))
)
, div(class = "link", uiOutput('randomearthurl'))
)
, fluidRow(
column(6
column(5
, div(class = "token-img", uiOutput("tokenimg"))
)
, column(6, div(
, column(7, div(
class = "table"
, reactableOutput("attributestable")
, bsTooltip(id = "value-tooltip", title = "Represents the dollar impact this feature has on the price vs the floor", placement = "bottom", trigger = "hover")
@ -110,12 +113,11 @@ fluidPage(
, div(class='description', 'Click a dot to select the token')
)
, fluidRow(
column(4
class = 'filters'
, column(4
, div(
class = "inputtitle"
, "Max Price"
# , icon(id="floor-price-tooltip", "info-circle")
# , bsTooltip(id = "floor-price-tooltip", title = "Update this number to the current floor price of the collection, which will update the rest of the numbers on this page", placement = "bottom", trigger = "hover")
)
, fluidRow(uiOutput("maxpriceinput"))
)
@ -135,6 +137,126 @@ fluidPage(
class="grey8row"
, h2("Historical Sales", icon(class="padding-left-10", id="historical-sales-tooltip", "info-circle"))
, bsTooltip(id = "historical-sales-tooltip", title = "This app is still in beta - sales data may be incomplete or delayed", placement = "bottom", trigger = "hover")
, fluidRow(
class = 'filters'
, column(3
, div(
class = "inputtitle"
, "Min Deal Score Rank"
)
, fluidRow(uiOutput("minnftrankinput2"))
)
, column(3
, div(
class = "inputtitle"
, "Max Deal Score Rank"
)
, fluidRow(uiOutput("maxnftrankinput2"))
)
, column(3
, div(
class = "inputtitle"
, "Min Rarity Rank"
)
, fluidRow(uiOutput("minrarityrankinput2"))
)
, column(3
, div(
class = "inputtitle"
, "Max Rarity Rank"
)
, fluidRow(uiOutput("maxrarityrankinput2"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter1', inline=TRUE)
)
, fluidRow(uiOutput("filter1select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter2', inline=TRUE)
)
, fluidRow(uiOutput("filter2select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter3', inline=TRUE)
)
, fluidRow(uiOutput("filter3select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter4', inline=TRUE)
)
, fluidRow(uiOutput("filter4select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter5', inline=TRUE)
)
, fluidRow(uiOutput("filter5select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter6', inline=TRUE)
)
, fluidRow(uiOutput("filter6select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter7', inline=TRUE)
)
, fluidRow(uiOutput("filter7select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter8', inline=TRUE)
)
, fluidRow(uiOutput("filter8select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter9', inline=TRUE)
)
, fluidRow(uiOutput("filter9select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter10', inline=TRUE)
)
, fluidRow(uiOutput("filter10select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter11', inline=TRUE)
)
, fluidRow(uiOutput("filter11select"))
)
, column(3
, div(
class = "inputtitle"
, textOutput('filter12', inline=TRUE)
)
, fluidRow(uiOutput("filter12select"))
)
)
, div(
class = 'padding-bottom-1'
, 'Average from most recent 100 sales using these filters: '
, textOutput('salesAverage', inline=TRUE)
)
, div(class = "table", reactableOutput("salestable"))
, div(class = "description", 'This app is still in beta - sales data may be incomplete or delayed')
)

View File

@ -232,6 +232,32 @@ input[type=number] {
}
/*******************/
/* Filters */
/*******************/
.filters > div > .inputtitle {
font-weight: 100;
font-size: 1.25rem;
padding: 0;
margin: 0;
}
.filters > div > .row > div > .form-group {
padding: 0 1rem 1rem 0;
margin: 0;
}
.filters .form-control, .filters .selectize-input > *, .filters .selectize-dropdown > * {
font-weight: 100;
font-size: 1.25rem;
}
.filters > div {
padding: 0;
margin: 0;
}
/* Remove the left padding on the first filter column so it aligns with
   the section edge. Fix: the original selector `.filter:first-child()`
   never matched — the markup uses the `.filters` row class (not
   `.filter`), and `:first-child` is a pseudo-class that takes no
   parentheses, making the selector invalid. */
.filters > div:first-child {
  padding-left: 0;
}
/***********************/
/* React Table */
/***********************/
@ -298,6 +324,9 @@ tr {
/*******************/
/* General */
/*******************/
.padding-bottom-1 {
padding-bottom: 1rem;
}
.row {
margin: 0;
}