rosacastillo committed on
Commit · dca94fa
1 Parent(s): 7652a7b
updating dashboard data. Migrating live data
Browse files
- data/all_trades_profitability.parquet +2 -2
- data/delivers.parquet +2 -2
- data/fpmmTrades.parquet +2 -2
- data/fpmms.parquet +2 -2
- data/invalid_trades.parquet +2 -2
- data/requests.parquet +2 -2
- data/summary_profitability.parquet +2 -2
- data/t_map.pkl +2 -2
- data/tools.parquet +2 -2
- data/tools_accuracy.csv +2 -2
- live_data/analysis_of_markets_data.ipynb +0 -0
- live_data/markets_live_data.parquet +0 -3
- live_data/markets_live_data_old.parquet +0 -3
- scripts/live_markets_data.py +0 -276
- scripts/live_traders_data.py +0 -153
- scripts/live_utils.py +0 -14
- scripts/queries.py +0 -79
data/all_trades_profitability.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f1bb84f24b4dd4bef131f6f302ee4511e38ca6a02bf5991adf6dcf1333adee17
+size 1446213
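Note: each data file in this commit is tracked with Git LFS, so the diff touches only the three-line pointer file (version, oid, size) while the actual parquet/csv/pkl content lives in LFS storage; the remaining data/ diffs below follow the same pattern. A minimal sketch (not part of this commit) of parsing such a pointer file:

from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    """Split a Git LFS pointer file into its key/value fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    # e.g. fields["oid"] -> "sha256:f1bb84f2..." and fields["size"] -> "1446213"
    return fields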
data/delivers.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:0910344b2ea12c551d29517706dbc173095ac3c209e87db052a85f61774eb285
+size 447542653
data/fpmmTrades.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f70bf0db5698f72afc40b17ddfcbcc129889a64e5b3f322f74c981d3a98b857d
+size 4833072
data/fpmms.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:bf77a20572a7eda2aa3487ec6828f3e56013e9a654a4007eae65a5f40a779175
+size 368370
data/invalid_trades.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2cb0cfde1ef4547862cfe2efcdc494dba4069ec0124a5acc749c48007e4dd897
+size 244868
data/requests.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ac95eb71f26583a0c4a44703c757af665ae5ca243b3682f0828c96c2b9a7ace5
+size 13437238
data/summary_profitability.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:76bc2e023eac09adc81cc337c46f023273a8aa310aed018f15506b3ba00a5d7e
+size 37419
data/t_map.pkl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:795ffdc54398c79a7ed3dbf7ede11730a1fcc665098aa9f1ad6fefebb6b64ac6
+size 11186989
data/tools.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2b8f1a48a72e24c7486c7fa20895fd4b8566a4c1b3e5f00f8eab0b9e71ce8e0d
+size 448725346
data/tools_accuracy.csv
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d8afd4ddbff90b4232e8883484a7ffb188df18c01d667ea5dcdc4690bfce7914
+size 1007
live_data/analysis_of_markets_data.ipynb
DELETED
The diff for this file is too large to render. See raw diff.
live_data/markets_live_data.parquet
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:797e30e861c7468a5cf840a23b560d66bb9a1103d5bdae2cd86a485a43592e6e
-size 21197
live_data/markets_live_data_old.parquet
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:362255753caee81b4453f2d9c0fcf5870c770c3cf32d09c15ae6a571692a6874
-size 19052
scripts/live_markets_data.py
DELETED
@@ -1,276 +0,0 @@
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
#   Copyright 2024 Valory AG
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# ------------------------------------------------------------------------------

import functools
import warnings
from typing import Optional, Generator, Callable
import os
import pandas as pd
from datetime import datetime, timedelta, UTC
import requests
from tqdm import tqdm
from typing import List, Dict
from live_traders_data import add_trading_info
from utils import SUBGRAPH_API_KEY, measure_execution_time
from live_utils import OMEN_SUBGRAPH_URL, CREATOR, BATCH_SIZE, DATA_DIR
from queries import (
    FPMMS_WITH_TOKENS_QUERY,
    ID_FIELD,
    DATA_FIELD,
    ANSWER_FIELD,
    ANSWER_TIMESTAMP_FIELD,
    QUERY_FIELD,
    TITLE_FIELD,
    OUTCOMES_FIELD,
    OPENING_TIMESTAMP_FIELD,
    CREATION_TIMESTAMP_FIELD,
    LIQUIDITY_FIELD,
    LIQUIDIY_MEASURE_FIELD,
    TOKEN_AMOUNTS_FIELD,
    ERROR_FIELD,
    QUESTION_FIELD,
    FPMMS_FIELD,
)


ResponseItemType = List[Dict[str, str]]
SubgraphResponseType = Dict[str, ResponseItemType]


class RetriesExceeded(Exception):
    """Exception to raise when retries are exceeded during data-fetching."""

    def __init__(
        self, msg="Maximum retries were exceeded while trying to fetch the data!"
    ):
        super().__init__(msg)


def hacky_retry(func: Callable, n_retries: int = 3) -> Callable:
    """Create a hacky retry strategy.

    Unfortunately, we cannot use `requests.packages.urllib3.util.retry.Retry`,
    because the subgraph does not return the appropriate status codes in case of failure.
    Instead, it always returns code 200. Thus, we raise exceptions manually inside `make_request`,
    catch those exceptions in the hacky retry decorator and try again.
    Finally, if the allowed number of retries is exceeded, we raise a custom `RetriesExceeded` exception.

    :param func: the input request function.
    :param n_retries: the maximum allowed number of retries.
    :return: The request method with the hacky retry strategy applied.
    """

    @functools.wraps(func)
    def wrapper_hacky_retry(*args, **kwargs) -> SubgraphResponseType:
        """The wrapper for the hacky retry.

        :return: a response dictionary.
        """
        retried = 0

        while retried <= n_retries:
            try:
                if retried > 0:
                    warnings.warn(f"Retrying {retried}/{n_retries}...")

                return func(*args, **kwargs)
            except (ValueError, ConnectionError) as e:
                warnings.warn(e.args[0])
            finally:
                retried += 1

        raise RetriesExceeded()

    return wrapper_hacky_retry


@hacky_retry
def query_subgraph(url: str, query: str, key: str) -> SubgraphResponseType:
    """Query a subgraph.

    Args:
        url: the subgraph's URL.
        query: the query to be used.
        key: the key to use in order to access the required data.

    Returns:
        a response dictionary.
    """
    content = {QUERY_FIELD: query}
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
    }
    res = requests.post(url, json=content, headers=headers)

    if res.status_code != 200:
        raise ConnectionError(
            "Something went wrong while trying to communicate with the subgraph "
            f"(Error: {res.status_code})!\n{res.text}"
        )

    body = res.json()
    if ERROR_FIELD in body.keys():
        raise ValueError(f"The given query is not correct: {body[ERROR_FIELD]}")

    data = body.get(DATA_FIELD, {}).get(key, None)
    if data is None:
        raise ValueError(f"Unknown error encountered!\nRaw response: \n{body}")

    return data


def fpmms_fetcher(current_timestamp: int) -> Generator[ResponseItemType, int, None]:
    """An indefinite fetcher for the FPMMs."""
    omen_subgraph = OMEN_SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
    print(f"omen_subgraph = {omen_subgraph}")
    while True:
        fpmm_id = yield
        fpmms_query = FPMMS_WITH_TOKENS_QUERY.substitute(
            creator=CREATOR,
            fpmm_id=fpmm_id,
            current_timestamp=current_timestamp,
            fpmms_field=FPMMS_FIELD,
            first=BATCH_SIZE,
            id_field=ID_FIELD,
            answer_timestamp_field=ANSWER_TIMESTAMP_FIELD,
            question_field=QUESTION_FIELD,
            outcomes_field=OUTCOMES_FIELD,
            title_field=TITLE_FIELD,
            opening_timestamp_field=OPENING_TIMESTAMP_FIELD,
            creation_timestamp_field=CREATION_TIMESTAMP_FIELD,
            liquidity_field=LIQUIDITY_FIELD,
            liquidity_measure_field=LIQUIDIY_MEASURE_FIELD,
            token_amounts_field=TOKEN_AMOUNTS_FIELD,
        )
        print(f"Executing query {fpmms_query}")
        yield query_subgraph(omen_subgraph, fpmms_query, FPMMS_FIELD)


def fetch_fpmms(current_timestamp: int) -> pd.DataFrame:
    """Fetch all the fpmms of the creator."""
    print("Fetching all markets")
    latest_id = ""
    fpmms = []
    fetcher = fpmms_fetcher(current_timestamp)
    for _ in tqdm(fetcher, unit="fpmms", unit_scale=BATCH_SIZE):
        batch = fetcher.send(latest_id)
        if len(batch) == 0:
            print("no data")
            break

        # TODO Add the incremental batching system from market creator
        # prev_fpmms is the previous local file with the markets
        # for fpmm in batch:
        #     if fpmm["id"] not in fpmms or "trades" not in prev_fpmms[fpmm["id"]]:
        #         prev_fpmms[fpmm["id"]] = fpmm
        print(f"length of the data received = {len(batch)}")
        latest_id = batch[-1].get(ID_FIELD, "")
        if latest_id == "":
            raise ValueError(f"Unexpected data format retrieved: {batch}")

        fpmms.extend(batch)

    print("Finished collecting data")
    return pd.DataFrame(fpmms)


def get_answer(fpmm: pd.Series) -> str:
    """Get an answer from its index, using Series of an FPMM."""
    return fpmm[QUESTION_FIELD][OUTCOMES_FIELD][fpmm[ANSWER_FIELD]]


def get_first_token_perc(row):
    if row["total_tokens"] == 0.0:
        return 0
    return round((row["token_first_amount"] / row["total_tokens"]) * 100, 2)


def get_second_token_perc(row):
    if row["total_tokens"] == 0.0:
        return 0
    return round((row["token_second_amount"] / row["total_tokens"]) * 100, 2)


def transform_fpmms(fpmms: pd.DataFrame, filename: str, current_timestamp: int) -> None:
    """Transform an FPMMS dataframe."""

    # prepare the new ones
    # Add current timestamp
    fpmms["tokens_timestamp"] = current_timestamp
    fpmms["open"] = True

    # computation of token distributions
    fpmms["token_first_amount"] = fpmms.outcomeTokenAmounts.apply(lambda x: int(x[0]))
    fpmms["token_second_amount"] = fpmms.outcomeTokenAmounts.apply(lambda x: int(x[1]))
    fpmms["total_tokens"] = fpmms.apply(
        lambda x: x.token_first_amount + x.token_second_amount, axis=1
    )
    fpmms["first_token_perc"] = fpmms.apply(lambda x: get_first_token_perc(x), axis=1)
    fpmms["second_token_perc"] = fpmms.apply(lambda x: get_second_token_perc(x), axis=1)
    fpmms.drop(
        columns=["token_first_amount", "token_second_amount", "total_tokens"],
        inplace=True,
    )
    # previous file to update?
    old_fpmms = None
    if os.path.exists(DATA_DIR / filename):
        old_fpmms = pd.read_parquet(DATA_DIR / filename)

    if old_fpmms is not None:
        # update which markets are not open anymore
        open_markets = list(fpmms.id.unique())
        print("Updating market status of old markets")
        open_mask = old_fpmms["id"].isin(open_markets)
        old_fpmms.loc[~open_mask, "status"] = False

        # now concatenate
        print("Appending new data to previous data")
        fpmms = pd.concat([old_fpmms, fpmms], ignore_index=True)
        # fpmms.drop_duplicates(inplace=True)

    return


@measure_execution_time
def compute_distributions(filename: Optional[str]) -> pd.DataFrame:
    """Fetch, process, store and return the markets as a Dataframe."""

    print("fetching new markets information")
    current_timestamp = int(datetime.now(UTC).timestamp())
    fpmms = fetch_fpmms(current_timestamp)
    print(fpmms.head())

    print("transforming and updating previous data")

    transform_fpmms(fpmms, filename, current_timestamp)
    print(fpmms.head())

    # WIP
    # print("Adding trading information")
    add_trading_info(fpmms)
    print("saving the data")
    print(fpmms.info())
    if filename:
        fpmms.to_parquet(DATA_DIR / filename, index=False)

    return fpmms


if __name__ == "__main__":
    compute_distributions("markets_live_data.parquet")
scripts/live_traders_data.py
DELETED
@@ -1,153 +0,0 @@
import requests
import os
from datetime import datetime, timedelta, UTC
import pandas as pd
from collections import defaultdict
from typing import Any, Optional
from tqdm import tqdm
from live_utils import OMEN_SUBGRAPH_URL, CREATOR, BATCH_SIZE, DATA_DIR
from utils import SUBGRAPH_API_KEY, _to_content
from queries import omen_trader_votes_query


headers = {
    "Accept": "application/json, multipart/mixed",
    "Content-Type": "application/json",
}


def _query_omen_xdai_subgraph(
    fpmm_id: str,
) -> dict[str, Any]:
    """Query the subgraph."""
    omen_subgraph = OMEN_SUBGRAPH_URL.substitute(subgraph_api_key=SUBGRAPH_API_KEY)
    print(f"omen_subgraph = {omen_subgraph}")
    grouped_results = defaultdict(list)
    id_gt = ""

    while True:
        query = omen_trader_votes_query.substitute(
            fpmm_creator=CREATOR.lower(),
            first=BATCH_SIZE,
            id_gt=id_gt,
            fpmm_id=fpmm_id,
        )
        print(f"query for the omen to collect trades {query}")
        content_json = _to_content(query)

        res = requests.post(omen_subgraph, headers=headers, json=content_json)
        result_json = res.json()
        # print(f"result = {result_json}")
        user_trades = result_json.get("data", {}).get("fpmmTrades", [])

        if not user_trades:
            break

        for trade in user_trades:
            fpmm_id = trade.get("fpmm", {}).get("id")
            grouped_results[fpmm_id].append(trade)

        id_gt = user_trades[len(user_trades) - 1]["id"]

    all_results = {
        "data": {
            "fpmmTrades": [
                trade
                for trades_list in grouped_results.values()
                for trade in trades_list
            ]
        }
    }

    return all_results


def transform_trades(trades_json: dict) -> pd.DataFrame:
    # convert to dataframe
    print("transforming trades")
    df = pd.DataFrame(trades_json["data"]["fpmmTrades"])
    if len(df) == 0:
        print("No trades for this market")
        return df

    # print(df.info())

    # convert creator to address
    df["trade_creator"] = df["creator"].apply(lambda x: x["id"])

    # normalize fpmm column
    fpmm = pd.json_normalize(df["fpmm"])
    fpmm.columns = [f"fpmm.{col}" for col in fpmm.columns]
    df = pd.concat([df, fpmm], axis=1)

    # drop fpmm column
    df.drop(["fpmm"], axis=1, inplace=True)

    # convert into int
    df.outcomeIndex = pd.to_numeric(df.outcomeIndex, errors="coerce")
    return df


def compute_from_timestamp_value(
    fpmm_id: str, opening_timestamp: int, fpmms: pd.DataFrame
) -> Optional[int]:
    """Function to find the latest timestamp registered for a specific market"""
    try:
        market_data = fpmms.loc[fpmms["id"] == fpmm_id]
        # how many previous samples do we have?
        if len(market_data) == 1:
            # take the opening Timestamp of the Market
            return opening_timestamp
        timestamps = (market_data.tokens_timestamp.values).sort()
        # the last value is the current timestamp so we need to take the previous one
        return timestamps[-2]
    except Exception as e:
        print(
            f"Error when trying to get the from timestamp value of the market id {fpmm_id}"
        )
        return None


def compute_votes_distribution(market_trades: pd.DataFrame):
    """Function to compute the distribution of votes for the trades of a market"""
    total_trades = len(market_trades)
    print(f"The total number of trades is {total_trades}")
    # outcomeIndex is always 1 or 0?
    sum_outcome_index_1 = sum(market_trades.outcomeIndex)
    print(f"The total number of votes for index 1 is {sum_outcome_index_1}")
    percentage_index_1 = round((sum_outcome_index_1 / total_trades) * 100, 2)
    return (100 - percentage_index_1), percentage_index_1


def add_trading_info(fpmms: pd.DataFrame) -> None:
    # Iterate over the markets
    print("Adding votes distribution per market")
    fpmms["votes_first_outcome_perc"] = 0.0
    fpmms["votes_second_outcome_perc"] = 0.0
    for i, fpmm in tqdm(fpmms.iterrows(), total=len(fpmms), desc="Analysing trades"):
        # read trades from latest read timestamp
        market_id = fpmm["id"]
        print(f"Adding information for the market {market_id}")
        market_trades_json = _query_omen_xdai_subgraph(
            fpmm_id=market_id,
        )
        market_trades = transform_trades(market_trades_json)
        if len(market_trades) == 0:
            continue
        # to compute the votes distribution
        print("Computing the votes distribution")
        first_outcome, second_outcome = compute_votes_distribution(market_trades)
        print(
            f"first outcome votes ={first_outcome}, second outcome votes = {second_outcome}"
        )
        fpmms.loc[fpmms["id"] == market_id, "votes_first_outcome_perc"] = first_outcome
        fpmms.loc[fpmms["id"] == market_id, "votes_second_outcome_perc"] = (
            second_outcome
        )
    print("Dataset after adding trading info")
    print(fpmms.head())
    return


if __name__ == "__main__":
    print("collecting votes distribution")
scripts/live_utils.py
DELETED
@@ -1,14 +0,0 @@
from string import Template
from pathlib import Path


CREATOR = "0x89c5cc945dd550BcFfb72Fe42BfF002429F46Fec"
BATCH_SIZE = 1000
# OMEN_SUBGRAPH = "https://api.thegraph.com/subgraphs/name/protofire/omen-xdai"
OMEN_SUBGRAPH_URL = Template(
    """https://gateway-arbitrum.network.thegraph.com/api/${subgraph_api_key}/subgraphs/id/9fUVQpFwzpdWS9bq5WkAnmKbNNcoBwatMR4yZq81pbbz"""
)
SCRIPTS_DIR = Path(__file__).parent
ROOT_DIR = SCRIPTS_DIR.parent
DATA_DIR = ROOT_DIR / "live_data"
MAX_UINT_HEX = "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"
scripts/queries.py
CHANGED
@@ -59,39 +59,6 @@ FPMMS_QUERY = Template(
     """
 )
 
-FPMMS_WITH_TOKENS_QUERY = Template(
-    """
-    {
-      ${fpmms_field}(
-        where: {
-          creator: "${creator}",
-          id_gt: "${fpmm_id}",
-          isPendingArbitration: false
-          currentAnswer: null
-          openingTimestamp_gt:${current_timestamp}
-        },
-        orderBy: ${id_field}
-        orderDirection: asc
-        first: ${first}
-      ){
-        ${id_field}
-        ${question_field} {
-          ${outcomes_field}
-          ${answer_timestamp_field}
-          answers{
-            answer
-          }
-        }
-        ${title_field}
-        ${opening_timestamp_field}
-        ${creation_timestamp_field}
-        ${liquidity_field}
-        ${liquidity_measure_field}
-        ${token_amounts_field}
-      }
-    }
-    """
-)
 omen_xdai_trades_query = Template(
     """
     {
@@ -145,52 +112,6 @@ omen_xdai_trades_query = Template(
     """
 )
 
-omen_trader_votes_query = Template(
-    """
-    {
-      fpmmTrades(
-        where: {
-          type: Buy,
-          fpmm_: {
-            creator: "${fpmm_creator}",
-            id: "${fpmm_id}",
-          },
-          id_gt: "${id_gt}"
-        }
-        first: ${first}
-        orderBy: id
-        orderDirection: asc
-      ) {
-        id
-        title
-        collateralToken
-        outcomeTokenMarginalPrice
-        oldOutcomeTokenMarginalPrice
-        type
-        creator {
-          id
-        }
-        creationTimestamp
-        collateralAmount
-        collateralAmountUSD
-        feeAmount
-        outcomeIndex
-        outcomeTokensTraded
-        transactionHash
-        fpmm {
-          id
-          outcomes
-          title
-          condition {
-            id
-          }
-        }
-      }
-    }
-    """
-)
-
-
 conditional_tokens_gc_user_query = Template(
     """
     {