Fixes #355: [Pdr acc] CORS, Debug Mode, the first-day issue in the calculation (#358)

* issue355: fix CORS, the calculation logic, and the tests

* Remove the duplicated property in mypy.ini

* issue355: remove the min stake amount
kdetry authored Nov 15, 2023
1 parent a91160b commit a1d0cea
Showing 7 changed files with 175 additions and 24 deletions.
2 changes: 1 addition & 1 deletion mypy.ini
@@ -66,4 +66,4 @@ ignore_missing_imports = True
ignore_missing_imports = True

[mypy-web3.*]
- ignore_missing_imports = True
+ ignore_missing_imports = True
16 changes: 9 additions & 7 deletions pdr_backend/accuracy/app.py
@@ -31,7 +31,7 @@ def calculate_timeframe_timestamps(contract_timeframe: str) -> Tuple[int, int]:

end_ts = int(datetime.utcnow().timestamp())
time_delta = (
- timedelta(weeks=2)
+ timedelta(weeks=1)
if contract_timeframe == "5m"
else timedelta(weeks=4)
# timedelta(days=1)
@@ -100,10 +100,9 @@ def save_statistics_to_file():
)

contract_ids = [contract["id"] for contract in contracts]

# Get statistics for all contracts
statistics = calculate_statistics_for_all_assets(
- contract_ids, start_ts_param, end_ts_param, network_param
+ contract_ids, contracts, start_ts_param, end_ts_param, network_param
)

output.append(
@@ -139,15 +138,18 @@ def serve_statistics_from_file():
try:
with open(JSON_FILE_PATH, "r") as f:
data = json.load(f)
- return jsonify(data)
+ response = jsonify(data)
+ response.headers.add("Access-Control-Allow-Origin", "*")  # Allow any origin
+ return response
except Exception as e:
# abort(500, description=str(e))
- return jsonify({"error": "Internal Server Error", "message": str(e)}), 500
+ response = jsonify({"error": "Internal Server Error", "message": str(e)})
+ response.headers.add("Access-Control-Allow-Origin", "*")  # Allow any origin
+ return response, 500


if __name__ == "__main__":
# Start the thread to save predictions data to a file every 5 minutes
thread = threading.Thread(target=save_statistics_to_file)
thread.start()

- app.run(debug=True)
+ app.run(debug=False)
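For reference, the response pattern this file now uses — read the cached JSON, attach an explicit Access-Control-Allow-Origin header, and keep Flask debug mode off — is shown below as a standalone sketch. It is not part of the commit; the route and file path are illustrative.

```python
# Minimal sketch (illustrative route and file path) of the CORS + debug-mode
# pattern applied in pdr_backend/accuracy/app.py above.
import json

from flask import Flask, jsonify

app = Flask(__name__)
JSON_FILE_PATH = "./statistics.json"  # illustrative path


@app.route("/statistics")
def serve_statistics_from_file():
    try:
        with open(JSON_FILE_PATH, "r") as f:
            data = json.load(f)
        response = jsonify(data)
        response.headers.add("Access-Control-Allow-Origin", "*")  # allow any origin
        return response
    except Exception as e:
        response = jsonify({"error": "Internal Server Error", "message": str(e)})
        response.headers.add("Access-Control-Allow-Origin", "*")
        return response, 500


if __name__ == "__main__":
    app.run(debug=False)  # debug mode disabled, as in the change above
```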
10 changes: 9 additions & 1 deletion pdr_backend/util/subgraph_predictions.py
@@ -11,6 +11,7 @@
class ContractIdAndSPE(TypedDict):
id: str
seconds_per_epoch: int
name: str


class FilterMode(Enum):
@@ -238,6 +239,9 @@ def fetch_contract_id_and_spe(
}){
id
secondsPerEpoch
token {
name
}
}
}
""" % json.dumps(
@@ -253,7 +257,11 @@
# Parse the results and construct ContractDetail objects
contract_data = result["data"]["predictContracts"]
contracts: List[ContractIdAndSPE] = [
{"id": contract["id"], "seconds_per_epoch": contract["secondsPerEpoch"]}
{
"id": contract["id"],
"seconds_per_epoch": contract["secondsPerEpoch"],
"name": contract["token"]["name"],
}
for contract in contract_data
]

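The added name field flows from the subgraph's token object into each ContractIdAndSPE entry and is later looked up by contract id when building statistics. Below is a small self-contained sketch of that flow; the response payload and asset id are made up for illustration.

```python
# Sketch: build ContractIdAndSPE entries (now carrying the token name) from a
# subgraph-style response, then look one up by contract id. Data is illustrative.
from typing import List, TypedDict


class ContractIdAndSPE(TypedDict):
    id: str
    seconds_per_epoch: int
    name: str


contract_data = [
    {"id": "0xabc", "secondsPerEpoch": 300, "token": {"name": "ETH/USDT"}},
    {"id": "0xdef", "secondsPerEpoch": 3600, "token": {"name": "BTC/USDT"}},
]

contracts: List[ContractIdAndSPE] = [
    {
        "id": contract["id"],
        "seconds_per_epoch": contract["secondsPerEpoch"],
        "name": contract["token"]["name"],
    }
    for contract in contract_data
]

# Filter contracts to get the one matching a given asset id, as the accuracy
# statistics code does.
asset_id = "0xabc"
contract = next((c for c in contracts if c["id"] == asset_id), None)
token_name = contract["name"] if contract else None  # -> "ETH/USDT"
```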
44 changes: 33 additions & 11 deletions pdr_backend/util/subgraph_slot.py
@@ -4,6 +4,7 @@

from pdr_backend.util.subgraph import query_subgraph
from pdr_backend.util.networkutil import get_subgraph_url
from pdr_backend.util.subgraph_predictions import ContractIdAndSPE


@dataclass
@@ -212,23 +213,35 @@ def process_single_slot(

staked_yesterday = staked_today = 0.0
correct_predictions_count = slots_evaluated = 0

+ if float(slot.roundSumStakes) == 0.0:
+ return None

# split the id to get the slot timestamp
timestamp = int(slot.id.split("-")[1]) # Using dot notation for attribute access

- if timestamp < end_of_previous_day_timestamp:
+ if (
+ end_of_previous_day_timestamp - SECONDS_IN_A_DAY
+ < timestamp
+ < end_of_previous_day_timestamp
+ ):
staked_yesterday += float(slot.roundSumStakes)
- else:
+ elif timestamp > end_of_previous_day_timestamp:
staked_today += float(slot.roundSumStakes)
- if float(slot.roundSumStakes) == 0:
- return None

- prediction_result = calculate_prediction_prediction_result(
- slot.roundSumStakesUp, slot.roundSumStakes
- )
- true_values: List[Dict[str, Any]] = slot.trueValues or []
- true_value = true_values[0]["trueValue"] if true_values else None
- if true_values and prediction_result["direction"] == (1 if true_value else 0):
- correct_predictions_count += 1
+ prediction_result = calculate_prediction_prediction_result(
+ slot.roundSumStakesUp, slot.roundSumStakes
+ )
+
+ true_values: List[Dict[str, Any]] = slot.trueValues or []
+ true_value = true_values[0]["trueValue"] if true_values else None
+
+ if len(true_values) > 0 and prediction_result["direction"] == (
+ 1 if true_value else 0
+ ):
+ correct_predictions_count += 1
+
+ if len(true_values) > 0 and true_value is not None:
+ slots_evaluated += 1

return staked_yesterday, staked_today, correct_predictions_count, slots_evaluated
@@ -278,6 +291,7 @@ def aggregate_statistics(
@enforce_types
def calculate_statistics_for_all_assets(
asset_ids: List[str],
contracts: List[ContractIdAndSPE],
start_ts_param: int,
end_ts_param: int,
network: str = "mainnet",
@@ -313,7 +327,15 @@ def calculate_statistics_for_all_assets(
if correct_predictions_count == 0
else (correct_predictions_count / slots_evaluated) * 100
)

# filter contracts to get the contract with the current asset id
contract = next(
(contract for contract in contracts if contract["id"] == asset_id),
None,
)

overall_stats[asset_id] = {
"token_name": contract["name"] if contract else None,
"average_accuracy": average_accuracy,
"total_staked_yesterday": staked_yesterday,
"total_staked_today": staked_today,
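The heart of the first-day fix above is the strict one-day window around end_of_previous_day_timestamp: only slots whose timestamp falls inside the previous day count toward staked_yesterday, and only later slots count toward staked_today; anything older is ignored. A standalone sketch of that check follows; the timestamps in the demo are illustrative.

```python
# Standalone sketch of the day-window classification introduced in
# process_single_slot above; demo timestamps are illustrative.
SECONDS_IN_A_DAY = 86400


def classify_slot_stake(timestamp: int, end_of_previous_day_timestamp: int, stake: float):
    """Return the (staked_yesterday, staked_today) contribution of one slot."""
    staked_yesterday = staked_today = 0.0
    if (
        end_of_previous_day_timestamp - SECONDS_IN_A_DAY
        < timestamp
        < end_of_previous_day_timestamp
    ):
        staked_yesterday += stake  # slot falls inside the previous day
    elif timestamp > end_of_previous_day_timestamp:
        staked_today += stake  # slot falls after the previous day ended
    return staked_yesterday, staked_today


if __name__ == "__main__":
    end_prev_day = 1_700_006_400  # illustrative "end of previous day" timestamp
    print(classify_slot_stake(end_prev_day - 3_600, end_prev_day, 5.0))  # (5.0, 0.0)
    print(classify_slot_stake(end_prev_day + 3_600, end_prev_day, 5.0))  # (0.0, 5.0)
    print(classify_slot_stake(end_prev_day - 2 * SECONDS_IN_A_DAY, end_prev_day, 5.0))  # (0.0, 0.0)
```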
108 changes: 108 additions & 0 deletions pdr_backend/util/test_ganache/test_predictor_stats.py
@@ -0,0 +1,108 @@
from typing import List, Set
from enforce_typing import enforce_types

from pdr_backend.util.predictoor_stats import (
aggregate_prediction_statistics,
get_endpoint_statistics,
get_cli_statistics,
)

from pdr_backend.util.subgraph_predictions import (
Prediction,
)

sample_predictions = [
Prediction(
pair="ADA/USDT",
timeframe="5m",
prediction=True,
stake=0.050051425480971974,
trueval=False,
timestamp=1698527100,
source="binance",
payout=0.0,
user="0xd2a24cb4ff2584bad80ff5f109034a891c3d88dd",
),
Prediction(
pair="ADA/USDT",
timeframe="5m",
prediction=True,
stake=0.0500,
trueval=True,
timestamp=1698527700,
source="binance",
payout=0.0,
user="0xd2a24cb4ff2584bad80ff5f109034a891c3d88dd",
),
]


@enforce_types
def test_aggregate_prediction_statistics():
stats, correct_predictions = aggregate_prediction_statistics(sample_predictions)
assert isinstance(stats, dict)
assert "pair_timeframe" in stats
assert "predictor" in stats
assert correct_predictions == 1 # Adjust based on your sample data


@enforce_types
def test_get_endpoint_statistics():
accuracy, pair_timeframe_stats, predictoor_stats = get_endpoint_statistics(
sample_predictions
)
assert isinstance(accuracy, float)
assert isinstance(pair_timeframe_stats, List) # List[PairTimeframeStat]
assert isinstance(predictoor_stats, List) # List[PredictoorStat]
for pair_timeframe_stat in pair_timeframe_stats:
assert isinstance(pair_timeframe_stat, dict)
assert "pair" in pair_timeframe_stat and isinstance(
pair_timeframe_stat["pair"], str
)
assert "timeframe" in pair_timeframe_stat and isinstance(
pair_timeframe_stat["timeframe"], str
)
assert "accuracy" in pair_timeframe_stat and isinstance(
pair_timeframe_stat["accuracy"], float
)
assert "stake" in pair_timeframe_stat and isinstance(
pair_timeframe_stat["stake"], float
)
assert "payout" in pair_timeframe_stat and isinstance(
pair_timeframe_stat["payout"], float
)
assert "number_of_predictions" in pair_timeframe_stat and isinstance(
pair_timeframe_stat["number_of_predictions"], int
)

for predictoor_stat in predictoor_stats:
assert isinstance(predictoor_stat, dict) and len(predictoor_stat) == 6
assert "predictoor_address" in predictoor_stat and isinstance(
predictoor_stat["predictoor_address"], str
)
assert "accuracy" in predictoor_stat and isinstance(
predictoor_stat["accuracy"], float
)
assert "stake" in predictoor_stat and isinstance(
predictoor_stat["stake"], float
)
assert "payout" in predictoor_stat and isinstance(
predictoor_stat["payout"], float
)
assert "number_of_predictions" in predictoor_stat and isinstance(
predictoor_stat["number_of_predictions"], int
)
assert "details" in predictoor_stat and isinstance(
predictoor_stat["details"], Set
)
assert len(predictoor_stat["details"]) == 1


@enforce_types
def test_get_cli_statistics(capsys):
get_cli_statistics(sample_predictions)
captured = capsys.readouterr()
output = captured.out
assert "Overall Accuracy" in output
assert "Accuracy for Pair" in output
assert "Accuracy for Predictoor Address" in output
6 changes: 4 additions & 2 deletions pdr_backend/util/test_ganache/test_subgraph_predictions.py
@@ -83,8 +83,8 @@
MOCK_CONTRACT_DETAILS_RESPONSE = {
"data": {
"predictContracts": [
{"id": "contract1", "secondsPerEpoch": 300},
{"id": "contract2", "secondsPerEpoch": 600},
{"id": "contract1", "secondsPerEpoch": 300, "token": {"name": "token1"}},
{"id": "contract2", "secondsPerEpoch": 600, "token": {"name": "token2"}},
]
}
}
@@ -147,6 +147,8 @@ def test_fetch_contract_id_and_spe(
assert len(contract_details) == 2
assert contract_details[0]["id"] == "contract1"
assert contract_details[0]["seconds_per_epoch"] == 300
assert contract_details[0]["name"] == "token1"
assert contract_details[1]["id"] == "contract2"
assert contract_details[1]["seconds_per_epoch"] == 600
assert contract_details[1]["name"] == "token2"
mock_query_subgraph.assert_called_once()
13 changes: 11 additions & 2 deletions pdr_backend/util/test_ganache/test_subgraph_slot.py
@@ -1,6 +1,6 @@
from unittest.mock import patch
from dataclasses import asdict
from typing import Dict
from typing import Dict, List
from enforce_typing import enforce_types

from pdr_backend.util.subgraph_slot import (
@@ -13,6 +13,7 @@
calculate_statistics_for_all_assets,
PredictSlot,
)
from pdr_backend.util.subgraph_predictions import ContractIdAndSPE

# Sample data for tests
SAMPLE_PREDICT_SLOT = PredictSlot(
@@ -141,9 +142,17 @@ def test_aggregate_statistics():
def test_calculate_statistics_for_all_assets(mock_fetch_slots):
# Set up the mock to return a predetermined value
mock_fetch_slots.return_value = {"0xAsset": [SAMPLE_PREDICT_SLOT] * 1000}
# Contracts List
contracts: List[ContractIdAndSPE] = [
{"id": "0xAsset", "seconds_per_epoch": 300, "name": "TEST/USDT"}
]
# Test the calculate_statistics_for_all_assets function
statistics = calculate_statistics_for_all_assets(
- asset_ids=["0xAsset"], start_ts_param=1000, end_ts_param=2000, network="mainnet"
+ asset_ids=["0xAsset"],
+ contracts=contracts,
+ start_ts_param=1000,
+ end_ts_param=2000,
+ network="mainnet",
)
# Verify that the statistics are calculated as expected
assert statistics["0xAsset"]["average_accuracy"] == 100.0
