import polars as pl
import glob
import math
import warnings

# Suppress library warnings (e.g. polars deprecation notices) for cleaner notebook output.
warnings.filterwarnings("ignore")

# Issue 3
3. Out-of-Range / Suspicious Values
This notebook documents the analysis for Issue #3: Out-of-Range / Suspicious Values in the Autonity Oracle data. It covers:
- What is this issue about?
- Why conduct this issue analysis?
- How to conduct this issue analysis?
- What are the results?
3.1 What Is This Issue About?
Certain Oracle submissions are unexpectedly large, zero, negative, or significantly off-market compared to real FX data. Examples include:
- Extremely large prices like `6.3e+25`, indicating scaling errors.
- Negative or zero prices, which should not occur.
- Large spikes or sudden changes inconsistent with actual market data.
Additionally, cross-rates for Autonity tokens (ATN, NTN) may be inconsistent (NTN-USD ≠ NTN-ATN × ATN-USD).
3.2 Why Conduct This Issue Analysis?
- Data Integrity: Ensuring accuracy and reliability of the Oracle data.
- Security & Reliability: Identifying potential bugs or malicious activities before Mainnet.
- Cross-rate Consistency: Confirming internal consistency for token prices.
3.3 How to Conduct the Analysis?
Use Python with the Polars library (v1.24.0) to:
- Load and preprocess Oracle submission CSV files.
- Convert price submissions from Wei to decimals (dividing by `1e18`).
- Compare FX pairs to minute-level Yahoo Finance benchmarks to detect:
- Deviations exceeding ±20%.
- Negative, zero, or excessively large prices.
- Checked Autonity token cross-rates for consistency within a 10% tolerance.
Below is the analysis script:
def load_and_preprocess_oracle_submissions(submission_glob: str) -> pl.LazyFrame:
    """
    Load Oracle submission CSVs matching *submission_glob* and return a
    Polars LazyFrame (callers must .collect() it).

    Adds three derived columns:
      - "Timestamp_dt": the raw "Timestamp" string parsed as a Datetime
        (unparseable values become null because strict=False).
      - "date_only": calendar date of the submission.
      - "weekday_num": polars day-of-week number for the submission.

    Raises:
        ValueError: if no files match the glob pattern.
    """
    files = sorted(glob.glob(submission_glob))
    if not files:
        raise ValueError(f"No CSV files found matching pattern {submission_glob}")
    # Scan lazily so many large CSVs can be concatenated without materializing.
    lf_list = []
    for f in files:
        lf_temp = pl.scan_csv(
            f,
            # Keep Timestamp as text so we control parsing ourselves below.
            # NOTE: `dtypes=` was renamed to `schema_overrides=` in polars 1.x
            # (the notebook targets polars v1.24.0).
            schema_overrides={"Timestamp": pl.Utf8},
            null_values=[""],
            ignore_errors=True,
        )
        lf_list.append(lf_temp)
    lf = pl.concat(lf_list)
    lf = lf.with_columns(
        pl.col("Timestamp")
        .str.strptime(pl.Datetime, strict=False)
        .alias("Timestamp_dt")
    )
    lf = lf.with_columns(
        [
            pl.col("Timestamp_dt").cast(pl.Date).alias("date_only"),
            pl.col("Timestamp_dt").dt.weekday().alias("weekday_num"),
        ]
    )
    return lf
def load_yahoo_finance_data(directory_pattern: str, pair_label: str) -> pl.DataFrame:
    """
    Read every Yahoo Finance CSV matching *directory_pattern* and return a
    time-sorted Polars DataFrame with columns "timestamp_benchmark",
    "benchmark_close", and a constant "symbol" column set to *pair_label*.

    Raises:
        ValueError: if the pattern matches no files.
    """
    matched = sorted(glob.glob(directory_pattern))
    if not matched:
        raise ValueError(f"No Yahoo Finance CSV files found: {directory_pattern}")
    # Lazily scan each file, skipping the three header/metadata rows Yahoo emits.
    scans = [
        pl.scan_csv(
            path,
            has_header=False,
            skip_rows=3,
            new_columns=["Datetime", "Close", "High", "Low", "Open", "Volume"],
            try_parse_dates=True,
        )
        for path in matched
    ]
    combined = pl.concat(scans)
    # Keep only the timestamp and closing price, under benchmark-oriented names.
    renamed = combined.select(
        [
            pl.col("Datetime").alias("timestamp_benchmark"),
            pl.col("Close").alias("benchmark_close"),
        ]
    )
    collected = renamed.sort("timestamp_benchmark").collect()
    # Tag every row with the pair label so frames remain identifiable after joins.
    return collected.with_columns([pl.lit(pair_label).alias("symbol")])
def load_all_fx_benchmarks() -> dict[str, pl.DataFrame]:
    """
    Load the Yahoo Finance benchmark data for every supported FX pair.

    Returns:
        Mapping from pair label (e.g. "AUD-USD") to its benchmark DataFrame.
    """
    patterns = {
        "AUD-USD": "../yahoo-finance/data/AUDUSD/AUDUSD=X_1m_*.csv",
        "CAD-USD": "../yahoo-finance/data/CADUSD/CADUSD=X_1m_*.csv",
        "EUR-USD": "../yahoo-finance/data/EURUSD/EURUSD=X_1m_*.csv",
        "GBP-USD": "../yahoo-finance/data/GBPUSD/GBPUSD=X_1m_*.csv",
        "JPY-USD": "../yahoo-finance/data/JPYUSD/JPYUSD=X_1m_*.csv",
        "SEK-USD": "../yahoo-finance/data/SEKUSD/SEKUSD=X_1m_*.csv",
    }
    return {
        label: load_yahoo_finance_data(pattern, label)
        for label, pattern in patterns.items()
    }
def convert_wei_to_decimal(price_wei: float) -> float:
"""
Convert from Wei-based representation to a decimal.
"""
if price_wei is None or math.isnan(price_wei):
return None
return price_wei / 1e18
def flag_suspicious_values(
    df_submissions: pl.DataFrame,
    fx_pairs: list[str],
    autonity_pairs: list[str],
    fx_benchmarks: dict[str, pl.DataFrame],
    percent_threshold: float = 0.20,
    join_tolerance: str = "30s",
    dynamic_thresholds: dict[str, float] | None = None,
):
    """
    Detect suspicious or out-of-range values in Oracle data using as-of joins for time alignment
    and dynamic thresholds for 'excessively large' values.

    Parameters
    ----------
    df_submissions : pl.DataFrame
        Oracle submissions; must contain "Timestamp_dt", "Validator Address",
        and one "<PAIR> Price" column per entry in fx_pairs / autonity_pairs.
    fx_pairs : list[str]
        FX price column names (e.g. "AUD-USD Price") compared to benchmarks.
    autonity_pairs : list[str]
        Autonity token price column names (e.g. "ATN-USD Price").
    fx_benchmarks : dict[str, pl.DataFrame]
        Benchmark frames keyed by pair label ("AUD-USD"), each with
        "timestamp_benchmark" and "benchmark_close" columns.
    percent_threshold : float
        Relative deviation from the benchmark above which a price is flagged.
    join_tolerance : str
        Maximum time gap allowed when as-of joining submissions to benchmarks.
    dynamic_thresholds : dict[str, float] | None
        Per-pair "excessively large" cutoffs; pairs absent here fall back to 5.0.

    Returns
    -------
    pl.DataFrame
        One row per flagged submission with a non-empty "suspect_reason".
    """
    # Derive "<PAIR> Price Decimal" columns by scaling Wei-based prices down by 1e18.
    new_cols = []
    for c in fx_pairs + autonity_pairs:
        if c.endswith(" Price"):
            dec_col = c.replace(" Price", " Price Decimal")
            new_cols.append((pl.col(c).cast(pl.Float64) / 1e18).alias(dec_col))
    df_submissions = df_submissions.with_columns(new_cols)
    suspicious_frames: list[pl.DataFrame] = []
    # Shared output schema so FX-flagged and cross-rate-flagged frames concatenate cleanly.
    final_columns = [
        "Timestamp_dt",
        "Validator Address",
        "oracle_price_decimal",
        "benchmark_close",
        "rel_diff_from_bench",
        "ATN-USD Price Decimal",
        "NTN-USD Price Decimal",
        "NTN-ATN Price Decimal",
        "ntn_usd_estimated",
        "rel_diff_cross",
        "suspect_reason",
    ]
    # --- FX pairs: compare each submission against the nearest benchmark quote. ---
    for pair_label in fx_pairs:
        if not pair_label.endswith(" Price"):
            continue
        base_name = pair_label.replace(" Price", "")  # e.g. "AUD-USD"
        decimal_col = f"{base_name} Price Decimal"
        if base_name not in fx_benchmarks:
            # No benchmark data available for this pair; skip it entirely.
            continue
        df_bench = fx_benchmarks[base_name]
        # Both sides must be sorted on their join keys for join_asof.
        df_local = (
            df_submissions
            .select(["Timestamp_dt", "Validator Address", decimal_col])
            .sort("Timestamp_dt")
        )
        df_bench_sorted = df_bench.sort("timestamp_benchmark")
        df_joined = df_local.join_asof(
            df_bench_sorted,
            left_on="Timestamp_dt",
            right_on="timestamp_benchmark",
            strategy="nearest",  # or "backward"/"forward"
            tolerance=join_tolerance
        ).with_columns(
            # Relative deviation |oracle - benchmark| / |benchmark|.
            (
                (pl.col(decimal_col) - pl.col("benchmark_close")).abs()
                / pl.col("benchmark_close").abs()
            )
            .alias("rel_diff_from_bench")
        )
        if dynamic_thresholds and base_name in dynamic_thresholds:
            max_threshold = dynamic_thresholds[base_name]
        else:
            max_threshold = 5.0  # fallback
        df_flagged_fx = (
            df_joined
            .select(
                [
                    "Timestamp_dt",
                    "Validator Address",
                    pl.col(decimal_col).alias("oracle_price_decimal"),
                    "benchmark_close",
                    "rel_diff_from_bench",
                ]
            )
            .with_columns(
                [
                    # cond1: zero or negative prices should never occur.
                    pl.when(
                        (pl.col("oracle_price_decimal").is_not_null()) &
                        (pl.col("oracle_price_decimal") <= 0)
                    )
                    .then(pl.lit("Non-positive price; "))
                    .otherwise(pl.lit(""))
                    .alias("cond1"),
                    # cond2: price at or above the per-pair "excessively large" cutoff.
                    pl.when(
                        (pl.col("oracle_price_decimal").is_not_null()) &
                        (pl.col("oracle_price_decimal") >= max_threshold)
                    )
                    .then(pl.lit("Excessively large price; "))
                    .otherwise(pl.lit(""))
                    .alias("cond2"),
                    # cond3: deviation from the matched benchmark beyond percent_threshold.
                    pl.when(
                        (pl.col("oracle_price_decimal").is_not_null()) &
                        (pl.col("benchmark_close").is_not_null()) &
                        (pl.col("rel_diff_from_bench") > percent_threshold)
                    )
                    .then(pl.lit(f"Deviation > {int(percent_threshold*100)}%; "))
                    .otherwise(pl.lit(""))
                    .alias("cond3"),
                ]
            )
            # Concatenate the reason fragments; rows with no reason are dropped below.
            .with_columns(
                [
                    (pl.col("cond1") + pl.col("cond2") + pl.col("cond3")).alias("suspect_reason")
                ]
            )
            .filter(pl.col("suspect_reason") != "")
            .drop(["cond1", "cond2", "cond3"])
        )
        # Pad the cross-rate columns with nulls so the frame matches final_columns.
        df_flagged_fx = df_flagged_fx.with_columns(
            [
                pl.lit(None).cast(pl.Float64).alias("ATN-USD Price Decimal"),
                pl.lit(None).cast(pl.Float64).alias("NTN-USD Price Decimal"),
                pl.lit(None).cast(pl.Float64).alias("NTN-ATN Price Decimal"),
                pl.lit(None).cast(pl.Float64).alias("ntn_usd_estimated"),
                pl.lit(None).cast(pl.Float64).alias("rel_diff_cross"),
            ]
        )
        df_flagged_fx = df_flagged_fx.select(final_columns)
        suspicious_frames.append(df_flagged_fx)
    # --- Autonity cross-rate check: NTN-USD should equal NTN-ATN x ATN-USD. ---
    required_cols = {
        "ATN-USD Price Decimal",
        "NTN-USD Price Decimal",
        "NTN-ATN Price Decimal",
    }
    if required_cols.issubset(set(df_submissions.columns)):
        df_autonity = df_submissions.select(
            [
                "Timestamp_dt",
                "Validator Address",
                "ATN-USD Price Decimal",
                "NTN-USD Price Decimal",
                "NTN-ATN Price Decimal",
            ]
        )
        # Implied NTN-USD rate from the other two submitted prices.
        df_autonity = df_autonity.with_columns(
            (pl.col("NTN-ATN Price Decimal") * pl.col("ATN-USD Price Decimal"))
            .alias("ntn_usd_estimated")
        )
        # Relative mismatch; the +1e-18 in the denominator guards against divide-by-zero.
        df_autonity = df_autonity.with_columns(
            (
                (
                    (pl.col("ntn_usd_estimated") - pl.col("NTN-USD Price Decimal")).abs()
                    / (pl.col("NTN-USD Price Decimal").abs() + 1e-18)
                ).alias("rel_diff_cross")
            )
        )
        cross_tolerance = 0.10  # 10%
        df_autonity_suspect = (
            df_autonity
            .with_columns(
                pl.when(pl.col("rel_diff_cross") > cross_tolerance)
                .then(pl.lit("Cross-rate mismatch > 10%; "))
                .otherwise(pl.lit(""))
                .alias("suspect_reason")
            )
            .filter(pl.col("suspect_reason") != "")
        )
        # Pad the FX-benchmark columns with nulls so the frame matches final_columns.
        df_autonity_suspect = df_autonity_suspect.with_columns(
            [
                pl.lit(None).cast(pl.Float64).alias("oracle_price_decimal"),
                pl.lit(None).cast(pl.Float64).alias("benchmark_close"),
                pl.lit(None).cast(pl.Float64).alias("rel_diff_from_bench"),
            ]
        )
        df_autonity_suspect = df_autonity_suspect.select(final_columns)
        suspicious_frames.append(df_autonity_suspect)
    if suspicious_frames:
        df_suspicious = pl.concat(suspicious_frames, how="vertical")
    else:
        # Nothing was flagged at all: return a minimal empty frame.
        df_suspicious = pl.DataFrame(
            {"Timestamp_dt": [], "Validator Address": [], "suspect_reason": []}
        )
    return df_suspicious
def analyze_out_of_range_values(
    submission_glob: str,
    fx_pairs: list[str],
    autonity_pairs: list[str],
    yahoo_data_dict: dict[str, pl.DataFrame],
    deviation_threshold: float = 0.20,
    join_tolerance: str = "30s",
    dynamic_thresholds: dict[str, float] | None = None,
):
    """
    Main analysis function: load the Oracle submissions, flag suspicious
    values, print one preview line per flagged row, and return the
    flagged frame.

    Args:
        submission_glob: Glob pattern for the Oracle submission CSV files.
        fx_pairs: FX price column names to check against benchmarks.
        autonity_pairs: Autonity token price column names for cross-rate checks.
        yahoo_data_dict: Benchmark frames keyed by pair label.
        deviation_threshold: Relative deviation above which a price is flagged.
        join_tolerance: Max time gap for the as-of join to a benchmark quote.
        dynamic_thresholds: Optional per-pair "excessively large" cutoffs.

    Returns:
        The DataFrame of suspicious rows produced by flag_suspicious_values.
    """
    lf_sub = load_and_preprocess_oracle_submissions(submission_glob)
    df_sub = lf_sub.collect()
    df_suspicious = flag_suspicious_values(
        df_submissions=df_sub,
        fx_pairs=fx_pairs,
        autonity_pairs=autonity_pairs,
        fx_benchmarks=yahoo_data_dict,
        percent_threshold=deviation_threshold,
        join_tolerance=join_tolerance,
        dynamic_thresholds=dynamic_thresholds,
    )
    if not df_suspicious.is_empty():
        suspicious_preview = df_suspicious.to_dicts()
        for row in suspicious_preview:
            ts_ = row.get("Timestamp_dt")
            val_addr = row.get("Validator Address")
            reason = row.get("suspect_reason")
            price = row.get("oracle_price_decimal")
            benchmark = row.get("benchmark_close")
            rel_diff = row.get("rel_diff_from_bench")
            cross_diff = row.get("rel_diff_cross")  # if from cross-rate
            line_parts = [f"{ts_} | {val_addr} | {reason}"]
            if price is not None:
                line_parts.append(f"oracle_price={price:.4f}")
            if benchmark is not None:
                line_parts.append(f"bench={benchmark:.4f}")
            if rel_diff is not None:
                line_parts.append(f"diff={rel_diff*100:.2f}%")
            if cross_diff is not None:
                line_parts.append(f"cross_diff={cross_diff*100:.2f}%")
            # BUG FIX: the preview line was assembled but never emitted,
            # making the whole loop a no-op.
            print(" ".join(line_parts))
    return df_suspicious


# FX price columns present in the submission CSVs.
fx_cols = [
    "AUD-USD Price",
    "CAD-USD Price",
    "EUR-USD Price",
    "GBP-USD Price",
    "JPY-USD Price",
    "SEK-USD Price",
]
# Autonity token price columns used for the cross-rate consistency check.
autonity_cols = ["ATN-USD Price", "NTN-USD Price", "NTN-ATN Price"]
# Load the minute-level Yahoo Finance benchmarks for every FX pair.
yahoo_data = load_all_fx_benchmarks()

df_outliers = analyze_out_of_range_values(
    submission_glob="../submission-data/Oracle_Submission_*.csv",
    fx_pairs=fx_cols,
    autonity_pairs=autonity_cols,
    yahoo_data_dict=yahoo_data,
    deviation_threshold=0.20,
    join_tolerance="30s",
    # Per-pair cutoffs for "excessively large" prices; chosen well above each
    # pair's plausible market range (e.g. JPY-USD quotes near 0.007, so 200
    # only trips on gross scaling errors).
    dynamic_thresholds={
        "AUD-USD": 2.0,
        "CAD-USD": 2.0,
        "EUR-USD": 3.0,
        "GBP-USD": 3.0,
        "JPY-USD": 200.0,
        "SEK-USD": 20.0,
        "ATN-USD": 1.0,
        "NTN-USD": 1.0,
        "NTN-ATN": 1.0,
    },
)

# 3.4 What are the results?
The following results summarize the suspicious submissions detected:
# Summarize how many submissions were flagged; show the table only when non-empty.
num_suspicious = df_outliers.height
print(f"Total suspicious submissions detected: {num_suspicious}")
if num_suspicious == 0:
    print("No suspicious values detected within the ±20% threshold.")
else:
    display(df_outliers)

# Output: Total suspicious submissions detected: 0
No suspicious values detected within the ±20% threshold.
Note: You may see many nulls in the df_outliers table. This is expected behavior: when a row is flagged for only one category (e.g., a Forex mismatch or a cross-rate mismatch), the columns belonging to the other category remain null. An empty table indicates that no outliers were detected.
- Negative or zero prices: Indicate significant issues like data feed outages or software errors.
- Extreme values: Likely result from incorrect scaling or data staleness.
- Large deviations (>20%): Suggest problems with validator data sources or calculation logic.
- Cross-rate mismatches (>10%): Highlight misconfigurations or inconsistencies between token price feeds.
Validators frequently flagged with suspicious data require further investigation, particularly if patterns or correlations emerge.
List of All Validators and Their Fraction of Suspicious Submissions
# Re-load the full submission set to compute per-validator totals.
lf_sub = load_and_preprocess_oracle_submissions("../submission-data/Oracle_Submission_*.csv")
df_all_submissions = lf_sub.collect()

# Total number of submissions per validator.
# NOTE: `pl.count()` was deprecated/removed in polars 1.x; `pl.len()` is the
# equivalent row count within a group.
df_validator_submissions = (
    df_all_submissions
    .group_by("Validator Address")
    .agg([
        pl.len().alias("total_submissions"),
    ])
)
# Number of flagged submissions per validator (validators absent here had none).
df_validator_outliers = (
    df_outliers
    .group_by("Validator Address")
    .agg([
        pl.len().alias("suspicious_submissions"),
    ])
)
df_validator_stats = (
    df_validator_submissions
    .join(df_validator_outliers, on="Validator Address", how="left")
    .with_columns([
        pl.col("suspicious_submissions").fill_null(0),  # if a validator never appears in df_outliers
        # NOTE: expressions inside one with_columns evaluate against the input
        # frame in parallel, so this ratio uses the pre-fill_null value and is
        # null for validators with no flagged rows — handled by the None check
        # in the print loop below.
        (
            pl.col("suspicious_submissions") / pl.col("total_submissions")
        ).alias("suspicious_ratio")
    ])
)
df_validator_stats = (
    df_validator_stats
    .select([
        "Validator Address",
        "total_submissions",
        "suspicious_submissions",
        (pl.col("suspicious_ratio") * 100).round(2).alias("suspicious_ratio_pct"),
    ])
    .sort("suspicious_submissions", descending=True)
)
for row in df_validator_stats.to_dicts():
    fraction_suspicious_submissions = row['suspicious_ratio_pct']
    if fraction_suspicious_submissions is None:
        fraction_suspicious_submissions = "0%"
    else:
        fraction_suspicious_submissions = f"{fraction_suspicious_submissions}%"
    print(
        f"Validator {row['Validator Address']}: "
        f"total={row['total_submissions']}, "
        f"suspicious_submissions={row['suspicious_submissions']}, "
        f"fraction_suspicious_submissions={fraction_suspicious_submissions}"
    )

# Output: Validator 0xE4686A4C6E63A8ab51B458c52EB779AEcf0B74f7: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x01F788E4371a70D579C178Ea7F48E04e8B2CD743: total=2837, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x3597d2D42f8Fbbc82E8b1046048773aD6DDB717E: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xf10f56Bf0A28E0737c7e6bB0aF92f3DDad34aE6a: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x94470A842Ea4f44e668EB9C2AB81367b6Ce01772: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xdF239e0D5b4E6e820B0cFEF6972A90893c2073AB: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xE9FFF86CAdC3136b3D94948B8Fd23631EDaa2dE3: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x791A7F840ac11841cCB0FaA968B2e3a0Db930fCe: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x527192F3D2408C84087607b7feE1d0f907821E17: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x3fe573552E14a0FC11Da25E43Fef11e16a785068: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x9d28e40E9Ec4789f9A0D17e421F76D8D0868EA44: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x984A46Ec685Bb41A7BBb2bc39f80C78410ff4057: total=2877, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x64F83c2538A646A550Ad9bEEb63427a377359DEE: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x22A76e194A49c9e5508Cd4A3E1cD555D088ECB08: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x23b4Be9536F93b8D550214912fD0e38417Ff7209: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x24915749B793375a8C93090AF19928aFF1CAEcb6: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xBE287C82A786218E008FF97320b08244BE4A282c: total=2879, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x94d28f08Ff81A80f4716C0a8EfC6CAC2Ec74d09E: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x00a96aaED75015Bb44cED878D927dcb15ec1FF54: total=2866, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x8584A78A9b94f332A34BBf24D2AF83367Da31894: total=2879, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xd61a48b0e11B0Dc6b7Bd713B1012563c52591BAA: total=2873, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x9C7dAABb5101623340C925CFD6fF74088ff5672e: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xfD97FB8835d25740A2Da27c69762D74F6A931858: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x8f91e0ADF8065C3fFF92297267E02DF32C2978FF: total=2879, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x59031767f20EA8F4a3d90d33aB0DAA2ca469Fd9a: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xD9fDab408dF7Ae751691BeC2efE3b713ba3f9C36: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x383A3c437d3F12f60E5fC990119468D3561EfBfc: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x26E2724dBD14Fbd52be430B97043AA4c83F05852: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xd625d50B0d087861c286d726eC51Cf4Bd9c54357: total=2856, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x1476A65D7B5739dE1805d5130441A94022Ee49fe: total=2462, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x7232e75a8bFd8c9ab002BB3A00eAa885BC72A6dd: total=2877, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xcdEed21b471b0Dc54faF74480A0E700fCc42a7b6: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xBBf36374eb23968F25aecAEbb97BF3118f3c2fEC: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xB5d8be2AB4b6d7E6be7Ea28E91b370223a06289f: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x551f3300FCFE0e392178b3542c009948008B2a9F: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x358488a4EdCA493FCD87610dcd50c62c8A3Dd658: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xEf0Ba5e345C2C3937df5667A870Aae5105CAa3a5: total=2879, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x4cD134001EEF0843B9c69Ba9569d11fDcF4bd495: total=2823, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x5603caFE3313D0cf56Fd4bE4A2f606dD6E43F8Eb: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xDF2D0052ea56A860443039619f6DAe4434bc0Ac4: total=2879, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xC1F9acAF1824F6C906b35A0D2584D6E25077C7f5: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x100E38f7BCEc53937BDd79ADE46F34362470577B: total=2876, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x99E2B4B27BDe92b42D04B6CF302cF564D2C13b74: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xbfDcAF35f52F9ef423ac8F2621F9eef8be6dEd17: total=2833, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x6a395dE946c0493157404E2b1947493c633f569E: total=2874, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x6747c02DE7eb2099265e55715Ba2E03e8563D051: total=2840, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xcf716b3930d7cf6f2ADAD90A27c39fDc9D643BBd: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x831B837C3DA1B6c2AB68a690206bDfF368877E19: total=2863, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x36142A4f36974e2935192A1111C39330aA296D3C: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x197B2c44b887c4aC01243BDE7E4bBa8bd95BC3a8: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xf34CD6c09a59d7D3d1a6C3dC231a7834E5615D6A: total=2834, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x2928FE5b911BCAf837cAd93eB9626E86a189f1dd: total=2829, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x1Be7f70BCf8393a7e4A5BcC66F6f15d6e35cfBBC: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xF9B38D02959379d43C764064dE201324d5e12931: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x3AaF7817618728ffEF81898E11A3171C33faAE41: total=2874, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xc5B9d978715F081E226cb28bADB7Ba4cde5f9775: total=2879, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x718361fc3637199F24a2437331677D6B89a40519: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0xDCA5DFF3D42f2db3C18dBE823380A0A81db49A7E: total=2880, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x19E356ebC20283fc74AF0BA4C179502A1F62fA7B: total=2833, suspicious_submissions=0, fraction_suspicious_submissions=0%
Validator 0x5E17e837DcBa2728C94f95c38fA8a47CB9C8818F: total=2876, suspicious_submissions=0, fraction_suspicious_submissions=0%
Please note, total indicates the total number of submissions recorded for this validator. suspicious_submissions shows how many of those submissions were flagged as suspicious (e.g. out of range or zero/negative values). fraction_suspicious_submissions reports the percentage of the validator’s submissions that fell into the suspicious category.