import polars as pl
import glob
import statistics
import math
from itertools import groupby
from typing import List
import warnings
warnings.filterwarnings("ignore")
Issue 5
5. Confidence Value Anomalies
This notebook documents the analysis for Issue #5: Confidence Value Anomalies in the Autonity Oracle data. It covers:
- What is this issue about?
- Why conduct this issue analysis?
- How to conduct this issue analysis?
- What are the results?
5.1 What Is This Issue About?
Validators submit a confidence metric alongside their price submissions, representing their certainty in the provided data. Potential anomalies include:
- Validators consistently submitting the same confidence value (e.g. always 50 or 100).
- Frequent occurrences of zero or null confidence values.
- Confidence values that do not vary in response to market volatility or price changes.
This analysis investigates the patterns and consistency of these confidence values.
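For illustration, the first two patterns can be spotted with a trivial check on a single validator's confidence series; the values below are hypothetical and not taken from the Oracle data:
confidences = [100, 100, 100, 100, 100]  # hypothetical submissions from one validator
is_fixed = len(set(confidences)) == 1                         # same value every round
has_zero_or_null = any(c in (0, None) for c in confidences)   # zero or missing confidence
print(f"fixed value: {is_fixed}, zero/null present: {has_zero_or_null}")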
5.2 Why Conduct This Issue Analysis?
- Reliability Check: Confidence should reflect real uncertainty, not remain fixed or arbitrary.
- System Integrity: Identifying anomalies helps ensure that validators comply with expected behavior prior to Mainnet launch.
- Decision-making: Confidence anomalies can degrade decision quality in downstream applications relying on Oracle data.
5.3 How to Conduct the Analysis?
Use Python with the Polars library (v1.24.0) to:
- Load and preprocess Oracle submission CSV files.
- Detect all “Confidence” columns automatically.
- Calculate basic statistics for confidence values per validator and pair:
- Minimum, maximum, mean, standard deviation, number of distinct confidence values.
- Frequency distribution of the most common confidence values.
- Identify validators that consistently submit fixed or zero variation confidence values.
- Calculate correlation between confidence and price changes:
- Compute price volatility as absolute price differences between submissions.
- Evaluate the correlation to verify whether confidence values genuinely reflect market volatility (a toy sketch follows this list).
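For intuition, here is a minimal toy sketch of that last step. The numbers are made up, and it uses statistics.correlation from the standard library (Python 3.10+); the full script below instead defines its own pearson_correlation helper so it can also cope with nulls and NaNs.
import statistics

prices = [1.000, 1.002, 0.999, 1.010, 1.012]   # toy price submissions from one validator
confidences = [100, 90, 95, 70, 75]            # toy confidence values for the same rounds

# Volatility proxy: absolute change from the previous submission.
abs_changes = [abs(b - a) for a, b in zip(prices, prices[1:])]

# Correlate each change with the confidence reported alongside the later submission.
r = statistics.correlation(abs_changes, confidences[1:])
print(f"Pearson correlation between |price change| and confidence: {r:.3f}")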
Below is the analysis script:
def load_and_preprocess_submissions(submission_glob: str) -> pl.DataFrame:
"""
Loads Oracle Submission CSVs and returns a Polars DataFrame.
"""
files = sorted(glob.glob(submission_glob))
if not files:
raise ValueError(f"No CSV files found matching pattern {submission_glob}")
lf_list = []
for f in files:
lf_temp = pl.scan_csv(
f,
dtypes={"Timestamp": pl.Utf8},
null_values=[""],
ignore_errors=True,
)
lf_list.append(lf_temp)
lf = pl.concat(lf_list)
lf = lf.with_columns(
pl.col("Timestamp")
.str.strptime(pl.Datetime, strict=False)
.alias("Timestamp_dt")
)
lf = lf.with_columns(
[
pl.col("Timestamp_dt").cast(pl.Date).alias("date_only"),
pl.col("Timestamp_dt").dt.weekday().alias("weekday_num"),
]
)
return lf.collect()
def list_confidence_columns(df: pl.DataFrame) -> List[str]:
"""
Identifies all "Confidence" columns by name.
"""
return [c for c in df.columns if "Confidence" in c]
def list_price_columns(df: pl.DataFrame) -> List[str]:
"""
Identifies all "Price" columns by name.
"""
return [c for c in df.columns if "Price" in c]
def confidence_distribution_by_validator(
df: pl.DataFrame, confidence_cols: List[str], top_k_freq: int = 3
) -> pl.DataFrame:
"""
Summarizes the distribution of ALL confidence values per validator (aggregated over all pairs).
Returns min/max/mean/std/distinct count, plus the top frequency values.
"""
keep_cols = ["Validator Address", "Timestamp_dt"] + confidence_cols
df_small = df.select([c for c in keep_cols if c in df.columns])
df_long = df_small.melt(
id_vars=["Validator Address", "Timestamp_dt"],
value_vars=confidence_cols,
variable_name="confidence_col",
value_name="confidence_val",
)
lf_long = df_long.lazy()
grouped_basic = lf_long.group_by(["Validator Address"]).agg(
[
pl.count("confidence_val").alias("count_rows"),
pl.min("confidence_val").alias("min_conf"),
pl.max("confidence_val").alias("max_conf"),
pl.mean("confidence_val").alias("mean_conf"),
pl.std("confidence_val").alias("std_conf"),
pl.n_unique("confidence_val").alias("distinct_values_count"),
]
)
df_basic = grouped_basic.collect()
freq_lf = (
lf_long.group_by(["Validator Address", "confidence_val"])
.agg(pl.count("confidence_val").alias("value_count"))
.sort(
["Validator Address", "value_count"],
descending=[False, True],
)
)
df_freq = freq_lf.collect()
def top_k_values_string(rows: list, k: int):
"""
Return a string with format: val(count), val2(count2), ...
e.g. '0(14), 35(5), 100(3)'
"""
parts = []
for r in rows[:k]:
cval = r["confidence_val"]
count_ = r["value_count"]
if cval is None:
val_str = "null"
else:
val_str = str(int(cval))
parts.append(f"{val_str}({count_})")
return ", ".join(parts)
def top_k_values_list(rows: list, k: int):
"""
Return just the numeric confidence values in the top k, ignoring their counts.
"""
out = []
for r in rows[:k]:
cval = r["confidence_val"]
if cval is not None:
out.append(int(cval))
return out
validator_map = {}
freq_dicts = df_freq.to_dicts()
for key, group in groupby(freq_dicts, key=lambda d: d["Validator Address"]):
group_list = list(group)
group_list_sorted = sorted(
group_list, key=lambda x: x["value_count"], reverse=True
)
freq_str = top_k_values_string(group_list_sorted, top_k_freq)
freq_list = top_k_values_list(group_list_sorted, top_k_freq)
validator_map[key] = {"freq_str": freq_str, "freq_list": freq_list}
df_freq_map = pl.DataFrame(
{
"Validator Address": list(validator_map.keys()),
"top_freq_values_str": [v["freq_str"] for v in validator_map.values()],
"top_freq_values_list": [v["freq_list"] for v in validator_map.values()],
}
)
df_merged = df_basic.join(
df_freq_map, on=["Validator Address"], how="left"
)
df_merged = df_merged.with_columns(
[
(pl.col("max_conf") == pl.col("min_conf")).alias("zero_variation"),
(pl.col("distinct_values_count") == 1).alias("only_one_value"),
pl.col("top_freq_values_list").list.contains(0).alias("has_zero_conf"),
]
)
return df_merged.sort(["Validator Address"])
def check_confidence_vs_price_correlation(
df: pl.DataFrame, fx_pairs: List[str], autonity_pairs: List[str]
) -> pl.DataFrame:
"""
Measures how well Confidence tracks price changes (absolute difference from previous submission).
Note: This still checks each 'Price' column vs. its corresponding 'Confidence' column.
"""
df_local = df.clone()
price_cols = fx_pairs + autonity_pairs
new_cols = []
for pc in price_cols:
decimal_col = pc.replace(" Price", " Price Decimal")
new_cols.append((pl.col(pc).cast(pl.Float64) / 1e18).alias(decimal_col))
df_local = df_local.with_columns(new_cols)
results_rows = []
def base_name(price_col: str) -> str:
return price_col.replace(" Price", "")
for pc in price_cols:
conf_col = pc.replace("Price", "Confidence").strip()
dec_col = pc.replace(" Price", " Price Decimal")
pair_lbl = base_name(pc)
if conf_col not in df_local.columns or dec_col not in df_local.columns:
continue
df_pair = (
df_local
.select(["Validator Address", "Timestamp_dt", dec_col, conf_col])
.filter(pl.col(dec_col).is_not_null() & pl.col(conf_col).is_not_null())
.sort(["Validator Address", "Timestamp_dt"])
)
df_pair = df_pair.with_columns(
(pl.col(dec_col) - pl.col(dec_col).shift(1))
.over("Validator Address")
.abs()
.alias("abs_price_change")
)
lf_cor = (
df_pair.lazy()
.group_by("Validator Address")
.agg(
[
pl.col("abs_price_change").alias("price_change_list"),
pl.col(conf_col).alias("confidence_list"),
]
)
)
local_rows = lf_cor.collect().to_dicts()
for row in local_rows:
validator = row["Validator Address"]
pc_list = row["price_change_list"]
conf_list = row["confidence_list"]
if len(pc_list) < 2:
corr_val = None
else:
corr_val = pearson_correlation(pc_list, conf_list)
results_rows.append(
{
"Validator Address": validator,
"pair_label": pair_lbl,
"corr_conf_price_change": corr_val,
"num_points": len(pc_list),
}
)
df_corr = pl.DataFrame(results_rows)
return df_corr.sort(["pair_label", "Validator Address"])
def pearson_correlation(xs, ys):
"""
Computes Pearson correlation between two lists of floats.
"""
clean_data = [
(x, y)
for (x, y) in zip(xs, ys)
if (x is not None and y is not None and not math.isnan(x) and not math.isnan(y))
]
if len(clean_data) < 2:
return None
xs_clean, ys_clean = zip(*clean_data)
mean_x = statistics.mean(xs_clean)
mean_y = statistics.mean(ys_clean)
num = sum((x - mean_x) * (y - mean_y) for x, y in zip(xs_clean, ys_clean))
den_x = math.sqrt(sum((x - mean_x) ** 2 for x in xs_clean))
den_y = math.sqrt(sum((y - mean_y) ** 2 for y in ys_clean))
if den_x == 0 or den_y == 0:
return None
return num / (den_x * den_y)
def analyze_confidence_value_anomalies(
submission_glob: str, fx_pairs: List[str], autonity_pairs: List[str]
):
"""
Main analysis function.
"""
df_all = load_and_preprocess_submissions(submission_glob)
conf_cols = list_confidence_columns(df_all)
df_conf_dist = confidence_distribution_by_validator(
df_all, conf_cols, top_k_freq=3
)
df_anomalies = df_conf_dist.filter(
pl.col("zero_variation") | pl.col("only_one_value")
)
df_corr = check_confidence_vs_price_correlation(df_all, fx_pairs, autonity_pairs)
return {
"df_confidence_distribution": df_conf_dist,
"df_confidence_anomalies": df_anomalies,
"df_correlation_price_change": df_corr,
    }
fx_price_cols = [
"AUD-USD Price",
"CAD-USD Price",
"EUR-USD Price",
"GBP-USD Price",
"JPY-USD Price",
"SEK-USD Price",
]
autonity_price_cols = [
"ATN-USD Price",
"NTN-USD Price",
"NTN-ATN Price",
]
results = analyze_confidence_value_anomalies(
submission_glob="../submission-data/Oracle_Submission_*.csv",
fx_pairs=fx_price_cols,
autonity_pairs=autonity_price_cols,
)
5.4 What Are the Results?
The outputs below directly reference results generated by the analysis.
Confidence Value Distribution
results["df_confidence_distribution"]| Validator Address | count_rows | min_conf | max_conf | mean_conf | std_conf | distinct_values_count | top_freq_values_str | top_freq_values_list | zero_variation | only_one_value | has_zero_conf |
|---|---|---|---|---|---|---|---|---|---|---|---|
| str | u32 | i64 | i64 | f64 | f64 | u32 | str | list[i64] | bool | bool | bool |
| "0x00a96aaED75015Bb44cED878D927… | 25785 | 90 | 100 | 99.902269 | 0.983768 | 3 | "100(25533), 90(252), null(0)" | [100, 90] | false | false | false |
| "0x01F788E4371a70D579C178Ea7F48… | 25146 | 90 | 100 | 99.904557 | 0.972293 | 3 | "100(24906), 90(240), null(0)" | [100, 90] | false | false | false |
| "0x100E38f7BCEc53937BDd79ADE46F… | 0 | null | null | null | null | 1 | "null(0)" | [] | null | true | false |
| "0x1476A65D7B5739dE1805d5130441… | 22149 | 100 | 100 | 100.0 | 0.0 | 2 | "100(22149), null(0)" | [100] | true | false | false |
| "0x197B2c44b887c4aC01243BDE7E4b… | 25920 | 90 | 100 | 99.902778 | 0.981227 | 2 | "100(25668), 90(252)" | [100, 90] | false | false | false |
| … | … | … | … | … | … | … | … | … | … | … | … |
| "0xd625d50B0d087861c286d726eC51… | 0 | null | null | null | null | 1 | "null(0)" | [] | null | true | false |
| "0xdF239e0D5b4E6e820B0cFEF6972A… | 0 | null | null | null | null | 1 | "null(0)" | [] | null | true | false |
| "0xf10f56Bf0A28E0737c7e6bB0aF92… | 25920 | 90 | 100 | 99.902778 | 0.981227 | 2 | "100(25668), 90(252)" | [100, 90] | false | false | false |
| "0xf34CD6c09a59d7D3d1a6C3dC231a… | 25092 | 100 | 100 | 100.0 | 0.0 | 2 | "100(25092), null(0)" | [100] | true | false | false |
| "0xfD97FB8835d25740A2Da27c69762… | 25920 | 90 | 100 | 99.902778 | 0.981227 | 2 | "100(25668), 90(252)" | [100, 90] | false | false | false |
- Key indicators:
- Mean/std: Low or zero standard deviation indicates fixed or rarely-changing confidence.
- Distinct values count: Few distinct values may indicate hard-coded or rarely adjusted confidence (see the filter sketch after this list).
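A minimal follow-up sketch (assuming the results dictionary produced above) that applies these indicators directly to the distribution frame; note that distinct_values_count treats null as its own value, so a count of 1 corresponds to validators with no usable confidence data:
suspicious = results["df_confidence_distribution"].filter(
    (pl.col("std_conf") == 0.0) | (pl.col("distinct_values_count") == 1)
)
print(suspicious.select(["Validator Address", "std_conf", "distinct_values_count", "top_freq_values_str"]))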
Identified Anomalies (Zero or Single-Value Confidence)
results["df_confidence_anomalies"]| Validator Address | count_rows | min_conf | max_conf | mean_conf | std_conf | distinct_values_count | top_freq_values_str | top_freq_values_list | zero_variation | only_one_value | has_zero_conf |
|---|---|---|---|---|---|---|---|---|---|---|---|
| str | u32 | i64 | i64 | f64 | f64 | u32 | str | list[i64] | bool | bool | bool |
| "0x100E38f7BCEc53937BDd79ADE46F… | 0 | null | null | null | null | 1 | "null(0)" | [] | null | true | false |
| "0x1476A65D7B5739dE1805d5130441… | 22149 | 100 | 100 | 100.0 | 0.0 | 2 | "100(22149), null(0)" | [100] | true | false | false |
| "0x26E2724dBD14Fbd52be430B97043… | 0 | null | null | null | null | 1 | "null(0)" | [] | null | true | false |
| "0x383A3c437d3F12f60E5fC9901194… | 25920 | 100 | 100 | 100.0 | 0.0 | 1 | "100(25920)" | [100] | true | true | false |
| "0x3fe573552E14a0FC11Da25E43Fef… | 0 | null | null | null | null | 1 | "null(0)" | [] | null | true | false |
| … | … | … | … | … | … | … | … | … | … | … | … |
| "0xDF2D0052ea56A860443039619f6D… | 25902 | 100 | 100 | 100.0 | 0.0 | 2 | "100(25902), null(0)" | [100] | true | false | false |
| "0xcdEed21b471b0Dc54faF74480A0E… | 25920 | 100 | 100 | 100.0 | 0.0 | 1 | "100(25920)" | [100] | true | true | false |
| "0xd625d50B0d087861c286d726eC51… | 0 | null | null | null | null | 1 | "null(0)" | [] | null | true | false |
| "0xdF239e0D5b4E6e820B0cFEF6972A… | 0 | null | null | null | null | 1 | "null(0)" | [] | null | true | false |
| "0xf34CD6c09a59d7D3d1a6C3dC231a… | 25092 | 100 | 100 | 100.0 | 0.0 | 2 | "100(25092), null(0)" | [100] | true | false | false |
- Rows indicate validators consistently providing identical confidence, suggesting potential misconfiguration or logic errors; a short breakdown sketch follows.
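A short breakdown sketch (again assuming the results dictionary above) to separate the two failure modes visible in this table: validators that submitted no confidence values at all versus validators locked to a single value:
anoms = results["df_confidence_anomalies"]
no_data = anoms.filter(pl.col("count_rows") == 0)
fixed_value = anoms.filter((pl.col("count_rows") > 0) & pl.col("zero_variation"))
print(f"Validators with no confidence submissions: {no_data.height}")
print(f"Validators locked to a single confidence value: {fixed_value.height}")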
Correlation Between Confidence and Price Changes
results["df_correlation_price_change"].filter(pl.col("num_points") > 2)| Validator Address | pair_label | corr_conf_price_change | num_points |
|---|---|---|---|
| str | str | f64 | i64 |
| "0x00a96aaED75015Bb44cED878D927… | "ATN-USD" | null | 2865 |
| "0x01F788E4371a70D579C178Ea7F48… | "ATN-USD" | null | 2794 |
| "0x1476A65D7B5739dE1805d5130441… | "ATN-USD" | null | 2461 |
| "0x197B2c44b887c4aC01243BDE7E4b… | "ATN-USD" | null | 2880 |
| "0x19E356ebC20283fc74AF0BA4C179… | "ATN-USD" | null | 2786 |
| … | … | … | … |
| "0xcf716b3930d7cf6f2ADAD90A27c3… | "SEK-USD" | 0.006676 | 2880 |
| "0xd61a48b0e11B0Dc6b7Bd713B1012… | "SEK-USD" | null | 2866 |
| "0xf10f56Bf0A28E0737c7e6bB0aF92… | "SEK-USD" | 0.00658 | 2880 |
| "0xf34CD6c09a59d7D3d1a6C3dC231a… | "SEK-USD" | null | 2788 |
| "0xfD97FB8835d25740A2Da27c69762… | "SEK-USD" | -0.112979 | 2880 |
Note: You may observe many null values in the corr_conf_price_change column. This can happen if a validator always provides the same confidence (zero variance in confidence values) or the price change for the given pair is often 0 or extremely small (zero variance in price).
- Correlation (corr_conf_price_change) near zero implies confidence metrics are not aligned with real market volatility, while strong correlation (positive or negative) suggests meaningful responsiveness (a filter sketch follows).
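A minimal filter sketch on the correlation frame above to surface the validator/pair combinations where confidence does appear to respond to volatility, reusing the 0.1 cut-off applied in the interpretation code below:
responsive = (
    results["df_correlation_price_change"]
    .filter(pl.col("corr_conf_price_change").abs() >= 0.1)
    .sort("corr_conf_price_change", descending=True)
)
print(responsive)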
Interpretation of Results
df_corr_fixed = results["df_correlation_price_change"].with_columns(
pl.col("corr_conf_price_change").cast(pl.Float64)
)
num_anomalies = results["df_confidence_anomalies"].height
print(f"Validators with fixed confidence values: {num_anomalies}")
low_corr_count = df_corr_fixed.filter(
(pl.col("corr_conf_price_change").abs() < 0.1)
& (pl.col("corr_conf_price_change").is_not_null())
).height
print(f"Number of validator-currency pair combinations with low correlation (<0.1): {low_corr_count}")
if num_anomalies > 0:
print("Identified validators with potentially hard-coded or fixed confidence values.")
else:
print("No significant anomalies in confidence values identified.")
if low_corr_count > 0:
print("Confidence values for many validators do not adequately reflect market volatility.")
else:
print("Confidence values generally align well with price changes.")Validators with fixed confidence values: 11
Number of validator-currency pair combinations with low correlation (<0.1): 143
Identified validators with potentially hard-coded or fixed confidence values.
Confidence values for many validators do not adequately reflect market volatility.
List of all Validators and their Standard Deviations
def compute_variation_metrics(df: pl.DataFrame, confidence_cols: List[str]) -> pl.DataFrame:
"""
For each validator, compute:
- min_conf, max_conf, mean_conf, std_conf
- distinct_values_count, fraction_zero
"""
# Keep relevant columns
keep_cols = ["Validator Address", "Timestamp_dt"] + confidence_cols
df_small = df.select([c for c in keep_cols if c in df.columns])
# Reshape into long form
df_long = df_small.melt(
id_vars=["Validator Address", "Timestamp_dt"],
value_vars=confidence_cols,
variable_name="confidence_col",
value_name="confidence_val",
)
# Group by validator only
metrics_lf = (
df_long.lazy()
.group_by(["Validator Address"])
.agg([
pl.min("confidence_val").alias("min_conf"),
pl.max("confidence_val").alias("max_conf"),
pl.mean("confidence_val").alias("mean_conf"),
pl.std("confidence_val").alias("std_conf"),
pl.n_unique("confidence_val").alias("distinct_values_count"),
(pl.col("confidence_val") == 0).sum().alias("count_zero"),
pl.count("confidence_val").alias("count_total"),
])
.with_columns([
(pl.col("count_zero") / pl.col("count_total")).alias("fraction_zero")
])
)
return metrics_lf.collect().sort(["Validator Address"])
df_all_variation = load_and_preprocess_submissions("../submission-data/Oracle_Submission_*.csv")
all_conf_cols = list_confidence_columns(df_all_variation)
df_variation_metrics = compute_variation_metrics(df_all_variation, all_conf_cols)
df_variation_metrics = df_variation_metrics.sort("std_conf", descending=False)
for row in df_variation_metrics.to_dicts():
mean_conf = row['mean_conf']
if mean_conf is None:
mean_conf = "0"
else:
mean_conf = round(mean_conf, 1)
std_conf = row['std_conf']
if std_conf is None:
std_conf = "0"
else:
std_conf = round(std_conf, 1)
print(
f"Validator {row['Validator Address']}: "
f"min_conf={row['min_conf']}, "
f"max_conf={row['max_conf']}, "
f"mean_conf={mean_conf}, "
f"std_conf={std_conf}, "
    )
Validator 0x100E38f7BCEc53937BDd79ADE46F34362470577B: min_conf=None, max_conf=None, mean_conf=0, std_conf=0,
Validator 0x26E2724dBD14Fbd52be430B97043AA4c83F05852: min_conf=None, max_conf=None, mean_conf=0, std_conf=0,
Validator 0x3fe573552E14a0FC11Da25E43Fef11e16a785068: min_conf=None, max_conf=None, mean_conf=0, std_conf=0,
Validator 0xd625d50B0d087861c286d726eC51Cf4Bd9c54357: min_conf=None, max_conf=None, mean_conf=0, std_conf=0,
Validator 0xdF239e0D5b4E6e820B0cFEF6972A90893c2073AB: min_conf=None, max_conf=None, mean_conf=0, std_conf=0,
Validator 0x1476A65D7B5739dE1805d5130441A94022Ee49fe: min_conf=100, max_conf=100, mean_conf=100.0, std_conf=0.0,
Validator 0x383A3c437d3F12f60E5fC990119468D3561EfBfc: min_conf=100, max_conf=100, mean_conf=100.0, std_conf=0.0,
Validator 0x6747c02DE7eb2099265e55715Ba2E03e8563D051: min_conf=100, max_conf=100, mean_conf=100.0, std_conf=0.0,
Validator 0xDF2D0052ea56A860443039619f6DAe4434bc0Ac4: min_conf=100, max_conf=100, mean_conf=100.0, std_conf=0.0,
Validator 0xcdEed21b471b0Dc54faF74480A0E700fCc42a7b6: min_conf=100, max_conf=100, mean_conf=100.0, std_conf=0.0,
Validator 0xf34CD6c09a59d7D3d1a6C3dC231a7834E5615D6A: min_conf=100, max_conf=100, mean_conf=100.0, std_conf=0.0,
Validator 0x01F788E4371a70D579C178Ea7F48E04e8B2CD743: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x19E356ebC20283fc74AF0BA4C179502A1F62fA7B: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0xbfDcAF35f52F9ef423ac8F2621F9eef8be6dEd17: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x2928FE5b911BCAf837cAd93eB9626E86a189f1dd: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x4cD134001EEF0843B9c69Ba9569d11fDcF4bd495: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x197B2c44b887c4aC01243BDE7E4bBa8bd95BC3a8: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x23b4Be9536F93b8D550214912fD0e38417Ff7209: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x24915749B793375a8C93090AF19928aFF1CAEcb6: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x358488a4EdCA493FCD87610dcd50c62c8A3Dd658: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x5603caFE3313D0cf56Fd4bE4A2f606dD6E43F8Eb: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x59031767f20EA8F4a3d90d33aB0DAA2ca469Fd9a: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x64F83c2538A646A550Ad9bEEb63427a377359DEE: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x791A7F840ac11841cCB0FaA968B2e3a0Db930fCe: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x99E2B4B27BDe92b42D04B6CF302cF564D2C13b74: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x9C7dAABb5101623340C925CFD6fF74088ff5672e: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0xC1F9acAF1824F6C906b35A0D2584D6E25077C7f5: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0xD9fDab408dF7Ae751691BeC2efE3b713ba3f9C36: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0xcf716b3930d7cf6f2ADAD90A27c39fDc9D643BBd: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0xf10f56Bf0A28E0737c7e6bB0aF92f3DDad34aE6a: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0xfD97FB8835d25740A2Da27c69762D74F6A931858: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x8584A78A9b94f332A34BBf24D2AF83367Da31894: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x984A46Ec685Bb41A7BBb2bc39f80C78410ff4057: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x00a96aaED75015Bb44cED878D927dcb15ec1FF54: min_conf=90, max_conf=100, mean_conf=99.9, std_conf=1.0,
Validator 0x36142A4f36974e2935192A1111C39330aA296D3C: min_conf=90, max_conf=100, mean_conf=93.3, std_conf=4.7,
Validator 0x718361fc3637199F24a2437331677D6B89a40519: min_conf=90, max_conf=100, mean_conf=93.3, std_conf=4.7,
Validator 0x94d28f08Ff81A80f4716C0a8EfC6CAC2Ec74d09E: min_conf=90, max_conf=100, mean_conf=93.3, std_conf=4.7,
Validator 0xE9FFF86CAdC3136b3D94948B8Fd23631EDaa2dE3: min_conf=90, max_conf=100, mean_conf=93.3, std_conf=4.7,
Validator 0xc5B9d978715F081E226cb28bADB7Ba4cde5f9775: min_conf=90, max_conf=100, mean_conf=93.3, std_conf=4.7,
Validator 0x6a395dE946c0493157404E2b1947493c633f569E: min_conf=90, max_conf=100, mean_conf=93.3, std_conf=4.7,
Validator 0xF9B38D02959379d43C764064dE201324d5e12931: min_conf=70, max_conf=100, mean_conf=93.1, std_conf=5.2,
Validator 0xBE287C82A786218E008FF97320b08244BE4A282c: min_conf=70, max_conf=100, mean_conf=93.1, std_conf=5.2,
Validator 0x7232e75a8bFd8c9ab002BB3A00eAa885BC72A6dd: min_conf=70, max_conf=100, mean_conf=93.1, std_conf=5.2,
Validator 0x3AaF7817618728ffEF81898E11A3171C33faAE41: min_conf=70, max_conf=100, mean_conf=93.1, std_conf=5.2,
Validator 0x5E17e837DcBa2728C94f95c38fA8a47CB9C8818F: min_conf=70, max_conf=100, mean_conf=93.1, std_conf=5.2,
Validator 0x1Be7f70BCf8393a7e4A5BcC66F6f15d6e35cfBBC: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0x22A76e194A49c9e5508Cd4A3E1cD555D088ECB08: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0x3597d2D42f8Fbbc82E8b1046048773aD6DDB717E: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0x527192F3D2408C84087607b7feE1d0f907821E17: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0x551f3300FCFE0e392178b3542c009948008B2a9F: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0x94470A842Ea4f44e668EB9C2AB81367b6Ce01772: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0x9d28e40E9Ec4789f9A0D17e421F76D8D0868EA44: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0xB5d8be2AB4b6d7E6be7Ea28E91b370223a06289f: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0xBBf36374eb23968F25aecAEbb97BF3118f3c2fEC: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0xDCA5DFF3D42f2db3C18dBE823380A0A81db49A7E: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0xE4686A4C6E63A8ab51B458c52EB779AEcf0B74f7: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0x8f91e0ADF8065C3fFF92297267E02DF32C2978FF: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0xEf0Ba5e345C2C3937df5667A870Aae5105CAa3a5: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0xd61a48b0e11B0Dc6b7Bd713B1012563c52591BAA: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Validator 0x831B837C3DA1B6c2AB68a690206bDfF368877E19: min_conf=50, max_conf=100, mean_conf=66.7, std_conf=23.6,
Please note: min_conf, max_conf, mean_conf, and std_conf are the minimum, maximum, mean, and standard deviation of the confidence values each validator provided across all of its submissions.
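The metrics above also include a fraction_zero column that is not printed. A short sketch (reusing df_variation_metrics from the cell above) that would list validators frequently reporting zero confidence; given the min_conf values shown, it is expected to return no rows for this dataset:
zero_heavy = df_variation_metrics.filter(pl.col("fraction_zero") > 0.0)
print(zero_heavy.select(["Validator Address", "count_zero", "count_total", "fraction_zero"]))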