import zeroeval as ze

ze.init()

# Seed a tiny QA dataset (row_id, question, gold answer) and sync it
# to the backend so the eval run below can reference it by name.
_ROWS = [
    ("1", "Capital of France?", "Paris"),
    ("2", "Capital of Germany?", "Berlin"),
    ("3", "Capital of Japan?", "Tokyo"),
]
dataset = ze.Dataset(
    "qa-text-e2e",
    data=[
        {"row_id": rid, "question": q, "answer": a}
        for rid, q, a in _ROWS
    ],
)
dataset.push()
@ze.task(outputs=["prediction"])
def answer(row):
    """Produce a prediction for one dataset row.

    Placeholder implementation: echoes the gold answer so the pipeline
    runs end-to-end. Replace with a real provider/model call.
    """
    predicted = row.answer  # swap in the provider output here
    return {"prediction": predicted}
@ze.evaluation(mode="row", outputs=["exact_match"])
def exact_match(row, answer_col, prediction_col):
    """Row-level metric: 1 when the prediction equals the gold answer, else 0."""
    matched = answer_col == prediction_col
    return {"exact_match": 1 if matched else 0}
@ze.evaluation(mode="column", outputs=["accuracy"])
def accuracy(exact_match_col):
    """Column-level metric: mean of the per-row exact_match values.

    Returns 0.0 for an empty column rather than dividing by zero.
    """
    count = len(exact_match_col)
    if count == 0:
        return {"accuracy": 0.0}
    return {"accuracy": sum(exact_match_col) / count}
# Configure execution (parallelism, per-row timeout, retries) and
# periodic checkpoint flushing, then run the task over the dataset.
exec_cfg = ze.ExecutionConfig(
    workers=8,
    timeout_s=30,
    retry=ze.RetryPolicy(max_attempts=3),
)
ckpt_cfg = ze.CheckpointConfig(flush_every_rows=50, flush_every_seconds=5.0)
run = dataset.eval(answer, execution=exec_cfg, checkpoint=ckpt_cfg)

# Wire each evaluator's parameters to dataset/output columns, then score.
wiring = {
    "exact_match": {"answer_col": "answer", "prediction_col": "prediction"},
    "accuracy": {"exact_match_col": "exact_match"},
}
run = run.score([exact_match, accuracy], column_map=wiring)

print("eval_id:", run.id)
print("metrics:", run.metrics)
print("health:", run.health)