|
import os
import json
import random
import re

import numpy as np
import pandas as pd
import torch
from torch import bfloat16
from tqdm import tqdm
from dotenv import load_dotenv
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer

from genai.client import Client
from genai.credentials import Credentials
from genai.schema import (
    DecodingMethod,
    HumanMessage,
    ModerationHAP,
    ModerationHAPInput,
    ModerationHAPOutput,
    ModerationParameters,
    SystemMessage,
    TextGenerationParameters,
)
from genai.text.generation import CreateExecutionOptions

print('current dir:', os.getcwd())
print('list all files and folders in the current directory:', os.listdir())

load_dotenv()

|
def heading(text: str) -> str: |
|
"""Helper function for centering text.""" |
|
return "\n" + f" {text} ".center(80, "=") + "\n" |
|
|
|
|
|
def chat_llm_batch(model_id, prompts, limit=20):
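    """Send a batch of prompts through the genai text-generation API and return
    the generated texts in the same order as the input prompts."""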
|
    parameters = TextGenerationParameters(
        decoding_method=DecodingMethod.SAMPLE,
        max_new_tokens=128,
        min_new_tokens=30,
        temperature=0,
        top_k=1,
        top_p=1,
        random_seed=42,
    )
    client = Client(credentials=Credentials.from_env())
    response_list = []
    for response in client.text.generation.create(
        model_id=model_id,
        inputs=prompts,
        execution_options=CreateExecutionOptions(concurrency_limit=limit, ordered=True),
        parameters=parameters,
    ):
|
response_list.append(response.results[0].generated_text) |
|
return response_list |
|
|
|
def create_prompt(model_id, tokenizer=None, question=None, answer=None, pred=None):
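    """Build the judge prompt that asks an LLM to compare a predicted answer with
    the correct answer, formatted for the target model family (Mistral instruct
    tags, a NousResearch chat template, or raw chat messages for an API model)."""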
|
messages = [ |
|
{ |
|
"role": "system", |
|
"content": |
|
"You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. " |
|
"Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:\n" |
|
"------\n" |
|
"##INSTRUCTIONS:\n" |
|
"- Focus on the meaningful match between the predicted answer and the correct answer.\n" |
|
"- Consider synonyms or paraphrases as valid matches.\n" |
|
"- Evaluate the correctness of the prediction compared to the answer." |
|
}, |
|
{ |
|
"role": "user", |
|
"content": |
|
"Please evaluate the following question-answer pair:\n\n" |
|
f"Question: {question.capitalize()}\n" |
|
f"Correct Answer: {answer.lower()}\n" |
|
f"Predicted Answer: {pred.lower()}\n\n" |
|
"Evaluate if the answer is correct with yes/no and assign a correctness score between 0 and 5, where 0 indicates incorrect answer, and 5 signifies the highest meaningful match. " |
|
"Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING. " |
|
"DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " |
|
"For example, your response should look like this: {'pred': 'no', 'score': 0}." |
|
} |
|
] |
|
|
|
if 'mistralai' in model_id: |
|
prompt = f'<s>[INST] {messages[0]["content"].strip()}\n\n{messages[1]["content"].strip()} [/INST]' |
|
elif 'NousResearch' in model_id: |
|
prompt = tokenizer.apply_chat_template(messages, tokenize=False) |
|
prompt = prompt + '<|im_start|>assistant' |
|
elif 'api' in model_id: |
|
prompt = messages |
|
else: |
|
raise NotImplementedError |
|
return prompt |
|
|
|
|
|
def mixtral_eval_api(submission_dict_of_dict, solution_dict_of_dict, category=None, |
|
model_id = "mistralai/mixtral-8x7b-instruct-v01", batch_size = 16, |
|
category_to_sample_ids_dict = None, answer_dict_for_mixtral_eval = None, limit = None, |
|
return_score = False): |
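    """Evaluate one category by asking a Mixtral judge (served via the genai API)
    whether each prediction matches the ground truth; returns accuracy, or
    (accuracy, average 0-5 score) when return_score is True."""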
|
id_list = category_to_sample_ids_dict[category] |
|
examples_to_eval = [] |
|
evals = [] |
|
|
|
sample_id_list = [] |
|
|
|
for output_id, output in submission_dict_of_dict.items(): |
|
if output_id in id_list: |
|
sample_id_list.append(output_id) |
|
|
|
|
|
|
|
steps = int(np.ceil(len(sample_id_list) / batch_size)) |
|
for step in tqdm(range(steps)): |
|
prompts = [] |
|
|
|
|
|
for item in sample_id_list[step * batch_size: (step + 1) * batch_size]: |
|
|
|
|
|
|
|
output_id = item |
|
sample_key = output_id |
|
answer = str(submission_dict_of_dict[output_id]['pred']) |
|
label = str(solution_dict_of_dict[output_id]['pred']) |
|
|
|
gt_answer = label |
|
pred_answer = answer |
|
question = answer_dict_for_mixtral_eval[sample_key]['question'] |
|
examples_to_eval.append({ |
|
'id': sample_key, |
|
"question_type": answer_dict_for_mixtral_eval[sample_key]['question_type'], |
|
'answer': gt_answer, |
|
'question': question, |
|
'parsed_pred': pred_answer, |
|
}) |
|
|
|
            prompt = create_prompt(model_id='mistralai', question=question, answer=gt_answer, pred=pred_answer)
|
prompts.append(prompt) |
|
response_list = chat_llm_batch(model_id, prompts, limit=limit) |
|
evals.extend(response_list) |
|
|
|
judge_dict = {} |
|
pred_correct = 0 |
|
score_sum = 0 |
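    # Parse each judge response: extract the {'pred': ..., 'score': ...} dict it was
    # asked to return; unparsable responses are counted as wrong with score 0.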
|
for sample_data, sample_eval in zip(examples_to_eval, evals): |
|
try: |
|
sample_eval = re.match(r".*(\{.*?\}).*", sample_eval, re.S).group(1) |
|
sample_eval = sample_eval.replace("'", '"') |
|
sample_eval = json.loads(sample_eval) |
|
pred = sample_eval['pred'] |
|
sample_score = sample_eval['score'] |
|
if pred == 'yes': |
|
judge_dict[sample_data['id']] = {'pred': 'Correct', 'score': sample_score} |
|
pred_correct += 1 |
|
else: |
|
judge_dict[sample_data['id']] = {'pred': 'Wrong', 'score': sample_score} |
|
score_sum += sample_score |
|
        except Exception:
|
judge_dict[sample_data['id']] = {'pred': 'Wrong', 'score': 0} |
|
    if len(examples_to_eval) == 0:
        return (0, 0) if return_score else 0
|
|
|
if return_score: |
|
return pred_correct / len(examples_to_eval), score_sum / len(examples_to_eval) |
|
else: |
|
return pred_correct / len(examples_to_eval) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def extract_numbers(string): |
|
""" |
|
    Extract all forms of numbers from a string with regex.
|
""" |
|
|
|
    # Numbers with thousands separators, e.g. "1,234,567"
    pattern_commas = r'-?\b\d{1,3}(?:,\d{3})+\b'
    # Scientific notation, e.g. "1.2e-3"
    pattern_scientific = r'-?\d+(?:\.\d+)?[eE][+-]?\d+'
    # Plain integers and decimals not already covered by the two patterns above
    pattern_simple = r'-?(?:\d+\.\d+|\.\d+|\d+\b)(?![eE][+-]?\d+)(?![,\d])'
|
|
|
|
|
numbers_with_commas = re.findall(pattern_commas, string) |
|
|
|
numbers_scientific = re.findall(pattern_scientific, string) |
|
|
|
numbers_simple = re.findall(pattern_simple, string) |
|
|
|
|
|
all_numbers = numbers_with_commas + numbers_scientific + numbers_simple |
|
return all_numbers |
|
|
|
def parse_open_response(response): |
|
""" |
|
Parse the prediction from the generated response. |
|
Return a list of predicted strings or numbers. |
|
""" |
|
|
|
|
|
def get_key_subresponses(response): |
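        """Split the response into sub-sentences and keep the shortest span that
        follows an answer indicator such as 'is', 'answer', 'therefore', or '='."""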
|
key_responses = [] |
|
response = response.strip().strip(".").lower() |
|
sub_responses = re.split(r'\.\s(?=[A-Z])|\n', response) |
|
indicators_of_keys = ['could be ', 'so ', 'is ', |
|
'thus ', 'therefore ', 'final ', 'answer ', 'result '] |
|
|
for index, resp in enumerate(sub_responses): |
|
|
|
if index == len(sub_responses) - 1: |
|
indicators_of_keys.extend(['=']) |
|
shortest_key_response = None |
|
for indicator in indicators_of_keys: |
|
if indicator in resp: |
|
if not shortest_key_response: |
|
shortest_key_response = resp.split(indicator)[-1].strip() |
|
else: |
|
if len(resp.split(indicator)[-1].strip()) < len(shortest_key_response): |
|
shortest_key_response = resp.split(indicator)[-1].strip() |
|
|
|
|
|
if shortest_key_response: |
|
|
|
if shortest_key_response.strip() not in [":", ",", ".", "!", "?", ";", ":", "'"]: |
|
key_responses.append(shortest_key_response) |
|
if len(key_responses) == 0: |
|
return [response] |
|
return key_responses |
|
|
|
|
|
key_responses = get_key_subresponses(response) |
|
|
|
pred_list = key_responses.copy() |
|
for resp in key_responses: |
|
pred_list.extend(extract_numbers(resp)) |
|
|
|
tmp_pred_list = [] |
|
for i in range(len(pred_list)): |
|
tmp_pred_list.extend(normalize_str(pred_list[i])) |
|
pred_list = tmp_pred_list |
|
|
|
|
|
pred_list = list(set(pred_list)) |
|
|
|
return pred_list |
|
def check_is_number(string): |
|
""" |
|
    Check if the given string is a number.
|
""" |
|
try: |
|
float(string.replace(',', '')) |
|
return True |
|
except ValueError: |
|
|
|
return False |
|
def normalize_str(string): |
|
""" |
|
Normalize the str to lower case and make them float numbers if possible. |
|
""" |
|
|
|
|
|
|
|
string = string.strip() |
|
|
|
is_number = check_is_number(string) |
|
|
|
if is_number: |
|
string = string.replace(',', '') |
|
string = float(string) |
|
|
|
string = round(string, 2) |
|
return [string] |
|
else: |
|
|
|
string = string.lower() |
|
if len(string) == 1: |
|
return [" " + string, string + " "] |
|
return [string] |
|
def eval_open(gold_i, pred_i): |
|
""" |
|
Evaluate an open question instance |
|
""" |
|
correct = False |
|
if isinstance(gold_i, list): |
|
|
|
norm_answers = [] |
|
for answer in gold_i: |
|
norm_answers.extend(normalize_str(answer)) |
|
else: |
|
norm_answers = normalize_str(gold_i) |
|
for pred in pred_i: |
|
if isinstance(pred, str): |
|
for norm_ans in norm_answers: |
|
|
|
if isinstance(norm_ans, str) and norm_ans in pred: |
|
if not correct: |
|
correct = True |
|
break |
|
else: |
|
if pred in norm_answers: |
|
if not correct: |
|
correct = True |
|
break |
|
return correct |
|
def eval_multi_choice(gold_i, pred_i): |
|
""" |
|
Evaluate a multiple choice instance. |
|
""" |
|
correct = False |
|
|
|
if isinstance(gold_i, list): |
|
for answer in gold_i: |
|
if answer == pred_i: |
|
correct = True |
|
break |
|
else: |
|
if gold_i == pred_i: |
|
correct = True |
|
return correct |
|
|
|
def evaluate(samples): |
|
""" |
|
Batch evaluation for multiple choice and open questions. |
|
""" |
|
pred_correct = 0 |
|
judge_dict = dict() |
|
for sample in samples: |
|
gold_i = sample['answer'] |
|
pred_i = sample['parsed_pred'] |
|
if sample['question_type'] == 'multiple-choice': |
|
correct = eval_multi_choice(gold_i, pred_i) |
|
else: |
|
correct = eval_open(gold_i, pred_i) |
|
|
|
if correct: |
|
judge_dict[sample['id']] = 'Correct' |
|
pred_correct += 1 |
|
else: |
|
judge_dict[sample['id']] = 'Wrong' |
|
|
|
if len(samples) == 0: |
|
return None, {'acc': 0} |
|
return judge_dict, {'acc': pred_correct / len(samples)} |
|
|
|
|
|
def mmmu_eval(submission_dict_of_dict, solution_dict_of_dict, category=None, category_to_sample_ids_dict = None): |
|
""" |
|
    MMMU-style evaluation for the six phase-1 datasets (iconqa_fill_in_blank, funsd,
    iconqa_choose_txt, wildreceipt, textbookqa, tabfact); it only checks whether the
    ground-truth answer is contained in the parsed prediction.

    :param submission_dict_of_dict: predictions for all samples of all datasets
    :param solution_dict_of_dict: ground truth for all samples of all datasets
    :return: accuracy for the given category
|
""" |
|
id_list = category_to_sample_ids_dict[category] |
|
examples_to_eval = [] |
|
|
|
|
|
for output_id, output in submission_dict_of_dict.items(): |
|
if output_id in id_list: |
|
|
|
answer = str(output['pred']) |
|
|
|
label = str(solution_dict_of_dict[output_id]['pred']) |
|
|
|
|
|
|
|
|
|
|
|
parse_pred = parse_open_response(answer) |
|
examples_to_eval.append({ |
|
'id': output['id'], |
|
"question_type": "short-answer", |
|
'answer': label, |
|
'parsed_pred': parse_pred, |
|
}) |
|
|
|
|
|
|
|
judge_dict, metric_dict = evaluate(examples_to_eval) |
|
metric_dict.update({'num_example': len(examples_to_eval)}) |
|
return metric_dict['acc'] |
|
|
|
|
|
|
|
def compute(params): |
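    """Competition scoring entry point: download the solution and submission files
    from the competition dataset repo, score the public split with the phase-1 and
    phase-2 evaluations, and return the public/private score dictionaries."""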
|
random.seed(42) |
|
np.random.seed(42) |
|
|
|
|
|
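    # Download the category -> sample-id mapping and the question metadata used by
    # the Mixtral judge from the competition dataset repo.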
category_to_sample_ids_dict_file = hf_hub_download( |
|
repo_id=params.competition_id, |
|
filename="category_to_sample_ids_dict.json", |
|
token=params.token, |
|
repo_type="dataset", |
|
) |
|
|
|
answer_dict_for_mixtral_eval_file = hf_hub_download( |
|
repo_id=params.competition_id, |
|
filename="answer_dict_for_mixtral_eval.json", |
|
token=params.token, |
|
repo_type="dataset", |
|
) |
|
|
|
category_to_sample_ids_dict = json.load(open(category_to_sample_ids_dict_file)) |
|
answer_dict_for_mixtral_eval = json.load(open(answer_dict_for_mixtral_eval_file)) |
|
|
|
print(f'params {params}') |
|
print(f'params.submission_id_col {params.submission_id_col}') |
|
|
|
print('Downloading solution files ...') |
|
solution_file = hf_hub_download( |
|
repo_id=params.competition_id, |
|
filename="solution.csv", |
|
token=params.token, |
|
repo_type="dataset", |
|
) |
|
|
|
solution_df = pd.read_csv(solution_file) |
|
|
|
print('Downloading submission files ...') |
|
submission_filename = f"submissions/{params.team_id}-{params.submission_id}.csv" |
|
submission_file = hf_hub_download( |
|
repo_id=params.competition_id, |
|
filename=submission_filename, |
|
token=params.token, |
|
repo_type="dataset", |
|
) |
|
|
|
submission_df = pd.read_csv(submission_file) |
|
|
|
|
|
|
|
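    # Split solution and submission into public/private subsets and align them by the submission id column.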
public_ids = solution_df[solution_df.split == "public"][params.submission_id_col].values |
|
private_ids = solution_df[solution_df.split == "private"][params.submission_id_col].values |
|
|
|
public_solution_df = solution_df[solution_df[params.submission_id_col].isin(public_ids)] |
|
public_submission_df = submission_df[submission_df[params.submission_id_col].isin(public_ids)] |
|
|
|
private_solution_df = solution_df[solution_df[params.submission_id_col].isin(private_ids)] |
|
private_submission_df = submission_df[submission_df[params.submission_id_col].isin(private_ids)] |
|
|
|
public_solution_df = public_solution_df.sort_values(params.submission_id_col).reset_index(drop=True) |
|
public_submission_df = public_submission_df.sort_values(params.submission_id_col).reset_index(drop=True) |
|
|
|
private_solution_df = private_solution_df.sort_values(params.submission_id_col).reset_index(drop=True) |
|
private_submission_df = private_submission_df.sort_values(params.submission_id_col).reset_index(drop=True) |
|
|
|
print('public_solution_df', public_solution_df) |
|
print('private_solution_df', private_solution_df) |
|
|
|
|
|
|
|
|
|
def csv_to_dict_of_dict(df): |
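        """Convert a dataframe into a dict of rows keyed by the 'id' column."""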
|
dict_of_dict = {} |
|
for row, output in df.iterrows(): |
|
dict_of_dict[output['id']] = output |
|
return dict_of_dict |
|
public_solution_dict_of_dict = csv_to_dict_of_dict(public_solution_df) |
|
public_submission_dict_of_dict = csv_to_dict_of_dict(public_submission_df) |
|
private_solution_dict_of_dict = csv_to_dict_of_dict(private_solution_df) |
|
private_submission_dict_of_dict = csv_to_dict_of_dict(private_submission_df) |
|
|
|
|
|
|
|
|
|
|
|
phase1_datasets_for_mmmu_eval = ['iconqa_fill','funsd','iconqa_choose','wildreceipt','textbookqa','tabfact'] |
|
phase1_datasets_for_mixtral_eval = ['docvqa','infographicvqa','websrc','wtq'] |
|
|
|
|
|
|
|
|
|
|
|
phase2_datasets_for_mmmu_eval = [] |
|
phase2_datasets_for_mixtral_eval = ['mydoc', 'mychart', 'myinfographic' ] |
|
|
|
|
|
|
|
all_keys = [ |
|
|
|
'iconqa_fill_acc','funsd_acc','iconqa_choose_acc','wildreceipt_acc','textbookqa_acc','tabfact_acc', |
|
'docvqa_acc','infographicvqa_acc','websrc_acc','wtq_acc', |
|
'phase1_overall_acc', |
|
|
|
'mydoc_acc', |
|
'mydoc_rating', |
|
'mychart_acc', |
|
'mychart_rating', |
|
'myinfographic_acc', |
|
'myinfographic_rating', |
|
|
|
'phase2_overall_acc', |
|
'phase2_overall_rating' |
|
] |
|
mixtral_eval_batch_size = 16 |
|
limit = 100 |
|
public_category_score_dict = {} |
|
private_category_score_dict = {} |
|
for key_ in all_keys: |
|
public_category_score_dict.update({key_: -1}) |
|
private_category_score_dict.update({key_: -1}) |
|
|
|
|
|
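    # Sanity check: every sample id listed in any category must be present in the
    # submission; otherwise return the placeholder (-1) scores right away.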
missing_sample_ids = [] |
|
for datasetname, sample_ids in category_to_sample_ids_dict.items(): |
|
for sample_id in sample_ids: |
|
if sample_id not in public_submission_dict_of_dict: |
|
missing_sample_ids.append(sample_id) |
|
if len(missing_sample_ids) > 0: |
|
print(f'Error: missing sample ids in the submission file: {missing_sample_ids}') |
|
del public_category_score_dict['mydoc_rating'] |
|
del public_category_score_dict['mychart_rating'] |
|
del public_category_score_dict['myinfographic_rating'] |
|
del public_category_score_dict['phase2_overall_rating'] |
|
|
|
del private_category_score_dict['mydoc_rating'] |
|
del private_category_score_dict['mychart_rating'] |
|
del private_category_score_dict['myinfographic_rating'] |
|
del private_category_score_dict['phase2_overall_rating'] |
|
metric_dict = {"public_score": public_category_score_dict, |
|
"private_score": private_category_score_dict} |
|
|
|
return metric_dict |
|
|
|
|
|
|
|
|
|
|
|
|
|
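    # Phase 1: MMMU-style containment evaluation for phase1_datasets_for_mmmu_eval.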
for datasetname in phase1_datasets_for_mmmu_eval: |
|
print(f'#################################################### Phase 1 Eval: MMMU evaluation for {datasetname} ###########################################') |
|
        public_category_score_dict[f'{datasetname}_acc'] = mmmu_eval(
            public_submission_dict_of_dict, public_solution_dict_of_dict, datasetname,
            category_to_sample_ids_dict=category_to_sample_ids_dict)
|
|
|
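    # Phase 1: Mixtral-judge evaluation for the remaining phase-1 datasets.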
for datasetname in phase1_datasets_for_mixtral_eval: |
|
print(f'#################################################### Phase 1 Eval: Mixtral evaluation for {datasetname} ###########################################') |
|
|
|
        public_category_score_dict[f'{datasetname}_acc'] = mixtral_eval_api(
            public_submission_dict_of_dict, public_solution_dict_of_dict, datasetname,
            category_to_sample_ids_dict=category_to_sample_ids_dict,
            answer_dict_for_mixtral_eval=answer_dict_for_mixtral_eval,
            batch_size=mixtral_eval_batch_size, limit=limit)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
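    # Phase 2: Mixtral-judge evaluation, also collecting the average 0-5 rating per dataset.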
for datasetname in phase2_datasets_for_mixtral_eval: |
|
print(f'#################################################### Phase 2 Eval: Mixtral evaluation for {datasetname} ###########################################') |
|
        public_category_score_dict[f'{datasetname}_acc'], public_category_score_dict[f'{datasetname}_rating'] = mixtral_eval_api(
            public_submission_dict_of_dict, public_solution_dict_of_dict, datasetname,
            category_to_sample_ids_dict=category_to_sample_ids_dict,
            answer_dict_for_mixtral_eval=answer_dict_for_mixtral_eval,
            batch_size=mixtral_eval_batch_size, limit=limit, return_score=True)
|
|
|
|
|
|
|
|
|
|
|
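    # Aggregate per-dataset accuracy into a sample-weighted phase-1 overall score.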
print('################# Phase 1 Evaluation #################') |
|
|
|
for metric_type in ['acc']: |
|
n_total = 0 |
|
metric_sum = 0 |
|
print('category'.ljust(29, ' '), 'n_samples'.ljust(10, ' '), metric_type.ljust(9, ' ')) |
|
for datasetname in phase1_datasets_for_mmmu_eval + phase1_datasets_for_mixtral_eval: |
|
metric_name = f'{datasetname}_{metric_type}' |
|
metric = public_category_score_dict[metric_name] |
|
dataset_size = len(category_to_sample_ids_dict[datasetname]) |
|
dataset_metric_sum = metric * dataset_size |
|
|
|
print(f'{metric_name:<30}{dataset_size:<10}{metric:<10.4f}') |
|
metric_sum += dataset_metric_sum |
|
n_total += dataset_size |
|
overall_metric = 0 if n_total == 0 else metric_sum / float(n_total) |
|
print(f'overall_{metric_type}'.ljust(29, ' '), f'{n_total}'.ljust(10, ' '), f'{overall_metric:.4f}'.ljust(9, ' ')) |
|
public_category_score_dict[f'phase1_overall_{metric_type}'] = overall_metric |
|
print('\n') |
|
|
|
|
|
print('################# Phase 2 Evaluation #################') |
|
for metric_type in ['acc', 'rating']: |
|
n_total = 0 |
|
metric_sum = 0 |
|
print('category'.ljust(29, ' '), 'n_samples'.ljust(10, ' '), metric_type.ljust(9, ' ')) |
|
for datasetname in phase2_datasets_for_mixtral_eval: |
|
metric_name = f'{datasetname}_{metric_type}' |
|
metric = public_category_score_dict[metric_name] |
|
dataset_size = len(category_to_sample_ids_dict[datasetname]) |
|
dataset_metric_sum = metric * dataset_size |
|
|
|
print(f'{metric_name:<30}{dataset_size:<10}{metric:<10.4f}') |
|
metric_sum += dataset_metric_sum |
|
n_total += dataset_size |
|
|
|
overall_metric = 0 if n_total == 0 else metric_sum / float(n_total) |
|
print(f'overall_{metric_type}'.ljust(29, ' '), f'{n_total}'.ljust(10, ' '), f'{overall_metric:.4f}'.ljust(9, ' ')) |
|
public_category_score_dict[f'phase2_overall_{metric_type}'] = overall_metric |
|
print('\n') |
|
|
|
""" |
|
|
|
metric_dict = { |
|
"public_score": # the user will see the results immediately after submitting |
|
{ |
|
phase1dataset1_acc: xx, |
|
phase1dataset2_acc: xx, |
|
... |
|
phase1_overall_acc: xx |
|
|
|
phase2dataset1_acc: xx, |
|
phase2dataset1_rating: xx, |
|
phase2dataset2_acc: xx, |
|
phase2dataset2_rating: xx, |
|
... |
|
phase2_overall_acc: xx, |
|
phase2_overall_rating: xx, |
|
}, |
|
|
|
"private_score": |
|
{ |
|
phase1dataset1_acc: 0, |
|
phase1dataset2_acc: 0, |
|
... |
|
phase1_overall_acc: 0, |
|
|
|
phase2dataset1_acc: 0, |
|
phase2dataset1_rating: 0, |
|
phase2dataset2_acc: 0, |
|
phase2dataset2_rating: 0, |
|
... |
|
phase2_overall_acc: 0, |
|
phase2_overall_rating: 0 |
|
} |
|
} |
|
""" |
|
|
|
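    # Ratings are printed above for reference but dropped from the returned metrics.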
del public_category_score_dict['mydoc_rating'] |
|
del public_category_score_dict['mychart_rating'] |
|
del public_category_score_dict['myinfographic_rating'] |
|
del public_category_score_dict['phase2_overall_rating'] |
|
|
|
del private_category_score_dict['mydoc_rating'] |
|
del private_category_score_dict['mychart_rating'] |
|
del private_category_score_dict['myinfographic_rating'] |
|
del private_category_score_dict['phase2_overall_rating'] |
|
|
|
for key in public_category_score_dict.keys(): |
|
public_category_score_dict[key] = round(public_category_score_dict[key], 5) |
|
|
|
metric_dict = {"public_score": public_category_score_dict, |
|
"private_score": private_category_score_dict } |
|
|
|
print("metric_dict:=========") |
|
print(metric_dict) |
|
|
|
return metric_dict |