BK-Lee committed
Commit: 8a25753
Parent: fbee232
Files changed (3)
  1. config.py +57 -0
  2. meteor/load_meteor.py +0 -1
  3. utils/utils.py +0 -6
config.py ADDED
@@ -0,0 +1,57 @@
+ # OpenAI Key
+ OPENAI_KEY = ""
+
+ # Dataset root
+ DATASET_ROOT=""
+
+ # Pre Meteor Dataset
+ METEOR_DATASET= "Meteor.json"
+
+ # Various json and parquet files
+ SHAREGPT4V_CAPTION = "sharegpt4v_instruct_gpt4-vision_cap100k.json"
+ SHAREGPT4V_INSTRUCTION = "sharegpt4v_mix665k_cap23k_coco-ap9k_lcs3k_sam9k_div2k.json"
+ MINIGEMINI_INSTRUCTION = "minigemini_instruction.json"
+ DOCDOWNSTREAM = 'train.jsonl'
+ DOCREASON = 'detailed_explanation.jsonl'
+ GLLAVA_ALIGN = "gllava_align.parquet"
+ GLLAVA_QA = "gllava_qa.parquet"
+ MATHVISION = "mathvision.parquet"
+ MATHINSTRUCT = "MathInstruct.json"
+ MATHPLUS = "mathplus.parquet"
+
+ # Json files for Evaluation
+ VQAV2 = "VQAv2/v2_OpenEnded_mscoco_test2015_questions.json"
+ GQA = "gqa/testdev_balanced_questions.json"
+ SQA = "ScienceQA/problems.json"
+ SQA_SPLIT = "ScienceQA/pid_splits.json"
+ VIZWIZ = "VizWiz/test.json"
+ TEXTVQA = "TextVQA/llava_textvqa_val_v051_ocr.json"
+ TEXTVQA_ANNOTATIONS = "TextVQA/TextVQA_0.5.1_val.json"
+ POPE_POPULAR = "POPE/coco_pope_popular.json"
+ POPE_ADVERSARIAL = "POPE/coco_pope_adversarial.json"
+ POPE_RANDOM = "POPE/coco_pope_random.json"
+ MME = "MME_Benchmark_release_version/llava_mme.json"
+ MME_DIR = "MME_Benchmark_release_version"
+ MMBENCH = "MMBench/MMBench_TEST_EN_legacy.tsv"
+ MMBENCH_CN = "MMBench/MMBench_TEST_CN_legacy.tsv"
+ MMBENCH_DEV = "MMBench/mmbench_dev_20230712.tsv"
+ MMBENCH_CN_DEV = "MMBench/mmbench_dev_cn_20231003.tsv"
+ QBENCH = "LLVisionQA-QBench/llvisionqa_dev.json"
+ QBENCH_CN = "LLVisionQA-QBench/质衡-问答-验证集.json"
+ MMVET = "mm-vet/mm-vet.json"
+ MMMU = "MMMU/*/validation*"
+ MATHVISTA = "MathVista/testmini-00000-of-00001-725687bf7a18d64b.parquet"
+ AI2D = "ai2d/ai2d_test.json"
+ HALLUSIONBENCH = "HallusionBench/HallusionBench.json"
+ CHARTQA = "chartqa/test/test_augmented.json"
+ SEED = "SEED-Bench/SEED-Bench.json"
+ LLAVA = "llava-bench-in-the-wild/questions.jsonl"
+ # BLINK =
+ MATHVERSE = "MathVerse/testmini.json"
+ MATHVERSE_TEXT_ONLY = "MathVerse/testmini_text_only.json"
+ MMSTAR = "MMStar/mmstar.parquet"
+
+ # Available evaluation datasets
+ EVAL_DATASETS = ["qbench", "sqa", "ai2d", "chartqa", "seed", "pope", "hallusionbench", "mme", \
+                  "mathvista", "mmbench", "mmbench_cn", "mmvet", "llava", "mmstar", "mathverse"]
+
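For context, downstream code is expected to resolve these relative paths against DATASET_ROOT. The snippet below is an illustrative sketch, not part of the commit; the resolution pattern is an assumption.

    import os
    from config import DATASET_ROOT, METEOR_DATASET, EVAL_DATASETS

    # Fail early if the root was left at its empty placeholder value.
    assert DATASET_ROOT, "Fill in DATASET_ROOT in config.py first."

    # Build an absolute path to the Meteor annotation file.
    meteor_path = os.path.join(DATASET_ROOT, METEOR_DATASET)

    # Evaluation dataset names are checked against the EVAL_DATASETS list above.
    assert "mme" in EVAL_DATASETS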
meteor/load_meteor.py CHANGED
@@ -1,6 +1,5 @@
  import torch
  import warnings
- from config import *
  from transformers import BitsAndBytesConfig
  from .arch.modeling_meteor import MeteorForCausalLM
  from .arch.tokenization_internlm2 import InternLM2Tokenizer
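With the "from config import *" line removed, the loader now depends only on transformers and the local arch modules. A rough sketch of how such a loader might be invoked is below; the checkpoint path and the from_pretrained entry points are assumptions, not shown in this diff.

    import torch
    from transformers import BitsAndBytesConfig
    from meteor.arch.modeling_meteor import MeteorForCausalLM
    from meteor.arch.tokenization_internlm2 import InternLM2Tokenizer

    # Standard 4-bit quantization settings for bitsandbytes-backed loading.
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )

    # Hypothetical checkpoint location; the real one is not part of this change.
    CHECKPOINT = "path/to/meteor-checkpoint"
    tokenizer = InternLM2Tokenizer.from_pretrained(CHECKPOINT)
    model = MeteorForCausalLM.from_pretrained(CHECKPOINT, quantization_config=bnb_config)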
utils/utils.py CHANGED
@@ -1,11 +1,5 @@
- import os
  import gc
- import math
  import torch
- import base64
- import numpy as np
- from config import *
- import torch.nn.functional as F

  def memory_optimization():
      # memory deallocation
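The hunk ends at the top of memory_optimization. Given that only gc and torch survive the import cleanup, a plausible body looks like the sketch below (an assumption about the function's implementation, not content from this diff).

    import gc
    import torch

    def memory_optimization():
        # memory deallocation: run Python's garbage collector,
        # then release cached CUDA blocks back to the driver.
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()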