zzc0208 committed on
Commit
4182c57
1 Parent(s): c12aa39

Upload 7 files

Browse files
Files changed (6) hide show
  1. .gitignore +155 -155
  2. app.py +0 -1
  3. group.jpg +0 -0
  4. requirements.txt +0 -1
  5. useapi.py +171 -171
  6. utils.py +100 -100
.gitignore CHANGED
@@ -1,155 +1,155 @@
1
- # Byte-compiled / optimized / DLL files
2
- __pycache__/
3
- *.py[cod]
4
- *$py.class
5
-
6
- # C extensions
7
- *.so
8
-
9
- # Distribution / packaging
10
- .Python
11
- build/
12
- develop-eggs/
13
- dist/
14
- downloads/
15
- eggs/
16
- .eggs/
17
- lib/
18
- lib64/
19
- parts/
20
- sdist/
21
- var/
22
- wheels/
23
- share/python-wheels/
24
- *.egg-info/
25
- .installed.cfg
26
- *.egg
27
- MANIFEST
28
-
29
- # PyInstaller
30
- # Usually these files are written by a python script from a template
31
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
- *.manifest
33
- *.spec
34
-
35
- # Installer logs
36
- pip-log.txt
37
- pip-delete-this-directory.txt
38
-
39
- # Unit test / coverage reports
40
- htmlcov/
41
- .tox/
42
- .nox/
43
- .coverage
44
- .coverage.*
45
- .cache
46
- nosetests.xml
47
- coverage.xml
48
- *.cover
49
- *.py,cover
50
- .hypothesis/
51
- .pytest_cache/
52
- cover/
53
-
54
- # Translations
55
- *.mo
56
- *.pot
57
-
58
- # Django stuff:
59
- *.log
60
- local_settings.py
61
- db.sqlite3
62
- db.sqlite3-journal
63
-
64
- # Flask stuff:
65
- instance/
66
- .webassets-cache
67
-
68
- # Scrapy stuff:
69
- .scrapy
70
-
71
- # Sphinx documentation
72
- docs/_build/
73
-
74
- # PyBuilder
75
- .pybuilder/
76
- target/
77
-
78
- # Jupyter Notebook
79
- .ipynb_checkpoints
80
-
81
- # IPython
82
- profile_default/
83
- ipython_config.py
84
-
85
- # pyenv
86
- # For a library or package, you might want to ignore these files since the code is
87
- # intended to run in multiple environments; otherwise, check them in:
88
- # .python-version
89
-
90
- # pipenv
91
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
- # install all needed dependencies.
95
- #Pipfile.lock
96
-
97
- # poetry
98
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
- # This is especially recommended for binary packages to ensure reproducibility, and is more
100
- # commonly ignored for libraries.
101
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
- #poetry.lock
103
-
104
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow
105
- __pypackages__/
106
-
107
- # Celery stuff
108
- celerybeat-schedule
109
- celerybeat.pid
110
-
111
- # SageMath parsed files
112
- *.sage.py
113
-
114
- # Environments
115
- .env
116
- .venv
117
- env/
118
- venv/
119
- ENV/
120
- env.bak/
121
- venv.bak/
122
-
123
- # Spyder project settings
124
- .spyderproject
125
- .spyproject
126
-
127
- # Rope project settings
128
- .ropeproject
129
-
130
- # mkdocs documentation
131
- /site
132
-
133
- # mypy
134
- .mypy_cache/
135
- .dmypy.json
136
- dmypy.json
137
-
138
- # Pyre type checker
139
- .pyre/
140
-
141
- # pytype static type analyzer
142
- .pytype/
143
-
144
- # Cython debug symbols
145
- cython_debug/
146
-
147
- # PyCharm
148
- # JetBrains specific template is maintainted in a separate JetBrains.gitignore that can
149
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
150
- # and can be added to the global gitignore or merged into this file. For a more nuclear
151
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
152
- #.idea/
153
-
154
- #database
155
- *.csv
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ MANIFEST
28
+
29
+ # PyInstaller
30
+ # Usually these files are written by a python script from a template
31
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
32
+ *.manifest
33
+ *.spec
34
+
35
+ # Installer logs
36
+ pip-log.txt
37
+ pip-delete-this-directory.txt
38
+
39
+ # Unit test / coverage reports
40
+ htmlcov/
41
+ .tox/
42
+ .nox/
43
+ .coverage
44
+ .coverage.*
45
+ .cache
46
+ nosetests.xml
47
+ coverage.xml
48
+ *.cover
49
+ *.py,cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+ cover/
53
+
54
+ # Translations
55
+ *.mo
56
+ *.pot
57
+
58
+ # Django stuff:
59
+ *.log
60
+ local_settings.py
61
+ db.sqlite3
62
+ db.sqlite3-journal
63
+
64
+ # Flask stuff:
65
+ instance/
66
+ .webassets-cache
67
+
68
+ # Scrapy stuff:
69
+ .scrapy
70
+
71
+ # Sphinx documentation
72
+ docs/_build/
73
+
74
+ # PyBuilder
75
+ .pybuilder/
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ # For a library or package, you might want to ignore these files since the code is
87
+ # intended to run in multiple environments; otherwise, check them in:
88
+ # .python-version
89
+
90
+ # pipenv
91
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
93
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
94
+ # install all needed dependencies.
95
+ #Pipfile.lock
96
+
97
+ # poetry
98
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
100
+ # commonly ignored for libraries.
101
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102
+ #poetry.lock
103
+
104
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
105
+ __pypackages__/
106
+
107
+ # Celery stuff
108
+ celerybeat-schedule
109
+ celerybeat.pid
110
+
111
+ # SageMath parsed files
112
+ *.sage.py
113
+
114
+ # Environments
115
+ .env
116
+ .venv
117
+ env/
118
+ venv/
119
+ ENV/
120
+ env.bak/
121
+ venv.bak/
122
+
123
+ # Spyder project settings
124
+ .spyderproject
125
+ .spyproject
126
+
127
+ # Rope project settings
128
+ .ropeproject
129
+
130
+ # mkdocs documentation
131
+ /site
132
+
133
+ # mypy
134
+ .mypy_cache/
135
+ .dmypy.json
136
+ dmypy.json
137
+
138
+ # Pyre type checker
139
+ .pyre/
140
+
141
+ # pytype static type analyzer
142
+ .pytype/
143
+
144
+ # Cython debug symbols
145
+ cython_debug/
146
+
147
+ # PyCharm
148
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
149
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
150
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
151
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
152
+ #.idea/
153
+
154
+ #database
155
+ *.csv
app.py CHANGED
@@ -2,7 +2,6 @@
2
  import random
3
  import requests
4
  import io
5
- import gradio_i18n
6
  import gradio as gr
7
  import pandas as pd
8
  from PIL import Image
 
2
  import random
3
  import requests
4
  import io
 
5
  import gradio as gr
6
  import pandas as pd
7
  from PIL import Image
group.jpg CHANGED
requirements.txt CHANGED
@@ -2,4 +2,3 @@ gradio>=4.40.0
2
  jinja2>=3.1.2
3
  httpx
4
  pymongo
5
- gradio-i18n==0.0.10
 
2
  jinja2>=3.1.2
3
  httpx
4
  pymongo
 
useapi.py CHANGED
@@ -1,172 +1,172 @@
1
- import asyncio
2
- import httpx
3
- import json
4
- import requests
5
- import math
6
- import os
7
- client = httpx.AsyncClient()
8
- # 请求URL
9
- recommand_base_url = "https://" + os.getenv("recommand_base_url")
10
- chat_url = "https://" + os.getenv("chat_url")
11
- model_url = "https://" + os.getenv("model_url")
12
- character_url = "https://" + os.getenv("character_url")
13
- avatar_url = "https://" + os.getenv("avatar_url")
14
- image_url = "https://" + os.getenv("image_url")
15
- auth = os.getenv("auth")
16
- #headers
17
- def create_headers(language):
18
- # 映射
19
- language_mapping = {
20
- 'Chinese': 'zh',
21
- 'English': 'en',
22
- 'Japanese': 'ja',
23
- 'Korean': 'ko'
24
- }
25
-
26
- # 获取对应的语言代码,如果不存在则默认为 'zh'
27
- language_code = language_mapping.get(language, 'zh')
28
-
29
- return {
30
- 'X-Refresh-Token': '',
31
- 'X-Language': language_code,
32
- 'accept-language': '',
33
- 'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
34
- 'Authorization': auth,
35
- 'Accept': '*/*',
36
- 'Connection': 'keep-alive'
37
- }
38
-
39
- def recommand_character(language):
40
- response = requests.get(character_url, headers=create_headers(language))
41
- json_data = response.json()
42
- characters = [{
43
- "name": item["name"],
44
- "_id": item["_id"],
45
- "avatar_url": str(avatar_url + item['_id'] + "_avatar.webp")
46
- } for item in json_data['data']]
47
- return characters
48
-
49
- def id_to_avatar(char_id):
50
- return str(avatar_url + char_id + "_avatar.webp")
51
-
52
- #GET模型列表
53
- def get_models():
54
- class ModelStorage:
55
- def __init__(self):
56
- self.models = []
57
-
58
- def add_models(self, models):
59
- for model_info in models:
60
- # 过滤掉 'gpt-4o' 和 'gpt-4o-mini'
61
- if model_info['model'] not in ['gpt-4o', 'gpt-4o-mini', 'mythomax-13b']:
62
- if model_info['model'] in ['qwen-2-7b', 'gemma-2-9b', 'llama-3.1-8b', 'glm-4-9b']:
63
- weight = 12 # Assign a low weight to reduce their frequency
64
- else:
65
- weight = int(math.ceil(24 / model_info['price'] + 0.5))
66
- self.models.extend([model_info['model']] * weight)
67
-
68
- model_storage = ModelStorage()
69
-
70
- # 从指定的 URL 获取 JSON 数据
71
- response = requests.get(model_url)
72
-
73
- if response.status_code == 200:
74
- data = response.json()
75
- # 添加模型到 self.models
76
- model_storage.add_models(data['data'])
77
- return model_storage.models
78
-
79
- #解析推荐json
80
- def extract_recommand(data):
81
- return [
82
- {
83
- "character_id": item["character_id"],
84
- "avatar_url" : str(avatar_url+item["character_id"]+"_avatar.webp"),
85
- "_id": item["_id"],
86
- "image_url" : str(image_url+item["_id"]+"_large.webp"),
87
- "description": item["description"],
88
- "name": item["title"],
89
- "opening": item["opening"]
90
- }
91
- for item in data["data"]["moments"]
92
- ]
93
-
94
- #请求推荐API
95
- async def recommand(char_id, language):
96
- recommand_url = str(recommand_base_url + char_id)
97
- response = await client.get(recommand_url, headers=create_headers(language))
98
- json_data = response.json()
99
- return extract_recommand(json_data)
100
-
101
- async def fetch_stream(query, model, moment_id, session_id, bio, request_name, queue, language):
102
- payload = {"query": query, "model": model, "bio": bio, "moment_id": moment_id}
103
- if session_id:
104
- payload["session_id"] = session_id
105
- async with client.stream(
106
- "POST", chat_url, json=payload, headers=create_headers(language)
107
- ) as response:
108
- # 获取并返回 header
109
- if response.status_code != 200:
110
- await queue.put((request_name, "content", "Error Occur!"))
111
- await queue.put((request_name, "end", None))
112
- return
113
- response_headers = dict(response.headers)
114
- session_id = response_headers.get("x-session-id")
115
- await queue.put((request_name, "header", response_headers))
116
-
117
- # 流式处理响应内容
118
- async for chunk in response.aiter_bytes():
119
- await queue.put((request_name, "content", chunk.decode()))
120
-
121
- # 标记流结束
122
- await queue.put((request_name, "end", None))
123
-
124
- return session_id
125
-
126
-
127
- async def combine_streams(
128
- query_a,
129
- query_b,
130
- model_a,
131
- model_b,
132
- moment_id_a,
133
- moment_id_b,
134
- session_id_a,
135
- session_id_b,
136
- bio_a,
137
- bio_b,
138
- language
139
- ):
140
- queue = asyncio.Queue()
141
- task_a = asyncio.create_task(
142
- fetch_stream(
143
- query_a, model_a, moment_id_a, session_id_a, bio_a, "requestA", queue, language
144
- )
145
- )
146
- task_b = asyncio.create_task(
147
- fetch_stream(
148
- query_b, model_b, moment_id_b, session_id_b, bio_b, "requestB", queue, language
149
- )
150
- )
151
-
152
- headers = {}
153
- content = {"requestA": "", "requestB": ""}
154
- active_streams = 2
155
-
156
- while active_streams > 0:
157
- request_name, data_type, data = await queue.get()
158
-
159
- if data_type == "header":
160
- headers[f"{request_name}_header"] = data
161
- if len(headers) == 2:
162
- yield headers
163
- elif data_type == "content":
164
- content[request_name] = data.strip()
165
- if content["requestA"] or content["requestB"]:
166
- yield content
167
- content = {"requestA": "", "requestB": ""}
168
- elif data_type == "end":
169
- active_streams -= 1
170
-
171
- session_id_a = await task_a
172
  session_id_b = await task_b
 
1
+ import asyncio
2
+ import httpx
3
+ import json
4
+ import requests
5
+ import math
6
+ import os
7
+ client = httpx.AsyncClient()
8
+ # 请求URL
9
+ recommand_base_url = "https://" + os.getenv("recommand_base_url")
10
+ chat_url = "https://" + os.getenv("chat_url")
11
+ model_url = "https://" + os.getenv("model_url")
12
+ character_url = "https://" + os.getenv("character_url")
13
+ avatar_url = "https://" + os.getenv("avatar_url")
14
+ image_url = "https://" + os.getenv("image_url")
15
+ auth = os.getenv("auth")
16
+ #headers
17
+ def create_headers(language):
18
+ # 映射
19
+ language_mapping = {
20
+ 'Chinese': 'zh',
21
+ 'English': 'en',
22
+ 'Japanese': 'ja',
23
+ 'Korean': 'ko'
24
+ }
25
+
26
+ # 获取对应的语言代码,如果不存在则默认为 'zh'
27
+ language_code = language_mapping.get(language, 'zh')
28
+
29
+ return {
30
+ 'X-Refresh-Token': '',
31
+ 'X-Language': language_code,
32
+ 'accept-language': '',
33
+ 'User-Agent': 'Apifox/1.0.0 (https://apifox.com)',
34
+ 'Authorization': auth,
35
+ 'Accept': '*/*',
36
+ 'Connection': 'keep-alive'
37
+ }
38
+
39
+ def recommand_character(language):
40
+ response = requests.get(character_url, headers=create_headers(language))
41
+ json_data = response.json()
42
+ characters = [{
43
+ "name": item["name"],
44
+ "_id": item["_id"],
45
+ "avatar_url": str(avatar_url + item['_id'] + "_avatar.webp")
46
+ } for item in json_data['data']]
47
+ return characters
48
+
49
+ def id_to_avatar(char_id):
50
+ return str(avatar_url + char_id + "_avatar.webp")
51
+
52
+ #GET模型列表
53
+ def get_models():
54
+ class ModelStorage:
55
+ def __init__(self):
56
+ self.models = []
57
+
58
+ def add_models(self, models):
59
+ for model_info in models:
60
+ # 过滤掉 'gpt-4o' 和 'gpt-4o-mini'
61
+ if model_info['model'] not in ['gpt-4o', 'gpt-4o-mini', 'mythomax-13b']:
62
+ if model_info['model'] in ['qwen-2-7b', 'gemma-2-9b', 'llama-3.1-8b', 'internLM-2.5-7b']:
63
+ weight = 12 # Assign a low weight to reduce their frequency
64
+ else:
65
+ weight = int(math.ceil(24 / model_info['price'] + 0.5))
66
+ self.models.extend([model_info['model']] * weight)
67
+
68
+ model_storage = ModelStorage()
69
+
70
+ # 从指定的 URL 获取 JSON 数据
71
+ response = requests.get(model_url)
72
+
73
+ if response.status_code == 200:
74
+ data = response.json()
75
+ # 添加模型到 self.models
76
+ model_storage.add_models(data['data'])
77
+ return model_storage.models
78
+
79
+ #解析推荐json
80
+ def extract_recommand(data):
81
+ return [
82
+ {
83
+ "character_id": item["character_id"],
84
+ "avatar_url" : str(avatar_url+item["character_id"]+"_avatar.webp"),
85
+ "_id": item["_id"],
86
+ "image_url" : str(image_url+item["_id"]+"_large.webp"),
87
+ "description": item["description"],
88
+ "name": item["title"],
89
+ "opening": item["opening"]
90
+ }
91
+ for item in data["data"]["moments"]
92
+ ]
93
+
94
+ #请求推荐API
95
+ async def recommand(char_id, language):
96
+ recommand_url = str(recommand_base_url + char_id)
97
+ response = await client.get(recommand_url, headers=create_headers(language))
98
+ json_data = response.json()
99
+ return extract_recommand(json_data)
100
+
101
+ async def fetch_stream(query, model, moment_id, session_id, bio, request_name, queue, language):
102
+ payload = {"query": query, "model": model, "bio": bio, "moment_id": moment_id}
103
+ if session_id:
104
+ payload["session_id"] = session_id
105
+ async with client.stream(
106
+ "POST", chat_url, json=payload, headers=create_headers(language)
107
+ ) as response:
108
+ # 获取并返回 header
109
+ if response.status_code != 200:
110
+ await queue.put((request_name, "content", "Error Occur!"))
111
+ await queue.put((request_name, "end", None))
112
+ return
113
+ response_headers = dict(response.headers)
114
+ session_id = response_headers.get("x-session-id")
115
+ await queue.put((request_name, "header", response_headers))
116
+
117
+ # 流式处理响应内容
118
+ async for chunk in response.aiter_bytes():
119
+ await queue.put((request_name, "content", chunk.decode()))
120
+
121
+ # 标记流结束
122
+ await queue.put((request_name, "end", None))
123
+
124
+ return session_id
125
+
126
+
127
+ async def combine_streams(
128
+ query_a,
129
+ query_b,
130
+ model_a,
131
+ model_b,
132
+ moment_id_a,
133
+ moment_id_b,
134
+ session_id_a,
135
+ session_id_b,
136
+ bio_a,
137
+ bio_b,
138
+ language
139
+ ):
140
+ queue = asyncio.Queue()
141
+ task_a = asyncio.create_task(
142
+ fetch_stream(
143
+ query_a, model_a, moment_id_a, session_id_a, bio_a, "requestA", queue, language
144
+ )
145
+ )
146
+ task_b = asyncio.create_task(
147
+ fetch_stream(
148
+ query_b, model_b, moment_id_b, session_id_b, bio_b, "requestB", queue, language
149
+ )
150
+ )
151
+
152
+ headers = {}
153
+ content = {"requestA": "", "requestB": ""}
154
+ active_streams = 2
155
+
156
+ while active_streams > 0:
157
+ request_name, data_type, data = await queue.get()
158
+
159
+ if data_type == "header":
160
+ headers[f"{request_name}_header"] = data
161
+ if len(headers) == 2:
162
+ yield headers
163
+ elif data_type == "content":
164
+ content[request_name] = data.strip()
165
+ if content["requestA"] or content["requestB"]:
166
+ yield content
167
+ content = {"requestA": "", "requestB": ""}
168
+ elif data_type == "end":
169
+ active_streams -= 1
170
+
171
+ session_id_a = await task_a
172
  session_id_b = await task_b
utils.py CHANGED
@@ -1,101 +1,101 @@
1
- import time
2
- from pymongo import MongoClient
3
- import pandas as pd
4
- import math
5
- import os
6
-
7
- # MongoDB连接配置
8
- client = MongoClient(os.getenv("client_link"))
9
- db = client.get_database('roleplay')
10
- collection = db.get_collection('model_stats')
11
-
12
- def update_model_stats(model1_name, model2_name, winner, turn, anony, language):
13
- # 获取当前时间戳
14
- tstamp = time.time()
15
-
16
- # 插入数据到MongoDB
17
- collection.insert_one({
18
- "Model 1": model1_name,
19
- "Model 2": model2_name,
20
- "Winner": winner,
21
- "Turn": turn,
22
- "Anony": anony,
23
- "Language": language,
24
- "tstamp": tstamp
25
- })
26
-
27
- def calculate_elo(winner_elo, loser_elo, k=30, outcome=1):
28
- """
29
- winner_elo: Elo score of the winner before the game
30
- loser_elo: Elo score of the loser before the game
31
- k: K-factor in Elo calculation
32
- outcome: 1 if winner won, 0.5 if tie, 0 if loser won (inverted)
33
- """
34
- expected_win = 1 / (1 + math.pow(10, (loser_elo - winner_elo) / 400))
35
- new_winner_elo = winner_elo + k * (outcome - expected_win)
36
- return new_winner_elo
37
-
38
- def load_dataframe():
39
- # 从MongoDB读取数据
40
- cursor = collection.find({})
41
-
42
- # 将游标中的数据转换为DataFrame
43
- data = pd.DataFrame(list(cursor))
44
-
45
- # 创建模型名称的唯一列表
46
- models = pd.unique(data[['Model 1', 'Model 2']].values.ravel('K'))
47
-
48
- # 初始化结果字典
49
- results = {'模型名称': [], '参赛次数': [], '胜利次数': [], 'ELO': []}
50
- elo_dict = {model: 1000 for model in models} # 初始化ELO分数为1000
51
-
52
- for _, row in data.iterrows():
53
- model1 = row['Model 1']
54
- model2 = row['Model 2']
55
- winner = row['Winner']
56
-
57
- if winner == 'Model 1':
58
- elo_dict[model1] = calculate_elo(elo_dict[model1], elo_dict[model2], outcome=1)
59
- elo_dict[model2] = calculate_elo(elo_dict[model2], elo_dict[model1], outcome=0)
60
- elif winner == 'Model 2':
61
- elo_dict[model2] = calculate_elo(elo_dict[model2], elo_dict[model1], outcome=1)
62
- elo_dict[model1] = calculate_elo(elo_dict[model1], elo_dict[model2], outcome=0)
63
- elif winner == 'tie':
64
- elo_dict[model1] = calculate_elo(elo_dict[model1], elo_dict[model2], outcome=0.8)
65
- elo_dict[model2] = calculate_elo(elo_dict[model2], elo_dict[model1], outcome=0.8)
66
- elif winner == 'bothbad':
67
- elo_dict[model1] = calculate_elo(elo_dict[model1], elo_dict[model2], outcome=0.1)
68
- elo_dict[model2] = calculate_elo(elo_dict[model2], elo_dict[model1], outcome=0.1)
69
-
70
- for model in models:
71
- count = data['Model 1'].value_counts().get(model, 0) + data['Model 2'].value_counts().get(model, 0)
72
- win_count = 0
73
- win_count += len(data[(data['Winner'] == 'Model 1') & (data['Model 1'] == model)])
74
- win_count += len(data[(data['Winner'] == 'Model 2') & (data['Model 2'] == model)])
75
- win_count += len(data[(data['Winner'] == 'tie') & ((data['Model 1'] == model) | (data['Model 2'] == model))])
76
- results['模型名称'].append(model)
77
- results['参赛次数'].append(count)
78
- results['胜利次数'].append(win_count)
79
- results['ELO'].append(round(elo_dict[model]))
80
-
81
- # 将结果字典转换为DataFrame
82
- result_df = pd.DataFrame(results)
83
-
84
- # 计算胜率并排序
85
- result_df["模型胜率"] = (result_df['胜利次数'] / result_df['参赛次数']) * 100
86
- result_df = result_df.sort_values(by="模型胜率", ascending=False)
87
- result_df["模型胜率"] = result_df["模型胜率"].map("{:.2f}%".format)
88
-
89
- return result_df
90
-
91
- def change_name(old,new):
92
- collection.update_many(
93
- { "Model 1": old },
94
- { "$set": { "Model 1": new } }
95
- )
96
-
97
- # 更新 Model 2 字段
98
- collection.update_many(
99
- { "Model 2": old },
100
- { "$set": { "Model 2": new } }
101
  )
 
1
+ import time
2
+ from pymongo import MongoClient
3
+ import pandas as pd
4
+ import math
5
+ import os
6
+
7
+ # MongoDB连接配置
8
+ client = MongoClient(os.getenv("client_link"))
9
+ db = client.get_database('roleplay')
10
+ collection = db.get_collection('model_stats')
11
+
12
+ def update_model_stats(model1_name, model2_name, winner, turn, anony, language):
13
+ # 获取当前时间戳
14
+ tstamp = time.time()
15
+
16
+ # 插入数据到MongoDB
17
+ collection.insert_one({
18
+ "Model 1": model1_name,
19
+ "Model 2": model2_name,
20
+ "Winner": winner,
21
+ "Turn": turn,
22
+ "Anony": anony,
23
+ "Language": language,
24
+ "tstamp": tstamp
25
+ })
26
+
27
+ def calculate_elo(winner_elo, loser_elo, k=30, outcome=1):
28
+ """
29
+ winner_elo: Elo score of the winner before the game
30
+ loser_elo: Elo score of the loser before the game
31
+ k: K-factor in Elo calculation
32
+ outcome: 1 if winner won, 0.5 if tie, 0 if loser won (inverted)
33
+ """
34
+ expected_win = 1 / (1 + math.pow(10, (loser_elo - winner_elo) / 400))
35
+ new_winner_elo = winner_elo + k * (outcome - expected_win)
36
+ return new_winner_elo
37
+
38
+ def load_dataframe():
39
+ # 从MongoDB读取数据
40
+ cursor = collection.find({})
41
+
42
+ # 将游标中的数据转换为DataFrame
43
+ data = pd.DataFrame(list(cursor))
44
+
45
+ # 创建模型名称的唯一列表
46
+ models = pd.unique(data[['Model 1', 'Model 2']].values.ravel('K'))
47
+
48
+ # 初始化结果字典
49
+ results = {'模型名称': [], '参赛次数': [], '胜利次数': [], 'ELO': []}
50
+ elo_dict = {model: 1000 for model in models} # 初始化ELO分数为1000
51
+
52
+ for _, row in data.iterrows():
53
+ model1 = row['Model 1']
54
+ model2 = row['Model 2']
55
+ winner = row['Winner']
56
+
57
+ if winner == 'Model 1':
58
+ elo_dict[model1] = calculate_elo(elo_dict[model1], elo_dict[model2], outcome=1)
59
+ elo_dict[model2] = calculate_elo(elo_dict[model2], elo_dict[model1], outcome=0)
60
+ elif winner == 'Model 2':
61
+ elo_dict[model2] = calculate_elo(elo_dict[model2], elo_dict[model1], outcome=1)
62
+ elo_dict[model1] = calculate_elo(elo_dict[model1], elo_dict[model2], outcome=0)
63
+ elif winner == 'tie':
64
+ elo_dict[model1] = calculate_elo(elo_dict[model1], elo_dict[model2], outcome=0.8)
65
+ elo_dict[model2] = calculate_elo(elo_dict[model2], elo_dict[model1], outcome=0.8)
66
+ elif winner == 'bothbad':
67
+ elo_dict[model1] = calculate_elo(elo_dict[model1], elo_dict[model2], outcome=0.1)
68
+ elo_dict[model2] = calculate_elo(elo_dict[model2], elo_dict[model1], outcome=0.1)
69
+
70
+ for model in models:
71
+ count = data['Model 1'].value_counts().get(model, 0) + data['Model 2'].value_counts().get(model, 0)
72
+ win_count = 0
73
+ win_count += len(data[(data['Winner'] == 'Model 1') & (data['Model 1'] == model)])
74
+ win_count += len(data[(data['Winner'] == 'Model 2') & (data['Model 2'] == model)])
75
+ win_count += len(data[(data['Winner'] == 'tie') & ((data['Model 1'] == model) | (data['Model 2'] == model))])
76
+ results['模型名称'].append(model)
77
+ results['参赛次数'].append(count)
78
+ results['胜利次数'].append(win_count)
79
+ results['ELO'].append(round(elo_dict[model]))
80
+
81
+ # 将结果字典转换为DataFrame
82
+ result_df = pd.DataFrame(results)
83
+
84
+ # 计算胜率并排序
85
+ result_df["模型胜率"] = (result_df['胜利次数'] / result_df['参赛次数']) * 100
86
+ result_df = result_df.sort_values(by="模型胜率", ascending=False)
87
+ result_df["模型胜率"] = result_df["模型胜率"].map("{:.2f}%".format)
88
+
89
+ return result_df
90
+
91
+ def change_name(old,new):
92
+ collection.update_many(
93
+ { "Model 1": old },
94
+ { "$set": { "Model 1": new } }
95
+ )
96
+
97
+ # 更新 Model 2 字段
98
+ collection.update_many(
99
+ { "Model 2": old },
100
+ { "$set": { "Model 2": new } }
101
  )