Shitao committed on
Commit fabc9f6
1 Parent(s): bac849b

Upload folder using huggingface_hub

Files changed (1):
  1. README.md +0 -195
README.md CHANGED
@@ -71,57 +71,6 @@ cd FlagEmbedding
  pip install -e .
  ```

- #### For normal reranker (bge-reranker-base / bge-reranker-large / bge-reranker-v2-m3)
-
- Get relevance scores (higher scores indicate more relevance):
-
- ```python
- from FlagEmbedding import FlagReranker
- reranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
-
- score = reranker.compute_score(['query', 'passage'])
- print(score) # -5.65234375
-
- # You can map the scores into 0-1 by setting "normalize=True", which applies a sigmoid function to the score
- score = reranker.compute_score(['query', 'passage'], normalize=True)
- print(score) # 0.003497010252573502
-
- scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])
- print(scores) # [-8.1875, 5.26171875]
-
- # You can map the scores into 0-1 by setting "normalize=True", which applies a sigmoid function to the score
- scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']], normalize=True)
- print(scores) # [0.00027803096387751553, 0.9948403768236574]
- ```
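In practice these scores are used to order candidate passages. A minimal sketch of that step, reusing the `reranker` above (the query, passage list, and print format here are illustrative, not from the original README):

```python
query = 'what is panda?'
passages = [
    'hi',
    'The giant panda (Ailuropoda melanoleuca) is a bear species endemic to China.',
    'Pandas feed almost entirely on bamboo.',
]

# compute_score accepts a list of [query, passage] pairs;
# normalize=True maps each raw score through a sigmoid into 0-1.
scores = reranker.compute_score([[query, p] for p in passages], normalize=True)

# Sort the passages by descending relevance.
for passage, score in sorted(zip(passages, scores), key=lambda x: x[1], reverse=True):
    print(f'{score:.4f}  {passage}')
```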
-
- #### For LLM-based reranker
-
- ```python
- from FlagEmbedding import FlagLLMReranker
- reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
- # reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_bf16=True) # You can also set use_bf16=True to speed up computation with a slight performance degradation
-
- score = reranker.compute_score(['query', 'passage'])
- print(score)
-
- scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])
- print(scores)
- ```
-
- #### For LLM-based layerwise reranker
-
- ```python
- from FlagEmbedding import LayerWiseFlagLLMReranker
- reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation
- # reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_bf16=True) # You can also set use_bf16=True to speed up computation with a slight performance degradation
-
- score = reranker.compute_score(['query', 'passage'], cutoff_layers=[28]) # Adjust 'cutoff_layers' to pick which layers are used to compute the score.
- print(score)
-
- scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']], cutoff_layers=[28])
- print(scores)
- ```
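The layerwise scores printed above are raw logits. If values in 0-1 are needed (e.g. for thresholding), a sigmoid can be applied manually; a minimal sketch, assuming `scores` is the list produced above (the helper below is illustrative, not part of FlagEmbedding):

```python
import math

def sigmoid(x: float) -> float:
    # Map a raw relevance logit into (0, 1).
    return 1 / (1 + math.exp(-x))

print([sigmoid(s) for s in scores])  # one value in (0, 1) per pair
```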
-
  #### For LLM-based lightweight reranker

  ```python
@@ -137,150 +86,6 @@ print(scores)

  ### Using Huggingface transformers

- #### For normal reranker (bge-reranker-base / bge-reranker-large / bge-reranker-v2-m3)
-
- Get relevance scores (higher scores indicate more relevance):
-
- ```python
- import torch
- from transformers import AutoModelForSequenceClassification, AutoTokenizer
-
- tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-m3')
- model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-v2-m3')
- model.eval()
-
- pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
- with torch.no_grad():
-     inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)
-     scores = model(**inputs, return_dict=True).logits.view(-1, ).float()
-     print(scores)
- ```
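These logits can likewise be mapped into 0-1 with a sigmoid, mirroring what `normalize=True` does in `FlagReranker` (a one-line sketch reusing `scores` from above):

```python
# Equivalent of normalize=True: squash raw logits into (0, 1).
print(torch.sigmoid(scores))
```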
158
-
159
- #### For LLM-based reranker
160
-
161
- ```python
162
- import torch
163
- from transformers import AutoModelForCausalLM, AutoTokenizer
164
-
165
- def get_inputs(pairs, tokenizer, prompt=None, max_length=1024):
166
- if prompt is None:
167
- prompt = "Given a query A and a passage B, determine whether the passage contains an answer to the query by providing a prediction of either 'Yes' or 'No'."
168
- sep = "\n"
169
- prompt_inputs = tokenizer(prompt,
170
- return_tensors=None,
171
- add_special_tokens=False)['input_ids']
172
- sep_inputs = tokenizer(sep,
173
- return_tensors=None,
174
- add_special_tokens=False)['input_ids']
175
- inputs = []
176
- for query, passage in pairs:
177
- query_inputs = tokenizer(f'A: {query}',
178
- return_tensors=None,
179
- add_special_tokens=False,
180
- max_length=max_length * 3 // 4,
181
- truncation=True)
182
- passage_inputs = tokenizer(f'B: {passage}',
183
- return_tensors=None,
184
- add_special_tokens=False,
185
- max_length=max_length,
186
- truncation=True)
187
- item = tokenizer.prepare_for_model(
188
- [tokenizer.bos_token_id] + query_inputs['input_ids'],
189
- sep_inputs + passage_inputs['input_ids'],
190
- truncation='only_second',
191
- max_length=max_length,
192
- padding=False,
193
- return_attention_mask=False,
194
- return_token_type_ids=False,
195
- add_special_tokens=False
196
- )
197
- item['input_ids'] = item['input_ids'] + sep_inputs + prompt_inputs
198
- item['attention_mask'] = [1] * len(item['input_ids'])
199
- inputs.append(item)
200
- return tokenizer.pad(
201
- inputs,
202
- padding=True,
203
- max_length=max_length + len(sep_inputs) + len(prompt_inputs),
204
- pad_to_multiple_of=8,
205
- return_tensors='pt',
206
- )
207
-
208
- tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-gemma')
209
- model = AutoModelForCausalLM.from_pretrained('BAAI/bge-reranker-v2-gemma')
210
- yes_loc = tokenizer('Yes', add_special_tokens=False)['input_ids'][0]
211
- model.eval()
212
-
213
- pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
214
- with torch.no_grad():
215
- inputs = get_inputs(pairs, tokenizer)
216
- scores = model(**inputs, return_dict=True).logits[:, -1, yes_loc].view(-1, ).float()
217
- print(scores)
218
- ```
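Higher values of the 'Yes'-token logit indicate higher relevance, so ranking the candidates is a tensor argsort away (a brief sketch reusing `scores` and `pairs` from above):

```python
# Order the pairs from most to least relevant.
for idx in torch.argsort(scores, descending=True).tolist():
    print(f'{scores[idx].item():.4f}  {pairs[idx][1]}')
```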
-
- #### For LLM-based layerwise reranker
-
- ```python
- import torch
- from transformers import AutoModelForCausalLM, AutoTokenizer
-
- def get_inputs(pairs, tokenizer, prompt=None, max_length=1024):
-     if prompt is None:
-         prompt = "Given a query A and a passage B, determine whether the passage contains an answer to the query by providing a prediction of either 'Yes' or 'No'."
-     sep = "\n"
-     prompt_inputs = tokenizer(prompt,
-                               return_tensors=None,
-                               add_special_tokens=False)['input_ids']
-     sep_inputs = tokenizer(sep,
-                            return_tensors=None,
-                            add_special_tokens=False)['input_ids']
-     inputs = []
-     for query, passage in pairs:
-         query_inputs = tokenizer(f'A: {query}',
-                                  return_tensors=None,
-                                  add_special_tokens=False,
-                                  max_length=max_length * 3 // 4,
-                                  truncation=True)
-         passage_inputs = tokenizer(f'B: {passage}',
-                                    return_tensors=None,
-                                    add_special_tokens=False,
-                                    max_length=max_length,
-                                    truncation=True)
-         item = tokenizer.prepare_for_model(
-             [tokenizer.bos_token_id] + query_inputs['input_ids'],
-             sep_inputs + passage_inputs['input_ids'],
-             truncation='only_second',
-             max_length=max_length,
-             padding=False,
-             return_attention_mask=False,
-             return_token_type_ids=False,
-             add_special_tokens=False
-         )
-         item['input_ids'] = item['input_ids'] + sep_inputs + prompt_inputs
-         item['attention_mask'] = [1] * len(item['input_ids'])
-         inputs.append(item)
-     return tokenizer.pad(
-         inputs,
-         padding=True,
-         max_length=max_length + len(sep_inputs) + len(prompt_inputs),
-         pad_to_multiple_of=8,
-         return_tensors='pt',
-     )
-
- tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True)
- model = AutoModelForCausalLM.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True, torch_dtype=torch.bfloat16)
- model = model.to('cuda')
- model.eval()
-
- pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
- with torch.no_grad():
-     inputs = get_inputs(pairs, tokenizer).to(model.device)
-     all_scores = model(**inputs, return_dict=True, cutoff_layers=[28])
-     all_scores = [scores[:, -1].view(-1, ).float() for scores in all_scores[0]]
-     print(all_scores)
- ```
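After the list comprehension, `all_scores` holds one score tensor per requested cutoff layer; with `cutoff_layers=[28]` there is exactly one entry. An illustrative access (shallower layers trade a little accuracy for cheaper scoring, which is the point of the layerwise design):

```python
# One tensor of per-pair scores per requested cutoff layer.
for layer, layer_scores in zip([28], all_scores):
    print(f'layer {layer}: {layer_scores.tolist()}')
```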
-
- #### For LLM-based lightweight reranker
-
  ```python
  import torch
  from transformers import AutoModelForCausalLM, AutoTokenizer