Spaces: Running on Zero
Adding "test type" for redteaming notes.
#3
by
meg
HF staff
- opened
- app_dialogue.py +10 -0
app_dialogue.py
CHANGED
@@ -204,6 +204,7 @@ def model_inference(
     max_new_tokens,
     repetition_penalty,
     top_p,
+    test_type
 ):
     if user_prompt["text"].strip() == "" and not user_prompt["files"]:
         gr.Error("Please input a query and optionally image(s).")
@@ -288,6 +289,7 @@ FEATURES = datasets.Features(
         "max_new_tokens": datasets.Value("int32"),
         "repetition_penalty": datasets.Value("float32"),
         "top_p": datasets.Value("int32"),
+        "test_type": datasets.Value("string")
     }
 )
 
@@ -299,6 +301,7 @@ def flag_dope(
     max_new_tokens,
     repetition_penalty,
     top_p,
+    test_type=""
 ):
     images = []
     conversation = []
@@ -318,6 +321,7 @@ def flag_dope(
         "max_new_tokens": [max_new_tokens],
         "repetition_penalty": [repetition_penalty],
         "top_p": [top_p],
+        "test_type": [test_type]
     }
     try:
         ds = datasets.load_dataset("HuggingFaceM4/dope-dataset-red-teaming", split="train", token=HF_WRITE_TOKEN)
@@ -336,6 +340,7 @@ def flag_problematic(
     max_new_tokens,
     repetition_penalty,
     top_p,
+    test_type=""
 ):
     images = []
     conversation = []
@@ -355,6 +360,7 @@ def flag_problematic(
         "max_new_tokens": [max_new_tokens],
         "repetition_penalty": [repetition_penalty],
         "top_p": [top_p],
+        "test_type": [test_type]
     }
     try:
         ds = datasets.load_dataset("HuggingFaceM4/problematic-dataset-red-teaming", split="train", token=HF_WRITE_TOKEN)
@@ -364,6 +370,8 @@ def flag_problematic(
     hf_dataset = datasets.Dataset.from_dict(data, features=FEATURES)
     hf_dataset.push_to_hub( "HuggingFaceM4/problematic-dataset-red-teaming", split="train", token=HF_WRITE_TOKEN, private=True)
 
+# Change this to say whatever in order to have notes on the specific redteaming test added to the datasets.
+test_type = gr.Textbox(label="Optional: Type of redteaming test.")
 
 # Hyper-parameters for generation
 max_new_tokens = gr.Slider(
@@ -517,6 +525,7 @@ with gr.Blocks(
             max_new_tokens,
             repetition_penalty,
             top_p,
+            test_type,
         ],
         outputs=None,
         preprocess=False,
@@ -531,6 +540,7 @@ with gr.Blocks(
             max_new_tokens,
             repetition_penalty,
             top_p,
+            test_type,
         ],
         outputs=None,
         preprocess=False,
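For context, here is a minimal, self-contained sketch of the flagging pattern this change extends: an optional gr.Textbox note is passed as an extra input to the flag callback, stored as a "test_type" column, and pushed with the rest of the flagged sample to the red-teaming dataset on the Hub. The dataset repo name, HF_WRITE_TOKEN, the new FEATURES field, and the test_type textbox are taken from the diff; the prompt/top_p inputs, the button wiring, and the concatenate step are simplified assumptions, not the Space's actual code.

import os

import datasets
import gradio as gr

HF_WRITE_TOKEN = os.environ.get("HF_WRITE_TOKEN")  # write token, as in the Space

# Reduced feature schema; the Space's FEATURES has more columns.
FEATURES = datasets.Features(
    {
        "prompt": datasets.Value("string"),
        "top_p": datasets.Value("int32"),
        "test_type": datasets.Value("string"),  # new optional red-teaming note
    }
)


def flag_dope(prompt, top_p, test_type=""):
    # One flagged sample, shaped to match FEATURES; test_type defaults to "".
    data = {
        "prompt": [prompt],
        "top_p": [top_p],
        "test_type": [test_type],
    }
    new_rows = datasets.Dataset.from_dict(data, features=FEATURES)
    try:
        ds = datasets.load_dataset(
            "HuggingFaceM4/dope-dataset-red-teaming", split="train", token=HF_WRITE_TOKEN
        )
        # Appending via concatenate_datasets is an assumption; the diff only
        # shows load_dataset, Dataset.from_dict, and push_to_hub.
        hf_dataset = datasets.concatenate_datasets([ds, new_rows])
    except Exception:
        hf_dataset = new_rows  # first flag: the split does not exist yet
    hf_dataset.push_to_hub(
        "HuggingFaceM4/dope-dataset-red-teaming",
        split="train",
        token=HF_WRITE_TOKEN,
        private=True,
    )


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    top_p = gr.Slider(1, 100, value=95, step=1, label="top_p")
    # Change this to say whatever in order to have notes on the specific
    # redteaming test added to the datasets.
    test_type = gr.Textbox(label="Optional: Type of redteaming test.")
    flag_btn = gr.Button("Flag as dope")
    # test_type is simply appended to the click inputs, as in the diff.
    flag_btn.click(flag_dope, inputs=[prompt, top_p, test_type], outputs=None)

demo.launch()

Because test_type defaults to "", callers that do not pass the note keep working, and empty notes still satisfy the string feature in the dataset schema.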