diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py
index 2fe92d747e81..2eee80a907b8 100644
--- a/src/transformers/pipelines/question_answering.py
+++ b/src/transformers/pipelines/question_answering.py
@@ -556,8 +556,18 @@ def postprocess(
                 output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None
             )
 
+            pre_topk = (
+                top_k * 2 + 10 if align_to_words else top_k
+            )  # Some candidates may be deleted if we align to words
             starts, ends, scores, min_null_score = select_starts_ends(
-                start_, end_, p_mask, attention_mask, min_null_score, top_k, handle_impossible_answer, max_answer_len
+                start_,
+                end_,
+                p_mask,
+                attention_mask,
+                min_null_score,
+                pre_topk,
+                handle_impossible_answer,
+                max_answer_len,
             )
 
             if not self.tokenizer.is_fast:
diff --git a/tests/pipelines/test_pipelines_question_answering.py b/tests/pipelines/test_pipelines_question_answering.py
index 2de1de20d2ed..d46dd489c515 100644
--- a/tests/pipelines/test_pipelines_question_answering.py
+++ b/tests/pipelines/test_pipelines_question_answering.py
@@ -168,10 +168,11 @@ def test_small_model_pt(self):
         )
 
         outputs = question_answerer(
-            question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
+            question="Where was HuggingFace founded ?",
+            context="HuggingFace was founded in Paris.",
         )
 
-        self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"})
+        self.assertEqual(nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"})
 
     @require_torch
     def test_small_model_pt_fp16(self):
@@ -182,10 +183,11 @@ def test_small_model_pt_fp16(self):
         )
 
         outputs = question_answerer(
-            question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
+            question="Where was HuggingFace founded ?",
+            context="HuggingFace was founded in Paris.",
         )
 
-        self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"})
+        self.assertEqual(nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"})
 
     @require_torch
     def test_small_model_pt_bf16(self):
@@ -196,10 +198,11 @@ def test_small_model_pt_bf16(self):
         )
 
         outputs = question_answerer(
-            question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
+            question="Where was HuggingFace founded ?",
+            context="HuggingFace was founded in Paris.",
         )
 
-        self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"})
+        self.assertEqual(nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"})
 
     @require_torch
     def test_small_model_pt_iterator(self):
@@ -211,7 +214,9 @@ def data():
             yield {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."}
 
         for outputs in pipe(data()):
-            self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"})
+            self.assertEqual(
+                nested_simplify(outputs), {"score": 0.063, "start": 0, "end": 11, "answer": "HuggingFace"}
+            )
 
     @require_torch
     def test_small_model_pt_softmax_trick(self):
@@ -242,10 +247,11 @@ def ensure_large_logits_postprocess(
         question_answerer.postprocess = ensure_large_logits_postprocess
 
         outputs = question_answerer(
-            question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris."
+            question="Where was HuggingFace founded ?",
+            context="HuggingFace was founded in Paris.",
         )
 
-        self.assertEqual(nested_simplify(outputs), {"score": 0.028, "start": 0, "end": 11, "answer": "HuggingFace"})
+        self.assertEqual(nested_simplify(outputs), {"score": 0.111, "start": 0, "end": 11, "answer": "HuggingFace"})
 
     @slow
     @require_torch
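For context on why the patch over-fetches candidates: when `align_to_words` is enabled, several token-level spans can collapse onto the same word span after alignment, so fewer than `top_k` distinct answers may survive the post-processing step. The sketch below is a minimal illustration of that over-fetch-then-deduplicate pattern, not the actual `select_starts_ends` implementation; `word_of_token` is a hypothetical helper standing in for the tokenizer's token-to-word mapping.

```python
def top_k_word_aligned(candidates, word_of_token, top_k):
    """candidates: (start_token, end_token, score) tuples, sorted by score descending.
    word_of_token: hypothetical helper mapping a token index to its word index."""
    pre_topk = top_k * 2 + 10  # same safety margin the patch uses
    seen, answers = set(), []
    for start, end, score in candidates[:pre_topk]:
        span = (word_of_token(start), word_of_token(end))  # align token span to words
        if span in seen:
            continue  # collapsed onto an earlier candidate after alignment ("deleted")
        seen.add(span)
        answers.append({"start_word": span[0], "end_word": span[1], "score": score})
        if len(answers) == top_k:
            break
    return answers


# Toy usage: two tokens per word, so the first two candidates collapse into one answer.
cands = [(3, 4, 0.9), (3, 5, 0.8), (7, 7, 0.6)]
print(top_k_word_aligned(cands, lambda t: t // 2, top_k=2))
# [{'start_word': 1, 'end_word': 2, 'score': 0.9}, {'start_word': 3, 'end_word': 3, 'score': 0.6}]
```

Without the `pre_topk` margin, a call asking for `top_k=2` on the toy input above would return only one answer after deduplication, which is the failure mode the patch guards against.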