@@ -112,7 +112,6 @@ def __init__(
         self._matching_pipeline = None
 
         if len(self.pattern) > 0 and "lookup" in self.pattern[0]:
-
             if self.ds is None:
                 raise RuntimeError(
                     "Created pattern with lookup in TokenPatternAnnotator, but "
@@ -218,7 +217,6 @@ def annotate(self, doc: dd.Document) -> list[dd.Annotation]:
         )
 
         for token in tokens:
-
             annotation = self._match_sequence(
                 doc.text, self.pattern, token, direction="right", skip=self.skip
             )
@@ -252,12 +250,10 @@ def __init__(
     def _apply_context_pattern(
         self, text: str, annotations: dd.AnnotationSet, context_pattern: dict
     ) -> dd.AnnotationSet:
-
         direction = context_pattern["direction"]
         skip = set(context_pattern.get("skip", []))
 
         for annotation in annotations.copy():
-
             tag = list(_DIRECTION_MAP[direction]["order"](annotation.tag.split("+")))[
                 -1
             ]
@@ -319,7 +315,6 @@ def _annotate(self, text: str, annotations: dd.AnnotationSet) -> dd.AnnotationSe
         )
 
         if self.iterative:
-
             changed = dd.AnnotationSet(annotations.difference(original_annotations))
             annotations = dd.AnnotationSet(
                 annotations.intersection(original_annotations)
@@ -356,7 +351,6 @@ class implements logic for detecting first name(s), initials and surnames.
     """
 
     def __init__(self, tokenizer: Tokenizer, *args, **kwargs) -> None:
-
         self.tokenizer = tokenizer
         self.skip = [".", "-", " "]
 
@@ -366,9 +360,7 @@ def __init__(self, tokenizer: Tokenizer, *args, **kwargs) -> None:
     def _match_first_names(
         doc: dd.Document, token: dd.Token
     ) -> Optional[tuple[dd.Token, dd.Token]]:
-
         for first_name in doc.metadata["patient"].first_names:
-
             if str_match(token.text, first_name) or (
                 len(token.text) > 3
                 and str_match(token.text, first_name, max_edit_distance=1)
@@ -381,7 +373,6 @@ def _match_first_names(
381373 def _match_initial_from_name (
382374 doc : dd .Document , token : dd .Token
383375 ) -> Optional [tuple [dd .Token , dd .Token ]]:
384-
385376 for _ , first_name in enumerate (doc .metadata ["patient" ].first_names ):
386377 if str_match (token .text , first_name [0 ]):
387378 next_token = token .next ()
@@ -397,7 +388,6 @@ def _match_initial_from_name(
     def _match_initials(
         doc: dd.Document, token: dd.Token
     ) -> Optional[tuple[dd.Token, dd.Token]]:
-
         if str_match(token.text, doc.metadata["patient"].initials):
             return token, token
 
@@ -417,7 +407,6 @@ def next_with_skip(self, token: dd.Token) -> Optional[dd.Token]:
     def _match_surname(
         self, doc: dd.Document, token: dd.Token
     ) -> Optional[tuple[dd.Token, dd.Token]]:
-
         if doc.metadata["surname_pattern"] is None:
             doc.metadata["surname_pattern"] = self.tokenizer.tokenize(
                 doc.metadata["patient"].surname
@@ -473,9 +462,7 @@ def annotate(self, doc: Document) -> list[Annotation]:
         annotations = []
 
         for token in doc.get_tokens():
-
             for matcher, tag in matchers:
-
                 match = matcher(doc, token)
 
                 if match is None:
@@ -518,7 +505,6 @@ def __init__(
         lowercase: bool = True,
         **kwargs,
     ) -> None:
-
         self.pre_pseudo = set(pre_pseudo or [])
         self.post_pseudo = set(post_pseudo or [])
         self.lowercase = lowercase
@@ -553,7 +539,6 @@ def _get_previous_word(self, char_index: int, text: str) -> str:
         result = ""
 
         for ch in text[::-1]:
-
             if not self._is_word_char(ch):
                 break
 
@@ -576,7 +561,6 @@ def _get_next_word(self, char_index: int, text: str) -> str:
         result = ""
 
         for ch in text:
-
             if not self._is_word_char(ch):
                 break
 
@@ -648,7 +632,6 @@ def annotate(self, doc: Document) -> list[Annotation]:
         annotations = []
 
         for match in self.bsn_regexp.finditer(doc.text):
-
             text = match.group(self.capture_group)
             digits = re.sub(r"\D", "", text)
 
0 commit comments