Commit 6ec5ac4

Merge pull request #153 from vmenger/update-dependencies
Update dependencies
2 parents: 1d7593b + 92d01f6

File tree

13 files changed: +583 additions, -474 deletions

CHANGELOG.md

Lines changed: 6 additions & 0 deletions
@@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## 3.0.5 (2025-06-18)
+
+### Changed
+
+- Updated dependencies
+
 ## 3.0.4 (2025-05-06)
 
 ### Changed

deduce/__init__.py

Lines changed: 5 additions & 0 deletions
@@ -1 +1,6 @@
 from deduce.deduce import Deduce, __version__
+
+__all__ = [
+    "Deduce",
+    "__version__",
+]
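
The new __all__ makes the package's star-import surface explicit. A minimal sketch of the effect, using only standard Python semantics (not part of the diff):

    # With __all__ = ["Deduce", "__version__"] in deduce/__init__.py,
    # "from deduce import *" binds exactly the listed names, including
    # __version__, which a star-import would otherwise skip because of
    # its leading underscore.
    from deduce import *

    print(Deduce)       # exported: listed in __all__
    print(__version__)  # exported despite the underscore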

deduce/annotator.py

Lines changed: 0 additions & 17 deletions
@@ -112,7 +112,6 @@ def __init__(
         self._matching_pipeline = None
 
         if len(self.pattern) > 0 and "lookup" in self.pattern[0]:
-
             if self.ds is None:
                 raise RuntimeError(
                     "Created pattern with lookup in TokenPatternAnnotator, but "
@@ -218,7 +217,6 @@ def annotate(self, doc: dd.Document) -> list[dd.Annotation]:
         )
 
         for token in tokens:
-
             annotation = self._match_sequence(
                 doc.text, self.pattern, token, direction="right", skip=self.skip
             )
@@ -252,12 +250,10 @@ def __init__(
     def _apply_context_pattern(
         self, text: str, annotations: dd.AnnotationSet, context_pattern: dict
     ) -> dd.AnnotationSet:
-
         direction = context_pattern["direction"]
         skip = set(context_pattern.get("skip", []))
 
         for annotation in annotations.copy():
-
             tag = list(_DIRECTION_MAP[direction]["order"](annotation.tag.split("+")))[
                 -1
             ]
@@ -319,7 +315,6 @@ def _annotate(self, text: str, annotations: dd.AnnotationSet) -> dd.AnnotationSe
             )
 
             if self.iterative:
-
                 changed = dd.AnnotationSet(annotations.difference(original_annotations))
                 annotations = dd.AnnotationSet(
                     annotations.intersection(original_annotations)
@@ -356,7 +351,6 @@ class implements logic for detecting first name(s), initials and surnames.
     """
 
     def __init__(self, tokenizer: Tokenizer, *args, **kwargs) -> None:
-
         self.tokenizer = tokenizer
         self.skip = [".", "-", " "]
 
@@ -366,9 +360,7 @@ def __init__(self, tokenizer: Tokenizer, *args, **kwargs) -> None:
     def _match_first_names(
         doc: dd.Document, token: dd.Token
     ) -> Optional[tuple[dd.Token, dd.Token]]:
-
         for first_name in doc.metadata["patient"].first_names:
-
             if str_match(token.text, first_name) or (
                 len(token.text) > 3
                 and str_match(token.text, first_name, max_edit_distance=1)
@@ -381,7 +373,6 @@ def _match_first_names(
     def _match_initial_from_name(
         doc: dd.Document, token: dd.Token
     ) -> Optional[tuple[dd.Token, dd.Token]]:
-
         for _, first_name in enumerate(doc.metadata["patient"].first_names):
             if str_match(token.text, first_name[0]):
                 next_token = token.next()
@@ -397,7 +388,6 @@ def _match_initial_from_name(
     def _match_initials(
         doc: dd.Document, token: dd.Token
     ) -> Optional[tuple[dd.Token, dd.Token]]:
-
         if str_match(token.text, doc.metadata["patient"].initials):
             return token, token
 
@@ -417,7 +407,6 @@ def next_with_skip(self, token: dd.Token) -> Optional[dd.Token]:
     def _match_surname(
         self, doc: dd.Document, token: dd.Token
     ) -> Optional[tuple[dd.Token, dd.Token]]:
-
         if doc.metadata["surname_pattern"] is None:
             doc.metadata["surname_pattern"] = self.tokenizer.tokenize(
                 doc.metadata["patient"].surname
@@ -473,9 +462,7 @@ def annotate(self, doc: Document) -> list[Annotation]:
         annotations = []
 
         for token in doc.get_tokens():
-
             for matcher, tag in matchers:
-
                 match = matcher(doc, token)
 
                 if match is None:
@@ -518,7 +505,6 @@ def __init__(
         lowercase: bool = True,
         **kwargs,
     ) -> None:
-
         self.pre_pseudo = set(pre_pseudo or [])
         self.post_pseudo = set(post_pseudo or [])
         self.lowercase = lowercase
@@ -553,7 +539,6 @@ def _get_previous_word(self, char_index: int, text: str) -> str:
         result = ""
 
         for ch in text[::-1]:
-
             if not self._is_word_char(ch):
                 break
 
@@ -576,7 +561,6 @@ def _get_next_word(self, char_index: int, text: str) -> str:
         result = ""
 
         for ch in text:
-
             if not self._is_word_char(ch):
                 break
 
@@ -648,7 +632,6 @@ def annotate(self, doc: Document) -> list[Annotation]:
         annotations = []
 
         for match in self.bsn_regexp.finditer(doc.text):
-
             text = match.group(self.capture_group)
             digits = re.sub(r"\D", "", text)
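
All seventeen deletions above are blank lines, so these hunks are cosmetic, consistent with a formatter change arriving with the updated dependencies; the matching logic itself is untouched. The rule visible in _match_first_names, for instance, still reads: a token matches a patient first name exactly, or within edit distance 1 when the token is longer than three characters. A self-contained sketch of that rule, approximating deduce's str_match helper with a small Levenshtein function (the helper's exact behavior may differ):

    def edit_distance(a: str, b: str) -> int:
        # Classic dynamic-programming Levenshtein distance.
        prev = list(range(len(b) + 1))
        for i, ca in enumerate(a, 1):
            cur = [i]
            for j, cb in enumerate(b, 1):
                cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
            prev = cur
        return prev[-1]

    def matches_first_name(token: str, first_name: str) -> bool:
        # Mirrors the condition in _match_first_names above: exact match,
        # or a fuzzy match (edit distance <= 1) only for tokens longer
        # than 3 characters, to avoid false positives on short names.
        return token == first_name or (
            len(token) > 3 and edit_distance(token, first_name) <= 1
        )

    print(matches_first_name("Pieter", "Pieter"))   # True: exact match
    print(matches_first_name("Pieterr", "Pieter"))  # True: one edit, long enough
    print(matches_first_name("Jen", "Jan"))         # False: one edit, but too short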

deduce/str/__init__.py

Lines changed: 10 additions & 0 deletions
@@ -7,3 +7,13 @@
     UpperCase,
     UpperCaseFirstChar,
 )
+
+__all__ = [
+    "Acronimify",
+    "FilterBasedOnLookupSet",
+    "RemoveValues",
+    "TakeLastToken",
+    "TitleCase",
+    "UpperCase",
+    "UpperCaseFirstChar",
+]
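
This mirrors the change to deduce/__init__.py: the re-exported string-processing classes are now listed explicitly. One quick way to sanity-check such an export list (a sketch, not part of the package):

    # Every name in __all__ should resolve on the module; a stale or
    # misspelled entry would raise AttributeError here.
    import deduce.str

    for name in deduce.str.__all__:
        getattr(deduce.str, name)
    print("all", len(deduce.str.__all__), "exports resolve")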

deduce/tokenizer.py

Lines changed: 0 additions & 1 deletion
@@ -86,7 +86,6 @@ def _merge(
         i = 0
 
         while i < len(tokens):
-
             if tokens_text[i] not in self._start_words:
                 tokens_merged.append(tokens[i])
                 i += 1

deduce/utils.py

Lines changed: 0 additions & 3 deletions
@@ -66,9 +66,7 @@ def initialize_class(cls: type, args: dict, extras: dict) -> object:
     cls_params = inspect.signature(cls).parameters
 
     for arg_name, arg in extras.items():
-
         if arg_name in cls_params:
-
             args[arg_name] = arg
 
     return cls(**args)
@@ -209,7 +207,6 @@ def apply_transform(items: set[str], transform_config: dict) -> set[str]:
     transforms = transform_config.get("transforms", {})
 
     for _, transform in transforms.items():
-
         to_add = []
 
         for item in items:
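
The initialize_class hunk drops blank lines only, but the pattern it cleans up is worth a worked example: extras are forwarded to the constructor only when the target class's signature accepts a parameter of that name. A self-contained sketch (Greeter is an invented example class; the helper body matches the diff above):

    import inspect

    def initialize_class(cls: type, args: dict, extras: dict) -> object:
        # Forward an extra only if cls's __init__ accepts a parameter
        # with that name; anything else is silently dropped.
        cls_params = inspect.signature(cls).parameters
        for arg_name, arg in extras.items():
            if arg_name in cls_params:
                args[arg_name] = arg
        return cls(**args)

    class Greeter:
        def __init__(self, name: str, loud: bool = False):
            self.name, self.loud = name, loud

    g = initialize_class(Greeter, {"name": "deduce"}, {"loud": True, "color": "red"})
    print(g.loud)  # True; "color" was dropped, since Greeter does not accept it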

docs/emojize.py

Lines changed: 0 additions & 2 deletions
@@ -20,11 +20,9 @@ def emojize_all(s: str) -> str:
 
 
 if __name__ == "__main__":
-
     dir = argv[1]
 
     for file in glob.glob(dir + "/*.html"):
-
         with open(file, "r") as f:
             html = f.readlines()
 
