@@ -248,7 +248,8 @@ def mapping(cls) -> Dict[str, Tuple[Enum, Optional[Set[Enum]]]]:
         return cls.__mapping
 
     ESCAPE_REGEX = re.compile(
-        r"(?P<t>[^\\]+)|(?P<x>\\([^xuU]|x[0-9a-f]{2}|u[0-9a-f]{4}|U[0-9a-f]{8}){0,1})", re.MULTILINE | re.DOTALL
+        r"(?P<t>[^\\]+)|(?P<x>\\(?:[\\nrt]|x[0-9A-Fa-f]{2}|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}))|(?P<e>\\(?:[^\\nrt\\xuU]|[\\xuU][^0-9a-fA-F]))",
+        re.MULTILINE | re.DOTALL,
     )
     BDD_TOKEN_REGEX = re.compile(r"^(Given|When|Then|And|But)\s", flags=re.IGNORECASE)
 
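As a sanity check (not part of the change), here is how the reworked pattern classifies a sample value: group `t` captures plain text, `x` a well-formed escape, and the new group `e` a malformed or unrecognized one.

```python
import re

# Same pattern as the new ESCAPE_REGEX above.
ESCAPE_REGEX = re.compile(
    r"(?P<t>[^\\]+)|(?P<x>\\(?:[\\nrt]|x[0-9A-Fa-f]{2}|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}))|(?P<e>\\(?:[^\\nrt\\xuU]|[\\xuU][^0-9a-fA-F]))",
    re.MULTILINE | re.DOTALL,
)

for m in ESCAPE_REGEX.finditer(r"foo\n\xAB\q bar"):
    print(m.lastgroup, repr(m.group()))
# t 'foo'
# x '\\n'     -> well-formed escapes land in group "x"
# x '\\xAB'
# e '\\q'     -> malformed/unknown escapes land in group "e"
# t ' bar'
```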
@@ -324,7 +325,7 @@ async def generate_sem_sub_tokens(
             for g in cls.ESCAPE_REGEX.finditer(token.value):
                 yield SemTokenInfo.from_token(
                     token,
-                    sem_type if g.group("x") is None or g.end() - g.start() == 1 else RobotSemTokenTypes.ESCAPE,
+                    sem_type if g.group("x") is None else RobotSemTokenTypes.ESCAPE,
                     sem_mod,
                     col_offset + g.start(),
                     g.end() - g.start(),
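Why the `g.end() - g.start() == 1` guard could go: under the old pattern, the `{0,1}` quantifier let group `x` match a bare backslash of length 1, which had to keep the surrounding token's type. The new pattern only puts complete escapes into group `x`, so the length check is redundant. A quick demonstration against the old pattern:

```python
import re

# The pattern removed in the hunk above.
OLD_ESCAPE_REGEX = re.compile(
    r"(?P<t>[^\\]+)|(?P<x>\\([^xuU]|x[0-9a-f]{2}|u[0-9a-f]{4}|U[0-9a-f]{8}){0,1})",
    re.MULTILINE | re.DOTALL,
)

m = OLD_ESCAPE_REGEX.search("\\")  # a lone trailing backslash
print(m.lastgroup, m.end() - m.start())  # x 1 -> exactly what the old length check filtered out
```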
@@ -433,7 +434,20 @@ async def generate_sem_sub_tokens(
             else:
                 yield SemTokenInfo.from_token(token, sem_type, sem_mod, col_offset + kw_index, len(kw))
         elif token.type == RobotToken.NAME and isinstance(node, (LibraryImport, ResourceImport, VariablesImport)):
-            yield SemTokenInfo.from_token(token, RobotSemTokenTypes.NAMESPACE, sem_mod, col_offset, length)
+            if "\\ " in token.value:
+                if col_offset is None:
+                    col_offset = token.col_offset
+
+                for g in cls.ESCAPE_REGEX.finditer(token.value):
+                    yield SemTokenInfo.from_token(
+                        token,
+                        RobotSemTokenTypes.NAMESPACE if g.group("x") is None else RobotSemTokenTypes.ESCAPE,
+                        sem_mod,
+                        col_offset + g.start(),
+                        g.end() - g.start(),
+                    )
+            else:
+                yield SemTokenInfo.from_token(token, RobotSemTokenTypes.NAMESPACE, sem_mod, col_offset, length)
         elif get_robot_version() >= (5, 0) and token.type == RobotToken.OPTION:
             from robot.parsing.model.statements import ExceptHeader, WhileHeader
 
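To illustrate (a sketch with a sample import name, not part of the diff): when an import name contains an escaped space, which triggers the `"\\ " in token.value` guard above, each regex match becomes its own token span, and per the conditional only group-`x` matches are re-typed as ESCAPE.

```python
import re

# Same ESCAPE_REGEX as compiled above.
ESCAPE_REGEX = re.compile(
    r"(?P<t>[^\\]+)|(?P<x>\\(?:[\\nrt]|x[0-9A-Fa-f]{2}|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8}))|(?P<e>\\(?:[^\\nrt\\xuU]|[\\xuU][^0-9a-fA-F]))",
    re.MULTILINE | re.DOTALL,
)

for m in ESCAPE_REGEX.finditer(r"my\ library.py"):
    print(m.lastgroup, repr(m.group()))
# t 'my'
# e '\\ '    -> its own span; group "x" is None, so it keeps the NAMESPACE type
# t 'library.py'
```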
@@ -479,10 +493,14 @@ async def generate_sem_tokens(
         builtin_library_doc: Optional[LibraryDoc],
     ) -> AsyncIterator[SemTokenInfo]:
         from robot.parsing.lexer.tokens import Token as RobotToken
-        from robot.parsing.model.statements import Arguments, Variable
+        from robot.parsing.model.statements import Arguments, LibraryImport, ResourceImport, Variable, VariablesImport
         from robot.utils.escaping import split_from_equals
 
-        if token.type in {RobotToken.ARGUMENT, RobotToken.TESTCASE_NAME, RobotToken.KEYWORD_NAME}:
+        if (
+            token.type in {RobotToken.ARGUMENT, RobotToken.TESTCASE_NAME, RobotToken.KEYWORD_NAME}
+            or token.type == RobotToken.NAME
+            and isinstance(node, (VariablesImport, LibraryImport, ResourceImport))
+        ):
             if (
                 isinstance(node, Variable) and token.type == RobotToken.ARGUMENT and node.name and node.name[0] == "&"
             ) or (isinstance(node, (Arguments))):
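One readability note on the widened condition: Python binds `and` tighter than `or`, so NAME tokens enter this branch only for the three import statement types.

```python
# "A or B and C" groups as "A or (B and C)", i.e. the isinstance check
# applies only to the RobotToken.NAME comparison, not to the set test.
A, B, C = False, True, False
assert (A or B and C) == (A or (B and C))
```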