
robot.parsing.lexer package — Robot Framework 7.3.3.dev1 documentation

robot.parsing.lexer package

Submodules

robot.parsing.lexer.blocklexers module
class robot.parsing.lexer.blocklexers.BlockLexer(ctx: LexingContext)[source]

Bases: Lexer, ABC

accepts_more(statement: List[Token]) bool[source]
input(statement: List[Token])[source]
lexer_for(statement: List[Token]) Lexer[source]
lexer_classes() tuple[type[Lexer], ...][source]
lex()[source]
class robot.parsing.lexer.blocklexers.FileLexer(ctx: LexingContext)[source]

Bases: BlockLexer

lex()[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.SectionLexer(ctx: LexingContext)[source]

Bases: BlockLexer, ABC

ctx: FileContext
accepts_more(statement: List[Token]) bool[source]
class robot.parsing.lexer.blocklexers.SettingSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.VariableSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.TestCaseSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.TaskSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.KeywordSectionLexer(ctx: LexingContext)[source]

Bases: SettingSectionLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]

class robot.parsing.lexer.blocklexers.CommentSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.ImplicitCommentSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.InvalidSectionLexer(ctx: LexingContext)[source]

Bases: SectionLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.TestOrKeywordLexer(ctx: LexingContext)[source]

Bases: BlockLexer, ABC

name_type: str
accepts_more(statement: List[Token]) bool[source]
input(statement: List[Token])[source]
class robot.parsing.lexer.blocklexers.TestCaseLexer(ctx: SuiteFileContext)[source]

Bases: TestOrKeywordLexer

name_type: str = 'TESTCASE NAME'
lex()[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.KeywordLexer(ctx: FileContext)[source]

Bases: TestOrKeywordLexer

name_type: str = 'KEYWORD NAME'
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.NestedBlockLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: BlockLexer, ABC

ctx: TestCaseContext | KeywordContext
accepts_more(statement: List[Token]) bool[source]
input(statement: List[Token])[source]
class robot.parsing.lexer.blocklexers.ForLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.WhileLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.TryLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.GroupLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.IfLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
class robot.parsing.lexer.blocklexers.InlineIfLexer(ctx: TestCaseContext | KeywordContext)[source]

Bases: NestedBlockLexer

handles(statement: List[Token]) bool[source]
accepts_more(statement: List[Token]) bool[source]
lexer_classes() tuple[type[Lexer], ...][source]
input(statement: List[Token])[source]
robot.parsing.lexer.context module
class robot.parsing.lexer.context.LexingContext(settings: Settings, languages: Languages)[source]

Bases: object

lex_setting(statement: List[Token])[source]
class robot.parsing.lexer.context.FileContext(lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None)[source]

Bases: LexingContext

settings: FileSettings
add_language(lang: Language | str | Path)[source]
keyword_context() KeywordContext[source]
setting_section(statement: List[Token]) bool[source]
variable_section(statement: List[Token]) bool[source]
test_case_section(statement: List[Token]) bool[source]
task_section(statement: List[Token]) bool[source]
keyword_section(statement: List[Token]) bool[source]
lex_invalid_section(statement: List[Token])[source]
class robot.parsing.lexer.context.SuiteFileContext(lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None)[source]

Bases: FileContext

settings: SuiteFileSettings
test_case_context() TestCaseContext[source]
test_case_section(statement: List[Token]) bool[source]
task_section(statement: List[Token]) bool[source]
class robot.parsing.lexer.context.ResourceFileContext(lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None)[source]

Bases: FileContext

settings: ResourceFileSettings
class robot.parsing.lexer.context.InitFileContext(lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None)[source]

Bases: FileContext

settings: InitFileSettings
class robot.parsing.lexer.context.TestCaseContext(settings: TestCaseSettings)[source]

Bases: LexingContext

settings: TestCaseSettings
property template_set: bool
class robot.parsing.lexer.context.KeywordContext(settings: KeywordSettings)[source]

Bases: LexingContext

settings: KeywordSettings
property template_set: bool
robot.parsing.lexer.lexer module
robot.parsing.lexer.lexer.get_tokens(source: Path | str | TextIO, data_only: bool = False, tokenize_variables: bool = False, lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None) Iterator[Token][source]

Parses the given source to tokens.

Parameters:
  • source – The source to read the data from. Can be a path to a source file as a string or as a pathlib.Path object, an already opened file object, or Unicode text containing the data directly. Source files must be UTF-8 encoded.

  • data_only – When False (default), returns all tokens. When set to True, omits separators, comments, continuation markers, and other non-data tokens.

  • tokenize_variables – When True, possible variables in keyword arguments and elsewhere are tokenized. See the tokenize_variables() method for details.

  • lang – Additional languages to be supported during parsing. Can be a string matching any of the supported language codes or names, an initialized Language subclass, a list containing such strings or instances, or a Languages instance.

Returns a generator that yields Token instances.
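
As a usage illustration, here is a minimal sketch that tokenizes data given directly as text. It assumes the public entry point exported via robot.api; the exact tokens produced depend on the Robot Framework version.

from robot.api import get_tokens

# Data can be given directly as text; a file path or an open file
# object would work as the source just as well.
data = """\
*** Test Cases ***
Example
    Log    Hello, world!
"""

# data_only=True omits separators, comments, and other non-data tokens.
for token in get_tokens(data, data_only=True):
    print(token.lineno, token.col_offset, token.type, repr(token.value))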

robot.parsing.lexer.lexer.get_resource_tokens(source: Path | str | TextIO, data_only: bool = False, tokenize_variables: bool = False, lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None) Iterator[Token][source]

Parses the given source to resource file tokens.

Same as get_tokens() otherwise, but the source is considered to be a resource file. This affects, for example, what settings are valid.

robot.parsing.lexer.lexer.get_init_tokens(source: Path | str | TextIO, data_only: bool = False, tokenize_variables: bool = False, lang: Languages | Language | str | Path | Iterable[Language | str | Path] | None = None) Iterator[Token][source]

Parses the given source to init file tokens.

Same as get_tokens() otherwise, but the source is considered to be a suite initialization file. This affects, for example, what settings are valid.
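
A corresponding sketch for resource files; get_init_tokens is used the same way for suite initialization files. Again the robot.api entry point is assumed.

from robot.api import get_resource_tokens

# Resource files cannot contain test cases and accept different
# settings, which is why a separate entry point exists.
resource = """\
*** Keywords ***
Greet
    [Arguments]    ${name}
    Log    Hello, ${name}!
"""

for token in get_resource_tokens(resource, data_only=True):
    print(token.type, repr(token.value))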

class robot.parsing.lexer.lexer.Lexer(ctx: LexingContext, data_only: bool = False, tokenize_variables: bool = False)[source]

Bases: object

input(source: Path | str | TextIO)[source]
get_tokens() Iterator[Token][source]
robot.parsing.lexer.settings module
class robot.parsing.lexer.settings.Settings(languages: Languages)[source]

Bases: ABC

names: tuple[str, ...] = ()
aliases: dict[str, str] = {}
multi_use = ('Metadata', 'Library', 'Resource', 'Variables')
single_value = ('Resource', 'Test Timeout', 'Test Template', 'Timeout', 'Template', 'Name')
name_and_arguments = ('Metadata', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Setup', 'Teardown', 'Template', 'Resource', 'Variables')
name_arguments_and_with_name = ('Library',)
lex(statement: List[Token])[source]
class robot.parsing.lexer.settings.FileSettings(languages: Languages)[source]

Bases: Settings, ABC

class robot.parsing.lexer.settings.SuiteFileSettings(languages: Languages)[source]

Bases: FileSettings

names: tuple[str, ...] = ('Documentation', 'Metadata', 'Name', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Template', 'Test Timeout', 'Test Tags', 'Default Tags', 'Keyword Tags', 'Library', 'Resource', 'Variables')
aliases: dict[str, str] = {'Force Tags': 'Test Tags', 'Task Setup': 'Test Setup', 'Task Tags': 'Test Tags', 'Task Teardown': 'Test Teardown', 'Task Template': 'Test Template', 'Task Timeout': 'Test Timeout'}
class robot.parsing.lexer.settings.InitFileSettings(languages: Languages)[source]

Bases: FileSettings

names: tuple[str, ...] = ('Documentation', 'Metadata', 'Name', 'Suite Setup', 'Suite Teardown', 'Test Setup', 'Test Teardown', 'Test Timeout', 'Test Tags', 'Keyword Tags', 'Library', 'Resource', 'Variables')
aliases: dict[str, str] = {'Force Tags': 'Test Tags', 'Task Setup': 'Test Setup', 'Task Tags': 'Test Tags', 'Task Teardown': 'Test Teardown', 'Task Timeout': 'Test Timeout'}
class robot.parsing.lexer.settings.ResourceFileSettings(languages: Languages)[source]

Bases: FileSettings

names: tuple[str, ...] = ('Documentation', 'Keyword Tags', 'Library', 'Resource', 'Variables')
class robot.parsing.lexer.settings.TestCaseSettings(parent: SuiteFileSettings)[source]

Bases: Settings

names: tuple[str, ...] = ('Documentation', 'Tags', 'Setup', 'Teardown', 'Template', 'Timeout')
property template_set: bool
class robot.parsing.lexer.settings.KeywordSettings(parent: FileSettings)[source]

Bases: Settings

names: tuple[str, ...] = ('Documentation', 'Arguments', 'Setup', 'Teardown', 'Timeout', 'Tags', 'Return')
robot.parsing.lexer.statementlexers module
class robot.parsing.lexer.statementlexers.Lexer(ctx: LexingContext)[source]

Bases: ABC

handles(statement: List[Token]) bool[source]
abstractmethod accepts_more(statement: List[Token]) bool[source]
abstractmethod input(statement: List[Token])[source]
abstractmethod lex()[source]
class robot.parsing.lexer.statementlexers.StatementLexer(ctx: LexingContext)[source]

Bases: Lexer, ABC

token_type: str
accepts_more(statement: List[Token]) bool[source]
input(statement: List[Token])[source]
abstractmethod lex()[source]
class robot.parsing.lexer.statementlexers.SingleType(ctx: LexingContext)[source]

Bases: StatementLexer, ABC

lex()[source]
class robot.parsing.lexer.statementlexers.TypeAndArguments(ctx: LexingContext)[source]

Bases: StatementLexer, ABC

lex()[source]

class robot.parsing.lexer.statementlexers.SectionHeaderLexer(ctx: LexingContext)[source]

Bases: SingleType, ABC

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.SettingSectionHeaderLexer(ctx: LexingContext)[source]

Bases: SectionHeaderLexer

class robot.parsing.lexer.statementlexers.VariableSectionHeaderLexer(ctx: LexingContext)[source]

Bases: SectionHeaderLexer

class robot.parsing.lexer.statementlexers.TestCaseSectionHeaderLexer(ctx: LexingContext)[source]

Bases: SectionHeaderLexer

class robot.parsing.lexer.statementlexers.TaskSectionHeaderLexer(ctx: LexingContext)[source]

Bases: SectionHeaderLexer

class robot.parsing.lexer.statementlexers.KeywordSectionHeaderLexer(ctx: LexingContext)[source]

Bases: SectionHeaderLexer

class robot.parsing.lexer.statementlexers.CommentSectionHeaderLexer(ctx: LexingContext)[source]

Bases: SectionHeaderLexer

class robot.parsing.lexer.statementlexers.InvalidSectionHeaderLexer(ctx: LexingContext)[source]

Bases: SectionHeaderLexer

class robot.parsing.lexer.statementlexers.CommentLexer(ctx: LexingContext)[source]

Bases: SingleType

class robot.parsing.lexer.statementlexers.ImplicitCommentLexer(ctx: LexingContext)[source]

Bases: CommentLexer

class robot.parsing.lexer.statementlexers.SettingLexer(ctx: LexingContext)[source]

Bases: StatementLexer

ctx: FileContext
lex()[source]
class robot.parsing.lexer.statementlexers.TestCaseSettingLexer(ctx: LexingContext)[source]

Bases: StatementLexer

ctx: TestCaseContext
lex()[source]
handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.KeywordSettingLexer(ctx: LexingContext)[source]

Bases: StatementLexer

ctx: KeywordContext
lex()[source]
handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.VariableLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

ctx: FileContext
token_type: str = 'VARIABLE'
lex()[source]
class robot.parsing.lexer.statementlexers.KeywordCallLexer(ctx: LexingContext)[source]

Bases: StatementLexer

ctx: TestCaseContext | KeywordContext
lex()[source]

class robot.parsing.lexer.statementlexers.ForHeaderLexer(ctx: LexingContext)[source]

Bases: StatementLexer

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.IfHeaderLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.InlineIfHeaderLexer(ctx: LexingContext)[source]

Bases: StatementLexer

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.ElseIfHeaderLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.ElseHeaderLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.TryHeaderLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.ExceptHeaderLexer(ctx: LexingContext)[source]

Bases: StatementLexer

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.FinallyHeaderLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.WhileHeaderLexer(ctx: LexingContext)[source]

Bases: StatementLexer

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.GroupHeaderLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.EndLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

token_type: str = 'END'
handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.VarLexer(ctx: LexingContext)[source]

Bases: StatementLexer

token_type: str = 'VAR'
handles(statement: List[Token]) bool[source]
lex()[source]
class robot.parsing.lexer.statementlexers.ReturnLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

token_type: str = 'RETURN STATEMENT'
handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.ContinueLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

token_type: str = 'CONTINUE'
handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.BreakLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

token_type: str = 'BREAK'
handles(statement: List[Token]) bool[source]
class robot.parsing.lexer.statementlexers.SyntaxErrorLexer(ctx: LexingContext)[source]

Bases: TypeAndArguments

token_type: str = 'ERROR'
handles(statement: List[Token]) bool[source]
lex()[source]
robot.parsing.lexer.tokenizer module
class robot.parsing.lexer.tokenizer.Tokenizer[source]

Bases: object

tokenize(data: str, data_only: bool = False) Iterator[list[Token]][source]
robot.parsing.lexer.tokens module
class robot.parsing.lexer.tokens.Token(type: str | None = None, value: str | None = None, lineno: int = -1, col_offset: int = -1, error: str | None = None)[source]

Bases: object

Token representing a piece of Robot Framework data.

Each token has a type, value, line number, column offset and end column offset in the type, value, lineno, col_offset and end_col_offset attributes, respectively. Tokens representing errors also have their error message in the error attribute.

Token types are declared as class attributes such as SETTING_HEADER and EOL. The values of these constants have changed slightly in Robot Framework 4.0, and they may change again in the future. It is thus safer to use the constants, not their values, when types are needed. For example, use Token(Token.EOL) instead of Token('EOL') and token.type == Token.EOL instead of token.type == 'EOL'.

If value is not given and type is a special marker like IF or EOL, the value is set automatically.
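
A small sketch illustrating the recommendations above. The Token class is assumed to be importable from robot.api, and the automatically assigned EOL value is assumed to be a newline.

from robot.api import Token

# Prefer the constant over its string value.
eol = Token(Token.EOL)        # value is filled in automatically
print(repr(eol.value))        # assumed: '\n'

kw = Token(Token.KEYWORD, 'Log', lineno=3, col_offset=4)
assert kw.type == Token.KEYWORD   # preferred over kw.type == 'KEYWORD'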

TESTCASE_NAME = 'TESTCASE NAME'
KEYWORD_NAME = 'KEYWORD NAME'
SUITE_NAME = 'SUITE NAME'
DOCUMENTATION = 'DOCUMENTATION'
SUITE_SETUP = 'SUITE SETUP'
SUITE_TEARDOWN = 'SUITE TEARDOWN'
METADATA = 'METADATA'
TEST_SETUP = 'TEST SETUP'
TEST_TEARDOWN = 'TEST TEARDOWN'
TEST_TEMPLATE = 'TEST TEMPLATE'
TEST_TIMEOUT = 'TEST TIMEOUT'
TEST_TAGS = 'TEST TAGS'
FORCE_TAGS = 'TEST TAGS'
DEFAULT_TAGS = 'DEFAULT TAGS'
KEYWORD_TAGS = 'KEYWORD TAGS'
LIBRARY = 'LIBRARY'
RESOURCE = 'RESOURCE'
VARIABLES = 'VARIABLES'
SETUP = 'SETUP'
TEARDOWN = 'TEARDOWN'
TEMPLATE = 'TEMPLATE'
TIMEOUT = 'TIMEOUT'
TAGS = 'TAGS'
ARGUMENTS = 'ARGUMENTS'
RETURN = 'RETURN'
RETURN_SETTING = 'RETURN'
AS = 'AS'
WITH_NAME = 'AS'
NAME = 'NAME'
VARIABLE = 'VARIABLE'
ARGUMENT = 'ARGUMENT'
ASSIGN = 'ASSIGN'
KEYWORD = 'KEYWORD'
FOR = 'FOR'
FOR_SEPARATOR = 'FOR SEPARATOR'
END = 'END'
IF = 'IF'
INLINE_IF = 'INLINE IF'
ELSE_IF = 'ELSE IF'
ELSE = 'ELSE'
TRY = 'TRY'
EXCEPT = 'EXCEPT'
FINALLY = 'FINALLY'
WHILE = 'WHILE'
VAR = 'VAR'
RETURN_STATEMENT = 'RETURN STATEMENT'
CONTINUE = 'CONTINUE'
BREAK = 'BREAK'
OPTION = 'OPTION'
GROUP = 'GROUP'
SEPARATOR = 'SEPARATOR'
CONTINUATION = 'CONTINUATION'
CONFIG = 'CONFIG'
EOL = 'EOL'
EOS = 'EOS'
ERROR = 'ERROR'
FATAL_ERROR = 'FATAL ERROR'
NON_DATA_TOKENS = {'COMMENT', 'CONTINUATION', 'EOL', 'EOS', 'SEPARATOR'}
SETTING_TOKENS = {'ARGUMENTS', 'DEFAULT TAGS', 'DOCUMENTATION', 'KEYWORD TAGS', 'LIBRARY', 'METADATA', 'RESOURCE', 'RETURN', 'SETUP', 'SUITE NAME', 'SUITE SETUP', 'SUITE TEARDOWN', 'TAGS', 'TEARDOWN', 'TEMPLATE', 'TEST SETUP', 'TEST TAGS', 'TEST TEARDOWN', 'TEST TEMPLATE', 'TEST TIMEOUT', 'TIMEOUT', 'VARIABLES'}
ALLOW_VARIABLES = {'ARGUMENT', 'KEYWORD NAME', 'NAME', 'TESTCASE NAME'}
type
value
lineno
col_offset
error
property end_col_offset: int
set_error(error: str)[source]
tokenize_variables() Iterator[Token][source]

Tokenizes possible variables in the token value.

Yields the token itself if the token does not allow variables (see Token.ALLOW_VARIABLES) or its value does not contain variables. Otherwise, yields variable tokens as well as tokens before, after, or between variables so that they have the same type as the original token.
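
For example, a hedged sketch; the token boundaries shown in the comment are an expectation based on the description above, not verified output.

from robot.api import Token

arg = Token(Token.ARGUMENT, 'Hello, ${name}!', lineno=1, col_offset=4)
for part in arg.tokenize_variables():
    print(part.type, repr(part.value))
# Expected along the lines of:
#   ARGUMENT 'Hello, '
#   VARIABLE '${name}'
#   ARGUMENT '!'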

class robot.parsing.lexer.tokens.EOS(lineno: int = -1, col_offset: int = -1)[source]

Bases: Token

Token representing the end of a statement.

classmethod from_token(token: Token, before: bool = False) EOS[source]
class robot.parsing.lexer.tokens.END(lineno: int = -1, col_offset: int = -1, virtual: bool = False)[source]

Bases: Token

Token representing the END token used to signify the end of a block.

Virtual END tokens have '' (the empty string) as their value; "real" END tokens have the value 'END'.

classmethod from_token(token: Token, virtual: bool = False) END[source]
