From 4e16079578377c215c7c387eaa2948252e54b108 Mon Sep 17 00:00:00 2001
From: Jordy Ruiz <56157703+jordr@users.noreply.github.com>
Date: Sun, 8 Jan 2023 12:14:46 +0100
Subject: Feature/add pragma support (#487)

* Support _Pragma, a C99 alternative to #pragma

See https://gcc.gnu.org/onlinedocs/cpp/Pragmas.html

* Test cases for _Pragma

* Add explanatory comment for _PRAGMA and PPPRAGMA
---
 pycparser/c_lexer.py   |  1 +
 pycparser/c_parser.py  | 10 +++++++++-
 tests/test_c_lexer.py  | 17 ++++++++++++++---
 tests/test_c_parser.py |  5 +++++
 4 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/pycparser/c_lexer.py b/pycparser/c_lexer.py
index d68d8eb..22c64bc 100644
--- a/pycparser/c_lexer.py
+++ b/pycparser/c_lexer.py
@@ -112,6 +112,7 @@ class CLexer(object):
         '_BOOL', '_COMPLEX',
         '_NORETURN', '_THREAD_LOCAL', '_STATIC_ASSERT',
         '_ATOMIC', '_ALIGNOF', '_ALIGNAS',
+        '_PRAGMA',
         )
 
     keyword_map = {}
diff --git a/pycparser/c_parser.py b/pycparser/c_parser.py
index c9782e0..d31574a 100644
--- a/pycparser/c_parser.py
+++ b/pycparser/c_parser.py
@@ -571,11 +571,19 @@ class CParser(PLYParser):
             self._parse_error('Directives not supported yet',
                               self._token_coord(p, 1))
 
+    # This encompasses two types of C99-compatible pragmas:
+    # - The #pragma directive:
+    #       # pragma character_sequence
+    # - The _Pragma unary operator:
+    #       _Pragma ( " string_literal " )
     def p_pppragma_directive(self, p):
         """ pppragma_directive  : PPPRAGMA
                                 | PPPRAGMA PPPRAGMASTR
+                                | _PRAGMA LPAREN unified_string_literal RPAREN
         """
-        if len(p) == 3:
+        if len(p) == 5:
+            p[0] = c_ast.Pragma(p[3], self._token_coord(p, 2))
+        elif len(p) == 3:
             p[0] = c_ast.Pragma(p[2], self._token_coord(p, 2))
         else:
             p[0] = c_ast.Pragma("", self._token_coord(p, 1))
diff --git a/tests/test_c_lexer.py b/tests/test_c_lexer.py
index 03fd838..2975b80 100644
--- a/tests/test_c_lexer.py
+++ b/tests/test_c_lexer.py
@@ -357,6 +357,7 @@ class TestCLexerNoErrors(unittest.TestCase):
         #pragma "string"
         #pragma somestring="some_other_string"
         #pragma id 124124 and numbers 0235495
+        _Pragma("something else")
         59
         '''
         # Check that pragmas are tokenized, including trailing string
@@ -389,9 +390,19 @@
         tb = self.clex.token()
         self.assertEqual(tb.type, 'PPPRAGMASTR')
 
-        t6 = self.clex.token()
-        self.assertEqual(t6.type, 'INT_CONST_DEC')
-        self.assertEqual(t6.lineno, 12)
+        t6a = self.clex.token()
+        t6l = self.clex.token()
+        t6b = self.clex.token()
+        t6r = self.clex.token()
+        self.assertEqual(t6a.type, '_PRAGMA')
+        self.assertEqual(t6l.type, 'LPAREN')
+        self.assertEqual(t6b.type, 'STRING_LITERAL')
+        self.assertEqual(t6b.value, '"something else"')
+        self.assertEqual(t6r.type, 'RPAREN')
+
+        t7 = self.clex.token()
+        self.assertEqual(t7.type, 'INT_CONST_DEC')
+        self.assertEqual(t7.lineno, 13)
diff --git a/tests/test_c_parser.py b/tests/test_c_parser.py
index 9b53622..25682ad 100755
--- a/tests/test_c_parser.py
+++ b/tests/test_c_parser.py
@@ -1736,6 +1736,7 @@ class TestCParser_fundamentals(TestCParser_base):
             struct s {
             #pragma baz
             } s;
+            _Pragma("other \"string\"")
         '''
         s1_ast = self.parse(s1)
         self.assertIsInstance(s1_ast.ext[0], Pragma)
@@ -1758,6 +1759,10 @@
         self.assertEqual(s1_ast.ext[2].type.type.decls[0].string, 'baz')
         self.assertEqual(s1_ast.ext[2].type.type.decls[0].coord.line, 13)
 
+        self.assertIsInstance(s1_ast.ext[3], Pragma)
+        self.assertEqual(s1_ast.ext[3].string.value, r'"other \"string\""')
+        self.assertEqual(s1_ast.ext[3].coord.line, 15)
+
     def test_pragmacomp_or_statement(self):
         s1 = r'''
             void main() {
--
cgit v1.2.1
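
Usage sketch (not part of the patch): one way the new grammar rule could be
exercised through pycparser's public API, assuming a pycparser checkout with
this change applied. The pragma text "pack(1)" and the variable names are
arbitrary examples chosen for illustration.

    from pycparser import c_parser, c_ast

    parser = c_parser.CParser()
    # _Pragma(...) is accepted wherever a #pragma directive is, e.g. at file scope.
    ast = parser.parse(r'_Pragma("pack(1)") int x;')

    pragma = ast.ext[0]                       # first external declaration
    assert isinstance(pragma, c_ast.Pragma)
    # As in the test_c_parser.py assertions above, the operand keeps its quotes.
    print(pragma.string.value)                # prints "pack(1)"

Note the asymmetry the new rule introduces: for a plain #pragma the Pragma
node's string field holds the raw directive text (a plain str from PPPRAGMASTR),
while for _Pragma it holds the unified_string_literal constant node, hence the
.value access above.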