##### LM #####
#
# Tokens
#

# Any single character can be a literal
lex
	# Ignore whitespace.
	ignore /[ \t\n\r\v]+/

	# Open and close id
	token id /[a-zA-Z_][a-zA-Z0-9_]*/

	token open_paren /'('/
	{
		parse_stop NC: nested_comment[ input ]
		print( 'discarding: ' NC '\n' )
	}
end

#
# Token translation
#

lex
	literal `( `)
	token nc_data /[^()]+/
end

def nc_item
	[nc_data]
|	[nested_comment]

def nested_comment
	[`( nc_item* `)]

def nested
	[id*]

#
# Accumulator.
#

context accum_bt
	NestedParser: accum

	lex
		ignore /[ \t]+/
		token word /[a-zA-Z0-9/*+_\-]+/
		token stuff /[a-zA-Z0-9()/*+_\- ]+/
		literal `! `;
		token NL /'\n'/
	end

	def A1 []
	{
		print( "A1\n" )
	}

	def A2 []
	{
		print( "A2\n" )
	}

	def item
		[word]
		{
			send NestedParser [' ']
			send NestedParser [$r1]
			send NestedParser [' ']
		}
	|	[stuff]
		{
			send NestedParser [' ']
			send NestedParser [$r1]
			send NestedParser [' ']
		}

	def two
		[A1 item* `! NL]
	|	[A2 item* `; NL]
end # accum_bt

cons AccumBt: accum_bt[]

AccumBt.NestedParser = cons parser[]

parse Two: accum_bt::two(AccumBt)[ stdin ]

send AccumBt.NestedParser [] eos

Nested: nested = AccumBt.NestedParser.tree

print( '\n------------\n' )
print( ^Nested '\n' )
print( ^Two '\n' )
##### IN #####
hello there ( (this is a nested comment /*sdf asd_++_stuff) ) and this is not ;
##### EXP #####
A1
discarding: ( (this is a nested comment /*sdf asd_++_stuff) )
A2
discarding: ( (this is a nested comment /*sdf asd_++_stuff) )

------------
hello there and this is not
hello there ( (this is a nested comment /*sdf asd_++_stuff) ) and this is not ;