parser.y in lrama-0.6.10 vs parser.y in lrama-0.6.11
- old (lrama-0.6.10)
+ new (lrama-0.6.11)
@@ -59,30 +59,30 @@
)
}
| symbol_declaration
| rule_declaration
| inline_declaration
- | "%destructor" param generic_symbol+
+ | "%destructor" param (symbol | TAG)+
{
@grammar.add_destructor(
- ident_or_tags: val[2],
+ ident_or_tags: val[2].flatten,
token_code: val[1],
lineno: val[1].line
)
}
- | "%printer" param generic_symbol+
+ | "%printer" param (symbol | TAG)+
{
@grammar.add_printer(
- ident_or_tags: val[2],
+ ident_or_tags: val[2].flatten,
token_code: val[1],
lineno: val[1].line
)
}
- | "%error-token" param generic_symbol+
+ | "%error-token" param (symbol | TAG)+
{
@grammar.add_error_token(
- ident_or_tags: val[2],
+ ident_or_tags: val[2].flatten,
token_code: val[1],
lineno: val[1].line
)
}
| "%after-shift" IDENTIFIER
@@ -113,10 +113,22 @@
hash[:tokens].each {|id|
@grammar.add_type(id: id, tag: hash[:tag])
}
}
}
+ | "%nterm" symbol_declarations
+ {
+ val[1].each {|hash|
+ hash[:tokens].each {|id|
+ if @grammar.find_term_by_s_value(id.s_value)
+ on_action_error("symbol #{id.s_value} redeclared as a nonterminal", id)
+ else
+ @grammar.add_type(id: id, tag: hash[:tag])
+ end
+ }
+ }
+ }
| "%left" token_declarations_for_precedence
{
val[1].each {|hash|
hash[:tokens].each {|id|
sym = @grammar.add_term(id: id)
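
The new %nterm alternative registers types with the same add_type call as the preceding declaration branch, but first rejects any name already declared as a terminal, reporting it through on_action_error with the message shown above. For example (token, tag, and rule names here are illustrative):

    %token NUM
    %nterm <node> expr   /* accepted: expr is a nonterminal */
    %nterm NUM           /* rejected: symbol NUM redeclared as a nonterminal */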
@@ -154,18 +166,12 @@
}
}
@precedence_number += 1
}
- token_declarations: token_declaration+
+ token_declarations: TAG? token_declaration+
{
- val[0].each {|token_declaration|
- @grammar.add_term(id: token_declaration[0], alias_name: token_declaration[2], token_id: token_declaration[1], tag: nil, replace: true)
- }
- }
- | TAG token_declaration+
- {
val[1].each {|token_declaration|
@grammar.add_term(id: token_declaration[0], alias_name: token_declaration[2], token_id: token_declaration[1], tag: val[0], replace: true)
}
}
| token_declarations TAG token_declaration+
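
token_declarations now begins with an optional TAG instead of keeping two near-duplicate alternatives. With TAG?, val[0] is nil when no tag is written, so the single action's tag: val[0] covers both the old untagged (tag: nil) and tagged cases. The declarations this rule parses look like the following sketch (tag, name, token number, and alias are illustrative):

    %token <ival> NUM 262 "number"
    %token EOF 0 "end of file"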
@@ -206,11 +212,11 @@
{
builder = val[2]
result = val[0].append(builder)
}
- rule_rhs: empty
+ rule_rhs: "%empty"?
{
reset_precs
result = Grammar::ParameterizingRule::Rhs.new
}
| rule_rhs symbol named_ref?
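
rule_rhs, the body of a user-defined parameterized rule (note the Grammar::ParameterizingRule::Rhs result), now matches an optional "%empty" token directly instead of going through the helper nonterminal empty, which is removed at the end of the diff; the same substitution appears for rhs further down. Grammar files keep both spellings of an empty alternative, as in this illustrative rule:

    opt_terminator: %empty
                  | terminator
                  ;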
@@ -248,15 +254,20 @@
builder = val[0]
builder.precedence_sym = sym
result = builder
}
- alias: # empty
- | string_as_id { result = val[0].s_value }
+ alias: string_as_id? { result = val[0].s_value if val[0] }
- symbol_declarations: symbol+ { result = [{tag: nil, tokens: val[0]}] }
- | TAG symbol+ { result = [{tag: val[0], tokens: val[1]}] }
+ symbol_declarations: TAG? symbol+
+ {
+ result = if val[0]
+ [{tag: val[0], tokens: val[1]}]
+ else
+ [{tag: nil, tokens: val[1]}]
+ end
+ }
| symbol_declarations TAG symbol+ { result = val[0].append({tag: val[1], tokens: val[2]}) }
symbol: id
| string_as_id
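
alias and symbol_declarations get the same treatment: an optional component (string_as_id?, TAG?) replaces an explicitly empty or duplicated alternative, and the single action checks whether val[0] is present. symbol_declarations is what %type (and the new %nterm) consume, as in this illustrative sketch:

    %type <node> expr primary
    %type opt_nl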
@@ -309,11 +320,11 @@
builder.line = @lexer.line - 1
end
result = val[0].append(builder)
}
- rhs: empty
+ rhs: "%empty"?
{
reset_precs
result = @grammar.create_rule_builder(@rule_counter, @midrule_action_counter)
}
| rhs symbol named_ref?
@@ -360,13 +371,19 @@
parameterizing_suffix: "?" { result = "option" }
| "+" { result = "nonempty_list" }
| "*" { result = "list" }
- parameterizing_args: symbol { result = [val[0]] }
+ parameterizing_args: symbol parameterizing_suffix?
+ {
+ result = if val[1]
+ [Lrama::Lexer::Token::InstantiateRule.new(s_value: val[1].s_value, location: @lexer.location, args: val[0])]
+ else
+ [val[0]]
+ end
+ }
| parameterizing_args ',' symbol { result = val[0].append(val[2]) }
- | symbol parameterizing_suffix { result = [Lrama::Lexer::Token::InstantiateRule.new(s_value: val[1].s_value, location: @lexer.location, args: val[0])] }
| IDENTIFIER "(" parameterizing_args ")" { result = [Lrama::Lexer::Token::InstantiateRule.new(s_value: val[0].s_value, location: @lexer.location, args: val[2])] }
midrule_action: "{"
{
if @prec_seen
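
parameterizing_args, the argument list inside a parameterized rule call, now takes an optional suffix directly on a symbol argument instead of via a separate alternative; the suffix maps to option, nonempty_list, or list as shown above and is wrapped in an InstantiateRule token. So an argument written as call+ stands for nonempty_list(call). An illustrative use (rule and symbol names are assumptions):

    calls: list(call+)   /* same as list(nonempty_list(call)) */
         ;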
@@ -401,15 +418,9 @@
value: # empty
| IDENTIFIER
| STRING
| "{...}"
-
- generic_symbol: symbol
- | TAG
-
- empty: /* empty */
- | "%empty"
string_as_id: STRING { result = Lrama::Lexer::Token::Ident.new(s_value: val[0]) }
end
---- inner