# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
p.add(T.OPERAND, "")
else:
word = p.words[i]
if word in hw_registers:
p.add(T.HW_REGISTER, word)
elif ("a" == word) or ("$" in word) or ("," in word):
p.add(T.OPERAND, word)
elif word.isidentifier() or (
word[0] == "." and word[1:].isidentifier()
):
p.add(T.OPERAND_LABEL, word)
i += 1
# Comment section.
if p.words[i] != ";":
raise ParserError("Missing comment section.", p.line_n)
p.add(T.PC, p.words[i + 1])
try:
comment = p.line.split("|", maxsplit=1)[1].strip()
except IndexError:
raise ParserError("Expected | before comment.", p.line_n)
p.add_line(T.COMMENT, comment)
else:
raise ParserError("Unable to parse line.", p.line_n)
return p.tokens
check_call([*os.environ["EDITOR"].split(), filename])
new_text = open(filename).read()
os.remove(filename)
global_renames: Dict[str, str] = {}
subroutine_texts = new_text.split(self.HEADER)
# Apply all the changes local to the individual subroutines.
for i, disassembly in enumerate(self.disassemblies):
new_tokens = disassembly._text_to_tokens(subroutine_texts[i + 1])
renames = disassembly._apply_changes(original_tokens[i], new_tokens)
# Gather global renames.
for old, new in renames.items():
if global_renames.get(old, new) != new:
raise ParserError(f'Ambiguous label change: "{old}" -> "{new}".')
global_renames[old] = new
# Apply the global renames.
apply_renames(self.log, global_renames)
def add_line_rest(self, token_typ: TokenType, after: str, words_limit=1) -> None:
    """Consume the words following a fixed prefix and emit them as one token.

    Verifies that the current line's words begin with the words of ``after``;
    the remaining words are joined with single spaces and passed to
    ``add_line`` as a token of type ``token_typ``.

    Args:
        token_typ: Token type assigned to the joined remainder.
        after: Expected prefix of the line (split on whitespace).
        words_limit: Maximum number of words allowed after the prefix;
            a non-positive value disables the limit.

    Raises:
        ParserError: If the line does not start with ``after``, or the
            remainder exceeds ``words_limit`` words.
    """
    parts = after.split()
    # The line must begin with the expected prefix words.
    # (PEP 8 idiom: use `!=` rather than `not ... ==`.)
    if self.words[: len(parts)] != parts:
        raise ParserError("Unable to parse line.", self.line_n)
    after_parts = self.words[len(parts) :]
    if words_limit > 0 and len(after_parts) > words_limit:
        raise ParserError("Unable to parse line.", self.line_n)
    rest = " ".join(after_parts)
    self.add_line(token_typ, rest)
):
p.add(T.OPERAND_LABEL, word)
i += 1
# Comment section.
if p.words[i] != ";":
raise ParserError("Missing comment section.", p.line_n)
p.add(T.PC, p.words[i + 1])
try:
comment = p.line.split("|", maxsplit=1)[1].strip()
except IndexError:
raise ParserError("Expected | before comment.", p.line_n)
p.add_line(T.COMMENT, comment)
else:
raise ParserError("Unable to parse line.", p.line_n)
return p.tokens
def match_line(self, token_typ: TokenType, s: str) -> None:
    """Require the current line to match ``s`` and record a ``token_typ`` token.

    Raises:
        ParserError: If the line does not match ``s``.
    """
    if self.maybe_match_line(s):
        self.add_line(token_typ)
    else:
        raise ParserError("Unable to parse line.", self.line_n)
new_tokens: List[Token],
renamed_labels: Dict[str, str],
) -> int:
"""Compare a collection of tokens describing an instruction, with a new one
with potentially updated content. Apply changes where possible."""
for orig, new in zip_longest(original_tokens, new_tokens):
# Handle equivalent tokens.
if orig:
orig.typ = EQUIVALENT_TOKENS.get(orig.typ, orig.typ)
# Count lines.
if new and new.typ == T.NEWLINE:
line_n += 1
# Error cases.
elif (orig is None) or (new is None):
raise ParserError("Added or deleted token.", line_n)
elif orig.typ not in EDITABLE_TOKEN_TYPES and orig.typ != new.typ:
raise ParserError("Changed the type of a token.", line_n)
elif orig.typ not in EDITABLE_TOKENS and orig.val != new.val:
raise ParserError(
f'Can\'t edit token of type "{orig.typ.name}".', line_n
)
# Keep track of the PC of the instruction.
elif orig.typ == T.PC:
pc = int(orig.val[1:], 16)
# Assertion type.
elif new.typ not in (T.SUGGESTED_ASSERTION, T.SUGGESTED_ASSERTION_TYPE):
if orig.typ in (T.ASSERTION_TYPE, T.SUGGESTED_ASSERTION_TYPE):
orig_assert_type = (
"none" if orig.typ == T.SUGGESTED_ASSERTION_TYPE else orig.val
# Assertion type.
elif new.typ not in (T.SUGGESTED_ASSERTION, T.SUGGESTED_ASSERTION_TYPE):
if orig.typ in (T.ASSERTION_TYPE, T.SUGGESTED_ASSERTION_TYPE):
orig_assert_type = (
"none" if orig.typ == T.SUGGESTED_ASSERTION_TYPE else orig.val
)
new_assert_type = new.val
assertion_type_changed = orig_assert_type != new_assert_type
# Assertion.
elif orig.typ in (T.ASSERTION, T.SUGGESTED_ASSERTION):
assertion_changed = orig.val != new.val
anything_changed = assertion_type_changed or assertion_changed
state_change = StateChange.from_expr(new.val)
if anything_changed and state_change.unknown:
raise ParserError("Invalid assertion state.", line_n)
if assertion_type_changed:
if "instruction".startswith(orig_assert_type):
self.log.deassert_instruction_state_change(pc)
elif "subroutine".startswith(orig_assert_type):
self.log.deassert_subroutine_state_change(
self.subroutine.pc, pc
)
if anything_changed:
if new_assert_type == "":
continue
elif "instruction".startswith(new_assert_type):
self.log.assert_instruction_state_change(pc, state_change)
elif "subroutine".startswith(new_assert_type):
self.log.assert_subroutine_state_change(
self.subroutine, pc, state_change
)
with potentially updated content. Apply changes where possible."""
for orig, new in zip_longest(original_tokens, new_tokens):
# Handle equivalent tokens.
if orig:
orig.typ = EQUIVALENT_TOKENS.get(orig.typ, orig.typ)
# Count lines.
if new and new.typ == T.NEWLINE:
line_n += 1
# Error cases.
elif (orig is None) or (new is None):
raise ParserError("Added or deleted token.", line_n)
elif orig.typ not in EDITABLE_TOKEN_TYPES and orig.typ != new.typ:
raise ParserError("Changed the type of a token.", line_n)
elif orig.typ not in EDITABLE_TOKENS and orig.val != new.val:
raise ParserError(
f'Can\'t edit token of type "{orig.typ.name}".', line_n
)
# Keep track of the PC of the instruction.
elif orig.typ == T.PC:
pc = int(orig.val[1:], 16)
# Assertion type.
elif new.typ not in (T.SUGGESTED_ASSERTION, T.SUGGESTED_ASSERTION_TYPE):
if orig.typ in (T.ASSERTION_TYPE, T.SUGGESTED_ASSERTION_TYPE):
orig_assert_type = (
"none" if orig.typ == T.SUGGESTED_ASSERTION_TYPE else orig.val
)
new_assert_type = new.val
assertion_type_changed = orig_assert_type != new_assert_type
# Assertion.
) -> int:
"""Compare a collection of tokens describing an instruction, with a new one
with potentially updated content. Apply changes where possible."""
for orig, new in zip_longest(original_tokens, new_tokens):
# Handle equivalent tokens.
if orig:
orig.typ = EQUIVALENT_TOKENS.get(orig.typ, orig.typ)
# Count lines.
if new and new.typ == T.NEWLINE:
line_n += 1
# Error cases.
elif (orig is None) or (new is None):
raise ParserError("Added or deleted token.", line_n)
elif orig.typ not in EDITABLE_TOKEN_TYPES and orig.typ != new.typ:
raise ParserError("Changed the type of a token.", line_n)
elif orig.typ not in EDITABLE_TOKENS and orig.val != new.val:
raise ParserError(
f'Can\'t edit token of type "{orig.typ.name}".', line_n
)
# Keep track of the PC of the instruction.
elif orig.typ == T.PC:
pc = int(orig.val[1:], 16)
# Assertion type.
elif new.typ not in (T.SUGGESTED_ASSERTION, T.SUGGESTED_ASSERTION_TYPE):
if orig.typ in (T.ASSERTION_TYPE, T.SUGGESTED_ASSERTION_TYPE):
orig_assert_type = (
"none" if orig.typ == T.SUGGESTED_ASSERTION_TYPE else orig.val
)
new_assert_type = new.val
def _apply_changes(
    self, original_tokens: List[List[Token]], new_tokens: List[List[Token]]
) -> Dict[str, str]:
    """Diff the original subroutine tokens against the edited ones.

    Walks the two collections instruction by instruction, delegating each
    pair to ``_apply_instruction_changes`` (which also advances the line
    counter), then turns the locally renamed labels into global renames.

    Returns:
        The mapping of global label renames produced by
        ``apply_local_renames``.

    Raises:
        ParserError: If an instruction was added or removed in the edit.
    """
    renames: Dict[str, str] = {}
    current_line = self.base_line_n
    for old_instr, new_instr in zip_longest(original_tokens, new_tokens):
        # zip_longest pads the shorter side with None, so a None on
        # either side means an instruction was inserted or deleted.
        if old_instr is None or new_instr is None:
            raise ParserError("Added or deleted an instruction.", current_line)
        current_line = self._apply_instruction_changes(
            current_line, old_instr, new_instr, renames
        )
    return apply_local_renames(self.subroutine, renames)