Merged

Commits (27):
906cb05  add definitions.json generation script (mvadari, Nov 14, 2024)
4474908  fix model generation (mvadari, Nov 20, 2024)
65658f8  Merge branch 'main' into definitions-generation (mvadari, Dec 11, 2024)
c58cd8e  Merge branch 'main' into definitions-generation (mvadari, Dec 20, 2024)
76897e9  fix Hash192 (mvadari, Dec 20, 2024)
738601e  Merge branch 'main' into definitions-generation (mvadari, Jan 2, 2025)
fe906c6  update script to follow server_definitions format (mvadari, Feb 6, 2025)
5ab6aad  oops wrong branch (mvadari, Feb 6, 2025)
8efdc44  add basic Github support (mvadari, Feb 6, 2025)
b31251f  pipe automatically to file (mvadari, Feb 6, 2025)
a072e49  add Github support to model generation (mvadari, Feb 6, 2025)
98c267d  add poe script (mvadari, Feb 6, 2025)
63aface  Merge branch 'main' into definitions-generation (mvadari, Feb 6, 2025)
1dda243  clean up (mvadari, Feb 6, 2025)
e6c1742  more cleanup (mvadari, Feb 7, 2025)
df302ca  Merge branch 'main' into definitions-generation (mvadari, Feb 11, 2025)
201820b  respond to comments (mvadari, Feb 12, 2025)
b059f44  remove unneeded rename (mvadari, Feb 12, 2025)
b6ba563  change wording (mvadari, Feb 12, 2025)
9cfae18  Merge branch 'main' into definitions-generation (mvadari, Feb 12, 2025)
561c388  fix wording (mvadari, Feb 12, 2025)
ee68c0a  respond to comments (mvadari, Feb 13, 2025)
be569fc  Merge branch 'main' into definitions-generation (mvadari, Feb 18, 2025)
685cbf6  Merge branch 'main' into definitions-generation (mvadari, Mar 18, 2025)
2fad0c3  update contributing (mvadari, Mar 19, 2025)
4a26858  Merge branch 'main' into definitions-generation (mvadari, Mar 19, 2025)
29b058b  Merge branch 'main' into definitions-generation (mvadari, May 6, 2025)
8 changes: 8 additions & 0 deletions pyproject.toml
@@ -83,6 +83,14 @@ lint = "poetry run flake8 xrpl tests snippets"
cmd = "python3 -m unittest ${FILE_PATHS}"
args = [{ name = "FILE_PATHS", positional = true, multiple = true }]

[tool.poe.tasks.generate]
help = "Generate the models and definitions for a new amendment"
sequence = [
{ cmd = "python3 tools/generate_definitions.py ${FILE_OR_GITHUB_PATH}" },
{ cmd = "python3 tools/generate_tx_models.py ${FILE_OR_GITHUB_PATH}" },
]
args = [{ name = "FILE_OR_GITHUB_PATH", positional = true, required = true }]

[tool.poe.tasks.test_coverage]
sequence = [
{ cmd = "coverage run -m unittest discover" },
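
With this task defined, both generators can be run in one step. For example (the paths below are illustrative, mirroring the usage strings in the script itself):

    poetry run poe generate path/to/rippled
    poetry run poe generate github.com/user/rippled/tree/feature-branch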
364 changes: 364 additions & 0 deletions tools/generate_definitions.py
@@ -0,0 +1,364 @@
"""Script to generate the definitions.json file from rippled source code."""

import os
import re
import sys

import httpx
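
# Acronyms that keep their canonical capitalization when rippled's
# SCREAMING_SNAKE_CASE names are translated into definitions.json names.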

CAPITALIZATION_EXCEPTIONS = {
"NFTOKEN": "NFToken",
"URITOKEN": "URIToken",
"URI": "URI",
"UNL": "UNL",
"XCHAIN": "XChain",
"DID": "DID",
"ID": "ID",
"AMM": "AMM",
}

if len(sys.argv) not in (2, 3):
    print(f"Usage: python {sys.argv[0]} path/to/rippled [path/to/pipe/to]")
    print(
        f"Usage: python {sys.argv[0]} "
        "github.com/user/rippled/tree/feature-branch [path/to/pipe/to]"
    )
    sys.exit(1)

########################################################################
# Get all necessary files from rippled
########################################################################


def _read_file_from_github(repo: str, filename: str) -> str:
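    # Rewrite the github.com tree URL into the equivalent
    # raw.githubusercontent.com URL and fetch the raw file contents.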
url = repo.replace("github.com", "raw.githubusercontent.com")
url = url.replace("tree", "refs/heads")
url += filename
if not url.startswith("http"):
url = "https://" + url
response = httpx.get(url)
return response.text


def _read_file(folder: str, filename: str) -> str:
with open(folder + filename, "r") as f:
return f.read()


func = _read_file_from_github if "github.com" in sys.argv[1] else _read_file

sfield_h = func(sys.argv[1], "/include/xrpl/protocol/SField.h")
sfield_macro_file = func(sys.argv[1], "/include/xrpl/protocol/detail/sfields.macro")
ledger_entries_file = func(
sys.argv[1], "/include/xrpl/protocol/detail/ledger_entries.macro"
)
ter_h = func(sys.argv[1], "/include/xrpl/protocol/TER.h")
transactions_file = func(
sys.argv[1], "/include/xrpl/protocol/detail/transactions.macro"
)


# Translate from rippled string format to what the binary codecs expect
def _translate(inp: str) -> str:
if re.match(r"^UINT", inp):
if re.search(r"256|160|128|192", inp):
return inp.replace("UINT", "Hash")
else:
return inp.replace("UINT", "UInt")
if inp == "OBJECT" or inp == "ARRAY":
return "ST" + inp[0:1].upper() + inp[1:].lower()
if inp == "ACCOUNT":
return "AccountID"
if inp == "LEDGERENTRY":
return "LedgerEntry"
if inp == "NOTPRESENT":
return "NotPresent"
if inp == "PATHSET":
return "PathSet"
if inp == "VL":
return "Blob"
if inp == "DIR_NODE":
return "DirectoryNode"
if inp == "PAYCHAN":
return "PayChannel"

parts = inp.split("_")
result = ""
for part in parts:
if part in CAPITALIZATION_EXCEPTIONS:
result += CAPITALIZATION_EXCEPTIONS[part]
else:
result += part[0:1].upper() + part[1:].lower()
return result
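
# Examples: _translate("UINT256") -> "Hash256";
# _translate("DIR_NODE") -> "DirectoryNode";
# _translate("XCHAIN_BRIDGE") -> "XChainBridge".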


output = ""


def _add_line(line: str) -> None:
global output
output += line + "\n"


# Start of the JSON document.
_add_line("{")

########################################################################
# SField processing
########################################################################
_add_line(' "FIELDS": [')

# The ones that are harder to parse directly from SField.cpp
_add_line(
""" [
"Generic",
{
"isSerialized": false,
"isSigningField": false,
"isVLEncoded": false,
"nth": 0,
"type": "Unknown"
}
],
[
"Invalid",
{
"isSerialized": false,
"isSigningField": false,
"isVLEncoded": false,
"nth": -1,
"type": "Unknown"
}
],
[
"ObjectEndMarker",
{
"isSerialized": true,
"isSigningField": true,
"isVLEncoded": false,
"nth": 1,
"type": "STObject"
}
],
[
"ArrayEndMarker",
{
"isSerialized": true,
"isSigningField": true,
"isVLEncoded": false,
"nth": 1,
"type": "STArray"
}
],
[
"taker_gets_funded",
{
"isSerialized": false,
"isSigningField": false,
"isVLEncoded": false,
"nth": 258,
"type": "Amount"
}
],
[
"taker_pays_funded",
{
"isSerialized": false,
"isSigningField": false,
"isVLEncoded": false,
"nth": 259,
"type": "Amount"
}
],"""
)
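
# SField.h defines the STI_* type codes either via a STYPE(name, value) macro
# or as plain `STI_* = value` enumerators; try the macro form first, then fall
# back to the plain enum form.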

type_hits = re.findall(
r"^ *STYPE\(STI_([^ ]*?) *, *([0-9-]+) *\) *\\?$", sfield_h, re.MULTILINE
)
if len(type_hits) == 0:
type_hits = re.findall(
r"^ *STI_([^ ]*?) *= *([0-9-]+) *,?$", sfield_h, re.MULTILINE
)
type_map = {x[0]: x[1] for x in type_hits}


def _is_vl_encoded(t: str) -> str:
    if t in ("VL", "ACCOUNT", "VECTOR256"):
        return "true"
    return "false"


def _is_serialized(t: str, name: str) -> str:
    if t in ("LEDGERENTRY", "TRANSACTION", "VALIDATION", "METADATA"):
        return "false"
    if name in ("hash", "index"):
        return "false"
    return "true"


def _is_signing_field(t: str, not_signing_field: str) -> str:
    if not_signing_field == "notSigning":
        return "false"
    if t in ("LEDGERENTRY", "TRANSACTION", "VALIDATION", "METADATA"):
        return "false"
    return "true"


# Parse sfields.macro for all the SFields and their serialization info
sfield_hits = re.findall(
r"^ *[A-Z]*TYPED_SFIELD *\( *sf([^,\n]*),[ \n]*([^, \n]+)[ \n]*,[ \n]*"
r"([0-9]+)(,.*?(notSigning))?",
sfield_macro_file,
re.MULTILINE,
)
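# `hash` and `index` are not declared via the TYPED_SFIELD macros, so append
# them manually.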
sfield_hits += [
("hash", "UINT256", "257", "", "notSigning"),
("index", "UINT256", "258", "", "notSigning"),
]
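# Sort by type code first, then by field code (nth) within each type.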
sfield_hits.sort(key=lambda x: int(type_map[x[1]]) * 2**16 + int(x[2]))
for x in range(len(sfield_hits)):
_add_line(" [")
_add_line(' "' + sfield_hits[x][0] + '",')
_add_line(" {")
_add_line(
' "isSerialized": '
+ _is_serialized(sfield_hits[x][1], sfield_hits[x][0])
+ ","
)
_add_line(
' "isSigningField": '
+ _is_signing_field(sfield_hits[x][1], sfield_hits[x][4])
+ ","
)
_add_line(' "isVLEncoded": ' + _is_vl_encoded(sfield_hits[x][1]) + ",")
_add_line(' "nth": ' + sfield_hits[x][2] + ",")
_add_line(' "type": "' + _translate(sfield_hits[x][1]) + '"')
_add_line(" }")
_add_line(" ]" + ("," if x < len(sfield_hits) - 1 else ""))

_add_line(" ],")

########################################################################
# Ledger entry type processing
########################################################################
_add_line(' "LEDGER_ENTRY_TYPES": {')


def _unhex(x: str) -> str:
    if x.startswith("0x"):
        return str(int(x, 16))
    return x


lt_hits = re.findall(
r"^ *LEDGER_ENTRY[A-Z_]*\(lt[A-Z_]+ *, *([x0-9a-f]+) *, *([^,]+), *([^,]+), \({$",
ledger_entries_file,
re.MULTILINE,
)
lt_hits.append(("-1", "Invalid"))
lt_hits.sort(key=lambda x: x[1])
for x in range(len(lt_hits)):
_add_line(
' "'
+ lt_hits[x][1]
+ '": '
+ _unhex(lt_hits[x][0])
+ ("," if x < len(lt_hits) - 1 else "")
)
_add_line(" },")

########################################################################
# TER code processing
########################################################################
_add_line(' "TRANSACTION_RESULTS": {')
ter_h = str(ter_h).replace("[[maybe_unused]]", "")

ter_code_hits = re.findall(
r"^ *((tel|tem|tef|ter|tes|tec)[A-Z_]+)( *= *([0-9-]+))? *,? *(\/\/[^\n]*)?$",
ter_h,
re.MULTILINE,
)
ter_codes = []
upto = -1

for x in range(len(ter_code_hits)):
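    # An enumerator with an explicit `= value` resets the running counter;
    # all others continue counting up from the previous code, mirroring C++
    # enum semantics.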
if ter_code_hits[x][3] != "":
upto = int(ter_code_hits[x][3])
ter_codes.append((ter_code_hits[x][0], upto))

upto += 1

ter_codes.sort(key=lambda x: x[0])
current_type = ""
for x in range(len(ter_codes)):
if current_type == "":
current_type = ter_codes[x][0][:3]
elif current_type != ter_codes[x][0][:3]:
_add_line("")
current_type = ter_codes[x][0][:3]

_add_line(
' "'
+ ter_codes[x][0]
+ '": '
+ str(ter_codes[x][1])
+ ("," if x < len(ter_codes) - 1 else "")
)

_add_line(" },")

########################################################################
# Transaction type processing
########################################################################
_add_line(' "TRANSACTION_TYPES": {')

tx_hits = re.findall(
r"^ *TRANSACTION\(tt[A-Z_]+ *,* ([0-9]+) *, *([A-Za-z]+).*$",
transactions_file,
re.MULTILINE,
)
tx_hits.append(("-1", "Invalid"))
tx_hits.sort(key=lambda x: x[1])
for x in range(len(tx_hits)):
_add_line(
' "'
+ tx_hits[x][1]
+ '": '
+ tx_hits[x][0]
+ ("," if x < len(tx_hits) - 1 else "")
)

_add_line(" },")

########################################################################
# Serialized type processing
########################################################################
_add_line(' "TYPES": {')

type_hits.append(("DONE", "-1"))
type_hits.sort(key=lambda x: _translate(x[0]))
for x in range(len(type_hits)):
_add_line(
' "'
+ _translate(type_hits[x][0])
+ '": '
+ type_hits[x][1]
+ ("," if x < len(type_hits) - 1 else "")
)

_add_line(" }")
_add_line("}")


if len(sys.argv) == 3:
output_file = sys.argv[2]
else:
output_file = os.path.join(
os.path.dirname(__file__),
"../xrpl/core/binarycodec/definitions/definitions.json",
)

with open(output_file, "w") as f:
f.write(output)
print("File written successfully to " + output_file)